Date:	Sat, 16 Aug 2008 14:27:31 +0200
From:	Jarek Poplawski <jarkao2@...il.com>
To:	Denys Fedoryshchenko <denys@...p.net.lb>
Cc:	netdev@...r.kernel.org
Subject: Re: panic 2.6.27-rc3-git2, qdisc_dequeue_head

On Sat, Aug 16, 2008 at 01:05:46PM +0300, Denys Fedoryshchenko wrote:
> Sorry, one more without wrapping

So this patch proved without wrapping that I'm a moron!

Here is take 3, which I hope is more useful.

Sorry,
Jarek P.

---

 include/linux/skbuff.h  |   44 ++++++++++++++++++++++++++++++++------------
 net/sched/sch_generic.c |    5 ++++-
 2 files changed, 36 insertions(+), 13 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 358661c..506142e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -120,6 +120,7 @@ struct sk_buff_head {
 
 	__u32		qlen;
 	spinlock_t	lock;
+	spinlock_t	lock_debug;
 };
 
 struct sk_buff;
@@ -657,6 +658,7 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
 static inline void skb_queue_head_init(struct sk_buff_head *list)
 {
 	spin_lock_init(&list->lock);
+	spin_lock_init(&list->lock_debug);
 	list->prev = list->next = (struct sk_buff *)list;
 	list->qlen = 0;
 }
@@ -679,10 +681,16 @@ static inline void __skb_insert(struct sk_buff *newsk,
 				struct sk_buff *prev, struct sk_buff *next,
 				struct sk_buff_head *list)
 {
-	newsk->next = next;
-	newsk->prev = prev;
-	next->prev  = prev->next = newsk;
-	list->qlen++;
+	if (spin_trylock(&list->lock_debug)) {
+		newsk->next = next;
+		newsk->prev = prev;
+		next->prev  = prev->next = newsk;
+		list->qlen++;
+		spin_unlock(&list->lock_debug);
+	} else {
+		kfree_skb(newsk);
+		WARN_ON(1);
+	}
 }
 
 /**
@@ -775,10 +783,16 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 {
-	struct sk_buff *skb = skb_peek(list);
-	if (skb)
-		__skb_unlink(skb, list);
-	return skb;
+	if (spin_trylock(&list->lock_debug)) {
+		struct sk_buff *skb = skb_peek(list);
+		if (skb)
+			__skb_unlink(skb, list);
+		spin_unlock(&list->lock_debug);
+		return skb;
+	} else {
+		WARN_ON(1);
+		return NULL;
+	}
 }
 
 /**
@@ -792,10 +806,16 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
 {
-	struct sk_buff *skb = skb_peek_tail(list);
-	if (skb)
-		__skb_unlink(skb, list);
-	return skb;
+	if (spin_trylock(&list->lock_debug)) {
+		struct sk_buff *skb = skb_peek_tail(list);
+		if (skb)
+			__skb_unlink(skb, list);
+		spin_unlock(&list->lock_debug);
+		return skb;
+	} else {
+		WARN_ON(1);
+		return NULL;
+	}
 }
 
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4685746..065a8b0 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -525,6 +525,7 @@ static void __qdisc_destroy(struct rcu_head *head)
 {
 	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
 	const struct Qdisc_ops  *ops = qdisc->ops;
+	int p;
 
 #ifdef CONFIG_NET_SCHED
 	qdisc_put_stab(qdisc->stab);
@@ -540,7 +541,9 @@ static void __qdisc_destroy(struct rcu_head *head)
 
 	kfree_skb(qdisc->gso_skb);
 
-	kfree((char *) qdisc - qdisc->padded);
+	p = qdisc->padded;
+	memset(qdisc, 0xf0, sizeof(*qdisc));
+	kfree((char *) qdisc - p);
 }
 
 /* Under qdisc_lock(qdisc) and BH! */
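
For anyone reading along: the skbuff.h hunks are purely diagnostic. The
lockless __skb_insert()/__skb_dequeue() helpers are only legal when the
caller already serializes the queue, so a second spinlock that is only
ever taken with spin_trylock() fails exactly when two contexts race into
a helper without that serialization, and we WARN instead of corrupting
the list. Roughly the same check can be sketched in user space with
pthreads; struct dbg_queue and guarded_push() below are made up for the
illustration and are not kernel code:

/* Sketch of the trylock-based "am I really serialized?" check.
 * Kernel analogue: spin_trylock(&list->lock_debug) in __skb_insert().
 * Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct dbg_queue {
	int		items[64];
	int		len;
	pthread_mutex_t	lock;		/* the "real" lock callers must hold */
	pthread_mutex_t	lock_debug;	/* only ever trylock'ed, never waited on */
};

/* Caller is supposed to hold q->lock; lock_debug only verifies that. */
static int guarded_push(struct dbg_queue *q, int v)
{
	if (pthread_mutex_trylock(&q->lock_debug) != 0) {
		/* Two contexts are inside the "atomic" section at once:
		 * the locking contract was broken.  The kernel version
		 * does WARN_ON(1) and drops the skb instead. */
		fprintf(stderr, "queue touched without serialization!\n");
		return -1;
	}
	if (q->len < 64)
		q->items[q->len++] = v;
	pthread_mutex_unlock(&q->lock_debug);
	return 0;
}

int main(void)
{
	struct dbg_queue q = { .len = 0 };

	pthread_mutex_init(&q.lock, NULL);
	pthread_mutex_init(&q.lock_debug, NULL);

	pthread_mutex_lock(&q.lock);	/* correct usage: real lock held */
	guarded_push(&q, 42);
	pthread_mutex_unlock(&q.lock);

	printf("qlen = %d\n", q.len);
	return 0;
}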
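
The sch_generic.c change is the usual poison-before-free trick: overwrite
the object with a recognizable byte (0xf0) right before kfree(), so a
later use-after-free of the qdisc shows up as 0xf0f0... garbage in the
oops rather than as plausible-looking stale pointers. A minimal
user-space illustration of the same idea follows; struct qdisc_like and
the zero padding offset are mine, for the sketch only:

/* Sketch of poison-before-free, as done to struct Qdisc above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define POISON_BYTE	0xf0	/* same byte the patch writes over the qdisc */

struct qdisc_like {
	void	*ops;
	int	padded;		/* offset back to the real allocation start */
	char	payload[32];
};

static void destroy(struct qdisc_like *q)
{
	int p = q->padded;			/* save before poisoning */

	memset(q, POISON_BYTE, sizeof(*q));	/* stale users now see 0xf0f0... */
	free((char *)q - p);			/* p is 0 in this sketch */
}

int main(void)
{
	struct qdisc_like *q = calloc(1, sizeof(*q));

	if (!q)
		return 1;
	destroy(q);
	/* Any buggy dereference of q->ops after this point reads
	 * 0xf0f0f0f0f0f0f0f0 instead of a believable pointer, which is
	 * exactly what makes the use-after-free visible in a crash dump. */
	return 0;
}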
