Message-Id: <20170313083352.613621959@linuxfoundation.org>
Date: Mon, 13 Mar 2017 16:39:01 +0800
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Jiri Slaby <jslaby@...e.cz>,
Dmitry Vyukov <dvyukov@...gle.com>
Subject: [PATCH 4.4 01/36] TTY: n_hdlc, fix lockdep false positive
4.4-stable review patch. If anyone has any objections, please let me know.
------------------
From: Jiri Slaby <jslaby@...e.cz>
commit e9b736d88af1a143530565929390cadf036dc799 upstream.
The class of the four n_hdlc buffer-list locks is the same because a single
function, n_hdlc_buf_list_init, is used to initialize all of them. But since
flush_tx_queue takes n_hdlc->tx_buf_list.spinlock and then calls
n_hdlc_buf_put, which takes n_hdlc->tx_free_buf_list.spinlock, lockdep
emits a warning:
=============================================
[ INFO: possible recursive locking detected ]
4.3.0-25.g91e30a7-default #1 Not tainted
---------------------------------------------
a.out/1248 is trying to acquire lock:
(&(&list->spinlock)->rlock){......}, at: [<ffffffffa01fd020>] n_hdlc_buf_put+0x20/0x60 [n_hdlc]
but task is already holding lock:
(&(&list->spinlock)->rlock){......}, at: [<ffffffffa01fdc07>] n_hdlc_tty_ioctl+0x127/0x1d0 [n_hdlc]
other info that might help us debug this:
Possible unsafe locking scenario:
CPU0
----
lock(&(&list->spinlock)->rlock);
lock(&(&list->spinlock)->rlock);
*** DEADLOCK ***
May be due to missing lock nesting notation
2 locks held by a.out/1248:
#0: (&tty->ldisc_sem){++++++}, at: [<ffffffff814c9eb0>] tty_ldisc_ref_wait+0x20/0x50
#1: (&(&list->spinlock)->rlock){......}, at: [<ffffffffa01fdc07>] n_hdlc_tty_ioctl+0x127/0x1d0 [n_hdlc]
...
Call Trace:
...
[<ffffffff81738fd0>] _raw_spin_lock_irqsave+0x50/0x70
[<ffffffffa01fd020>] n_hdlc_buf_put+0x20/0x60 [n_hdlc]
[<ffffffffa01fdc24>] n_hdlc_tty_ioctl+0x144/0x1d0 [n_hdlc]
[<ffffffff814c25c1>] tty_ioctl+0x3f1/0xe40
...
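The false positive comes from how lockdep assigns lock classes: spin_lock_init()
keys the class to its call site, so routing all four lists through one init
helper gives their spinlocks a single class, and taking one list's lock while
holding another's then looks like recursion on the same lock. A minimal,
simplified sketch of that pattern (structure contents, function bodies and the
flush_tx_queue() signature are condensed here and are not the driver's exact
code):

#include <linux/spinlock.h>

struct n_hdlc_buf;		/* payload details not needed for the sketch */

struct n_hdlc_buf_list {
	spinlock_t	spinlock;
	/* list head and count elided */
};

struct n_hdlc {
	struct n_hdlc_buf_list	tx_buf_list;
	struct n_hdlc_buf_list	tx_free_buf_list;
	/* other fields elided */
};

/* One helper means one spin_lock_init() call site, so lockdep puts every
 * list's spinlock into the same lock class. */
static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list)
{
	spin_lock_init(&list->spinlock);
}

static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
			   struct n_hdlc_buf *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&list->spinlock, flags);
	/* queue buf on list (elided) */
	spin_unlock_irqrestore(&list->spinlock, flags);
}

static void flush_tx_queue(struct n_hdlc *n_hdlc, struct n_hdlc_buf *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
	/* tx_free_buf_list.spinlock is a different lock, but it shares the
	 * class of tx_buf_list.spinlock, so lockdep sees lock(class A)
	 * inside lock(class A) and reports possible recursive locking. */
	n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
	spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
}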
Fix it by initializing the spinlocks separately. This also removes a
redundant memset of freshly kzalloc'ed memory.
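For reference, the "missing lock nesting notation" hint in the splat points at
the other way such a report can be silenced: keep the shared class but annotate
the inner acquisition as an intentional one-level nesting. That is not what
this patch does; a hypothetical sketch, reusing the types from the sketch above
(n_hdlc_buf_put_nested is an invented name), would look like:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical alternative: tell lockdep that taking a second lock of the
 * same class here is deliberate, instead of splitting the classes. */
static void n_hdlc_buf_put_nested(struct n_hdlc_buf_list *list,
				  struct n_hdlc_buf *buf)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&list->spinlock, flags, SINGLE_DEPTH_NESTING);
	/* queue buf on list (elided) */
	spin_unlock_irqrestore(&list->spinlock, flags);
}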
Signed-off-by: Jiri Slaby <jslaby@...e.cz>
Reported-by: Dmitry Vyukov <dvyukov@...gle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
drivers/tty/n_hdlc.c | 19 ++++---------------
1 file changed, 4 insertions(+), 15 deletions(-)
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -159,7 +159,6 @@ struct n_hdlc {
 /*
  * HDLC buffer list manipulation functions
  */
-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list);
 static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
 			   struct n_hdlc_buf *buf);
 static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
@@ -853,10 +852,10 @@ static struct n_hdlc *n_hdlc_alloc(void)
 	if (!n_hdlc)
 		return NULL;
 
-	n_hdlc_buf_list_init(&n_hdlc->rx_free_buf_list);
-	n_hdlc_buf_list_init(&n_hdlc->tx_free_buf_list);
-	n_hdlc_buf_list_init(&n_hdlc->rx_buf_list);
-	n_hdlc_buf_list_init(&n_hdlc->tx_buf_list);
+	spin_lock_init(&n_hdlc->rx_free_buf_list.spinlock);
+	spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
+	spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
+	spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
 
 	/* allocate free rx buffer list */
 	for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
@@ -885,16 +884,6 @@ static struct n_hdlc *n_hdlc_alloc(void)
 } /* end of n_hdlc_alloc() */
 
 /**
- * n_hdlc_buf_list_init - initialize specified HDLC buffer list
- * @list - pointer to buffer list
- */
-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list)
-{
-	memset(list, 0, sizeof(*list));
-	spin_lock_init(&list->spinlock);
-}	/* end of n_hdlc_buf_list_init() */
-
-/**
  * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
  * @list - pointer to buffer list
  * @buf - pointer to buffer