Message-ID: <20091118141700.3346.53298.stgit@localhost.localdomain>
Date: Wed, 18 Nov 2009 14:17:10 +0000
From: Alan Cox <alan@...ux.intel.com>
To: greg@...ah.com, linux-kernel@...r.kernel.org
Subject: [PATCH 12/12] moxa: split open lock
moxa_openlock is used for several situations: ioctls that cross many ports
(not just the open tty), and the case where an open races a deinit (e.g. a
PCI unplug) and we hang up a port before we can cope with that.

The cases that do not involve the open/deinit race can use the moxa_lock
spinlock instead. This simplifies sorting out the remaining mess.
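
For reference, the pattern the ioctl paths move to looks roughly like the
sketch below. It is an illustration only, not code from the driver:
moxa_get_port_queues() is a made-up helper. The point is that the spinlock
covers only the hardware state reads and is dropped before copy_to_user(),
which can fault and sleep and therefore must never run under a spinlock:

static int moxa_get_port_queues(struct moxa_board_conf *brd,
		struct moxa_port *p, struct moxaq_str __user *u)
{
	struct moxaq_str tmp;		/* type used by MOXA_GET_IOQUEUE */

	memset(&tmp, 0, sizeof(tmp));
	spin_lock_bh(&moxa_lock);	/* brief hold: board/port state only */
	if (brd->ready) {
		tmp.inq = MoxaPortRxQueue(p);
		tmp.outq = MoxaPortTxQueue(p);
	}
	spin_unlock_bh(&moxa_lock);	/* drop before touching user memory */

	if (copy_to_user(u, &tmp, sizeof(tmp)))	/* may sleep on a fault */
		return -EFAULT;
	return 0;
}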
Signed-off-by: Alan Cox <alan@...ux.intel.com>
---
 drivers/char/moxa.c |   23 +++++++++++------------
 1 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index f8e673a..16b8aed 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -163,6 +163,7 @@ static struct mon_str moxaLog;
 static unsigned int moxaFuncTout = HZ / 2;
 static unsigned int moxaLowWaterChk;
 static DEFINE_MUTEX(moxa_openlock);
+static DEFINE_SPINLOCK(moxa_lock);
 /* Variables for insmod */
 #ifdef MODULE
 static unsigned long baseaddr[MAX_BOARDS];
@@ -313,22 +314,20 @@ static int moxa_ioctl(struct tty_struct *tty, struct file *file,
 		struct moxa_port *p;
 		unsigned int i, j;
 
-		mutex_lock(&moxa_openlock);
 		for (i = 0; i < MAX_BOARDS; i++) {
 			p = moxa_boards[i].ports;
 			for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) {
 				memset(&tmp, 0, sizeof(tmp));
+				spin_lock_bh(&moxa_lock);
 				if (moxa_boards[i].ready) {
 					tmp.inq = MoxaPortRxQueue(p);
 					tmp.outq = MoxaPortTxQueue(p);
 				}
-				if (copy_to_user(argm, &tmp, sizeof(tmp))) {
-					mutex_unlock(&moxa_openlock);
+				spin_unlock_bh(&moxa_lock);
+				if (copy_to_user(argm, &tmp, sizeof(tmp)))
 					return -EFAULT;
-				}
 			}
 		}
-		mutex_unlock(&moxa_openlock);
 		break;
 	} case MOXA_GET_OQUEUE:
 		status = MoxaPortTxQueue(ch);
@@ -344,16 +343,20 @@ static int moxa_ioctl(struct tty_struct *tty, struct file *file,
 		struct moxa_port *p;
 		unsigned int i, j;
 
-		mutex_lock(&moxa_openlock);
 		for (i = 0; i < MAX_BOARDS; i++) {
 			p = moxa_boards[i].ports;
 			for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) {
 				struct tty_struct *ttyp;
 				memset(&tmp, 0, sizeof(tmp));
-				if (!moxa_boards[i].ready)
+				spin_lock_bh(&moxa_lock);
+				if (!moxa_boards[i].ready) {
+					spin_unlock_bh(&moxa_lock);
 					goto copy;
+				}
 
 				status = MoxaPortLineStatus(p);
+				spin_unlock_bh(&moxa_lock);
+
 				if (status & 1)
 					tmp.cts = 1;
 				if (status & 2)
@@ -368,13 +371,10 @@ static int moxa_ioctl(struct tty_struct *tty, struct file *file,
 					tmp.cflag = ttyp->termios->c_cflag;
 				tty_kref_put(tty);
 copy:
-				if (copy_to_user(argm, &tmp, sizeof(tmp))) {
-					mutex_unlock(&moxa_openlock);
+				if (copy_to_user(argm, &tmp, sizeof(tmp)))
 					return -EFAULT;
-				}
 			}
 		}
-		mutex_unlock(&moxa_openlock);
 		break;
 	}
 	case TIOCGSERIAL:
@@ -427,7 +427,6 @@ static const struct tty_port_operations moxa_port_ops = {
 
 static struct tty_driver *moxaDriver;
 static DEFINE_TIMER(moxaTimer, moxa_poll, 0, 0);
-static DEFINE_SPINLOCK(moxa_lock);
 
 /*
  * HW init
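
A note on the lock flavour: the ioctl side takes moxa_lock with
spin_lock_bh() rather than plain spin_lock() because the driver polls the
boards from moxaTimer (visible in the last hunk), and timer callbacks run
in softirq context. Assuming moxa_poll() takes the same lock, along these
lines (the body and poll interval here are illustrative, not lifted from
the driver):

static void moxa_poll(unsigned long unused)
{
	/* softirq context: BHs are already off, plain spin_lock() suffices */
	spin_lock(&moxa_lock);
	/* ... scan the ready boards, feed received data to the tty layer ... */
	spin_unlock(&moxa_lock);
	mod_timer(&moxaTimer, jiffies + HZ / 50);	/* interval illustrative */
}

If a process-context path held moxa_lock with plain spin_lock() and the
timer fired on the same CPU, the softirq would spin forever on a lock its
own CPU holds: a self-deadlock. Disabling bottom halves across the short
critical sections above rules that out.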