Message-Id: <1273634462-2672-4-git-send-email-walken@google.com>
Date: Tue, 11 May 2010 20:20:53 -0700
From: Michel Lespinasse <walken@...gle.com>
To: Linus Torvalds <torvalds@...ux-foundation.org>,
David Howells <dhowells@...hat.com>,
Ingo Molnar <mingo@...e.hu>,
Thomas Gleixner <tglx@...utronix.de>
Cc: LKML <linux-kernel@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Mike Waychison <mikew@...gle.com>,
Suleiman Souhlal <suleiman@...gle.com>,
Ying Han <yinghan@...gle.com>,
Michel Lespinasse <walken@...gle.com>
Subject: [PATCH 03/12] rwsem: let RWSEM_WAITING_BIAS represent any number of waiting threads

Previously, each waiting thread added a bias of RWSEM_WAITING_BIAS to the
semaphore count. With this change, the bias is applied only once, for as
long as the wait list is non-empty.

This has a few nice properties which will be used in following changes:

- when sem->wait_lock is held and the waiter list is known to be non-empty,
  count < RWSEM_WAITING_BIAS  <=>  there is an active writer on that sem
- count == RWSEM_WAITING_BIAS  <=>  there are waiting threads and no
  active readers/writers on that sem
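
To make the arithmetic concrete, here is a minimal user-space sketch
(illustrative only, not part of the patch) that walks through the two
properties above. It assumes the 32-bit x86 values of the bias constants;
other architectures use the same scheme with different field widths.

#include <assert.h>

#define RWSEM_UNLOCKED_VALUE	0x00000000L
#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_ACTIVE_MASK	0x0000ffffL
#define RWSEM_WAITING_BIAS	(-0x00010000L)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	long count = RWSEM_UNLOCKED_VALUE;

	count += RWSEM_ACTIVE_WRITE_BIAS;	/* a writer takes the sem */
	count += RWSEM_WAITING_BIAS;		/* first waiter queues */
	/* second, third, ... waiters queue: count does not change */

	/* property 1: waiters queued and an active writer present */
	assert(count < RWSEM_WAITING_BIAS);

	count -= RWSEM_ACTIVE_WRITE_BIAS;	/* the writer releases */

	/* property 2: waiters queued, no active readers/writers */
	assert(count == RWSEM_WAITING_BIAS);
	return 0;
}

Note that under the old scheme the count after the release would have
depended on the number of queued waiters; making it the constant
RWSEM_WAITING_BIAS is what lets the following changes test against it.
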
Signed-off-by: Michel Lespinasse <walken@...gle.com>
---
 lib/rwsem.c |   23 +++++++++++++++--------
 1 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/lib/rwsem.c b/lib/rwsem.c
index caee9b7..0fb6e38 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -52,7 +52,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 	struct rwsem_waiter *waiter;
 	struct task_struct *tsk;
 	struct list_head *next;
-	signed long oldcount, woken, loop;
+	signed long oldcount, woken, loop, adjustment;
 
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
@@ -68,9 +68,12 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 	 * write lock. However, we only wake someone up
 	 * if we can transition the active part of the count from 0 -> 1
 	 */
+	adjustment = RWSEM_ACTIVE_WRITE_BIAS;
+	if (waiter->list.next == &sem->wait_list)
+		adjustment -= RWSEM_WAITING_BIAS;
+
  retry_writer:
-	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
-						- RWSEM_ACTIVE_BIAS;
+	oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
 	if (oldcount & RWSEM_ACTIVE_MASK)
 		/* Someone grabbed the sem already */
 		goto undo_writer;
@@ -106,7 +109,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 	} while (waiter->flags & RWSEM_WAITING_FOR_READ);
 
 	loop = woken;
-	woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
+	woken *= RWSEM_ACTIVE_READ_BIAS;
+	if (waiter->flags & RWSEM_WAITING_FOR_READ)
+		/* hit end of list above */
+		woken -= RWSEM_WAITING_BIAS;
 
  retry_readers:
 	oldcount = rwsem_atomic_update(woken, sem) - woken;
@@ -133,7 +139,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 
 	/* undo the change to count, but check for a transition 1->0 */
  undo_writer:
-	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
+	if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
 		goto out;
 	goto retry_writer;
@@ -160,6 +166,8 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	waiter->task = tsk;
 	get_task_struct(tsk);
 
+	if (list_empty(&sem->wait_list))
+		adjustment += RWSEM_WAITING_BIAS;
 	list_add_tail(&waiter->list, &sem->wait_list);
 
 	/* we're now waiting on the lock, but no longer actively read-locking */
@@ -193,8 +201,7 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
 	struct rwsem_waiter waiter;
 
 	waiter.flags = RWSEM_WAITING_FOR_READ;
-	rwsem_down_failed_common(sem, &waiter,
-				 RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
+	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_READ_BIAS);
 
 	return sem;
 }
@@ -207,7 +214,7 @@ rwsem_down_write_failed(struct rw_semaphore *sem)
 	struct rwsem_waiter waiter;
 
 	waiter.flags = RWSEM_WAITING_FOR_WRITE;
-	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
+	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_WRITE_BIAS);
 
 	return sem;
 }
--
1.7.0.1
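
As a further illustration (again not part of the patch), the count
adjustments computed by the wake paths in the diff above can be modeled in
isolation. The function names below are made up for this sketch,
rwsem_atomic_update() is replaced by plain arithmetic on a local value, and
the bias constants are the 32-bit x86 ones from the earlier example.

#include <assert.h>

#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_WAITING_BIAS	(-0x00010000L)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/* Waking the writer at the head of the queue: grant it one active
 * write lock, and drop the waiting bias if it was the only waiter
 * (mirrors the 'adjustment' logic in __rwsem_do_wake). */
static long writer_wake_adjustment(int sole_waiter)
{
	long adjustment = RWSEM_ACTIVE_WRITE_BIAS;
	if (sole_waiter)
		adjustment -= RWSEM_WAITING_BIAS;	/* the bias is negative */
	return adjustment;
}

/* Waking 'woken' readers from the head of the queue: grant one active
 * read lock each, and drop the waiting bias if the wait list was
 * emptied (mirrors the 'woken' computation). */
static long reader_wake_adjustment(long woken, int list_now_empty)
{
	long adjustment = woken * RWSEM_ACTIVE_READ_BIAS;
	if (list_now_empty)
		adjustment -= RWSEM_WAITING_BIAS;
	return adjustment;
}

int main(void)
{
	/* sole queued writer woken on a released sem: count goes from
	 * RWSEM_WAITING_BIAS to RWSEM_ACTIVE_WRITE_BIAS */
	assert(RWSEM_WAITING_BIAS + writer_wake_adjustment(1)
	       == RWSEM_ACTIVE_WRITE_BIAS);

	/* three queued readers woken, emptying the list: count goes
	 * from RWSEM_WAITING_BIAS to three active read locks */
	assert(RWSEM_WAITING_BIAS + reader_wake_adjustment(3, 1)
	       == 3 * RWSEM_ACTIVE_READ_BIAS);
	return 0;
}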