[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <4405.1274276055@redhat.com>
Date: Wed, 19 May 2010 14:34:15 +0100
From: David Howells <dhowells@...hat.com>
To: Michel Lespinasse <walken@...gle.com>
Cc: dhowells@...hat.com,
Linus Torvalds <torvalds@...ux-foundation.org>,
Ingo Molnar <mingo@...e.hu>,
Thomas Gleixner <tglx@...utronix.de>,
LKML <linux-kernel@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Mike Waychison <mikew@...gle.com>,
Suleiman Souhlal <suleiman@...gle.com>,
Ying Han <yinghan@...gle.com>
Subject: Re: [PATCH 08/10] rwsem: down_read_critical infrastructure support
Michel Lespinasse <walken@...gle.com> wrote:
> #define RWSEM_WAITING_FOR_READ 0x00000001
> #define RWSEM_WAITING_FOR_WRITE 0x00000002
> +#define RWSEM_UNFAIR 0x00000004
Can I suggest you change this to:
enum rwsem_waiter_type {
RWSEM_WAITING_FOR_WRITE,
RWSEM_WAITING_FOR_READ,
RWSEM_WAITING_FOR_UNFAIR_READ
};
and then change:
> unsigned int flags;
to:
enum rwsem_waiter_type type;
and use this throughout. It simplifies some of the code too. See attached
patch.
David
---
rwsem: Make waiter type an enum in the slow path of the asm-optimised version
Make the waiter type in the asm-optimised version of the slow path an
enumeration rather than a bitmask.
Signed-off-by: David Howells <dhowells@...hat.com>
---
diff --git a/lib/rwsem.c b/lib/rwsem.c
index b3fe179..8aa2238 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -28,13 +28,16 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
EXPORT_SYMBOL(__init_rwsem);
+enum rwsem_waiter_type {
+ RWSEM_WAITING_FOR_WRITE,
+ RWSEM_WAITING_FOR_READ,
+ RWSEM_WAITING_FOR_UNFAIR_READ
+};
+
struct rwsem_waiter {
struct list_head list;
struct task_struct *task;
- unsigned int flags;
-#define RWSEM_WAITING_FOR_READ 0x00000001
-#define RWSEM_WAITING_FOR_WRITE 0x00000002
-#define RWSEM_UNFAIR 0x00000004
+ enum rwsem_waiter_type type;
};
/* Wake types for __rwsem_do_wake(). Note that RWSEM_WAKE_NO_ACTIVE and
@@ -64,7 +67,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
signed long oldcount, woken, loop, adjustment;
waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
- if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
+ if (waiter->type != RWSEM_WAITING_FOR_WRITE)
goto readers_only;
if (wake_type == RWSEM_WAKE_READ_OWNED)
@@ -133,10 +136,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
waiter = list_entry(waiter->list.next,
struct rwsem_waiter, list);
- } while (waiter->flags & RWSEM_WAITING_FOR_READ);
+ } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
adjustment = woken * RWSEM_ACTIVE_READ_BIAS;
- if (waiter->flags & RWSEM_WAITING_FOR_READ)
+ if (waiter->type != RWSEM_WAITING_FOR_WRITE)
/* hit end of list above */
adjustment -= RWSEM_WAITING_BIAS;
@@ -172,7 +175,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
*/
static struct rw_semaphore __sched *
rwsem_down_failed_common(struct rw_semaphore *sem,
- unsigned int flags, signed long adjustment)
+ enum rwsem_waiter_type type, signed long adjustment)
{
struct rwsem_waiter waiter;
struct task_struct *tsk = current;
@@ -183,13 +186,13 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
/* set up my own style of waitqueue */
spin_lock_irq(&sem->wait_lock);
waiter.task = tsk;
- waiter.flags = flags;
+ waiter.type = type;
get_task_struct(tsk);
if (list_empty(&sem->wait_list))
adjustment += RWSEM_WAITING_BIAS;
- if (flags & RWSEM_UNFAIR)
+	if (type == RWSEM_WAITING_FOR_UNFAIR_READ)
list_add(&waiter.list, &sem->wait_list);
else
list_add_tail(&waiter.list, &sem->wait_list);
@@ -242,8 +245,7 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
asmregparm struct rw_semaphore __sched *
rwsem_down_read_unfair_failed(struct rw_semaphore *sem)
{
- return rwsem_down_failed_common(sem,
- RWSEM_WAITING_FOR_READ | RWSEM_UNFAIR,
+ return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_UNFAIR_READ,
-RWSEM_ACTIVE_READ_BIAS);
}
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists