Message-ID: <20080801211702.3469.18381.stgit@lsg.lsg.lab.novell.com>
Date: Fri, 01 Aug 2008 15:17:02 -0600
From: Gregory Haskins <ghaskins@...ell.com>
To: mingo@...e.hu, paulmck@...ux.vnet.ibm.com, peterz@...radead.org,
	tglx@...utronix.de, rostedt@...dmis.org
Cc: linux-kernel@...r.kernel.org, linux-rt-users@...r.kernel.org,
gregory.haskins@...il.com
Subject: [PATCH RT RFC 3/7] rtmutex: formally initialize the rt_mutex_waiters

We will be adding more logic to rt_mutex_waiter, so let's centralize the
initialization to make this easier going forward.
Signed-off-by: Gregory Haskins <ghaskins@...ell.com>
---
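
In short (a schematic restatement of the hunks below, not an additional
change): each slow-path entry point replaces the open-coded waiter setup
with a single call to the new helper, whose memset() also covers any
fields added to rt_mutex_waiter later:

        /* before: repeated in each slow-path function */
        debug_rt_mutex_init_waiter(&waiter);
        waiter.task = NULL;
        waiter.write_lock = 0;

        /* after: one helper; the memset() inside it zeroes future fields too */
        init_waiter(&waiter);
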
 kernel/rtmutex.c |   26 ++++++++++++++------------
 1 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 7d11380..12de859 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -805,6 +805,15 @@ static int adaptive_wait(struct rt_mutex_waiter *waiter,
 }
 #endif
 
+static void init_waiter(struct rt_mutex_waiter *waiter)
+{
+        memset(waiter, 0, sizeof(*waiter));
+
+        debug_rt_mutex_init_waiter(waiter);
+        waiter->task = NULL;
+        waiter->write_lock = 0;
+}
+
 /*
  * Slow path lock function spin_lock style: this variant is very
  * careful not to miss any non-lock wakeups.
@@ -823,9 +832,7 @@ rt_spin_lock_slowlock(struct rt_mutex *lock)
         struct task_struct *orig_owner;
         int missed = 0;
 
-        debug_rt_mutex_init_waiter(&waiter);
-        waiter.task = NULL;
-        waiter.write_lock = 0;
+        init_waiter(&waiter);
 
         spin_lock_irqsave(&lock->wait_lock, flags);
         init_lists(lock);
@@ -1324,6 +1331,8 @@ rt_read_slowlock(struct rw_mutex *rwm, int mtx)
         int saved_lock_depth = -1;
         unsigned long saved_state = -1, state, flags;
 
+        init_waiter(&waiter);
+
         spin_lock_irqsave(&mutex->wait_lock, flags);
         init_rw_lists(rwm);
 
@@ -1335,10 +1344,6 @@ rt_read_slowlock(struct rw_mutex *rwm, int mtx)
 
         /* Owner is a writer (or a blocked writer). Block on the lock */
 
-        debug_rt_mutex_init_waiter(&waiter);
-        waiter.task = NULL;
-        waiter.write_lock = 0;
-
         if (mtx) {
                 /*
                  * We drop the BKL here before we go into the wait loop to avoid a
@@ -1538,8 +1543,7 @@ rt_write_slowlock(struct rw_mutex *rwm, int mtx)
         int saved_lock_depth = -1;
         unsigned long flags, saved_state = -1, state;
 
-        debug_rt_mutex_init_waiter(&waiter);
-        waiter.task = NULL;
+        init_waiter(&waiter);
 
         /* we do PI different for writers that are blocked */
         waiter.write_lock = 1;
@@ -2270,9 +2274,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
         struct rt_mutex_waiter waiter;
         unsigned long flags;
 
-        debug_rt_mutex_init_waiter(&waiter);
-        waiter.task = NULL;
-        waiter.write_lock = 0;
+        init_waiter(&waiter);
 
         spin_lock_irqsave(&lock->wait_lock, flags);
         init_lists(lock);
--