Message-Id: <20210901222825.6313-3-dave@stgolabs.net>
Date: Wed, 1 Sep 2021 15:28:25 -0700
From: Davidlohr Bueso <dave@...olabs.net>
To: tglx@...utronix.de
Cc: peterz@...radead.org, mingo@...nel.org, rostedt@...dmis.org,
longman@...hat.com, bigeasy@...utronix.de, boqun.feng@...il.com,
dave@...olabs.net, linux-kernel@...r.kernel.org,
Davidlohr Bueso <dbueso@...e.de>
Subject: [PATCH 2/2] locking/rwbase_rt: Lockless reader waking up a writer

As with the rest of our sleeping locks, use a wake_q to
allow waking up the writer without having to hold the
wait_lock across the operation. While this is ideal for
batching wakeups, even a single wakeup has been shown to
be beneficial vs the cost of try_to_wake_up() when the
lock is contended.
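
For illustration, the wake_q deferral pattern this patch applies
boils down to the following (a minimal sketch, not part of the
patch; example_read_unlock() and 'waiter' are made-up stand-ins
for __rwbase_read_unlock() and the rtmutex owner):

	static void example_read_unlock(raw_spinlock_t *lock,
					struct task_struct *waiter,
					unsigned int state)
	{
		DEFINE_WAKE_Q(wake_q);

		raw_spin_lock_irq(lock);
		if (waiter)
			wake_q_add(&wake_q, waiter); /* grabs a task reference */
		raw_spin_unlock_irq(lock);

		/* the actual try_to_wake_up() now runs unlocked */
		wake_up_q_state(&wake_q, state);
	}
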
Signed-off-by: Davidlohr Bueso <dbueso@...e.de>
---
 kernel/locking/rwbase_rt.c |  4 +++-
 kernel/sched/core.c        | 16 +++++++++++++---
 2 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/kernel/locking/rwbase_rt.c b/kernel/locking/rwbase_rt.c
index 4ba15088e640..3444bc709973 100644
--- a/kernel/locking/rwbase_rt.c
+++ b/kernel/locking/rwbase_rt.c
@@ -141,6 +141,7 @@ static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
 {
 	struct rt_mutex_base *rtm = &rwb->rtmutex;
 	struct task_struct *owner;
+	DEFINE_WAKE_Q(wake_q);
 
 	raw_spin_lock_irq(&rtm->wait_lock);
 	/*
@@ -151,9 +152,10 @@ static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
 	 */
 	owner = rt_mutex_owner(rtm);
 	if (owner)
-		wake_up_state(owner, state);
+		wake_q_add(&wake_q, owner);
 
 	raw_spin_unlock_irq(&rtm->wait_lock);
+	wake_up_q_state(&wake_q, state);
 }
 
 static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7fc3d22bc6d8..22c77742f1a7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4151,7 +4151,7 @@ void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
 	put_task_struct(task);
 }
 
-void wake_up_q(struct wake_q_head *head)
+static void __wake_up_q(struct wake_q_head *head, unsigned int state)
 {
 	struct wake_q_node *node = head->first;
 
@@ -4164,14 +4164,24 @@ void wake_up_q(struct wake_q_head *head)
 		task->wake_q.next = NULL;
 
 		/*
-		 * wake_up_process() executes a full barrier, which pairs with
+		 * try_to_wake_up() executes a full barrier, which pairs with
 		 * the queueing in wake_q_add() so as not to miss wakeups.
 		 */
-		wake_up_process(task);
+		try_to_wake_up(task, state, 0);
 		put_task_struct(task);
 	}
 }
 
+void wake_up_q(struct wake_q_head *head)
+{
+	__wake_up_q(head, TASK_NORMAL);
+}
+
+void wake_up_q_state(struct wake_q_head *head, unsigned int state)
+{
+	__wake_up_q(head, state);
+}
+
 /*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
--
2.26.2