Message-Id: <20221128153628.541361-3-urezki@gmail.com>
Date: Mon, 28 Nov 2022 16:36:27 +0100
From: "Uladzislau Rezki (Sony)" <urezki@...il.com>
To: LKML <linux-kernel@...r.kernel.org>, RCU <rcu@...r.kernel.org>,
"Paul E . McKenney" <paulmck@...nel.org>
Cc: Frederic Weisbecker <frederic@...nel.org>,
Neeraj Upadhyay <neeraj.iitr10@...il.com>,
Joel Fernandes <joel@...lfernandes.org>,
Uladzislau Rezki <urezki@...il.com>,
Oleksiy Avramchenko <oleksiy.avramchenko@...y.com>
Subject: [PATCH 3/4] rcu/kvfree: Move need_offload_krc() out of krcp->lock
Currently the need_offload_krc() function requires krcp->lock to be
held because krcp->head cannot be checked concurrently. Fix this by
updating krcp->head with the WRITE_ONCE() macro so the check becomes
lock-free and readers can safely observe a valid value without taking
the lock.
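For reference, here is a minimal userspace sketch (not the kernel code
itself) of the single-writer/lockless-reader pattern this change relies
on: the head pointer is still written only by the path that holds the
lock, but a reader may check it without the lock as long as both sides
use ONCE accessors. The WRITE_ONCE()/READ_ONCE() macros below are
simplified stand-ins for the kernel ones, and demo_krc, demo_enqueue()
and demo_need_offload() are hypothetical names used only for
illustration.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's ONCE accessors. */
#define WRITE_ONCE(x, val)  (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)        (*(volatile __typeof__(x) *)&(x))

struct demo_head {
	struct demo_head *next;
};

struct demo_krc {
	struct demo_head *head;	/* written under a lock, read locklessly */
};

/* Writer side: runs with the (hypothetical) per-CPU lock held. */
static void demo_enqueue(struct demo_krc *krc, struct demo_head *new)
{
	new->next = krc->head;
	WRITE_ONCE(krc->head, new);	/* publish for lockless readers */
}

/* Reader side: may run without the lock, like need_offload_krc(). */
static int demo_need_offload(struct demo_krc *krc)
{
	return READ_ONCE(krc->head) != NULL;
}

int main(void)
{
	struct demo_krc krc = { .head = NULL };
	struct demo_head obj = { .next = NULL };

	printf("before enqueue: %d\n", demo_need_offload(&krc));	/* 0 */
	demo_enqueue(&krc, &obj);
	printf("after enqueue:  %d\n", demo_need_offload(&krc));	/* 1 */
	return 0;
}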
Signed-off-by: Uladzislau Rezki (Sony) <urezki@...il.com>
---
kernel/rcu/tree.c | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3b5f6036d884..f68ddbef2a33 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3218,7 +3218,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
// objects queued on the linked list.
if (!krwp->head_free) {
krwp->head_free = krcp->head;
- krcp->head = NULL;
+ WRITE_ONCE(krcp->head, NULL);
}
WRITE_ONCE(krcp->count, 0);
@@ -3232,6 +3232,8 @@ static void kfree_rcu_monitor(struct work_struct *work)
}
}
+ raw_spin_unlock_irqrestore(&krcp->lock, flags);
+
// If there is nothing to detach, it means that our job is
// successfully done here. In case of having at least one
// of the channels that is still busy we should rearm the
@@ -3239,8 +3241,6 @@ static void kfree_rcu_monitor(struct work_struct *work)
// still in progress.
if (need_offload_krc(krcp))
schedule_delayed_monitor_work(krcp);
-
- raw_spin_unlock_irqrestore(&krcp->lock, flags);
}
static enum hrtimer_restart
@@ -3415,7 +3415,7 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
head->func = func;
head->next = krcp->head;
- krcp->head = head;
+ WRITE_ONCE(krcp->head, head);
success = true;
}
@@ -3492,15 +3492,12 @@ static struct shrinker kfree_rcu_shrinker = {
void __init kfree_rcu_scheduler_running(void)
{
int cpu;
- unsigned long flags;
for_each_possible_cpu(cpu) {
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
- raw_spin_lock_irqsave(&krcp->lock, flags);
if (need_offload_krc(krcp))
schedule_delayed_monitor_work(krcp);
- raw_spin_unlock_irqrestore(&krcp->lock, flags);
}
}
--
2.30.2