Message-Id: <20221128153628.541361-2-urezki@gmail.com>
Date: Mon, 28 Nov 2022 16:36:26 +0100
From: "Uladzislau Rezki (Sony)" <urezki@...il.com>
To: LKML <linux-kernel@...r.kernel.org>, RCU <rcu@...r.kernel.org>,
"Paul E . McKenney" <paulmck@...nel.org>
Cc: Frederic Weisbecker <frederic@...nel.org>,
Neeraj Upadhyay <neeraj.iitr10@...il.com>,
Joel Fernandes <joel@...lfernandes.org>,
Uladzislau Rezki <urezki@...il.com>,
Oleksiy Avramchenko <oleksiy.avramchenko@...y.com>
Subject: [PATCH 2/4] rcu/kvfree: Move bulk/list reclaim to separate functions

There are two different paths by which memory is reclaimed.
Currently both are open-coded, which makes the code a bit
messy and harder to read.

Introduce two separate functions, kvfree_rcu_list() and
kvfree_rcu_bulk(), to cover the two independent cases.

Please note, this patch does not introduce any functional
change; it is limited to refactoring of the code.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@...il.com>
---
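Note for reviewers (not part of the patch): the "Channel 3" path that
moves into kvfree_rcu_list() relies on the usual kvfree_rcu() trick of
storing the rcu_head's byte offset within the object in ->func, so the
original allocation can be recovered by subtracting that offset from
the rcu_head address. Below is a rough userspace-only sketch of that
pointer recovery; struct foo and the main() harness are made up purely
for illustration, this is not kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>

struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *);
};

struct foo {
	int data[16];
	struct rcu_head rh;	/* rcu_head embedded somewhere in the object */
};

int main(void)
{
	struct foo *p = malloc(sizeof(*p));
	struct rcu_head *head;

	if (!p)
		return 1;
	head = &p->rh;

	/* Queue side: record the rcu_head offset in ->func. */
	head->func = (void (*)(struct rcu_head *))(uintptr_t)offsetof(struct foo, rh);

	/* Reclaim side: what kvfree_rcu_list() does for each element. */
	{
		unsigned long offset = (unsigned long)head->func;
		void *ptr = (char *)head - offset;

		printf("object %p recovered as %p\n", (void *)p, ptr);
		free(ptr);	/* the kernel path ends in kvfree(ptr) */
	}
	return 0;
}
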
kernel/rcu/tree.c | 114 ++++++++++++++++++++++++++--------------------
1 file changed, 65 insertions(+), 49 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 74d6889dcc50..3b5f6036d884 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3053,6 +3053,65 @@ drain_page_cache(struct kfree_rcu_cpu *krcp)
return freed;
}
+static void
+kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp,
+ struct kvfree_rcu_bulk_data *bnode, int idx)
+{
+ unsigned long flags;
+ int i;
+
+ debug_rcu_bhead_unqueue(bnode);
+
+ rcu_lock_acquire(&rcu_callback_map);
+ if (idx == 0) { // kmalloc() / kfree().
+ trace_rcu_invoke_kfree_bulk_callback(
+ rcu_state.name, bnode->nr_records,
+ bnode->records);
+
+ kfree_bulk(bnode->nr_records, bnode->records);
+ } else { // vmalloc() / vfree().
+ for (i = 0; i < bnode->nr_records; i++) {
+ trace_rcu_invoke_kvfree_callback(
+ rcu_state.name, bnode->records[i], 0);
+
+ vfree(bnode->records[i]);
+ }
+ }
+ rcu_lock_release(&rcu_callback_map);
+
+ raw_spin_lock_irqsave(&krcp->lock, flags);
+ if (put_cached_bnode(krcp, bnode))
+ bnode = NULL;
+ raw_spin_unlock_irqrestore(&krcp->lock, flags);
+
+ if (bnode)
+ free_page((unsigned long) bnode);
+
+ cond_resched_tasks_rcu_qs();
+}
+
+static void
+kvfree_rcu_list(struct rcu_head *head)
+{
+ struct rcu_head *next;
+
+ for (; head; head = next) {
+ unsigned long offset = (unsigned long)head->func;
+ void *ptr = (void *)head - offset;
+
+ next = head->next;
+ debug_rcu_head_unqueue((struct rcu_head *)ptr);
+ rcu_lock_acquire(&rcu_callback_map);
+ trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
+
+ if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
+ kvfree(ptr);
+
+ rcu_lock_release(&rcu_callback_map);
+ cond_resched_tasks_rcu_qs();
+ }
+}
+
/*
* This function is invoked in workqueue context after a grace period.
* It frees all the objects queued on ->bulk_head_free or ->head_free.
@@ -3062,10 +3121,10 @@ static void kfree_rcu_work(struct work_struct *work)
unsigned long flags;
struct kvfree_rcu_bulk_data *bnode, *n;
struct list_head bulk_head[FREE_N_CHANNELS];
- struct rcu_head *head, *next;
+ struct rcu_head *head;
struct kfree_rcu_cpu *krcp;
struct kfree_rcu_cpu_work *krwp;
- int i, j;
+ int i;
krwp = container_of(to_rcu_work(work),
struct kfree_rcu_cpu_work, rcu_work);
@@ -3082,38 +3141,9 @@ static void kfree_rcu_work(struct work_struct *work)
raw_spin_unlock_irqrestore(&krcp->lock, flags);
// Handle the first two channels.
- for (i = 0; i < FREE_N_CHANNELS; i++) {
- list_for_each_entry_safe(bnode, n, &bulk_head[i], list) {
- debug_rcu_bhead_unqueue(bnode);
-
- rcu_lock_acquire(&rcu_callback_map);
- if (i == 0) { // kmalloc() / kfree().
- trace_rcu_invoke_kfree_bulk_callback(
- rcu_state.name, bnode->nr_records,
- bnode->records);
-
- kfree_bulk(bnode->nr_records, bnode->records);
- } else { // vmalloc() / vfree().
- for (j = 0; j < bnode->nr_records; j++) {
- trace_rcu_invoke_kvfree_callback(
- rcu_state.name, bnode->records[j], 0);
-
- vfree(bnode->records[j]);
- }
- }
- rcu_lock_release(&rcu_callback_map);
-
- raw_spin_lock_irqsave(&krcp->lock, flags);
- if (put_cached_bnode(krcp, bnode))
- bnode = NULL;
- raw_spin_unlock_irqrestore(&krcp->lock, flags);
-
- if (bnode)
- free_page((unsigned long) bnode);
-
- cond_resched_tasks_rcu_qs();
- }
- }
+ for (i = 0; i < FREE_N_CHANNELS; i++)
+ list_for_each_entry_safe(bnode, n, &bulk_head[i], list)
+ kvfree_rcu_bulk(krcp, bnode, i);
/*
* This is used when the "bulk" path can not be used for the
@@ -3122,21 +3152,7 @@ static void kfree_rcu_work(struct work_struct *work)
* queued on a linked list through their rcu_head structures.
* This list is named "Channel 3".
*/
- for (; head; head = next) {
- unsigned long offset = (unsigned long)head->func;
- void *ptr = (void *)head - offset;
-
- next = head->next;
- debug_rcu_head_unqueue((struct rcu_head *)ptr);
- rcu_lock_acquire(&rcu_callback_map);
- trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
-
- if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
- kvfree(ptr);
-
- rcu_lock_release(&rcu_callback_map);
- cond_resched_tasks_rcu_qs();
- }
+ kvfree_rcu_list(head);
}
static bool
--
2.30.2