Message-Id: <20200315181840.6966-6-urezki@gmail.com>
Date: Sun, 15 Mar 2020 19:18:39 +0100
From: "Uladzislau Rezki (Sony)" <urezki@...il.com>
To: LKML <linux-kernel@...r.kernel.org>,
"Paul E . McKenney" <paulmck@...nel.org>,
Joel Fernandes <joel@...lfernandes.org>
Cc: RCU <rcu@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Uladzislau Rezki <urezki@...il.com>,
Steven Rostedt <rostedt@...dmis.org>,
Oleksiy Avramchenko <oleksiy.avramchenko@...ymobile.com>
Subject: [PATCH v1 5/6] rcu: rename kfree_call_rcu()/__kfree_rcu()
Rename kfree_call_rcu() to kvfree_call_rcu(), since it is now
capable of freeing memory obtained from vmalloc().

Do the same with the __kfree_rcu() macro: it becomes
__kvfree_rcu(), for the same reason as above.
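
To illustrate the renamed path, below is a minimal sketch of a
caller, assuming the earlier patches in this series are applied;
"struct foo" and foo_release() are made-up names used only for
this example and are not part of the patch:

	/*
	 * Sketch only. The object embeds an rcu_head, so it can be
	 * queued for deferred freeing with kfree_rcu(); with this
	 * series applied it may have been allocated with kmalloc()
	 * or vmalloc().
	 */
	struct foo {
		struct rcu_head rhf;
		unsigned long payload[1024];
	};

	static void foo_release(struct foo *p)
	{
		/*
		 * Expands to __kvfree_rcu(), which passes the offset
		 * of ->rhf to kvfree_call_rcu() after this patch.
		 */
		kfree_rcu(p, rhf);
	}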
Signed-off-by: Uladzislau Rezki (Sony) <urezki@...il.com>
---
include/linux/rcupdate.h | 8 ++++----
include/linux/rcutiny.h | 2 +-
include/linux/rcutree.h | 2 +-
kernel/rcu/tree.c | 8 ++++----
4 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e4961631a44f..6c660fa1f551 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -805,10 +805,10 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
/*
* Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
*/
-#define __kfree_rcu(head, offset) \
+#define __kvfree_rcu(head, offset) \
do { \
BUILD_BUG_ON(!__is_kvfree_rcu_offset(offset)); \
- kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
+ kvfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
} while (0)
/**
@@ -827,7 +827,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* Because the functions are not allowed in the low-order 4096 bytes of
* kernel virtual memory, offsets up to 4095 bytes can be accommodated.
* If the offset is larger than 4095 bytes, a compile-time error will
- * be generated in __kfree_rcu(). If this error is triggered, you can
+ * be generated in __kvfree_rcu(). If this error is triggered, you can
* either fall back to use of call_rcu() or rearrange the structure to
* position the rcu_head structure into the first 4096 bytes.
*
@@ -842,7 +842,7 @@ do { \
typeof (ptr) ___p = (ptr); \
\
if (___p) \
- __kfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \
+ __kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \
} while (0)
/**
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 045c28b71f4f..4cae3dd77173 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -34,7 +34,7 @@ static inline void synchronize_rcu_expedited(void)
synchronize_rcu();
}
-static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
call_rcu(head, func);
}
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 45f3f66bb04d..3a7829d69fef 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -33,7 +33,7 @@ static inline void rcu_virt_note_context_switch(int cpu)
}
void synchronize_rcu_expedited(void);
-void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
+void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier(void);
bool rcu_eqs_special_set(int cpu);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index bb9544238396..19e6cb970c38 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3054,18 +3054,18 @@ kfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp,
}
/*
- * Queue a request for lazy invocation of kfree_bulk()/kfree() after a grace
+ * Queue a request for lazy invocation of kfree_bulk()/kvfree() after a grace
* period. Please note there are two paths are maintained, one is the main one
* that uses kfree_bulk() interface and second one is emergency one, that is
* used only when the main path can not be maintained temporary, due to memory
* pressure.
*
- * Each kfree_call_rcu() request is added to a batch. The batch will be drained
+ * Each kvfree_call_rcu() request is added to a batch. The batch will be drained
* every KFREE_DRAIN_JIFFIES number of jiffies. All the objects in the batch will
* be free'd in workqueue context. This allows us to: batch requests together to
* reduce the number of grace periods during heavy kfree_rcu() load.
*/
-void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
unsigned long flags;
struct kfree_rcu_cpu *krcp;
@@ -3112,7 +3112,7 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
spin_unlock(&krcp->lock);
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(kfree_call_rcu);
+EXPORT_SYMBOL_GPL(kvfree_call_rcu);
void __init kfree_rcu_scheduler_running(void)
{
--
2.20.1