Message-Id: <20201029165019.14218-15-urezki@gmail.com>
Date: Thu, 29 Oct 2020 17:50:18 +0100
From: "Uladzislau Rezki (Sony)" <urezki@...il.com>
To: LKML <linux-kernel@...r.kernel.org>, RCU <rcu@...r.kernel.org>,
"Paul E . McKenney" <paulmck@...nel.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Peter Zijlstra <peterz@...radead.org>,
Michal Hocko <mhocko@...e.com>,
Thomas Gleixner <tglx@...utronix.de>,
"Theodore Y . Ts'o" <tytso@....edu>,
Joel Fernandes <joel@...lfernandes.org>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Uladzislau Rezki <urezki@...il.com>,
Oleksiy Avramchenko <oleksiy.avramchenko@...ymobile.com>
Subject: [PATCH 15/16] rcu/tree: Allocate a page when caller is preemptible

Given that CONFIG_PREEMPT_COUNT is unconditionally enabled by the
earlier commits in this series, the preemptible() macro now properly
detects preempt-disable code regions even in kernels built with
CONFIG_PREEMPT_NONE.
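
For reference, with CONFIG_PREEMPT_COUNT enabled, preemptible() reduces
to roughly the following (simplified from include/linux/preempt.h;
without CONFIG_PREEMPT_COUNT it is hard-wired to 0, which is why the
earlier commits in this series matter here):

	#ifdef CONFIG_PREEMPT_COUNT
	/* True only outside preempt-disabled and irq-disabled regions. */
	#define preemptible()	(preempt_count() == 0 && !irqs_disabled())
	#else
	/* Without preempt counting, preemptibility cannot be detected. */
	#define preemptible()	0
	#endif
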
This commit therefore uses preemptible() to determine whether allocation
is possible at all for double-argument kvfree_rcu(). If !preemptible(),
then allocation is not possible, and kvfree_rcu() falls back to using
the less cache-friendly rcu_head approach. Even when preemptible(),
the caller might be involved in reclaim, so the GFP_ flags used by
double-argument kvfree_rcu() must avoid invoking reclaim processing.

Note that single-argument kvfree_rcu() must be invoked in sleepable
contexts, and that its fallback is the relatively high latency
synchronize_rcu(). Single-argument kvfree_rcu() therefore uses
GFP_KERNEL|__GFP_RETRY_MAYFAIL to allow limited sleeping within the
memory allocator.
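
For context, the two kvfree_rcu() forms discussed above are used
roughly as shown below. This is only an illustrative sketch, not part
of this patch; struct foo and example() are made-up names, and the
comments restate the fallback behavior described above:

	struct foo {
		struct rcu_head rh;	/* Needed only by the double-argument form. */
		int data;
	};

	static void example(struct foo *fp, void *vp)
	{
		/* Double-argument form: legal in non-sleepable context. If no
		 * page can be allocated, it falls back to queuing fp->rh. */
		kvfree_rcu(fp, rh);

		/* Single-argument form: caller must be able to sleep. If no
		 * page can be allocated, it falls back to synchronize_rcu(). */
		kvfree_rcu(vp);
	}
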
[ paulmck: Add add_ptr_to_bulk_krc_lock header comment per Michal Hocko. ]

Signed-off-by: Uladzislau Rezki (Sony) <urezki@...il.com>
Signed-off-by: Paul E. McKenney <paulmck@...nel.org>
---
kernel/rcu/tree.c | 48 ++++++++++++++++++++++++++++++-----------------
1 file changed, 31 insertions(+), 17 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f2da2a1cc716..3f9b016a44dc 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3406,37 +3406,55 @@ run_page_cache_worker(struct kfree_rcu_cpu *krcp)
}
}
+// Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
+// state specified by flags. If can_sleep is true, the caller must
+// be schedulable and not be holding any locks or mutexes that might be
+// acquired by the memory allocator or anything that it might invoke.
+// If !can_sleep, then if !preemptible() no allocation will be undertaken,
+// otherwise the allocation will use GFP_ATOMIC to avoid the remainder of
+// the aforementioned deadlock possibilities. Returns true if ptr was
+// successfully recorded, else the caller must use a fallback.
static inline bool
-kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
+add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
+ unsigned long *flags, void *ptr, bool can_sleep)
{
struct kvfree_rcu_bulk_data *bnode;
+ bool can_alloc_page = preemptible();
+ gfp_t gfp = (can_sleep ? GFP_KERNEL | __GFP_RETRY_MAYFAIL :
+ GFP_ATOMIC) | __GFP_NOWARN;
int idx;
- if (unlikely(!krcp->initialized))
+ *krcp = krc_this_cpu_lock(flags);
+ if (unlikely(!(*krcp)->initialized))
return false;
- lockdep_assert_held(&krcp->lock);
idx = !!is_vmalloc_addr(ptr);
/* Check if a new block is required. */
- if (!krcp->bkvhead[idx] ||
- krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
- bnode = get_cached_bnode(krcp);
- /* Switch to emergency path. */
+ if (!(*krcp)->bkvhead[idx] ||
+ (*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
+ bnode = get_cached_bnode(*krcp);
+ if (!bnode && can_alloc_page) {
+ krc_this_cpu_unlock(*krcp, *flags);
+ bnode = (struct kvfree_rcu_bulk_data *)
+ __get_free_page(gfp);
+ *krcp = krc_this_cpu_lock(flags);
+ }
+
if (!bnode)
return false;
/* Initialize the new block. */
bnode->nr_records = 0;
- bnode->next = krcp->bkvhead[idx];
+ bnode->next = (*krcp)->bkvhead[idx];
/* Attach it to the head. */
- krcp->bkvhead[idx] = bnode;
+ (*krcp)->bkvhead[idx] = bnode;
}
/* Finally insert. */
- krcp->bkvhead[idx]->records
- [krcp->bkvhead[idx]->nr_records++] = ptr;
+ (*krcp)->bkvhead[idx]->records
+ [(*krcp)->bkvhead[idx]->nr_records++] = ptr;
return true;
}
@@ -3474,20 +3492,16 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
ptr = (unsigned long *) func;
}
- krcp = krc_this_cpu_lock(&flags);
-
// Queue the object but don't yet schedule the batch.
if (debug_rcu_head_queue(ptr)) {
// Probable double kfree_rcu(), just leak.
WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
__func__, head);
- // Mark as success and leave.
- success = true;
- goto unlock_return;
+ return;
}
- success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
+ success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
if (!success) {
run_page_cache_worker(krcp);
--
2.20.1