Message-ID: <20210120184046.GE2743@paulmck-ThinkPad-P72>
Date: Wed, 20 Jan 2021 10:40:46 -0800
From: "Paul E. McKenney" <paulmck@...nel.org>
To: "Uladzislau Rezki (Sony)" <urezki@...il.com>
Cc: LKML <linux-kernel@...r.kernel.org>, RCU <rcu@...r.kernel.org>,
Michael Ellerman <mpe@...erman.id.au>,
Andrew Morton <akpm@...ux-foundation.org>,
Daniel Axtens <dja@...ens.net>,
Frederic Weisbecker <frederic@...nel.org>,
Neeraj Upadhyay <neeraju@...eaurora.org>,
Joel Fernandes <joel@...lfernandes.org>,
Peter Zijlstra <peterz@...radead.org>,
Michal Hocko <mhocko@...e.com>,
Thomas Gleixner <tglx@...utronix.de>,
"Theodore Y . Ts'o" <tytso@....edu>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Oleksiy Avramchenko <oleksiy.avramchenko@...ymobile.com>
Subject: Re: [PATCH 1/3] kvfree_rcu: Allocate a page for a single argument
On Wed, Jan 20, 2021 at 05:21:46PM +0100, Uladzislau Rezki (Sony) wrote:
> For a single argument, we can directly request a page from the caller
> context when a "carry page block" runs out of free spots. Instead of
> hitting the slow path, we can request an extra page on demand and
> proceed along the fast path.
>
> A single-argument kvfree_rcu() must be invoked in sleepable contexts,
> and its fallback is the relatively high-latency synchronize_rcu().
> Single-argument kvfree_rcu() therefore uses GFP_KERNEL|__GFP_RETRY_MAYFAIL
> to allow limited sleeping within the memory allocator.
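>
> For illustration only (struct foo, its rcu_head field rh, and fp are
> made up for this sketch, not part of the patch), the two calling
> conventions differ as follows:
>
>	struct foo {
>		struct rcu_head rh;
>		int data;
>	};
>	struct foo *fp = kvzalloc(sizeof(*fp), GFP_KERNEL);
>
>	/* Either the double-argument form, which may be invoked from
>	 * atomic context because the rcu_head provides a fallback: */
>	kvfree_rcu(fp, rh);
>
>	/* ... or the single-argument form, sleepable context only,
>	 * because its fallback is synchronize_rcu() and, with this
>	 * patch, because it may allocate a page with GFP_KERNEL: */
>	kvfree_rcu(fp);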
>
> [ paulmck: Add add_ptr_to_bulk_krc_lock header comment per Michal Hocko. ]
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@...il.com>
> Signed-off-by: Paul E. McKenney <paulmck@...nel.org>
Queued all three for review and testing, thank you!
Thanx, Paul
> ---
> kernel/rcu/tree.c | 42 ++++++++++++++++++++++++++----------------
> 1 file changed, 26 insertions(+), 16 deletions(-)
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index e04e336bee42..2014fb22644d 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -3465,37 +3465,50 @@ run_page_cache_worker(struct kfree_rcu_cpu *krcp)
> }
> }
>
> +// Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
> +// state specified by flags. If can_alloc is true, the caller must
> +// be schedulable and not be holding any locks or mutexes that might be
> +// acquired by the memory allocator or anything that it might invoke.
> +// Returns true if ptr was successfully recorded, else the caller must
> +// use a fallback.
> static inline bool
> -kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
> +add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
> + unsigned long *flags, void *ptr, bool can_alloc)
> {
> struct kvfree_rcu_bulk_data *bnode;
> int idx;
>
> - if (unlikely(!krcp->initialized))
> + *krcp = krc_this_cpu_lock(flags);
> + if (unlikely(!(*krcp)->initialized))
> return false;
>
> - lockdep_assert_held(&krcp->lock);
> idx = !!is_vmalloc_addr(ptr);
>
> /* Check if a new block is required. */
> - if (!krcp->bkvhead[idx] ||
> - krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
> - bnode = get_cached_bnode(krcp);
> - /* Switch to emergency path. */
> + if (!(*krcp)->bkvhead[idx] ||
> + (*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
> + bnode = get_cached_bnode(*krcp);
> + if (!bnode && can_alloc) {
> + krc_this_cpu_unlock(*krcp, *flags);
> + bnode = (struct kvfree_rcu_bulk_data *)
> + __get_free_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
> + *krcp = krc_this_cpu_lock(flags);
> + }
> +
> if (!bnode)
> return false;
>
> /* Initialize the new block. */
> bnode->nr_records = 0;
> - bnode->next = krcp->bkvhead[idx];
> + bnode->next = (*krcp)->bkvhead[idx];
>
> /* Attach it to the head. */
> - krcp->bkvhead[idx] = bnode;
> + (*krcp)->bkvhead[idx] = bnode;
> }
>
> /* Finally insert. */
> - krcp->bkvhead[idx]->records
> - [krcp->bkvhead[idx]->nr_records++] = ptr;
> + (*krcp)->bkvhead[idx]->records
> + [(*krcp)->bkvhead[idx]->nr_records++] = ptr;
>
> return true;
> }
> @@ -3533,8 +3546,6 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
> ptr = (unsigned long *) func;
> }
>
> - krcp = krc_this_cpu_lock(&flags);
> -
> // Queue the object but don't yet schedule the batch.
> if (debug_rcu_head_queue(ptr)) {
> // Probable double kfree_rcu(), just leak.
> @@ -3542,12 +3553,11 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
> __func__, head);
>
> // Mark as success and leave.
> - success = true;
> - goto unlock_return;
> + return;
> }
>
> kasan_record_aux_stack(ptr);
> - success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
> + success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
> if (!success) {
> run_page_cache_worker(krcp);
>
> --
> 2.20.1
>