Message-ID: <20201029194724.GN3249@paulmck-ThinkPad-P72>
Date: Thu, 29 Oct 2020 12:47:24 -0700
From: "Paul E. McKenney" <paulmck@...nel.org>
To: "Uladzislau Rezki (Sony)" <urezki@...il.com>
Cc: LKML <linux-kernel@...r.kernel.org>, RCU <rcu@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Peter Zijlstra <peterz@...radead.org>,
Michal Hocko <mhocko@...e.com>,
Thomas Gleixner <tglx@...utronix.de>,
"Theodore Y . Ts'o" <tytso@....edu>,
Joel Fernandes <joel@...lfernandes.org>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Oleksiy Avramchenko <oleksiy.avramchenko@...ymobile.com>
Subject: Re: [PATCH 16/16] rcu/tree: Use delayed work instead of hrtimer to
refill the cache
On Thu, Oct 29, 2020 at 05:50:19PM +0100, Uladzislau Rezki (Sony) wrote:
> Since CONFIG_PREEMPT_COUNT is now unconditionally enabled, a page
> can be obtained directly from the kvfree_rcu() path. To make that
> decision, the preemptible() macro is used to detect when it is
> safe to enter the allocator.
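[ For illustration, a minimal sketch of the idea described above; the
  helper name is hypothetical and this is not code from the patch.
  With CONFIG_PREEMPT_COUNT always on, preemptible() gives a reliable
  answer, so the allocator can be entered directly whenever preemption
  is enabled, with a fallback to the per-CPU page cache otherwise:

    #include <linux/preempt.h>
    #include <linux/gfp.h>

    static struct page *try_direct_page_alloc(void)
    {
            /* Entering the allocator may sleep, so do it only
             * when preemption is enabled. */
            if (!preemptible())
                    return NULL;    /* caller falls back to the cache */

            return alloc_pages(GFP_KERNEL | __GFP_NOWARN, 0);
    }
]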
>
> This means that refilling the cache is not critical from a timing
> point of view. Switch to a delayed work, so that the actual work is
> queued from the timer interrupt with a one-jiffy delay. Placing a
> task on the current CPU immediately can lead to a double acquisition
> of rq->lock; that is why the delayed method is used.
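[ The pattern being switched to, as a self-contained sketch
  (hypothetical module, not the patch itself): the one-jiffy delay
  means the work is dispatched from the timer tick rather than being
  queued onto the current CPU's runqueue from a path that may already
  hold rq->lock:

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct delayed_work refill_dwork;

    static void refill_fn(struct work_struct *work)
    {
            /* Runs in process context and may sleep/allocate. */
    }

    static int __init demo_init(void)
    {
            INIT_DELAYED_WORK(&refill_dwork, refill_fn);
            schedule_delayed_work(&refill_dwork, 1);  /* 1 jiffy */
            return 0;
    }

    static void __exit demo_exit(void)
    {
            cancel_delayed_work_sync(&refill_dwork);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
]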
>
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@...il.com>
Thank you, Uladzislau!
I applied this on top of v5.10-rc1 and got the following from the
single-CPU builds:
SYNC include/config/auto.conf.cmd
DESCEND objtool
CC kernel/bounds.s
CALL scripts/atomic/check-atomics.sh
UPD include/generated/bounds.h
CC arch/x86/kernel/asm-offsets.s
In file included from ./include/asm-generic/atomic-instrumented.h:20:0,
from ./include/linux/atomic.h:82,
from ./include/linux/crypto.h:15,
from arch/x86/kernel/asm-offsets.c:9:
./include/linux/pagemap.h: In function ‘__page_cache_add_speculative’:
./include/linux/build_bug.h:30:34: error: called object is not a function or function pointer
#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e))))
~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
./include/linux/mmdebug.h:45:25: note: in expansion of macro ‘BUILD_BUG_ON_INVALID’
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
^~~~~~~~~~~~~~~~~~~~
./include/linux/pagemap.h:207:2: note: in expansion of macro ‘VM_BUG_ON’
VM_BUG_ON(preemptible())
^~~~~~~~~
scripts/Makefile.build:117: recipe for target 'arch/x86/kernel/asm-offsets.s' failed
make[1]: *** [arch/x86/kernel/asm-offsets.s] Error 1
Makefile:1199: recipe for target 'prepare0' failed
make: *** [prepare0] Error 2
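[ Editorial note on the diagnostic: gcc emits "called object is not a
  function or function pointer" when the expression in front of the
  parentheses is not callable, which is what happens if preemptible
  ends up expanding to a plain value on this configuration. A minimal
  reproduction of the same error class (hypothetical, not a diagnosis
  of the actual cause in this series):

    #define preemptible 0           /* object-like macro, no () form */

    int bad(void)
    {
            return preemptible();   /* error: called object is not a
                                       function or function pointer */
    }
]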
I vaguely recall something like this showing up in the previous series
and that we did something or other to address it. Could you please
check against the old series at -rcu branch dev.2020.10.22a? (I verified
that the old series does run correctly in the single-CPU scenarios.)
Thanx, Paul
> ---
> kernel/rcu/tree.c | 26 +++++---------------------
> 1 file changed, 5 insertions(+), 21 deletions(-)
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 3f9b016a44dc..9ea55f800b4b 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -3086,7 +3086,6 @@ struct kfree_rcu_cpu_work {
> * per-cpu lock.
> * @page_cache_work: A work to refill the cache when it is empty
> * @work_in_progress: Indicates that page_cache_work is running
> - * @hrtimer: A hrtimer for scheduling a page_cache_work
> * @nr_bkv_objs: number of allocated objects at @bkvcache.
> *
> * This is a per-CPU structure. The reason that it is not included in
> @@ -3104,9 +3103,8 @@ struct kfree_rcu_cpu {
> bool initialized;
> int count;
>
> - struct work_struct page_cache_work;
> + struct delayed_work page_cache_work;
> atomic_t work_in_progress;
> - struct hrtimer hrtimer;
>
> struct llist_head bkvcache;
> int nr_bkv_objs;
> @@ -3355,22 +3353,12 @@ static void kfree_rcu_monitor(struct work_struct *work)
> raw_spin_unlock_irqrestore(&krcp->lock, flags);
> }
>
> -static enum hrtimer_restart
> -schedule_page_work_fn(struct hrtimer *t)
> -{
> - struct kfree_rcu_cpu *krcp =
> - container_of(t, struct kfree_rcu_cpu, hrtimer);
> -
> - queue_work(system_highpri_wq, &krcp->page_cache_work);
> - return HRTIMER_NORESTART;
> -}
> -
> static void fill_page_cache_func(struct work_struct *work)
> {
> struct kvfree_rcu_bulk_data *bnode;
> struct kfree_rcu_cpu *krcp =
> container_of(work, struct kfree_rcu_cpu,
> - page_cache_work);
> + page_cache_work.work);
> unsigned long flags;
> bool pushed;
> int i;
> @@ -3398,12 +3386,8 @@ static void
> run_page_cache_worker(struct kfree_rcu_cpu *krcp)
> {
> if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
> - !atomic_xchg(&krcp->work_in_progress, 1)) {
> - hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC,
> - HRTIMER_MODE_REL);
> - krcp->hrtimer.function = schedule_page_work_fn;
> - hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
> - }
> + !atomic_xchg(&krcp->work_in_progress, 1))
> + schedule_delayed_work(&krcp->page_cache_work, 1);
> }
>
> // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
> @@ -4503,7 +4487,7 @@ static void __init kfree_rcu_batch_init(void)
> }
>
> INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
> - INIT_WORK(&krcp->page_cache_work, fill_page_cache_func);
> + INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
> krcp->initialized = true;
> }
> if (register_shrinker(&kfree_rcu_shrinker))
> --
> 2.20.1
>
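[ One detail worth noting in the diff above: fill_page_cache_func()
  now uses container_of(work, ..., page_cache_work.work) because
  struct delayed_work embeds a plain work_struct, and the workqueue
  core hands the callback that inner member. Simplified from
  include/linux/workqueue.h:

    struct delayed_work {
            struct work_struct work;   /* what the callback receives */
            struct timer_list timer;   /* fires after the delay */
            /* ... */
    };

  The to_delayed_work() helper performs the same container_of() step
  from a work_struct pointer back to its enclosing delayed_work.
]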