Message-ID: <84ab6b4a-6fc4-be3f-d990-1f46265a46e6@quicinc.com>
Date:   Wed, 1 Dec 2021 14:55:16 +0530
From:   Neeraj Upadhyay <quic_neeraju@...cinc.com>
To:     Frederic Weisbecker <frederic@...nel.org>,
        "Paul E . McKenney" <paulmck@...nel.org>
CC:     LKML <linux-kernel@...r.kernel.org>,
        Uladzislau Rezki <urezki@...il.com>,
        Boqun Feng <boqun.feng@...il.com>,
        Josh Triplett <josh@...htriplett.org>,
        Joel Fernandes <joel@...lfernandes.org>, <rcu@...r.kernel.org>
Subject: Re: [PATCH 1/6] rcu/nocb: Remove rdp from nocb list when de-offloaded



On 11/23/2021 6:07 AM, Frederic Weisbecker wrote:
> nocb_gp_wait() iterates over all CPUs within the rcuog's group even if
> they have been de-offloaded. This is suboptimal if only a few CPUs are
> offloaded within the group, and it will become even more of a problem
> when a nocb kthread is created for all possible CPUs in the future.
> 
> Therefore use a standard doubly linked list to link all the offloaded
> rdps and safely add/delete their nodes as they are (de-)offloaded.
> 
> Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
> Cc: Neeraj Upadhyay <quic_neeraju@...cinc.com>
> Cc: Boqun Feng <boqun.feng@...il.com>
> Cc: Uladzislau Rezki <urezki@...il.com>
> Cc: Josh Triplett <josh@...htriplett.org>
> Cc: Joel Fernandes <joel@...lfernandes.org>
> ---

A few queries below.

Reviewed-by: Neeraj Upadhyay <quic_neeraju@...cinc.com>



>   kernel/rcu/tree.h      |  7 +++++--
>   kernel/rcu/tree_nocb.h | 37 ++++++++++++++++++++++++++++++-------
>   2 files changed, 35 insertions(+), 9 deletions(-)
> 
> diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
> index deeaf2fee714..486fc901bd08 100644
> --- a/kernel/rcu/tree.h
> +++ b/kernel/rcu/tree.h
> @@ -221,8 +221,11 @@ struct rcu_data {
>   	struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
>   	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
>   	struct task_struct *nocb_cb_kthread;
> -	struct rcu_data *nocb_next_cb_rdp;
> -					/* Next rcu_data in wakeup chain. */
> +	struct list_head nocb_head_rdp; /*
> +					 * Head of rcu_data list in wakeup chain,
> +					 * if rdp_gp.
> +					 */
> +	struct list_head nocb_entry_rdp; /* rcu_data node in wakeup chain. */
>   
>   	/* The following fields are used by CB kthread, hence new cacheline. */
>   	struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
> diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
> index 2461fe8d0c23..cc1165559177 100644
> --- a/kernel/rcu/tree_nocb.h
> +++ b/kernel/rcu/tree_nocb.h
> @@ -625,7 +625,15 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
>   	 * and the global grace-period kthread are awakened if needed.
>   	 */
>   	WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
> -	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
> +	/*
> +	 * An rdp can be removed from the list after being de-offloaded or added
> +	 * to the list before being (re-)offloaded. If the below loop happens while
> +	 * an rdp is de-offloaded and then re-offloaded shortly afterward, we may
> +	 * shortcut and ignore a part of the rdp list due to racy list iteration.
> +	 * Fortunately a new run through the entire loop is forced after an rdp is
> +	 * added here so that such a race gets quickly fixed.
> +	 */
> +	list_for_each_entry_rcu(rdp, &my_rdp->nocb_head_rdp, nocb_entry_rdp, 1) {

Can we hit an (unlikely) case where repeated de-offload/offload calls
cause this loop to continue for a long time?
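
(To make this concrete, here is a contrived sketch of the two racing
sides; this is not actual kernel code, and the toggling side is
compressed for illustration:)

	/* rcuog side: RCU-safe traversal in nocb_gp_wait() */
	list_for_each_entry_rcu(rdp, &my_rdp->nocb_head_rdp, nocb_entry_rdp, 1) {
		/* ... process this rdp's callbacks ... */
	}

	/* toggling side, repeated back to back */
	list_del_rcu(&rdp->nocb_entry_rdp);		/* de-offload */
	/* ... */
	list_add_tail_rcu(&rdp->nocb_entry_rdp,		/* re-offload */
			  &rdp->nocb_gp_rdp->nocb_head_rdp);

Since every re-offload re-links the rdp at the tail, a traversal still
in flight could, in principle, keep encountering it again.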


>   		bool needwake_state = false;
>   
>   		if (!nocb_gp_enabled_cb(rdp))

Now that we can probe the flags here without first holding nocb_gp_lock
(the case where de-offload and offload happen while we are iterating
the list), can this cause a WARNING from the code below?


	WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
	rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);

Is a sequence like this possible?

1. <de-offload>
     Clear SEGCBLIST_OFFLOADED
2. nocb_gp_wait() clears SEGCBLIST_KTHREAD_GP in
   nocb_gp_update_state_deoffloading() and continues to the next rdp.
3. <offload>
     rdp_offload_toggle() hasn't been called yet.
4. The rcuog thread migrates to a different CPU while executing the
   loop in nocb_gp_wait().
5. nocb_gp_wait() reaches the tail rdp.
6. The current CPU, where the rcuog thread is running, hasn't observed
   the SEGCBLIST_OFFLOADED clearing done in step 1, so
   nocb_gp_enabled_cb() passes.
7. nocb_gp_wait() acquires the rdp's nocb lock and reads the state as
   deoffloaded; however, SEGCBLIST_KTHREAD_GP is not set, so we hit
   WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)).
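
(For reference, the loop body's check sequence as I read it; the sketch
below paraphrases nocb_gp_wait() and its helpers and is simplified, not
the exact kernel code:)

	list_for_each_entry_rcu(rdp, &my_rdp->nocb_head_rdp, nocb_entry_rdp, 1) {
		/* Step 6: a stale view of the segcblist flags can pass here. */
		if (!nocb_gp_enabled_cb(rdp))
			continue;

		rcu_nocb_lock_irqsave(rdp, flags);
		/*
		 * Step 7: the lock forces a fresh read and the state is now
		 * seen as deoffloaded, but SEGCBLIST_KTHREAD_GP was already
		 * cleared back in step 2, so the WARN_ON_ONCE() fires.
		 */
		if (nocb_gp_update_state_deoffloading(rdp, &needwake_state)) {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			/* ... */
			continue;
		}
		/* ... */
	}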

Thanks
Neeraj

> @@ -1003,6 +1011,8 @@ static long rcu_nocb_rdp_deoffload(void *arg)
>   	swait_event_exclusive(rdp->nocb_state_wq,
>   			      !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
>   							SEGCBLIST_KTHREAD_GP));
> +	/* Don't bother iterating this one anymore in nocb_gp_wait() */
> +	list_del_rcu(&rdp->nocb_entry_rdp);
>   	/*
>   	 * Lock one last time to acquire latest callback updates from kthreads
>   	 * so we can later handle callbacks locally without locking.
> @@ -1066,6 +1076,15 @@ static long rcu_nocb_rdp_offload(void *arg)
>   		return -EINVAL;
>   
>   	pr_info("Offloading %d\n", rdp->cpu);
> +
> +	/*
> +	 * Iterate this CPU on nocb_gp_wait(). We do it before locking nocb_gp_lock,
> +	 * resetting nocb_gp_sleep and waking up the related "rcuog". Since nocb_gp_wait()
> +	 * in turn locks nocb_gp_lock before setting nocb_gp_sleep again, we are guaranteed
> +	 * to iterate this new rdp before "rcuog" goes to sleep again.
> +	 */
> +	list_add_tail_rcu(&rdp->nocb_entry_rdp, &rdp->nocb_gp_rdp->nocb_head_rdp);
> +
>   	/*
>   	 * Can't use rcu_nocb_lock_irqsave() before SEGCBLIST_LOCKING
>   	 * is set.
> @@ -1268,7 +1287,6 @@ static void __init rcu_organize_nocb_kthreads(void)
>   	int nl = 0;  /* Next GP kthread. */
>   	struct rcu_data *rdp;
>   	struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */
> -	struct rcu_data *rdp_prev = NULL;
>   
>   	if (!cpumask_available(rcu_nocb_mask))
>   		return;
> @@ -1288,8 +1306,8 @@ static void __init rcu_organize_nocb_kthreads(void)
>   			/* New GP kthread, set up for CBs & next GP. */
>   			gotnocbs = true;
>   			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
> -			rdp->nocb_gp_rdp = rdp;
>   			rdp_gp = rdp;
> +			INIT_LIST_HEAD(&rdp->nocb_head_rdp);
>   			if (dump_tree) {
>   				if (!firsttime)
>   					pr_cont("%s\n", gotnocbscbs
> @@ -1302,12 +1320,11 @@ static void __init rcu_organize_nocb_kthreads(void)
>   		} else {
>   			/* Another CB kthread, link to previous GP kthread. */
>   			gotnocbscbs = true;
> -			rdp->nocb_gp_rdp = rdp_gp;
> -			rdp_prev->nocb_next_cb_rdp = rdp;
>   			if (dump_tree)
>   				pr_cont(" %d", cpu);
>   		}
> -		rdp_prev = rdp;
> +		rdp->nocb_gp_rdp = rdp_gp;
> +		list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
>   	}
>   	if (gotnocbs && dump_tree)
>   		pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
> @@ -1369,6 +1386,7 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
>   {
>   	char bufw[20];
>   	char bufr[20];
> +	struct rcu_data *nocb_next_rdp;
>   	struct rcu_segcblist *rsclp = &rdp->cblist;
>   	bool waslocked;
>   	bool wassleep;
> @@ -1376,11 +1394,16 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
>   	if (rdp->nocb_gp_rdp == rdp)
>   		show_rcu_nocb_gp_state(rdp);
>   
> +	nocb_next_rdp = list_next_or_null_rcu(&rdp->nocb_gp_rdp->nocb_head_rdp,
> +					      &rdp->nocb_entry_rdp,
> +					      typeof(*rdp),
> +					      nocb_entry_rdp);
> +
>   	sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
>   	sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
>   	pr_info("   CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
>   		rdp->cpu, rdp->nocb_gp_rdp->cpu,
> -		rdp->nocb_next_cb_rdp ? rdp->nocb_next_cb_rdp->cpu : -1,
> +		nocb_next_rdp ? nocb_next_rdp->cpu : -1,
>   		"kK"[!!rdp->nocb_cb_kthread],
>   		"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
>   		"cC"[!!atomic_read(&rdp->nocb_lock_contended)],
> 
