[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20180117232913.GY9671@linux.vnet.ibm.com>
Date: Wed, 17 Jan 2018 15:29:13 -0800
From: "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
To: Matthew Wilcox <willy@...radead.org>
Cc: Josh Triplett <josh@...htriplett.org>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH] rcu: Use wrapper for lockdep asserts
On Wed, Jan 17, 2018 at 06:24:30AM -0800, Matthew Wilcox wrote:
>
> From: Matthew Wilcox <mawilcox@...rosoft.com>
>
> Commits c0b334c5bfa9 and ea9b0c8a26a2 introduced new sparse warnings
> by accessing rcu_node->lock directly and ignoring the __private
> marker. Introduce a new wrapper, raw_lockdep_assert_held_rcu_node(),
> and use it in place of the open-coded lockdep_assert_held() calls.
> Also fix a similar problem in srcutree.c introduced by a3883df3935e,
> and remove a stray trailing backslash from the definition of
> raw_spin_unlock_irqrestore_rcu_node().
>
> Signed-off-by: Matthew Wilcox <mawilcox@...rosoft.com>
Good catch! Applied for review and testing.
For some reason, I was expecting 0day to catch this sort of thing...
Thanx, Paul
> diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
> index 59c471de342a..30db0a581628 100644
> --- a/kernel/rcu/rcu.h
> +++ b/kernel/rcu/rcu.h
> @@ -360,7 +360,7 @@ do { \
> } while (0)
>
> #define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
> - raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags) \
> + raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
>
> #define raw_spin_trylock_rcu_node(p) \
> ({ \
> @@ -371,6 +371,9 @@ do { \
> ___locked; \
> })
>
> +#define raw_lockdep_assert_held_rcu_node(p) \
> + lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
> +
> #endif /* #if defined(SRCU) || !defined(TINY_RCU) */
>
> #ifdef CONFIG_TINY_RCU
> diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
> index 6d5880089ff6..b7bdd3ff4611 100644
> --- a/kernel/rcu/srcutree.c
> +++ b/kernel/rcu/srcutree.c
> @@ -412,7 +412,7 @@ static void srcu_gp_start(struct srcu_struct *sp)
> struct srcu_data *sdp = this_cpu_ptr(sp->sda);
> int state;
>
> - lockdep_assert_held(&sp->lock);
> + lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
> WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
> rcu_segcblist_advance(&sdp->srcu_cblist,
> rcu_seq_current(&sp->srcu_gp_seq));
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index f9c0ca2ccf0c..aba8dd0cc1f8 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -1245,7 +1245,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
> */
> static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
> {
> - lockdep_assert_held(&rnp->lock);
> + raw_lockdep_assert_held_rcu_node(rnp);
> if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
> WRITE_ONCE(rdp->gpwrap, true);
> if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
> @@ -1712,7 +1712,7 @@ void rcu_cpu_stall_reset(void)
> static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
> struct rcu_node *rnp)
> {
> - lockdep_assert_held(&rnp->lock);
> + raw_lockdep_assert_held_rcu_node(rnp);
>
> /*
> * If RCU is idle, we just wait for the next grace period.
> @@ -1759,7 +1759,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
> bool ret = false;
> struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
>
> - lockdep_assert_held(&rnp->lock);
> + raw_lockdep_assert_held_rcu_node(rnp);
>
> /*
> * Pick up grace-period number for new callbacks. If this
> @@ -1887,7 +1887,7 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
> {
> bool ret = false;
>
> - lockdep_assert_held(&rnp->lock);
> + raw_lockdep_assert_held_rcu_node(rnp);
>
> /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
> if (!rcu_segcblist_pend_cbs(&rdp->cblist))
> @@ -1927,7 +1927,7 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
> static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
> struct rcu_data *rdp)
> {
> - lockdep_assert_held(&rnp->lock);
> + raw_lockdep_assert_held_rcu_node(rnp);
>
> /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
> if (!rcu_segcblist_pend_cbs(&rdp->cblist))
> @@ -1955,7 +1955,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
> bool ret;
> bool need_gp;
>
> - lockdep_assert_held(&rnp->lock);
> + raw_lockdep_assert_held_rcu_node(rnp);
>
> /* Handle the ends of any preceding grace periods first. */
> if (rdp->completed == rnp->completed &&
> @@ -2380,7 +2380,7 @@ static bool
> rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
> struct rcu_data *rdp)
> {
> - lockdep_assert_held(&rnp->lock);
> + raw_lockdep_assert_held_rcu_node(rnp);
> if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
> /*
> * Either we have not yet spawned the grace-period
> @@ -2442,7 +2442,7 @@ static bool rcu_start_gp(struct rcu_state *rsp)
> static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
> __releases(rcu_get_root(rsp)->lock)
> {
> - lockdep_assert_held(&rcu_get_root(rsp)->lock);
> + raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp));
> WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
> WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
> raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
> @@ -2467,7 +2467,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
> unsigned long oldmask = 0;
> struct rcu_node *rnp_c;
>
> - lockdep_assert_held(&rnp->lock);
> + raw_lockdep_assert_held_rcu_node(rnp);
>
> /* Walk up the rcu_node hierarchy. */
> for (;;) {
> @@ -2531,7 +2531,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
> unsigned long mask;
> struct rcu_node *rnp_p;
>
> - lockdep_assert_held(&rnp->lock);
> + raw_lockdep_assert_held_rcu_node(rnp);
> if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
> rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
> raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
> @@ -2676,7 +2676,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
> long mask;
> struct rcu_node *rnp = rnp_leaf;
>
> - lockdep_assert_held(&rnp->lock);
> + raw_lockdep_assert_held_rcu_node(rnp);
> if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
> rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
> return;
> @@ -3697,7 +3697,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
> long mask;
> struct rcu_node *rnp = rnp_leaf;
>
> - lockdep_assert_held(&rnp->lock);
> + raw_lockdep_assert_held_rcu_node(rnp);
> for (;;) {
> mask = rnp->grpmask;
> rnp = rnp->parent;
> diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
> index db85ca3975f1..80d29c6fb572 100644
> --- a/kernel/rcu/tree_plugin.h
> +++ b/kernel/rcu/tree_plugin.h
> @@ -181,7 +181,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
> (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
> struct task_struct *t = current;
>
> - lockdep_assert_held(&rnp->lock);
> + raw_lockdep_assert_held_rcu_node(rnp);
> WARN_ON_ONCE(rdp->mynode != rnp);
> WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
>
> @@ -1043,7 +1043,7 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
> {
> struct task_struct *t;
>
> - lockdep_assert_held(&rnp->lock);
> + raw_lockdep_assert_held_rcu_node(rnp);
> if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
> raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
> return;
>
Powered by blists - more mailing lists