Message-ID: <1562607152.8510.5.camel@lca.pw>
Date:   Mon, 08 Jul 2019 13:32:32 -0400
From:   Qian Cai <cai@....pw>
To:     Yuyang Du <duyuyang@...il.com>, peterz@...radead.org,
        will.deacon@....com, mingo@...nel.org
Cc:     linux-kernel@...r.kernel.org
Subject: Re: [PATCH] locking/lockdep: Fix lock IRQ usage initialization bug

I saw Ingo send a pull request to Linus for 5.3 [1] that includes the offending
commit "locking/lockdep: Consolidate lock usage bit initialization" but does not
include this patch.

[1] https://lore.kernel.org/lkml/20190708093516.GA57558@gmail.com/
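
For reference, the pre-patch structure is what loses the LOCK_USED marking:
when CONFIG_TRACE_IRQFLAGS or CONFIG_PROVE_LOCKING is not defined, mark_usage()
is replaced by a stub that returns early, so mark_lock(..., LOCK_USED) is never
reached. A minimal sketch of that shape (simplified from
kernel/locking/lockdep.c, not the exact upstream code):

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
static int
mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
{
	/* ... hardirq/softirq usage checks ... */
lock_used:
	/* mark it as used: */
	if (!mark_lock(curr, hlock, LOCK_USED))
		return 0;
	return 1;
}
#else
static inline int
mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
{
	return 1;	/* bug: LOCK_USED is never set on this path */
}
#endif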

On Mon, 2019-06-10 at 13:52 +0800, Yuyang Du wrote:
> The commit:
> 
>   091806515124b20 ("locking/lockdep: Consolidate lock usage bit initialization")
> 
> misses marking the LOCK_USED flag at IRQ usage initialization when
> CONFIG_TRACE_IRQFLAGS or CONFIG_PROVE_LOCKING is not defined. Fix it.
> 
> Reported-by: Qian Cai <cai@....pw>
> Signed-off-by: Yuyang Du <duyuyang@...il.com>
> ---
>  kernel/locking/lockdep.c | 110 +++++++++++++++++++++++------------------------
>  1 file changed, 53 insertions(+), 57 deletions(-)
> 
> diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
> index 48a840a..c3db987 100644
> --- a/kernel/locking/lockdep.c
> +++ b/kernel/locking/lockdep.c
> @@ -3460,9 +3460,61 @@ void trace_softirqs_off(unsigned long ip)
>  		debug_atomic_inc(redundant_softirqs_off);
>  }
>  
> +static inline unsigned int task_irq_context(struct task_struct *task)
> +{
> +	return 2 * !!task->hardirq_context + !!task->softirq_context;
> +}
> +
> +static int separate_irq_context(struct task_struct *curr,
> +		struct held_lock *hlock)
> +{
> +	unsigned int depth = curr->lockdep_depth;
> +
> +	/*
> +	 * Keep track of points where we cross into an interrupt context:
> +	 */
> +	if (depth) {
> +		struct held_lock *prev_hlock;
> +
> +		prev_hlock = curr->held_locks + depth-1;
> +		/*
> +		 * If we cross into another context, reset the
> +		 * hash key (this also prevents the checking and the
> +		 * adding of the dependency to 'prev'):
> +		 */
> +		if (prev_hlock->irq_context != hlock->irq_context)
> +			return 1;
> +	}
> +	return 0;
> +}
> +
> +#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
> +
> +static inline
> +int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
> +		enum lock_usage_bit new_bit)
> +{
> +	WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
> +	return 1;
> +}
> +
> +static inline unsigned int task_irq_context(struct task_struct *task)
> +{
> +	return 0;
> +}
> +
> +static inline int separate_irq_context(struct task_struct *curr,
> +		struct held_lock *hlock)
> +{
> +	return 0;
> +}
> +
> +#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
> +
>  static int
>  mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
>  {
> +#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
>  	if (!check)
>  		goto lock_used;
>  
> @@ -3510,6 +3562,7 @@ void trace_softirqs_off(unsigned long ip)
>  	}
>  
>  lock_used:
> +#endif
>  	/* mark it as used: */
>  	if (!mark_lock(curr, hlock, LOCK_USED))
>  		return 0;
> @@ -3517,63 +3570,6 @@ void trace_softirqs_off(unsigned long ip)
>  	return 1;
>  }
>  
> -static inline unsigned int task_irq_context(struct task_struct *task)
> -{
> -	return 2 * !!task->hardirq_context + !!task->softirq_context;
> -}
> -
> -static int separate_irq_context(struct task_struct *curr,
> -		struct held_lock *hlock)
> -{
> -	unsigned int depth = curr->lockdep_depth;
> -
> -	/*
> -	 * Keep track of points where we cross into an interrupt context:
> -	 */
> -	if (depth) {
> -		struct held_lock *prev_hlock;
> -
> -		prev_hlock = curr->held_locks + depth-1;
> -		/*
> -		 * If we cross into another context, reset the
> -		 * hash key (this also prevents the checking and the
> -		 * adding of the dependency to 'prev'):
> -		 */
> -		if (prev_hlock->irq_context != hlock->irq_context)
> -			return 1;
> -	}
> -	return 0;
> -}
> -
> -#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
> -
> -static inline
> -int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
> -		enum lock_usage_bit new_bit)
> -{
> -	WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
> -	return 1;
> -}
> -
> -static inline int
> -mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
> -{
> -	return 1;
> -}
> -
> -static inline unsigned int task_irq_context(struct task_struct *task)
> -{
> -	return 0;
> -}
> -
> -static inline int separate_irq_context(struct task_struct *curr,
> -		struct held_lock *hlock)
> -{
> -	return 0;
> -}
> -
> -#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
> -
>  /*
>   * Mark a lock with a usage bit, and validate the state transition:
>   */
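
After the patch, mark_usage() is compiled unconditionally and only the IRQ
usage checks are guarded, so LOCK_USED is always marked regardless of the
config. Roughly (again a simplified sketch, not the exact upstream code):

static int
mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
{
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
	if (!check)
		goto lock_used;

	/* ... hardirq/softirq usage checks ... */

lock_used:
#endif
	/* mark it as used: */
	if (!mark_lock(curr, hlock, LOCK_USED))
		return 0;

	return 1;
}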
