Message-ID: <91824e90-0319-467c-a7a7-acda9464a542@redhat.com>
Date: Mon, 18 Dec 2023 12:05:53 -0500
From: Waiman Long <longman@...hat.com>
To: Kent Overstreet <kent.overstreet@...ux.dev>,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org
Cc: tglx@...utronix.de, x86@...nel.org, tj@...nel.org, peterz@...radead.org,
mathieu.desnoyers@...icios.com, paulmck@...nel.org, keescook@...omium.org,
dave.hansen@...ux.intel.com, mingo@...hat.com, will@...nel.org,
boqun.feng@...il.com, brauner@...nel.org
Subject: Re: [PATCH 43/50] lockdep: move held_lock to lockdep_types.h
On 12/15/23 22:32, Kent Overstreet wrote:
> held_lock is embedded in task_struct, and we don't want sched.h pulling
> in all of lockdep.h
>
> Signed-off-by: Kent Overstreet <kent.overstreet@...ux.dev>
> ---
> include/linux/lockdep.h | 57 -----------------------------------
> include/linux/lockdep_types.h | 57 +++++++++++++++++++++++++++++++++++
> 2 files changed, 57 insertions(+), 57 deletions(-)
>
> diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
> index dc2844b071c2..08b0d1d9d78b 100644
> --- a/include/linux/lockdep.h
> +++ b/include/linux/lockdep.h
> @@ -82,63 +82,6 @@ struct lock_chain {
> u64 chain_key;
> };
>
> -#define MAX_LOCKDEP_KEYS_BITS 13
> -#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
> -#define INITIAL_CHAIN_KEY -1
> -
> -struct held_lock {
> - /*
> - * One-way hash of the dependency chain up to this point. We
> - * hash the hashes step by step as the dependency chain grows.
> - *
> - * We use it for dependency-caching and we skip detection
> - * passes and dependency-updates if there is a cache-hit, so
> - * it is absolutely critical for 100% coverage of the validator
> - * to have a unique key value for every unique dependency path
> - * that can occur in the system, to make a unique hash value
> - * as likely as possible - hence the 64-bit width.
> - *
> - * The task struct holds the current hash value (initialized
> - * with zero), here we store the previous hash value:
> - */
> - u64 prev_chain_key;
> - unsigned long acquire_ip;
> - struct lockdep_map *instance;
> - struct lockdep_map *nest_lock;
> -#ifdef CONFIG_LOCK_STAT
> - u64 waittime_stamp;
> - u64 holdtime_stamp;
> -#endif
> - /*
> - * class_idx is zero-indexed; it points to the element in
> - * lock_classes this held lock instance belongs to. class_idx is in
> - * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
> - */
> - unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
> - /*
> - * The lock-stack is unified in that the lock chains of interrupt
> - * contexts nest on top of process context chains, but we 'separate'
> - * the hashes by starting with 0 if we cross into an interrupt
> - * context, and we also do not add cross-context lock
> - * dependencies - the lock usage graph walking covers that area
> - * anyway, and we'd just unnecessarily increase the number of
> - * dependencies otherwise. [Note: hardirq and softirq contexts
> - * are separated from each other too.]
> - *
> - * The following field is used to detect when we cross into an
> - * interrupt context:
> - */
> - unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
> - unsigned int trylock:1; /* 16 bits */
> -
> - unsigned int read:2; /* see lock_acquire() comment */
> - unsigned int check:1; /* see lock_acquire() comment */
> - unsigned int hardirqs_off:1;
> - unsigned int sync:1;
> - unsigned int references:11; /* 32 bits */
> - unsigned int pin_count;
> -};
> -
> /*
> * Initialization, self-test and debugging-output methods:
> */
> diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
> index 2ebc323d345a..9c533c8d701e 100644
> --- a/include/linux/lockdep_types.h
> +++ b/include/linux/lockdep_types.h
> @@ -198,6 +198,63 @@ struct lockdep_map {
>
> struct pin_cookie { unsigned int val; };
>
> +#define MAX_LOCKDEP_KEYS_BITS 13
> +#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
> +#define INITIAL_CHAIN_KEY -1
> +
> +struct held_lock {
> + /*
> + * One-way hash of the dependency chain up to this point. We
> + * hash the hashes step by step as the dependency chain grows.
> + *
> + * We use it for dependency-caching and we skip detection
> + * passes and dependency-updates if there is a cache-hit, so
> + * it is absolutely critical for 100% coverage of the validator
> + * to have a unique key value for every unique dependency path
> + * that can occur in the system, to make a unique hash value
> + * as likely as possible - hence the 64-bit width.
> + *
> + * The task struct holds the current hash value (initialized
> + * with zero), here we store the previous hash value:
> + */
> + u64 prev_chain_key;
> + unsigned long acquire_ip;
> + struct lockdep_map *instance;
> + struct lockdep_map *nest_lock;
> +#ifdef CONFIG_LOCK_STAT
> + u64 waittime_stamp;
> + u64 holdtime_stamp;
> +#endif
> + /*
> + * class_idx is zero-indexed; it points to the element in
> + * lock_classes this held lock instance belongs to. class_idx is in
> + * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
> + */
> + unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
> + /*
> + * The lock-stack is unified in that the lock chains of interrupt
> + * contexts nest on top of process context chains, but we 'separate'
> + * the hashes by starting with 0 if we cross into an interrupt
> + * context, and we also do not add cross-context lock
> + * dependencies - the lock usage graph walking covers that area
> + * anyway, and we'd just unnecessarily increase the number of
> + * dependencies otherwise. [Note: hardirq and softirq contexts
> + * are separated from each other too.]
> + *
> + * The following field is used to detect when we cross into an
> + * interrupt context:
> + */
> + unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
> + unsigned int trylock:1; /* 16 bits */
> +
> + unsigned int read:2; /* see lock_acquire() comment */
> + unsigned int check:1; /* see lock_acquire() comment */
> + unsigned int hardirqs_off:1;
> + unsigned int sync:1;
> + unsigned int references:11; /* 32 bits */
> + unsigned int pin_count;
> +};
> +
> #else /* !CONFIG_LOCKDEP */
>
> /*
Acked-by: Waiman Long <longman@...hat.com>
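
For context: the reason the move helps is that include/linux/sched.h only
needs the layout of struct held_lock so that task_struct can embed its
held_locks[] array by value; none of the lockdep function prototypes are
needed there. A minimal, illustrative sketch of that types-header split
follows; the file and identifier names (example_types.h, example.h,
struct example_held_lock, struct example_task, EXAMPLE_MAX_LOCK_DEPTH) are
hypothetical and are not the actual kernel headers.

/* example_types.h: type definitions only, cheap to include anywhere */
struct example_held_lock {
	unsigned long	acquire_ip;
	unsigned int	pin_count;
};

/* example.h: the full API header; pulls in the types above and adds the
 * function prototypes that most includers do not need */
void example_lock_acquire(struct example_held_lock *hlock);

/* sched.h-like header: needs the complete struct definition in order to
 * embed an array by value, but none of the prototypes above */
#define EXAMPLE_MAX_LOCK_DEPTH 48
struct example_task {
	struct example_held_lock held_locks[EXAMPLE_MAX_LOCK_DEPTH];
};

Splitting the types out this way keeps sched.h (and everything that
includes it) off the rebuild path when only the lockdep API declarations
change.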