lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250224081956.knanS8L_@linutronix.de>
Date: Mon, 24 Feb 2025 09:19:56 +0100
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To: Sergey Senozhatsky <senozhatsky@...omium.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
	Yosry Ahmed <yosry.ahmed@...ux.dev>,
	Hillf Danton <hdanton@...a.com>, Kairui Song <ryncsn@...il.com>,
	Minchan Kim <minchan@...nel.org>, linux-mm@...ck.org,
	linux-kernel@...r.kernel.org
Subject: Re: [PATCH v8 01/17] zram: sleepable entry locking

On 2025-02-22 07:25:32 [+0900], Sergey Senozhatsky wrote:
…
> diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> index 9f5020b077c5..37c5651305c2 100644
> --- a/drivers/block/zram/zram_drv.c
> +++ b/drivers/block/zram/zram_drv.c
> @@ -58,19 +58,62 @@ static void zram_free_page(struct zram *zram, size_t index);
>  static int zram_read_from_zspool(struct zram *zram, struct page *page,
>  				 u32 index);
>  
> -static int zram_slot_trylock(struct zram *zram, u32 index)
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> +#define slot_dep_map(zram, index) (&(zram)->table[(index)].dep_map)
> +#define zram_lock_class(zram) (&(zram)->lock_class)
> +#else
> +#define slot_dep_map(zram, index) NULL
> +#define zram_lock_class(zram) NULL
> +#endif

That CONFIG_DEBUG_LOCK_ALLOC here is not needed because both dep_map and
lock_class go away in the !CONFIG_DEBUG_LOCK_ALLOC case.

> +static void zram_slot_lock_init(struct zram *zram, u32 index)
>  {
> -	return spin_trylock(&zram->table[index].lock);
> +	lockdep_init_map(slot_dep_map(zram, index),
> +			 "zram->table[index].lock",
> +			 zram_lock_class(zram), 0);
> +}
Why do we need zram_lock_class and slot_dep_map? As far as I can tell, you
init both in the same place and you acquire both in the same place.
Therefore it looks like you tell lockdep that you acquire two locks
while it would be enough to do it with one.

> +/*
> + * entry locking rules:
> + *
> + * 1) Lock is exclusive
> + *
> + * 2) lock() function can sleep waiting for the lock
> + *
> + * 3) Lock owner can sleep
> + *
> + * 4) Use TRY lock variant when in atomic context
> + *    - must check return value and handle locking failures
> + */
> +static __must_check bool zram_slot_trylock(struct zram *zram, u32 index)
> +{
> +	unsigned long *lock = &zram->table[index].flags;
> +
> +	if (!test_and_set_bit_lock(ZRAM_ENTRY_LOCK, lock)) {
> +		mutex_acquire(slot_dep_map(zram, index), 0, 1, _RET_IP_);
> +		lock_acquired(slot_dep_map(zram, index), _RET_IP_);
> +		return true;
> +	}
> +
> +	lock_contended(slot_dep_map(zram, index), _RET_IP_);
> +	return false;
>  }
>  
>  static void zram_slot_lock(struct zram *zram, u32 index)
>  {
> -	spin_lock(&zram->table[index].lock);
> +	unsigned long *lock = &zram->table[index].flags;
> +
> +	mutex_acquire(slot_dep_map(zram, index), 0, 0, _RET_IP_);
> +	wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
> +	lock_acquired(slot_dep_map(zram, index), _RET_IP_);

This looks odd. The first mutex_acquire() can be invoked twice by two
threads, right? The first thread gets both (mutex_acquire() and
lock_acquired()), while the second gets mutex_acquire() and blocks on
wait_on_bit_lock().

>  }
>  
>  static void zram_slot_unlock(struct zram *zram, u32 index)
>  {
> -	spin_unlock(&zram->table[index].lock);
> +	unsigned long *lock = &zram->table[index].flags;
> +
> +	mutex_release(slot_dep_map(zram, index), _RET_IP_);
> +	clear_and_wake_up_bit(ZRAM_ENTRY_LOCK, lock);
>  }
>  
>  static inline bool init_done(struct zram *zram)
…
> diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
> index db78d7c01b9a..794c9234e627 100644
> --- a/drivers/block/zram/zram_drv.h
> +++ b/drivers/block/zram/zram_drv.h
> @@ -58,13 +58,18 @@ enum zram_pageflags {
>  	__NR_ZRAM_PAGEFLAGS,
>  };
>  
> -/*-- Data structures */
> -
> -/* Allocated for each disk page */
> +/*
> + * Allocated for each disk page.  We use bit-lock (ZRAM_ENTRY_LOCK bit
> + * of flags) to save memory.  There can be plenty of entries and standard
> + * locking primitives (e.g. mutex) will significantly increase sizeof()
> + * of each entry and hence of the meta table.
> + */
>  struct zram_table_entry {
>  	unsigned long handle;
> -	unsigned int flags;
> -	spinlock_t lock;
> +	unsigned long flags;
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> +	struct lockdep_map dep_map;
> +#endif
>  #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
>  	ktime_t ac_time;
>  #endif
> @@ -137,5 +142,8 @@ struct zram {
>  	struct dentry *debugfs_dir;
>  #endif
>  	atomic_t pp_in_progress;
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> +	struct lock_class_key lock_class;
> +#endif
As mentioned earlier, no need for CONFIG_DEBUG_LOCK_ALLOC.

>  };
>  #endif
> -- 
> 2.48.1.601.g30ceb7b040-goog
> 

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ