lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <3aeb1d0e-6c74-4bfe-914d-22ba4152bc7f@oracle.com>
Date: Thu, 13 Mar 2025 07:41:11 +0000
From: John Garry <john.g.garry@...cle.com>
To: Christoph Hellwig <hch@...radead.org>
Cc: Dave Chinner <david@...morbit.com>, brauner@...nel.org, djwong@...nel.org,
        cem@...nel.org, linux-xfs@...r.kernel.org,
        linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
        ojaswin@...ux.ibm.com, ritesh.list@...il.com,
        martin.petersen@...cle.com
Subject: Re: [PATCH RFC v5 10/10] iomap: Rename ATOMIC flags again

On 13/03/2025 07:02, Christoph Hellwig wrote:
> On Thu, Mar 13, 2025 at 06:28:03AM +0000, John Garry wrote:
>>>> I'd rather have a
>>>>
>>>>       blk_opf_t extra_flags;
>>>>
>>>> in the caller that gets REQ_FUA and REQ_ATOMIC assigned as needed,
>>>> and then just clear
>>> Yep, that is cleaner..
>>
>> This suggestion is not clear to me.
>>
>> Is it that iomap_dio_bio_iter() [the only caller of iomap_dio_bio_opflags()]
>> sets REQ_FUA and REQ_ATOMIC in extra_flags, and then we extra_flags |
>> bio_opf?
> 
> Yes.
> 
>> Note that iomap_dio_bio_opflags() does still use use_fua for clearing
>> IOMAP_DIO_WRITE_THROUGH.
> 
> You can check for REQ_FUA in extra_flags (or the entire op).
> 
>> And to me it seems nicer to set all the REQ_ flags in one place.
> 
> Passing multiple bool arguments just loses way too much context.  But
> if you really want everything in one place you could probably build
> the entire blk_opf_t in iomap_dio_bio_iter, and avoid having to
> recalculate it for every bio.
> 

Yeah, when we take the use_fua and atomic_hw args away from 
iomap_dio_bio_opflags(), then iomap_dio_bio_opflags() becomes a shell of 
a function.

So how about this (I would re-add the write through comment):

--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -311,30 +311,6 @@ static int iomap_dio_zero(const struct iomap_iter 
*iter, struct iomap_dio *dio,
  	return 0;
  }

-/*
- * Figure out the bio's operation flags from the dio request, the
- * mapping, and whether or not we want FUA.  Note that we can end up
- * clearing the WRITE_THROUGH flag in the dio request.
- */
-static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
-		const struct iomap *iomap, bool use_fua, bool atomic_hw)
-{
-	blk_opf_t opflags = REQ_SYNC | REQ_IDLE;
-
-	if (!(dio->flags & IOMAP_DIO_WRITE))
-		return REQ_OP_READ;
-
-	opflags |= REQ_OP_WRITE;
-	if (use_fua)
-		opflags |= REQ_FUA;
-	else
-		dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
-	if (atomic_hw)
-		opflags |= REQ_ATOMIC;
-
-	return opflags;
-}
-
  static int iomap_dio_bio_iter(struct iomap_iter *iter, struct 
iomap_dio *dio)
  {
  	const struct iomap *iomap = &iter->iomap;
@@ -346,13 +322,20 @@ static int iomap_dio_bio_iter(struct iomap_iter 
*iter, struct iomap_dio *dio)
  	blk_opf_t bio_opf;
  	struct bio *bio;
  	bool need_zeroout = false;
-	bool use_fua = false;
  	int nr_pages, ret = 0;
  	u64 copied = 0;
  	size_t orig_count;

-	if (atomic_hw && length != iter->len)
-		return -EINVAL;
+	if (dio->flags & IOMAP_DIO_WRITE) {
+		bio_opf = REQ_OP_WRITE;
+		if (atomic_hw)  {
+			if (length != iter->len)
+				return -EINVAL;
+			bio_opf |= REQ_ATOMIC;
+		}
+	} else {
+		bio_opf = REQ_OP_READ;
+	}

  	if ((pos | length) & (bdev_logical_block_size(iomap->bdev) - 1) ||
  	    !bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))
@@ -382,10 +365,12 @@ static int iomap_dio_bio_iter(struct iomap_iter 
*iter, struct iomap_dio *dio)
  		 */
  		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
  		    (dio->flags & IOMAP_DIO_WRITE_THROUGH) &&
-		    (bdev_fua(iomap->bdev) || !bdev_write_cache(iomap->bdev)))
-			use_fua = true;
-		else if (dio->flags & IOMAP_DIO_NEED_SYNC)
+		    (bdev_fua(iomap->bdev) || !bdev_write_cache(iomap->bdev))) {
+			bio_opf |= REQ_FUA; //reads as well?
+			dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
+		} else if (dio->flags & IOMAP_DIO_NEED_SYNC) {
  			dio->flags &= ~IOMAP_DIO_CALLER_COMP;
+		}
  	}

  	/*
@@ -407,7 +392,7 @@ static int iomap_dio_bio_iter(struct iomap_iter 
*iter, struct iomap_dio *dio)
  	 * during completion processing.
  	 */
  	if (need_zeroout ||
-	    ((dio->flags & IOMAP_DIO_NEED_SYNC) && !use_fua) ||
+	    ((dio->flags & IOMAP_DIO_NEED_SYNC) && !(bio_opf & REQ_FUA)) ||
  	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode)))
  		dio->flags &= ~IOMAP_DIO_CALLER_COMP;

@@ -428,8 +413,6 @@ static int iomap_dio_bio_iter(struct iomap_iter 
*iter, struct iomap_dio *dio)
  			goto out;
  	}

-	bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua, atomic_hw);
-
  	nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
  	do {
  		size_t n;
-- 


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ