Message-ID: <09907144-45ca-4a48-8831-2f98518cbca4@oracle.com>
Date: Wed, 5 Jun 2024 12:21:51 +0100
From: John Garry <john.g.garry@...cle.com>
To: Christoph Hellwig <hch@....de>, Hannes Reinecke <hare@...e.de>
Cc: axboe@...nel.dk, kbusch@...nel.org, sagi@...mberg.me, jejb@...ux.ibm.com,
        martin.petersen@...cle.com, djwong@...nel.org, viro@...iv.linux.org.uk,
        brauner@...nel.org, dchinner@...hat.com, jack@...e.cz,
        linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
        linux-nvme@...ts.infradead.org, linux-fsdevel@...r.kernel.org,
        tytso@....edu, jbongio@...gle.com, linux-scsi@...r.kernel.org,
        ojaswin@...ux.ibm.com, linux-aio@...ck.org,
        linux-btrfs@...r.kernel.org, io-uring@...r.kernel.org,
        nilay@...ux.ibm.com, ritesh.list@...il.com, willy@...radead.org,
        Himanshu Madhani <himanshu.madhani@...cle.com>
Subject: Re: [PATCH v7 4/9] block: Add core atomic write support

On 05/06/2024 09:32, Christoph Hellwig wrote:
> On Mon, Jun 03, 2024 at 02:29:26PM +0100, John Garry wrote:
>> I think that some of the logic could be re-used.
>> rq_straddles_atomic_write_boundary() is checked in merging of reqs/bios (to
>> see if the resultant req straddles a boundary).
>>
>> So instead of saying: "will the resultant req straddle a boundary",
>> re-using a path like blk_rq_get_max_sectors() -> blk_chunk_sectors_left(),
>> we check "is there space within the boundary limit to add this req/bio".
>> We need to take care of front and back merges, though.
> 
> Yes, we've used the trick to pass in the relevant limit explicitly
> to reuse infrastructure in other places, e.g. max_hw_sectors vs
> max_zone_append_sectors for adding to a bio while respecting hardware
> limits.
> 

I assume that you are talking about something like 
queue_limits_max_zone_append_sectors().

Anyway, below is the prep patch I was considering for this re-use. It 
just renames the "chunk_sectors" merge infrastructure to generic 
"boundary_sectors".

------>8-------

The purpose of the chunk_sectors limit is to ensure that a mergeable 
request fits within the boundary of the chunk_sectors value.

Such a feature will be useful for other request_queue boundary limits, 
so generalize the chunk_sectors merge code.

This idea was proposed by Hannes Reinecke.

Signed-off-by: John Garry <john.g.garry@...cle.com>

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 8957e08e020c..6574c8b64ecc 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -168,11 +168,12 @@ static inline unsigned get_max_io_size(struct bio *bio,
  	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
  	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
  	unsigned max_sectors = lim->max_sectors, start, end;
+	unsigned int boundary_sectors = lim->chunk_sectors;

-	if (lim->chunk_sectors) {
+	if (boundary_sectors) {
  		max_sectors = min(max_sectors,
-			blk_chunk_sectors_left(bio->bi_iter.bi_sector,
-					       lim->chunk_sectors));
+			blk_boundary_sectors_left(bio->bi_iter.bi_sector,
+					      boundary_sectors));
  	}

  	start = bio->bi_iter.bi_sector & (pbs - 1);
@@ -588,19 +589,19 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
  						  sector_t offset)
  {
  	struct request_queue *q = rq->q;
-	unsigned int max_sectors;
+	unsigned int max_sectors, boundary_sectors = q->limits.chunk_sectors;

  	if (blk_rq_is_passthrough(rq))
  		return q->limits.max_hw_sectors;

  	max_sectors = blk_queue_get_max_sectors(rq);

-	if (!q->limits.chunk_sectors ||
+	if (!boundary_sectors ||
  	    req_op(rq) == REQ_OP_DISCARD ||
  	    req_op(rq) == REQ_OP_SECURE_ERASE)
  		return max_sectors;
  	return min(max_sectors,
-		   blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
+		   blk_boundary_sectors_left(offset, boundary_sectors));
  }

  static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 13037d6a6f62..b648253c2300 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1188,7 +1188,7 @@ static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
  		return len;
  	return min_t(sector_t, len,
  		min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
-		    blk_chunk_sectors_left(target_offset, max_granularity)));
+		    blk_boundary_sectors_left(target_offset, max_granularity)));
  }

  static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ac8e0cb2353a..7657698b47f4 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -866,14 +866,14 @@ static inline bool bio_straddles_zones(struct bio *bio)
  }

  /*
- * Return how much of the chunk is left to be used for I/O at a given offset.
+ * Return how much within the boundary is left to be used for I/O at a given offset.
   */
-static inline unsigned int blk_chunk_sectors_left(sector_t offset,
-		unsigned int chunk_sectors)
+static inline unsigned int blk_boundary_sectors_left(sector_t offset,
+		unsigned int boundary_sectors)
  {
-	if (unlikely(!is_power_of_2(chunk_sectors)))
-		return chunk_sectors - sector_div(offset, chunk_sectors);
-	return chunk_sectors - (offset & (chunk_sectors - 1));
+	if (unlikely(!is_power_of_2(boundary_sectors)))
+		return boundary_sectors - sector_div(offset, boundary_sectors);
+	return boundary_sectors - (offset & (boundary_sectors - 1));
  }

  /**
-- 
2.31.1
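
As a quick sanity check of the helper's arithmetic: with boundary_sectors 
= 256 and offset = 1000, offset & (boundary_sectors - 1) = 232, so 
256 - 232 = 24 sectors remain before the next boundary; the sector_div() 
branch computes the same remainder for non-power-of-two boundaries.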





