Message-ID: <9f91936a-7dd7-2ee6-3293-f199ada85210@suse.de>
Date: Thu, 7 Apr 2022 16:03:09 +0800
From: Coly Li <colyli@...e.de>
To: Christoph Hellwig <hch@....de>
Cc: dm-devel@...hat.com, linux-xfs@...r.kernel.org,
linux-fsdevel@...r.kernel.org, linux-um@...ts.infradead.org,
linux-block@...r.kernel.org, drbd-dev@...ts.linbit.com,
nbd@...er.debian.org, ceph-devel@...r.kernel.org,
virtualization@...ts.linux-foundation.org,
xen-devel@...ts.xenproject.org, linux-bcache@...r.kernel.org,
linux-raid@...r.kernel.org, linux-mmc@...r.kernel.org,
linux-mtd@...ts.infradead.org, linux-nvme@...ts.infradead.org,
linux-s390@...r.kernel.org, linux-scsi@...r.kernel.org,
target-devel@...r.kernel.org, linux-btrfs@...r.kernel.org,
linux-ext4@...r.kernel.org, linux-f2fs-devel@...ts.sourceforge.net,
cluster-devel@...hat.com, jfs-discussion@...ts.sourceforge.net,
linux-nilfs@...r.kernel.org, ntfs3@...ts.linux.dev,
ocfs2-devel@....oracle.com, linux-mm@...ck.org,
Jens Axboe <axboe@...nel.dk>
Subject: Re: [PATCH 22/27] block: refactor discard bio size limiting
On 4/6/22 2:05 PM, Christoph Hellwig wrote:
> Move all the logic to limit the discard bio size into a common helper
> so that it is better documented.
>
> Signed-off-by: Christoph Hellwig <hch@....de>
Acked-by: Coly Li <colyli@...e.de>
Thanks for the change.
Coly Li
> ---
> block/blk-lib.c | 59 ++++++++++++++++++++++++-------------------------
> block/blk.h | 14 ------------
> 2 files changed, 29 insertions(+), 44 deletions(-)
>
> diff --git a/block/blk-lib.c b/block/blk-lib.c
> index 237d60d8b5857..2ae32a722851c 100644
> --- a/block/blk-lib.c
> +++ b/block/blk-lib.c
> @@ -10,6 +10,32 @@
>
> #include "blk.h"
>
> +static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
> +{
> + unsigned int discard_granularity =
> + bdev_get_queue(bdev)->limits.discard_granularity;
> + sector_t granularity_aligned_sector;
> +
> + if (bdev_is_partition(bdev))
> + sector += bdev->bd_start_sect;
> +
> + granularity_aligned_sector =
> + round_up(sector, discard_granularity >> SECTOR_SHIFT);
> +
> + /*
> + * Make sure subsequent bios start aligned to the discard granularity if
> + * it needs to be split.
> + */
> + if (granularity_aligned_sector != sector)
> + return granularity_aligned_sector - sector;
> +
> + /*
> + * Align the bio size to the discard granularity to make splitting the bio
> + * at discard granularity boundaries easier in the driver if needed.
> + */
> + return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
> +}
> +
> int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
> sector_t nr_sects, gfp_t gfp_mask, int flags,
> struct bio **biop)
> @@ -17,7 +43,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
> struct request_queue *q = bdev_get_queue(bdev);
> struct bio *bio = *biop;
> unsigned int op;
> - sector_t bs_mask, part_offset = 0;
> + sector_t bs_mask;
>
> if (bdev_read_only(bdev))
> return -EPERM;
> @@ -48,36 +74,9 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
> if (!nr_sects)
> return -EINVAL;
>
> - /* In case the discard request is in a partition */
> - if (bdev_is_partition(bdev))
> - part_offset = bdev->bd_start_sect;
> -
> while (nr_sects) {
> - sector_t granularity_aligned_lba, req_sects;
> - sector_t sector_mapped = sector + part_offset;
> -
> - granularity_aligned_lba = round_up(sector_mapped,
> - q->limits.discard_granularity >> SECTOR_SHIFT);
> -
> - /*
> - * Check whether the discard bio starts at a discard_granularity
> - * aligned LBA,
> - * - If no: set (granularity_aligned_lba - sector_mapped) to
> - * bi_size of the first split bio, then the second bio will
> - * start at a discard_granularity aligned LBA on the device.
> - * - If yes: use bio_aligned_discard_max_sectors() as the max
> - * possible bi_size of the first split bio. Then when this bio
> - * is split in device drive, the split ones are very probably
> - * to be aligned to discard_granularity of the device's queue.
> - */
> - if (granularity_aligned_lba == sector_mapped)
> - req_sects = min_t(sector_t, nr_sects,
> - bio_aligned_discard_max_sectors(q));
> - else
> - req_sects = min_t(sector_t, nr_sects,
> - granularity_aligned_lba - sector_mapped);
> -
> - WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
> + sector_t req_sects =
> + min(nr_sects, bio_discard_limit(bdev, sector));
>
> bio = blk_next_bio(bio, bdev, 0, op, gfp_mask);
> bio->bi_iter.bi_sector = sector;
> diff --git a/block/blk.h b/block/blk.h
> index 8ccbc6e076369..1fdc1d28e6d60 100644
> --- a/block/blk.h
> +++ b/block/blk.h
> @@ -346,20 +346,6 @@ static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
> return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
> }
>
> -/*
> - * The max bio size which is aligned to q->limits.discard_granularity. This
> - * is a hint to split large discard bio in generic block layer, then if device
> - * driver needs to split the discard bio into smaller ones, their bi_size can
> - * be very probably and easily aligned to discard_granularity of the device's
> - * queue.
> - */
> -static inline unsigned int bio_aligned_discard_max_sectors(
> - struct request_queue *q)
> -{
> - return round_down(UINT_MAX, q->limits.discard_granularity) >>
> - SECTOR_SHIFT;
> -}
> -
> /*
> * Internal io_context interface
> */
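For anyone checking the arithmetic in the new bio_discard_limit() helper, below is a minimal userspace sketch of the same alignment logic. It is an illustration only: the partition-offset adjustment is omitted, and the discard_limit() name, the 4 KiB granularity and the local round_up()/round_down() macros are assumptions made for the sketch, not part of the patch.

#include <limits.h>		/* UINT_MAX */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT	9

/* Power-of-two alignment helpers, equivalent to the kernel's for this use. */
#define round_down(x, a)	((x) & ~((uint64_t)(a) - 1))
#define round_up(x, a)		round_down((x) + (a) - 1, a)

/*
 * How many sectors the next discard bio may cover when it starts at
 * @sector, for a @discard_granularity given in bytes (power of two).
 */
static uint64_t discard_limit(uint64_t sector, unsigned int discard_granularity)
{
	uint64_t granularity_sectors = discard_granularity >> SECTOR_SHIFT;
	uint64_t aligned = round_up(sector, granularity_sectors);

	/* Misaligned start: issue just enough to reach the next boundary. */
	if (aligned != sector)
		return aligned - sector;

	/* Aligned start: cap at the largest granularity-aligned bi_size. */
	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}

int main(void)
{
	unsigned int granularity = 4096;	/* 4 KiB == 8 sectors */

	/* Start at sector 13: 3 sectors bring the next bio to sector 16. */
	printf("sector 13 -> %llu sectors\n",
	       (unsigned long long)discard_limit(13, granularity));

	/* Start at sector 16: already aligned, so the full aligned cap. */
	printf("sector 16 -> %llu sectors\n",
	       (unsigned long long)discard_limit(16, granularity));
	return 0;
}

With an 8-sector granularity, a bio starting at sector 13 is limited to 3 sectors so that the following bio begins at the aligned sector 16; a bio that already starts on a boundary instead gets the largest granularity-aligned size, so any later splits in the driver also tend to stay aligned.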