Message-ID: <20220415101053.554495-7-yukuai3@huawei.com>
Date:   Fri, 15 Apr 2022 18:10:51 +0800
From:   Yu Kuai <yukuai3@...wei.com>
To:     <axboe@...nel.dk>, <bvanassche@....org>,
        <andriy.shevchenko@...ux.intel.com>, <john.garry@...wei.com>,
        <ming.lei@...hat.com>, <qiulaibin@...wei.com>
CC:     <linux-block@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
        <yukuai3@...wei.com>, <yi.zhang@...wei.com>
Subject: [PATCH -next RFC v3 6/8] blk-mq: force tag preemption for split bios

For HDDs, sequential IO is much faster than random IO, so it's better to
issue the bios split from a large IO continuously. However, this is broken
when tag preemption is disabled, because each waker can only get one tag
at a time.

Thus tag preemption should be enabled for split bios, at least for HDDs:
specifically, the first bio won't preempt a tag, while the following split
bios will.
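
The intended flag propagation can be modelled in plain userspace C. This
is only an illustrative sketch of the behaviour described above (the flag
names mirror the kernel ones, and the helper mark_split() is hypothetical,
not part of the patch): the first split only records REQ_SPLIT on the
parent bio, and every later split of the same chain is additionally
marked REQ_PREEMPT.

#include <stdio.h>

/* Stand-ins for the real kernel flag bits, values are arbitrary here. */
#define REQ_NOMERGE	(1u << 0)
#define REQ_SPLIT	(1u << 1)
#define REQ_PREEMPT	(1u << 2)

/*
 * Model of the marking done in __blk_queue_split(): the first bio of a
 * split chain only sets REQ_SPLIT on the parent; subsequent splits see
 * REQ_SPLIT already set and are therefore marked REQ_PREEMPT as well.
 */
static unsigned int mark_split(unsigned int *parent_flags)
{
	unsigned int split_flags = REQ_NOMERGE | REQ_SPLIT;

	if (*parent_flags & REQ_SPLIT)
		split_flags |= REQ_PREEMPT;
	else
		*parent_flags |= REQ_SPLIT;

	return split_flags;
}

int main(void)
{
	unsigned int parent = 0;

	for (int i = 0; i < 3; i++) {
		unsigned int f = mark_split(&parent);

		printf("split %d: preempt=%d\n", i, !!(f & REQ_PREEMPT));
	}
	return 0;
}

Running this prints preempt=0 for the first split and preempt=1 for the
rest, matching the policy the patch introduces.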

Signed-off-by: Yu Kuai <yukuai3@...wei.com>
---
 block/blk-merge.c         | 8 +++++++-
 block/blk-mq.c            | 1 +
 include/linux/blk_types.h | 4 ++++
 3 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 7771dacc99cb..85c285023f5e 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -343,12 +343,18 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio,
 
 	if (split) {
 		/* there isn't chance to merge the splitted bio */
-		split->bi_opf |= REQ_NOMERGE;
+		split->bi_opf |= (REQ_NOMERGE | REQ_SPLIT);
+		if ((*bio)->bi_opf & REQ_SPLIT)
+			split->bi_opf |= REQ_PREEMPT;
+		else
+			(*bio)->bi_opf |= REQ_SPLIT;
 
 		bio_chain(split, *bio);
 		trace_block_split(split, (*bio)->bi_iter.bi_sector);
 		submit_bio_noacct(*bio);
 		*bio = split;
+	} else if ((*bio)->bi_opf & REQ_SPLIT) {
+		(*bio)->bi_opf |= REQ_PREEMPT;
 	}
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 32beacbad5e2..a889f01d2cdf 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2766,6 +2766,7 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 		.q		= q,
 		.nr_tags	= 1,
 		.cmd_flags	= bio->bi_opf,
+		.preempt	= (bio->bi_opf & REQ_PREEMPT),
 	};
 	struct request *rq;
 
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index c62274466e72..046a34c81ec4 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -418,6 +418,8 @@ enum req_flag_bits {
 	/* for driver use */
 	__REQ_DRV,
 	__REQ_SWAP,		/* swapping request. */
+	__REQ_SPLIT,		/* IO is split. */
+	__REQ_PREEMPT,		/* IO will preempt tag. */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -443,6 +445,8 @@ enum req_flag_bits {
 
 #define REQ_DRV			(1ULL << __REQ_DRV)
 #define REQ_SWAP		(1ULL << __REQ_SWAP)
+#define REQ_SPLIT		(1ULL << __REQ_SPLIT)
+#define REQ_PREEMPT		(1ULL << __REQ_PREEMPT)
 
 #define REQ_FAILFAST_MASK \
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
-- 
2.31.1
