diff --git a/block/blk-mq.c b/block/blk-mq.c
index b038ec680e84..81409162e662 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1959,10 +1959,14 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	data.cmd_flags = bio->bi_opf;
 	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
+		blk_qc_t ret = BLK_QC_T_NONE;
+
 		rq_qos_cleanup(q, bio);
-		if (bio->bi_opf & REQ_NOWAIT)
+		if (bio->bi_opf & REQ_NOWAIT_RET)
+			ret = BLK_QC_T_EAGAIN;
+		else if (bio->bi_opf & REQ_NOWAIT)
 			bio_wouldblock_error(bio);
-		return BLK_QC_T_NONE;
+		return ret;
 	}
 
 	trace_block_getrq(q, bio, bio->bi_opf);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index f00b569a9f89..6a09f483f15c 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -344,15 +344,24 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	struct bio *bio;
 	bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
 	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
+	bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0;
 	loff_t pos = iocb->ki_pos;
 	blk_qc_t qc = BLK_QC_T_NONE;
+	gfp_t gfp;
 	int ret = 0;
 
 	if ((pos | iov_iter_alignment(iter)) &
 	    (bdev_logical_block_size(bdev) - 1))
 		return -EINVAL;
 
-	bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
+	if (nowait)
+		gfp = GFP_NOWAIT;
+	else
+		gfp = GFP_KERNEL;
+
+	bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool);
+	if (!bio)
+		return -EAGAIN;
 
 	dio = container_of(bio, struct blkdev_dio, bio);
 	dio->is_sync = is_sync = is_sync_kiocb(iocb);
@@ -397,6 +406,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			bio->bi_opf = dio_bio_write_op(iocb);
 			task_io_account_write(bio->bi_iter.bi_size);
 		}
+		if (nowait)
+			bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_RET);
 
 		dio->size += bio->bi_iter.bi_size;
 		pos += bio->bi_iter.bi_size;
@@ -411,6 +422,10 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			}
 
 			qc = submit_bio(bio);
+			if (qc == BLK_QC_T_EAGAIN) {
+				ret = -EAGAIN;
+				goto err;
+			}
 
 			if (polled)
 				WRITE_ONCE(iocb->ki_cookie, qc);
@@ -431,7 +446,12 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			atomic_inc(&dio->ref);
 		}
 
-		submit_bio(bio);
+		qc = submit_bio(bio);
+		if (qc == BLK_QC_T_EAGAIN) {
+			ret = -EAGAIN;
+			goto err;
+		}
+
 		bio = bio_alloc(GFP_KERNEL, nr_pages);
 	}
 
@@ -452,6 +472,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	}
 	__set_current_state(TASK_RUNNING);
 
+out:
 	if (!ret)
 		ret = blk_status_to_errno(dio->bio.bi_status);
 	if (likely(!ret))
@@ -459,6 +480,10 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 
 	bio_put(&dio->bio);
 	return ret;
+err:
+	if (!is_poll)
+		blk_finish_plug(&plug);
+	goto out;
 }
 
 static ssize_t
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index feff3fe4467e..3d89a3c04977 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -311,6 +311,7 @@ enum req_flag_bits {
 	__REQ_RAHEAD,		/* read ahead, can fail anytime */
 	__REQ_BACKGROUND,	/* background IO */
 	__REQ_NOWAIT,		/* Don't wait if request will block */
+	__REQ_NOWAIT_RET,	/* Return would-block error inline */
 	/*
 	 * When a shared kthread needs to issue a bio for a cgroup, doing
 	 * so synchronously can lead to priority inversions as the kthread
@@ -345,6 +346,7 @@
 #define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
 #define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
 #define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
+#define REQ_NOWAIT_RET		(1ULL << __REQ_NOWAIT_RET)
 #define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)
 
 #define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
@@ -418,6 +420,7 @@ static inline int op_stat_group(unsigned int op)
 
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE		-1U
+#define BLK_QC_T_EAGAIN		-2U
 #define BLK_QC_T_SHIFT		16
 #define BLK_QC_T_INTERNAL	(1U << 31)
 
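
For reference, a minimal sketch (not part of the patch) of how a bio submitter would be expected to use the new flag and cookie: set REQ_NOWAIT | REQ_NOWAIT_RET on the bio and check the return value of submit_bio() for BLK_QC_T_EAGAIN, mirroring what __blkdev_direct_IO() does above. The helper name submit_nowait_bio() is hypothetical.

/*
 * Illustrative only: submit a bio without blocking on request
 * allocation, reporting -EAGAIN inline instead of through the
 * bio's completion path. submit_nowait_bio() is a hypothetical
 * helper, not part of this patch.
 */
static int submit_nowait_bio(struct bio *bio)
{
	blk_qc_t qc;

	/* Ask the block layer to fail fast and signal it via the cookie. */
	bio->bi_opf |= REQ_NOWAIT | REQ_NOWAIT_RET;

	qc = submit_bio(bio);
	if (qc == BLK_QC_T_EAGAIN)
		return -EAGAIN;	/* request allocation would have blocked */

	return 0;
}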