Date:   Sat, 21 Dec 2019 19:20:26 +0300
From:   Pavel Begunkov <asml.silence@...il.com>
To:     Jens Axboe <axboe@...nel.dk>, io-uring@...r.kernel.org,
        linux-kernel@...r.kernel.org
Cc:     Tejun Heo <tj@...nel.org>, Dennis Zhou <dennis@...nel.org>,
        Christoph Lameter <cl@...ux.com>
Subject: Re: [PATCH RFC v2 3/3] io_uring: batch get(ctx->ref) across submits

On 21/12/2019 19:15, Pavel Begunkov wrote:
> Double-account ctx->refs by keeping the number of taken refs in the
> ctx. Since io_uring takes per-request ctx->refs during submission,
> while holding ctx->uring_lock, this allows bypassing percpu_ref_get*()
> and its overhead most of the time.

Jens, could you please benchmark with this one? Especially for the
offloaded QD1 case. I haven't seen any difference with the nops test and
don't have a decent SSD at hand to test it myself. We could drop it if
there is no benefit.

This rewrites the @extra_refs handling from the second patch, so I left
that one as is for now.
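
To make the idea concrete outside of io_uring, the batching pattern
boils down to roughly the following. This is a minimal sketch with
made-up names (my_ctx, grab_request_ref, return_cached_refs,
REFS_BATCH), not the patch itself; the real code is in io_submit_sqes()
below:

#include <linux/percpu-refcount.h>

/*
 * Grab refs in large chunks under a lock, hand out one ref per
 * request, and return whatever was not consumed on teardown.
 */
#define REFS_BATCH	32768

struct my_ctx {
	struct percpu_ref	refs;
	unsigned long		taken_refs;	/* under the submit lock */
};

/* called with the submit lock held */
static int grab_request_ref(struct my_ctx *ctx)
{
	if (!ctx->taken_refs) {
		if (percpu_ref_is_dying(&ctx->refs))
			return -ENXIO;	/* ctx is going away */
		if (!percpu_ref_tryget_many(&ctx->refs, REFS_BATCH))
			return -EAGAIN;
		ctx->taken_refs = REFS_BATCH;
	}
	ctx->taken_refs--;	/* this ref now belongs to the request */
	return 0;
}

/* called with the submit lock held, before percpu_ref_kill() */
static void return_cached_refs(struct my_ctx *ctx)
{
	if (ctx->taken_refs)
		percpu_ref_put_many(&ctx->refs, ctx->taken_refs);
	ctx->taken_refs = 0;
}

The patch refills once per submission loop (whenever @taken_refs drops
below the threshold) rather than per request, but the accounting is the
same.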


> 
> Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
> ---
>  fs/io_uring.c | 32 +++++++++++++++++++++++++-------
>  1 file changed, 25 insertions(+), 7 deletions(-)
> 
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 5392134f042f..eef09de94609 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -84,6 +84,9 @@
>  #define IORING_MAX_ENTRIES	32768
>  #define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
>  
> +/* Not less than IORING_MAX_ENTRIES, so can grab once per submission loop */
> +#define IORING_REFS_THRESHOLD	IORING_MAX_ENTRIES
> +
>  /*
>   * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
>   */
> @@ -197,6 +200,7 @@ struct fixed_file_data {
>  struct io_ring_ctx {
>  	struct {
>  		struct percpu_ref	refs;
> +		unsigned long		taken_refs; /* used under @uring_lock */
>  	} ____cacheline_aligned_in_smp;
>  
>  	struct {
> @@ -690,6 +694,13 @@ static void io_ring_ctx_ref_free(struct percpu_ref *ref)
>  	complete(&ctx->completions[0]);
>  }
>  
> +static void io_free_taken_refs(struct io_ring_ctx *ctx)
> +{
> +	if (ctx->taken_refs)
> +		percpu_ref_put_many(&ctx->refs, ctx->taken_refs);
> +	ctx->taken_refs = 0;
> +}
> +
>  static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
>  {
>  	struct io_ring_ctx *ctx;
> @@ -4388,7 +4399,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
>  	struct io_submit_state state, *statep = NULL;
>  	struct io_kiocb *link = NULL;
>  	int i, submitted = 0;
> -	unsigned int extra_refs;
>  	bool mm_fault = false;
>  
>  	/* if we have a backlog and couldn't flush it all, return BUSY */
> @@ -4398,9 +4408,15 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
>  			return -EBUSY;
>  	}
>  
> -	if (!percpu_ref_tryget_many(&ctx->refs, nr))
> -		return -EAGAIN;
> -	extra_refs = nr;
> +	if (ctx->taken_refs < IORING_REFS_THRESHOLD) {
> +		if (unlikely(percpu_ref_is_dying(&ctx->refs))) {
> +			io_free_taken_refs(ctx);
> +			return -ENXIO;
> +		}
> +		if (!percpu_ref_tryget_many(&ctx->refs, IORING_REFS_THRESHOLD))
> +			return -EAGAIN;
> +		ctx->taken_refs += IORING_REFS_THRESHOLD;
> +	}
>  
>  	if (nr > IO_PLUG_THRESHOLD) {
>  		io_submit_state_start(&state, nr);
> @@ -4417,8 +4433,9 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
>  				submitted = -EAGAIN;
>  			break;
>  		}
> -		--extra_refs;
>  		if (!io_get_sqring(ctx, req, &sqe)) {
> +			/* not submitted, but a ref is freed */
> +			ctx->taken_refs--;
>  			__io_free_req(req);
>  			break;
>  		}
> @@ -4454,8 +4471,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
>  		io_queue_link_head(link);
>  	if (statep)
>  		io_submit_state_end(&state);
> -	if (extra_refs)
> -		percpu_ref_put_many(&ctx->refs, extra_refs);
> +	ctx->taken_refs -= submitted;
>  
>  	 /* Commit SQ ring head once we've consumed and submitted all SQEs */
>  	io_commit_sqring(ctx);
> @@ -5731,6 +5747,7 @@ static int io_uring_fasync(int fd, struct file *file, int on)
>  static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
>  {
>  	mutex_lock(&ctx->uring_lock);
> +	io_free_taken_refs(ctx);
>  	percpu_ref_kill(&ctx->refs);
>  	mutex_unlock(&ctx->uring_lock);
>  
> @@ -6196,6 +6213,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
>  
>  	if (opcode != IORING_UNREGISTER_FILES &&
>  	    opcode != IORING_REGISTER_FILES_UPDATE) {
> +		io_free_taken_refs(ctx);
>  		percpu_ref_kill(&ctx->refs);
>  
>  		/*
> 
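
For completeness, the reason for the percpu_ref_is_dying() check above:
percpu_ref_tryget_many(), unlike the _live variant, can still succeed
on a killed ref as long as the count hasn't dropped to zero, so a
submitter could otherwise cache a fresh batch after teardown had
already given its refs back, and the ref would never drain. Hence both
sides run under @uring_lock, and teardown returns the cached refs
before the kill, as in the hunk above:

	mutex_lock(&ctx->uring_lock);
	io_free_taken_refs(ctx);	/* give back the unconsumed batch */
	percpu_ref_kill(&ctx->refs);	/* ref can now drain to zero */
	mutex_unlock(&ctx->uring_lock);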

-- 
Pavel Begunkov


