Message-ID: <20180321155812.GA9382@redhat.com>
Date:   Wed, 21 Mar 2018 16:58:13 +0100
From:   Oleg Nesterov <oleg@...hat.com>
To:     Tejun Heo <tj@...nel.org>
Cc:     torvalds@...ux-foundation.org, jannh@...gle.com,
        paulmck@...ux.vnet.ibm.com, bcrl@...ck.org,
        viro@...iv.linux.org.uk, kent.overstreet@...il.com,
        security@...nel.org, linux-kernel@...r.kernel.org,
        kernel-team@...com
Subject: Re: [PATCH 8/8] fs/aio: Use rcu_work instead of explicit rcu and
 work item

Hi Tejun,

sorry for the late reply.

On 03/14, Tejun Heo wrote:
>
> Workqueue now has rcu_work.  Use it instead of open-coding rcu -> work
> item bouncing.

Yes, but a bit of open-coding may be more efficient...

> --- a/fs/aio.c
> +++ b/fs/aio.c
> @@ -115,8 +115,7 @@ struct kioctx {
>  	struct page		**ring_pages;
>  	long			nr_pages;
>  
> -	struct rcu_head		free_rcu;
> -	struct work_struct	free_work;	/* see free_ioctx() */
> +	struct rcu_work		free_rwork;	/* see free_ioctx() */

IIUC, an rcu_work can't easily be shared, so every kioctx needs its own
->free_rwork, and this looks sub-optimal.
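
For context, the flow with rcu_work is roughly this (from memory of your
workqueue series, so the exact spelling below may be off):

	/* every kioctx carries its own rcu_work */
	struct kioctx {
		...
		struct rcu_work		free_rwork;
	};

	static void free_ioctx(struct work_struct *work)
	{
		struct kioctx *ctx = container_of(to_rcu_work(work),
						  struct kioctx, free_rwork);
		/* process-context teardown, aio_free_ring() etc */
	}

	/* one grace period + one work item per kioctx */
	INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
	queue_rcu_work(system_wq, &ctx->free_rwork);

IOW, each kioctx pays for a full rcu_work even though the work function
is the same for all of them.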

What do you think about the (untested) patch below? The idea is to put
every kioctx to be freed on a global llist and let a single static work
item free them in batches.

Oleg.


--- a/fs/aio.c
+++ b/fs/aio.c
@@ -115,8 +115,10 @@ struct kioctx {
 	struct page		**ring_pages;
 	long			nr_pages;
 
-	struct rcu_head		free_rcu;
-	struct work_struct	free_work;	/* see free_ioctx() */
+	union {
+		struct rcu_head		free_rcu;
+		struct llist_node	free_llist;
+	};
 
 	/*
 	 * signals when all in-flight requests are done
@@ -589,31 +591,38 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
 	return cancel(&kiocb->common);
 }
 
+static struct llist_head free_ioctx_llist;
+
 /*
  * free_ioctx() should be RCU delayed to synchronize against the RCU
  * protected lookup_ioctx() and also needs process context to call
  * aio_free_ring(), so the double bouncing through kioctx->free_rcu and
  * ->free_work.
  */
-static void free_ioctx(struct work_struct *work)
+static void free_ioctx_workfn(struct work_struct *work)
 {
-	struct kioctx *ctx = container_of(work, struct kioctx, free_work);
+	struct llist_node *llist = llist_del_all(&free_ioctx_llist);
+	struct kioctx *ctx, *tmp;
 
-	pr_debug("freeing %p\n", ctx);
+	llist_for_each_entry_safe(ctx, tmp, llist, free_llist) {
+		pr_debug("freeing %p\n", ctx);
 
-	aio_free_ring(ctx);
-	free_percpu(ctx->cpu);
-	percpu_ref_exit(&ctx->reqs);
-	percpu_ref_exit(&ctx->users);
-	kmem_cache_free(kioctx_cachep, ctx);
+		aio_free_ring(ctx);
+		free_percpu(ctx->cpu);
+		percpu_ref_exit(&ctx->reqs);
+		percpu_ref_exit(&ctx->users);
+		kmem_cache_free(kioctx_cachep, ctx);
+	}
 }
 
+static DECLARE_WORK(free_ioctx_work, free_ioctx_workfn);
+
 static void free_ioctx_rcufn(struct rcu_head *head)
 {
 	struct kioctx *ctx = container_of(head, struct kioctx, free_rcu);
 
-	INIT_WORK(&ctx->free_work, free_ioctx);
-	schedule_work(&ctx->free_work);
+	if (llist_add(&ctx->free_llist, &free_ioctx_llist))
+		schedule_work(&free_ioctx_work);
 }
 
 static void free_ioctx_reqs(struct percpu_ref *ref)
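
The llist part relies on llist_add() returning true only when the list
was empty before the add, so the static work item is scheduled exactly
once per batch, and free_ioctx_workfn() then takes the whole batch
atomically via llist_del_all(). And the union should be safe: ->free_rcu
and ->free_llist are never live at the same time, the llist node is only
used after the RCU callback has fired.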
