Message-ID: <20200216141151.GJ2935@paulmck-ThinkPad-P72>
Date: Sun, 16 Feb 2020 06:11:51 -0800
From: "Paul E. McKenney" <paulmck@...nel.org>
To: Giuseppe Scrivano <gscrivan@...hat.com>
Cc: linux-kernel@...r.kernel.org, rcu@...r.kernel.org,
ebiederm@...ssion.com
Subject: Re: [PATCH] ipc: use a work queue to free_ipc

On Tue, Feb 11, 2020 at 05:24:08PM +0100, Giuseppe Scrivano wrote:
> It avoids blocking on synchronize_rcu() in kern_umount().
> 
> The test program:
>
> #define _GNU_SOURCE
> #include <sched.h>
> #include <error.h>
> #include <errno.h>
> #include <stdlib.h>
> int main()
> {
> 	int i;
> 	for (i = 0; i < 1000; i++)
> 		if (unshare (CLONE_NEWIPC) < 0)
> 			error (EXIT_FAILURE, errno, "unshare");
> }
>
> goes from:
>
> Command being timed: "./ipc-namespace"
> User time (seconds): 0.00
> System time (seconds): 0.06
> Percent of CPU this job got: 0%
> Elapsed (wall clock) time (h:mm:ss or m:ss): 0:08.05
>
> to:
>
> Command being timed: "./ipc-namespace"
> User time (seconds): 0.00
> System time (seconds): 0.02
> Percent of CPU this job got: 96%
> Elapsed (wall clock) time (h:mm:ss or m:ss): 0:00.03
Nice speedup!

However, I am not convinced that the code shown below is safe.
I believe that you need either a synchronize_rcu() in your free_ipc()
function or that you need to pass free_ipc() to queue_rcu_work() instead
of calling schedule_work() directly.  As things are, I would expect you
to see free_ipc_ns() being invoked too soon on heavily loaded
CONFIG_PREEMPT=y kernels.  Which can be quite a pain to debug!
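
For concreteness, here is a rough and untested sketch of the two options,
reusing the names from the patch below.  The rcu_work variant assumes the
rcu_work object is set up once with INIT_RCU_WORK() at init time and that
queueing on system_wq is acceptable:

	/* Option 1: wait for pre-existing readers in the worker before freeing. */
	static void free_ipc(struct work_struct *unused)
	{
		struct llist_node *node = llist_del_all(&free_ipc_list);
		struct ipc_namespace *n, *t;

		synchronize_rcu();	/* full grace period before any namespace is freed */
		llist_for_each_entry_safe(n, t, node, mnt_llist)
			free_ipc_ns(n);
	}

	/* Option 2: let the workqueue wait out the grace period for you. */
	static struct rcu_work free_ipc_rwork;	/* INIT_RCU_WORK(&free_ipc_rwork, free_ipc) */

	if (llist_add(&ns->mnt_llist, &free_ipc_list))
		queue_rcu_work(system_wq, &free_ipc_rwork);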

Or am I missing something?

							Thanx, Paul
> Signed-off-by: Giuseppe Scrivano <gscrivan@...hat.com>
> ---
>  include/linux/ipc_namespace.h |  2 ++
>  ipc/namespace.c               | 17 +++++++++++++++--
>  2 files changed, 17 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
> index c309f43bde45..a06a78c67f19 100644
> --- a/include/linux/ipc_namespace.h
> +++ b/include/linux/ipc_namespace.h
> @@ -68,6 +68,8 @@ struct ipc_namespace {
>  	struct user_namespace *user_ns;
>  	struct ucounts *ucounts;
> 
> +	struct llist_node mnt_llist;
> +
>  	struct ns_common ns;
>  } __randomize_layout;
>
> diff --git a/ipc/namespace.c b/ipc/namespace.c
> index b3ca1476ca51..37d27e1b807a 100644
> --- a/ipc/namespace.c
> +++ b/ipc/namespace.c
> @@ -117,6 +117,7 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
> 
>  static void free_ipc_ns(struct ipc_namespace *ns)
>  {
> +	mq_put_mnt(ns);
>  	sem_exit_ns(ns);
>  	msg_exit_ns(ns);
>  	shm_exit_ns(ns);
> @@ -127,6 +128,17 @@ static void free_ipc_ns(struct ipc_namespace *ns)
>  	kfree(ns);
>  }
> 
> +static LLIST_HEAD(free_ipc_list);
> +static void free_ipc(struct work_struct *unused)
> +{
> +	struct llist_node *node = llist_del_all(&free_ipc_list);
> +	struct ipc_namespace *n, *t;
> +
> +	llist_for_each_entry_safe(n, t, node, mnt_llist)
> +		free_ipc_ns(n);
> +}
> +static DECLARE_WORK(free_ipc_work, free_ipc);
> +
>  /*
>   * put_ipc_ns - drop a reference to an ipc namespace.
>   * @ns: the namespace to put
> @@ -148,8 +160,9 @@ void put_ipc_ns(struct ipc_namespace *ns)
>  	if (refcount_dec_and_lock(&ns->count, &mq_lock)) {
>  		mq_clear_sbinfo(ns);
>  		spin_unlock(&mq_lock);
> -		mq_put_mnt(ns);
> -		free_ipc_ns(ns);
> +
> +		if (llist_add(&ns->mnt_llist, &free_ipc_list))
> +			schedule_work(&free_ipc_work);
>  	}
>  }
>
> --
> 2.24.1
>