Message-ID: <20150331072114.093c6937@tlielax.poochiereds.net>
Date: Tue, 31 Mar 2015 07:21:14 -0400
From: Jeff Layton <jeff.layton@...marydata.com>
To: Ian Kent <raven@...maw.net>
Cc: Kernel Mailing List <linux-kernel@...r.kernel.org>,
David Howells <dhowells@...hat.com>,
Oleg Nesterov <onestero@...hat.com>,
Trond Myklebust <trond.myklebust@...marydata.com>,
"J. Bruce Fields" <bfields@...ldses.org>,
Benjamin Coddington <bcodding@...hat.com>,
Al Viro <viro@...IV.linux.org.uk>,
"Eric W. Biederman" <ebiederm@...ssion.com>
Subject: Re: [RFC PATCH 5 1/7] kmod - add workqueue service thread store
On Tue, 31 Mar 2015 11:14:42 +0800
Ian Kent <raven@...maw.net> wrote:
> From: Ian Kent <ikent@...hat.com>
>
> Persistent use of process information is needed for contained
> execution in a namespace other than the root init namespace.
>
> Use a simple random token as a key to create and store thread
> information in a hashed list for use by the usermode helper
> thread runner.
>
> Signed-off-by: Ian Kent <ikent@...hat.com>
> Cc: Benjamin Coddington <bcodding@...hat.com>
> Cc: Al Viro <viro@...IV.linux.org.uk>
> Cc: J. Bruce Fields <bfields@...ldses.org>
> Cc: David Howells <dhowells@...hat.com>
> Cc: Trond Myklebust <trond.myklebust@...marydata.com>
> Cc: Oleg Nesterov <onestero@...hat.com>
> Cc: Eric W. Biederman <ebiederm@...ssion.com>
> Cc: Jeff Layton <jeff.layton@...marydata.com>
> ---
> include/linux/kmod.h | 3 +
> kernel/kmod.c | 179 ++++++++++++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 181 insertions(+), 1 deletion(-)
>
> diff --git a/include/linux/kmod.h b/include/linux/kmod.h
> index 0555cc6..fa46722 100644
> --- a/include/linux/kmod.h
> +++ b/include/linux/kmod.h
> @@ -66,6 +66,9 @@ struct subprocess_info {
> void *data;
> };
>
> +extern int umh_wq_get_token(int token, const char *service);
> +extern void umh_wq_put_token(int token);
> +
> extern int
> call_usermodehelper(char *path, char **argv, char **envp, int wait);
>
> diff --git a/kernel/kmod.c b/kernel/kmod.c
> index 2777f40..55d20ce 100644
> --- a/kernel/kmod.c
> +++ b/kernel/kmod.c
> @@ -40,13 +40,30 @@
> #include <linux/ptrace.h>
> #include <linux/async.h>
> #include <asm/uaccess.h>
> +#include <linux/hash.h>
> +#include <linux/list.h>
> +#include <linux/random.h>
>
> #include <trace/events/module.h>
>
> extern int max_threads;
>
> +#define KHELPER "khelper"
> static struct workqueue_struct *khelper_wq;
>
> +#define UMH_WQ_HASH_SHIFT 6
> +#define UMH_WQ_HASH_SIZE (1 << UMH_WQ_HASH_SHIFT)
> +
> +struct umh_wq_entry {
> + int token;
> + unsigned int count;
> + struct workqueue_struct *wq;
> + struct hlist_node umh_wq_hlist;
> +};
> +
> +static DEFINE_SPINLOCK(umh_wq_hash_lock);
> +static struct hlist_head umh_wq_hash[UMH_WQ_HASH_SIZE];
> +
> #define CAP_BSET (void *)1
> #define CAP_PI (void *)2
>
> @@ -475,6 +492,165 @@ static void helper_unlock(void)
> wake_up(&running_helpers_waitq);
> }
>
> +static void umh_wq_hash_init(void)
> +{
> + int i;
> +
> + for (i = 0; i < UMH_WQ_HASH_SIZE; i++)
> + INIT_HLIST_HEAD(&umh_wq_hash[i]);
> +}
> +
> +static struct umh_wq_entry *umh_wq_find_entry(int token)
> +{
> + struct umh_wq_entry *this, *entry;
> + struct hlist_head *bucket;
> + unsigned int hash;
> +
> + hash = hash_32((unsigned long) token, UMH_WQ_HASH_SHIFT);
> + bucket = &umh_wq_hash[hash];
> +
> + entry = ERR_PTR(-ENOENT);
> + if (hlist_empty(bucket))
> + goto out;
> +
> + hlist_for_each_entry(this, bucket, umh_wq_hlist) {
> + if (this->token == token) {
> + entry = this;
> + break;
> + }
> + }
> +out:
> + return entry;
> +}
> +
> +static struct workqueue_struct *umh_find_wq(int token, unsigned int nowait)
nit: there's no caller of this in this patch, but one is added in patch #2.
> +{
> + struct umh_wq_entry *entry;
> + unsigned long flags;
> +
> + if (!token)
> + return khelper_wq;
> +
> + if (nowait)
> + spin_lock_irqsave(&umh_wq_hash_lock, flags);
> + else
> + spin_lock(&umh_wq_hash_lock);
> + entry = umh_wq_find_entry(token);
> + if (nowait)
> + spin_unlock_irqrestore(&umh_wq_hash_lock, flags);
> + else
> + spin_unlock(&umh_wq_hash_lock);
> +
> +	/* no service thread registered for this token */
> +	if (IS_ERR(entry))
> +		return NULL;
> +
> +	return entry->wq;
> +}
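
To expand on the nit above: I assume the patch #2 call site ends up
looking something like this (hypothetical -- I'm guessing at the
"wq_token" field name), which is why the missing-entry case needs to
hand back something the caller can easily check:

	/* hypothetical patch #2 call site, for illustration only */
	wq = umh_find_wq(sub_info->wq_token, nowait);
	if (!wq)
		return -ESRCH;
	queue_work(wq, &sub_info->work);
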
> +
> +/**
> + * umh_wq_get_token - create service thread and return an identifying token
> + * @token: token of an existing service thread or 0 to create a new
> + * service thread.
> + * @service: service name to be appended to "khelper" for identification.
> + *
> + * Returns a token to be used with calls to call_usermode_helper_service().
> + * If @token corresponds to an existing service thread, its reference count
> + * is increased and the same token is returned. On failure a negative errno
> + * is returned.
> + */
> +int umh_wq_get_token(int token, const char *service)
> +{
> + struct workqueue_struct *wq;
> + char *wq_name;
> + int wq_name_len;
> + struct umh_wq_entry *entry;
> + struct hlist_head *bucket;
> + unsigned int hash;
> + unsigned int new_token;
> +
> + if (token) {
> + spin_lock(&umh_wq_hash_lock);
> + entry = umh_wq_find_entry(token);
> +		if (!IS_ERR(entry)) {
> + entry->count++;
> + spin_unlock(&umh_wq_hash_lock);
> + return token;
> + }
> + spin_unlock(&umh_wq_hash_lock);
> + }
> +
> + if (!service)
> + return -EINVAL;
> +
> + wq_name_len = sizeof(KHELPER) + strlen(service) + 1;
> + wq_name = kmalloc(wq_name_len, GFP_KERNEL);
> + if (!wq_name)
> + return -ENOMEM;
> +	strcpy(wq_name, KHELPER "-");
> + strcat(wq_name, service);
> +
> + entry = kzalloc(sizeof(struct umh_wq_entry), GFP_KERNEL);
> + if (!entry) {
> + kfree(wq_name);
> + return -ENOMEM;
> + }
> +
> + wq = create_singlethread_workqueue(wq_name);
> +	/* create_singlethread_workqueue() returns NULL on failure */
> +	if (!wq) {
> +		kfree(wq_name);
> +		kfree(entry);
> +		return -ENOMEM;
> +	}
> + }
> + kfree(wq_name);
> +	entry->wq = wq;
> +	/* start with a single reference, dropped by umh_wq_put_token() */
> +	entry->count = 1;
> +
> + do {
> +		/* keep tokens positive so they can't collide with errnos */
> +		new_token = get_random_int() & INT_MAX;
> + if (!new_token)
> + continue;
Why a random value here? Is there some attack that can be done by
guessing the token?
ISTM that a random value could end up being reused soon after it has
been removed from the hash if you were really unlucky, and that could
potentially be dangerous.
If there isn't a potential attack vector that involves guessing this
value, then a simple counter might mean less chance that the token
would be reused (particularly if the token were 64 bits).
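FWIW, here's an (untested) sketch of what I mean, assuming the token
were widened to a u64 everywhere:

	static u64 umh_wq_last_token;	/* protected by umh_wq_hash_lock */

	spin_lock(&umh_wq_hash_lock);
	/* 0 stays reserved for "no token"; a 64-bit counter won't wrap
	 * in practice, so there's no need to check for collisions */
	if (++umh_wq_last_token == 0)
		++umh_wq_last_token;
	new_token = umh_wq_last_token;
	hash = hash_64(new_token, UMH_WQ_HASH_SHIFT);
	hlist_add_head(&entry->umh_wq_hlist, &umh_wq_hash[hash]);
	entry->token = new_token;
	spin_unlock(&umh_wq_hash_lock);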
> + spin_lock(&umh_wq_hash_lock);
> +		/* probe only -- don't clobber the newly allocated entry */
> +		if (likely(IS_ERR(umh_wq_find_entry(new_token)))) {
> + hash = hash_32(new_token, UMH_WQ_HASH_SHIFT);
> + bucket = &umh_wq_hash[hash];
> + hlist_add_head(&entry->umh_wq_hlist, bucket);
> + entry->token = (long) new_token;
Why cast to long here? Shouldn't that be (int)?
> + spin_unlock(&umh_wq_hash_lock);
> + break;
> + }
> + spin_unlock(&umh_wq_hash_lock);
> + } while (1);
> +
> + return (int) new_token;
> +}
> +EXPORT_SYMBOL(umh_wq_get_token);
> +
> +/**
> + * umh_wq_put_token - stop a service thread if it is no longer in use
> + * and remove it from the service thread list
> + * @token: token of a service thread.
> + */
> +void umh_wq_put_token(int token)
> +{
> + struct umh_wq_entry *entry;
> +
> + if (!token)
> + return;
> +
> + spin_lock(&umh_wq_hash_lock);
> + entry = umh_wq_find_entry(token);
> + if (unlikely(IS_ERR(entry)))
> + spin_unlock(&umh_wq_hash_lock);
> + else {
> + if (--entry->count)
> + spin_unlock(&umh_wq_hash_lock);
> + else {
> + hlist_del(&entry->umh_wq_hlist);
> + spin_unlock(&umh_wq_hash_lock);
> + destroy_workqueue(entry->wq);
> + kfree(entry);
> + }
> + }
> +}
> +EXPORT_SYMBOL(umh_wq_put_token);
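
Also, just so I'm sure I follow the intended usage -- a consumer of
this would do something like the below, right?
(call_usermode_helper_service() only shows up later in the series, so
I'm guessing at its shape, and the service name here is made up):

	int token, err;

	token = umh_wq_get_token(0, "nfs-recovery");
	if (token < 0)
		return token;

	/* helpers for this service now run on its private thread */
	err = call_usermode_helper_service(path, argv, envp, token,
					   UMH_WAIT_PROC);

	/* drop the reference; the thread is destroyed on the last put */
	umh_wq_put_token(token);
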
> +
> /**
> * call_usermodehelper_setup - prepare to call a usermode helper
> * @path: path to usermode executable
> @@ -689,6 +865,7 @@ struct ctl_table usermodehelper_table[] = {
>
> void __init usermodehelper_init(void)
> {
> - khelper_wq = create_singlethread_workqueue("khelper");
> + umh_wq_hash_init();
> + khelper_wq = create_singlethread_workqueue(KHELPER);
> BUG_ON(!khelper_wq);
> }
>
--
Jeff Layton <jeff.layton@...marydata.com>