Message-ID: <20120817182855.GA11607@hmsreliant.think-freely.org>
Date: Fri, 17 Aug 2012 14:28:55 -0400
From: Neil Horman <nhorman@...driver.com>
To: Daniel Wagner <wagi@...om.org>
Cc: netdev@...r.kernel.org, cgroups@...r.kernel.org,
Daniel Wagner <daniel.wagner@...-carit.de>,
"David S. Miller" <davem@...emloft.net>,
Gao feng <gaofeng@...fujitsu.com>,
Jamal Hadi Salim <jhs@...atatu.com>,
John Fastabend <john.r.fastabend@...el.com>,
Li Zefan <lizefan@...wei.com>, Tejun Heo <tj@...nel.org>
Subject: Re: [PATCH v1 3/5] cgroup: Protect access to task_cls_classid() when
built as module
On Fri, Aug 17, 2012 at 04:58:12PM +0200, Daniel Wagner wrote:
> From: Daniel Wagner <daniel.wagner@...-carit.de>
>
> The module version of task_cls_classid() checks if net_cls_subsys_id
> is valid to identify when it is okay to access the controller.
>
> Instead of relying on the subsys_id to be set, make it explicit
> with a jump label.
>
> Signed-off-by: Daniel Wagner <daniel.wagner@...-carit.de>
> Cc: "David S. Miller" <davem@...emloft.net>
> Cc: Gao feng <gaofeng@...fujitsu.com>
> Cc: Jamal Hadi Salim <jhs@...atatu.com>
> Cc: John Fastabend <john.r.fastabend@...el.com>
> Cc: Li Zefan <lizefan@...wei.com>
> Cc: Neil Horman <nhorman@...driver.com>
> Cc: Tejun Heo <tj@...nel.org>
> Cc: netdev@...r.kernel.org
> Cc: cgroups@...r.kernel.org
> ---
> include/net/cls_cgroup.h | 5 ++++-
> net/core/sock.c | 5 +++++
> net/sched/cls_cgroup.c | 9 +++++++++
> 3 files changed, 18 insertions(+), 1 deletion(-)
>
> diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
> index 401672c..bbbd957 100644
> --- a/include/net/cls_cgroup.h
> +++ b/include/net/cls_cgroup.h
> @@ -16,6 +16,7 @@
> #include <linux/cgroup.h>
> #include <linux/hardirq.h>
> #include <linux/rcupdate.h>
> +#include <linux/jump_label.h>
>
> #ifdef CONFIG_CGROUPS
> struct cgroup_cls_state
> @@ -44,6 +45,8 @@ static inline u32 task_cls_classid(struct task_struct *p)
> }
>
> #elif IS_MODULE(CONFIG_NET_CLS_CGROUP)
> +extern struct static_key cgroup_cls_enabled;
> +#define clscg_enabled static_key_false(&cgroup_cls_enabled)
>
> extern int net_cls_subsys_id;
>
> @@ -52,7 +55,7 @@ static inline u32 task_cls_classid(struct task_struct *p)
> int id;
> u32 classid = 0;
>
> - if (in_interrupt())
> + if (!clscg_enabled || in_interrupt())
> return 0;
>
> rcu_read_lock();
> diff --git a/net/core/sock.c b/net/core/sock.c
> index 8f67ced..8106e77 100644
> --- a/net/core/sock.c
> +++ b/net/core/sock.c
> @@ -327,6 +327,11 @@ int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
> EXPORT_SYMBOL(__sk_backlog_rcv);
>
> #if defined(CONFIG_CGROUPS)
> +#if IS_MODULE(CONFIG_NET_CLS_CGROUP)
> +struct static_key cgroup_cls_enabled = STATIC_KEY_INIT_FALSE;
> +EXPORT_SYMBOL_GPL(cgroup_cls_enabled);
> +#endif
> +
> #if !defined(CONFIG_NET_CLS_CGROUP)
> int net_cls_subsys_id = -1;
> EXPORT_SYMBOL_GPL(net_cls_subsys_id);
> diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
> index 7743ea8..0635894 100644
> --- a/net/sched/cls_cgroup.c
> +++ b/net/sched/cls_cgroup.c
> @@ -44,12 +44,21 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
>
> if (cgrp->parent)
> cs->classid = cgrp_cls_state(cgrp->parent)->classid;
> +#if IS_MODULE(CONFIG_NET_CLS_CGROUP)
> + else if (!clscg_enabled)
> + static_key_slow_inc(&cgroup_cls_enabled);
This is racy, I think. The read of the static key is atomic with respect to
other reads, but the entire conditional is not. If two CPUs were creating
cgroups in parallel, both could read the static key as disabled (the second
CPU would read the key before the first CPU could increment it), and so both
would take the increment path.
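Roughly, the interleaving would look like this (illustrative timeline, two
root cgroups being created at the same time):

    CPU0                                   CPU1
    reads clscg_enabled as false
                                           reads clscg_enabled as false
    static_key_slow_inc(&cgroup_cls_enabled)
                                           static_key_slow_inc(&cgroup_cls_enabled)

Both CPUs pass the !clscg_enabled check, so the key gets incremented twice.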
> +#endif
>
> return &cs->css;
> }
>
> static void cgrp_destroy(struct cgroup *cgrp)
> {
> +#if IS_MODULE(CONFIG_NET_CLS_CGROUP)
> + if (!cgrp->parent && clscg_enabled)
> + static_key_slow_dec(&cgroup_cls_enabled);
Ditto here with the race above. I think what you want is one of:

1) Use static_key_slow_[inc|dec] unconditionally, or
2) Keep a separate internal counter to track the number of cgroup instances,
so that you only inc the static key on the first create and dec it on the
last delete.

I would think (1) would be sufficient. It looks like static_key_slow_inc()
already uses atomic_inc_not_zero internally, so additional increments just
bump the count when the key is already enabled.
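For illustration, option (1) applied to the two hunks above might look
something like the following (untested sketch; it keeps the !cgrp->parent
gating from the patch and only drops the racy clscg_enabled checks):

    static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
    {
            struct cgroup_cls_state *cs;

            /* ... allocate cs as the existing code does ... */

            if (cgrp->parent)
                    cs->classid = cgrp_cls_state(cgrp->parent)->classid;
    #if IS_MODULE(CONFIG_NET_CLS_CGROUP)
            else
                    /* unconditional inc: static_key_slow_inc() is atomic,
                     * so concurrent root-cgroup creates cannot race here */
                    static_key_slow_inc(&cgroup_cls_enabled);
    #endif

            return &cs->css;
    }

    static void cgrp_destroy(struct cgroup *cgrp)
    {
    #if IS_MODULE(CONFIG_NET_CLS_CGROUP)
            /* paired unconditional dec; the key stays enabled until the
             * last root cgroup goes away */
            if (!cgrp->parent)
                    static_key_slow_dec(&cgroup_cls_enabled);
    #endif
            kfree(cgrp_cls_state(cgrp));
    }

Since the inc and the dec are both keyed only to !cgrp->parent, every
root-cgroup create is matched by exactly one dec on destroy, and the key's
refcount stays balanced.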
Neil
> +#endif
> +
> kfree(cgrp_cls_state(cgrp));
> }
>
> --
> 1.7.12.rc1.16.g05a20c8