Message-ID: <vbfef89jny6.fsf@mellanox.com>
Date: Fri, 15 Feb 2019 11:22:45 +0000
From: Vlad Buslov <vladbu@...lanox.com>
To: Stefano Brivio <sbrivio@...hat.com>
CC: "netdev@...r.kernel.org" <netdev@...r.kernel.org>,
"jhs@...atatu.com" <jhs@...atatu.com>,
"xiyou.wangcong@...il.com" <xiyou.wangcong@...il.com>,
"jiri@...nulli.us" <jiri@...nulli.us>,
"davem@...emloft.net" <davem@...emloft.net>
Subject: Re: [PATCH net-next 03/12] net: sched: flower: introduce reference
counting for filters
On Thu 14 Feb 2019 at 20:34, Stefano Brivio <sbrivio@...hat.com> wrote:
> On Thu, 14 Feb 2019 09:47:03 +0200
> Vlad Buslov <vladbu@...lanox.com> wrote:
>
>> +static struct cls_fl_filter *fl_get_next_filter(struct tcf_proto *tp,
>> + unsigned long *handle)
>> +{
>> + struct cls_fl_head *head = fl_head_dereference(tp);
>> + struct cls_fl_filter *f;
>> +
>> + rcu_read_lock();
>> + /* don't return filters that are being deleted */
>> + while ((f = idr_get_next_ul(&head->handle_idr,
>> + handle)) != NULL &&
>> + !refcount_inc_not_zero(&f->refcnt))
>> + ++(*handle);
>
> This... hurts :) What about:
>
> 	while ((f = idr_get_next_ul(&head->handle_idr, handle))) {
> 		if (refcount_inc_not_zero(&f->refcnt))
> 			break;
> 		++(*handle);
> 	}
>
> ?
I prefer to avoid using the value of an assignment as a boolean, and
non-structured jumps, when possible. In this case it seems OK either
way, but how about:
	for (f = idr_get_next_ul(&head->handle_idr, handle);
	     f && !refcount_inc_not_zero(&f->refcnt);
	     f = idr_get_next_ul(&head->handle_idr, handle))
		++(*handle);
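
For completeness, the whole helper would then read as below (same body
as the hunk above, only the loop shape changes; an untested sketch):

static struct cls_fl_filter *fl_get_next_filter(struct tcf_proto *tp,
						unsigned long *handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *f;

	rcu_read_lock();
	/* Skip filters whose refcount already dropped to zero: they
	 * are being deleted and must not be returned.
	 */
	for (f = idr_get_next_ul(&head->handle_idr, handle);
	     f && !refcount_inc_not_zero(&f->refcnt);
	     f = idr_get_next_ul(&head->handle_idr, handle))
		++(*handle);
	rcu_read_unlock();

	return f;
}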
>
>> + rcu_read_unlock();
>> +
>> + return f;
>> +}
>> +
>> static bool __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
>> struct netlink_ext_ack *extack)
>> {
>> @@ -456,10 +503,7 @@ static bool __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
>> if (!tc_skip_hw(f->flags))
>> fl_hw_destroy_filter(tp, f, extack);
>> tcf_unbind_filter(tp, &f->res);
>> - if (async)
>> - tcf_queue_work(&f->rwork, fl_destroy_filter_work);
>> - else
>> - __fl_destroy_filter(f);
>> + __fl_put(f);
>>
>> return last;
>> }
>> @@ -494,11 +538,18 @@ static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
>> tcf_queue_work(&head->rwork, fl_destroy_sleepable);
>> }
>>
>> +static void fl_put(struct tcf_proto *tp, void *arg)
>> +{
>> + struct cls_fl_filter *f = arg;
>> +
>> + __fl_put(f);
>> +}
>> +
>> static void *fl_get(struct tcf_proto *tp, u32 handle)
>> {
>> struct cls_fl_head *head = fl_head_dereference(tp);
>>
>> - return idr_find(&head->handle_idr, handle);
>> + return __fl_get(head, handle);
>> }
>>
>> static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
>> @@ -1321,12 +1372,16 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
>> struct nlattr **tb;
>> int err;
>>
>> - if (!tca[TCA_OPTIONS])
>> - return -EINVAL;
>> + if (!tca[TCA_OPTIONS]) {
>> + err = -EINVAL;
>> + goto errout_fold;
>> + }
>>
>> mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
>> - if (!mask)
>> - return -ENOBUFS;
>> + if (!mask) {
>> + err = -ENOBUFS;
>> + goto errout_fold;
>> + }
>>
>> tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
>> if (!tb) {
>> @@ -1349,6 +1404,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
>> err = -ENOBUFS;
>> goto errout_tb;
>> }
>> + refcount_set(&fnew->refcnt, 1);
>>
>> err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
>> if (err < 0)
>> @@ -1381,6 +1437,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
>> if (!tc_in_hw(fnew->flags))
>> fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
>>
>> + refcount_inc(&fnew->refcnt);
>
> I guess I'm not getting the semantics but... why is it 2 now?
As soon as fnew is inserted into head->handle_idr (one reference), it
becomes accessible to concurrent users, which means that it can be
deleted at any time. However, tp->change() also returns a reference to
the newly created filter to cls_api by assigning it to the "arg"
parameter (the second reference). After tp->change() returns, cls API
continues to use fnew and releases it with tfilter_put() when finished.
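
To make the ownership explicit, the successful-insert path of
fl_change() effectively does the following (a simplified sketch using
names from the patch; error handling and the actual idr insertion are
omitted):

	/* Reference #1: owned by head->handle_idr once fnew is
	 * inserted there.
	 */
	refcount_set(&fnew->refcnt, 1);
	...
	/* Reference #2: returned to cls_api through "arg"; cls_api
	 * releases it with tfilter_put() when done with fnew.
	 */
	refcount_inc(&fnew->refcnt);
	*arg = fnew;

So a count of 2 at that point simply reflects the two independent
owners: the handle IDR and the cls_api caller.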