Message-ID: <vbftvaa4bny.fsf@mellanox.com>
Date: Wed, 21 Aug 2019 18:27:00 +0000
From: Vlad Buslov <vladbu@...lanox.com>
To: Matthew Wilcox <willy@...radead.org>,
Cong Wang <xiyou.wangcong@...il.com>
CC: "netdev@...r.kernel.org" <netdev@...r.kernel.org>
Subject: Re: [PATCH 29/38] cls_flower: Convert handle_idr to XArray

On Wed 21 Aug 2019 at 01:32, Matthew Wilcox <willy@...radead.org> wrote:
> From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
>
> Inline __fl_get() into fl_get(). Use the RCU lock explicitly for
> lookups and walks instead of relying on RTNL. The xa_lock protects us,
> but remains nested under the RTNL for now.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
> ---
> net/sched/cls_flower.c | 54 ++++++++++++++++++++----------------------
> 1 file changed, 26 insertions(+), 28 deletions(-)
>
> diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
> index 054123742e32..54026c9e9b05 100644
> --- a/net/sched/cls_flower.c
> +++ b/net/sched/cls_flower.c
> @@ -91,7 +91,7 @@ struct cls_fl_head {
> struct list_head masks;
> struct list_head hw_filters;
> struct rcu_work rwork;
> - struct idr handle_idr;
> + struct xarray filters;
> };
>
> struct cls_fl_filter {
> @@ -334,7 +334,7 @@ static int fl_init(struct tcf_proto *tp)
> INIT_LIST_HEAD_RCU(&head->masks);
> INIT_LIST_HEAD(&head->hw_filters);
> rcu_assign_pointer(tp->root, head);
> - idr_init(&head->handle_idr);
> + xa_init_flags(&head->filters, XA_FLAGS_ALLOC1);
>
> return rhashtable_init(&head->ht, &mask_ht_params);
> }
> @@ -530,19 +530,6 @@ static void __fl_put(struct cls_fl_filter *f)
> __fl_destroy_filter(f);
> }
>
> -static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
> -{
> - struct cls_fl_filter *f;
> -
> - rcu_read_lock();
> - f = idr_find(&head->handle_idr, handle);
> - if (f && !refcount_inc_not_zero(&f->refcnt))
> - f = NULL;
> - rcu_read_unlock();
> -
> - return f;
> -}
> -
> static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
> bool *last, bool rtnl_held,
> struct netlink_ext_ack *extack)
> @@ -560,7 +547,7 @@ static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
> f->deleted = true;
> rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
> f->mask->filter_ht_params);
> - idr_remove(&head->handle_idr, f->handle);
> + xa_erase(&head->filters, f->handle);
> list_del_rcu(&f->list);
> spin_unlock(&tp->lock);
>
> @@ -599,7 +586,7 @@ static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
> break;
> }
> }
> - idr_destroy(&head->handle_idr);
> + xa_destroy(&head->filters);
>
> __module_get(THIS_MODULE);
> tcf_queue_work(&head->rwork, fl_destroy_sleepable);
> @@ -615,8 +602,15 @@ static void fl_put(struct tcf_proto *tp, void *arg)
> static void *fl_get(struct tcf_proto *tp, u32 handle)
> {
> struct cls_fl_head *head = fl_head_dereference(tp);
> + struct cls_fl_filter *f;
> +
> + rcu_read_lock();
> + f = xa_load(&head->filters, handle);
> + if (f && !refcount_inc_not_zero(&f->refcnt))
> + f = NULL;
> + rcu_read_unlock();
>
> - return __fl_get(head, handle);
> + return f;
> }
>
> static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
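
Just to check that I read the new fl_get() right: the pattern the commit
message describes (take the RCU read lock explicitly and then
refcount_inc_not_zero() the entry, instead of relying on rtnl) is
essentially the following. Minimal sketch with made-up names ('filters',
'my_obj'), not the flower code itself:

	struct my_obj *obj;

	rcu_read_lock();
	obj = xa_load(&filters, handle);
	/* The entry may already be on its way out; only use it if we
	 * can still take a reference, otherwise treat it as absent.
	 */
	if (obj && !refcount_inc_not_zero(&obj->refcnt))
		obj = NULL;
	rcu_read_unlock();
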
> @@ -1663,7 +1657,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
> rhashtable_remove_fast(&fold->mask->ht,
> &fold->ht_node,
> fold->mask->filter_ht_params);
> - idr_replace(&head->handle_idr, fnew, fnew->handle);
> + xa_store(&head->filters, fnew->handle, fnew, 0);
> list_replace_rcu(&fold->list, &fnew->list);
> fold->deleted = true;
>
> @@ -1681,8 +1675,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
> } else {
> if (handle) {
> /* user specifies a handle and it doesn't exist */
> - err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
> - handle, GFP_ATOMIC);
> + fnew->handle = handle;
> + err = xa_insert(&head->filters, handle, fnew,
> + GFP_ATOMIC);
>
> /* Filter with specified handle was concurrently
> * inserted after initial check in cls_api. This is not
> @@ -1690,18 +1685,16 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
> * message flags. Returning EAGAIN will cause cls_api to
> * try to update concurrently inserted rule.
> */
> - if (err == -ENOSPC)
> + if (err == -EBUSY)
> err = -EAGAIN;
> } else {
> - handle = 1;
> - err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
> - INT_MAX, GFP_ATOMIC);
> + err = xa_alloc(&head->filters, &fnew->handle, fnew,
> + xa_limit_31b, GFP_ATOMIC);
> }
> if (err)
> goto errout_hw;
>
> refcount_inc(&fnew->refcnt);
> - fnew->handle = handle;
> list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
> spin_unlock(&tp->lock);
> }
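
If I read the XArray API right, the -ENOSPC -> -EBUSY change is expected:
xa_insert() fails with -EBUSY when the index is already occupied, where
idr_alloc_u32() reported -ENOSPC, and since fl_init() now sets
XA_FLAGS_ALLOC1, xa_alloc() with xa_limit_31b still hands out handles in
[1, INT_MAX] like the old idr code did. Rough sketch of the two cases,
with made-up names ('filters', 'f', 'new_handle'):

	if (handle) {
		/* caller chose the handle: -EBUSY if it is already taken */
		err = xa_insert(&filters, handle, f, GFP_ATOMIC);
	} else {
		/* lowest free handle: >= 1 because of XA_FLAGS_ALLOC1,
		 * <= INT_MAX because of xa_limit_31b
		 */
		err = xa_alloc(&filters, &new_handle, f, xa_limit_31b,
			       GFP_ATOMIC);
	}
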
> @@ -1755,23 +1748,28 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
> bool rtnl_held)
> {
> struct cls_fl_head *head = fl_head_dereference(tp);
> - unsigned long id = arg->cookie, tmp;
> + unsigned long id;
> struct cls_fl_filter *f;

Could you sort these by line length if you respin?
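i.e. something like this, longest line first:

	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *f;
	unsigned long id;
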
>
> arg->count = arg->skip;
>
> - idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
> + rcu_read_lock();
> + xa_for_each_start(&head->filters, id, f, arg->cookie) {
> /* don't return filters that are being deleted */
> if (!refcount_inc_not_zero(&f->refcnt))
> continue;
> + rcu_read_unlock();
> if (arg->fn(tp, f, arg) < 0) {
> __fl_put(f);
> arg->stop = 1;
> + rcu_read_lock();
> break;
> }
> __fl_put(f);
> arg->count++;
> + rcu_read_lock();
> }
> + rcu_read_unlock();
> arg->cookie = id;
> }

At first I was confused why you bring up the rtnl lock in the commit message
(the flower classifier has the 'unlocked' flag set and can no longer rely on
it), but looking at the code I see that we lost the rcu read lock here in
commit d39d714969cd ("idr: introduce idr_for_each_entry_continue_ul()") and
you are correctly bringing it back. Adding Cong to advise whether it is okay
to wait for this patch to be accepted, or whether we need to proceed with
fixing the missing RCU lock as a standalone patch.
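
For the archives, the walk pattern this hunk (re)establishes looks roughly
like the following; a sketch with made-up names ('filters', 'my_obj', 'cb',
'put_obj', 'start'), not the exact fl_walk() code:

	struct my_obj *obj;
	unsigned long id;
	int ret;

	rcu_read_lock();
	xa_for_each_start(&filters, id, obj, start) {
		/* skip entries that are already being torn down */
		if (!refcount_inc_not_zero(&obj->refcnt))
			continue;
		/* the callback may sleep, so drop the RCU read lock while
		 * it runs; our reference keeps 'obj' alive, and the next
		 * iteration does a fresh lookup after 'id' anyway
		 */
		rcu_read_unlock();
		ret = cb(obj);
		put_obj(obj);
		rcu_read_lock();
		if (ret < 0)
			break;
	}
	rcu_read_unlock();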