Message-Id: <20190820223259.22348-30-willy@infradead.org>
Date:   Tue, 20 Aug 2019 15:32:50 -0700
From:   Matthew Wilcox <willy@infradead.org>
To:     netdev@vger.kernel.org
Cc:     "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: [PATCH 29/38] cls_flower: Convert handle_idr to XArray

From: "Matthew Wilcox (Oracle)" <willy@...radead.org>

Inline __fl_get() into fl_get().  Take the RCU read lock explicitly
for lookups and walks instead of relying on the RTNL.  The xa_lock
protects the XArray against concurrent modification, but it remains
nested under the RTNL for now.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 net/sched/cls_flower.c | 54 ++++++++++++++++++++----------------------
 1 file changed, 26 insertions(+), 28 deletions(-)
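
[Editor's note, not part of the patch: for reviewers newer to the XArray
API, the following stand-alone sketch shows the allocation and lookup
calls the conversion relies on.  The example_* names and the struct are
made up for illustration.]

#include <linux/refcount.h>
#include <linux/rcupdate.h>
#include <linux/xarray.h>

/* Hypothetical stand-in for struct cls_fl_filter, with only the
 * fields the sketch needs.
 */
struct example_filter {
	u32 handle;
	refcount_t refcnt;
};

/* XA_FLAGS_ALLOC1 marks index 0 busy, so allocated handles start
 * at 1, just like head->filters below.
 */
static DEFINE_XARRAY_FLAGS(example_filters, XA_FLAGS_ALLOC1);

/* Let the XArray pick the handle, as fl_change() does when the user
 * did not specify one; xa_limit_31b caps it at INT_MAX.
 */
static int example_add(struct example_filter *f)
{
	return xa_alloc(&example_filters, &f->handle, f, xa_limit_31b,
			GFP_ATOMIC);
}

/* Insert at a caller-chosen handle.  xa_insert() returns -EBUSY if
 * the slot is already occupied, which fl_change() translates to
 * -EAGAIN (where idr_alloc_u32() used to return -ENOSPC).
 */
static int example_add_at(struct example_filter *f, u32 handle)
{
	f->handle = handle;
	return xa_insert(&example_filters, handle, f, GFP_ATOMIC);
}

/* Look up under the RCU read lock, as the new fl_get() does, and
 * take a reference only if the entry is not already being freed.
 */
static struct example_filter *example_get(u32 handle)
{
	struct example_filter *f;

	rcu_read_lock();
	f = xa_load(&example_filters, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}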

diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 054123742e32..54026c9e9b05 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -91,7 +91,7 @@ struct cls_fl_head {
 	struct list_head masks;
 	struct list_head hw_filters;
 	struct rcu_work rwork;
-	struct idr handle_idr;
+	struct xarray filters;
 };
 
 struct cls_fl_filter {
@@ -334,7 +334,7 @@ static int fl_init(struct tcf_proto *tp)
 	INIT_LIST_HEAD_RCU(&head->masks);
 	INIT_LIST_HEAD(&head->hw_filters);
 	rcu_assign_pointer(tp->root, head);
-	idr_init(&head->handle_idr);
+	xa_init_flags(&head->filters, XA_FLAGS_ALLOC1);
 
 	return rhashtable_init(&head->ht, &mask_ht_params);
 }
@@ -530,19 +530,6 @@ static void __fl_put(struct cls_fl_filter *f)
 		__fl_destroy_filter(f);
 }
 
-static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
-{
-	struct cls_fl_filter *f;
-
-	rcu_read_lock();
-	f = idr_find(&head->handle_idr, handle);
-	if (f && !refcount_inc_not_zero(&f->refcnt))
-		f = NULL;
-	rcu_read_unlock();
-
-	return f;
-}
-
 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
 		       bool *last, bool rtnl_held,
 		       struct netlink_ext_ack *extack)
@@ -560,7 +547,7 @@ static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
 	f->deleted = true;
 	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
 			       f->mask->filter_ht_params);
-	idr_remove(&head->handle_idr, f->handle);
+	xa_erase(&head->filters, f->handle);
 	list_del_rcu(&f->list);
 	spin_unlock(&tp->lock);
 
@@ -599,7 +586,7 @@ static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
 				break;
 		}
 	}
-	idr_destroy(&head->handle_idr);
+	xa_destroy(&head->filters);
 
 	__module_get(THIS_MODULE);
 	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
@@ -615,8 +602,15 @@ static void fl_put(struct tcf_proto *tp, void *arg)
 static void *fl_get(struct tcf_proto *tp, u32 handle)
 {
 	struct cls_fl_head *head = fl_head_dereference(tp);
+	struct cls_fl_filter *f;
+
+	rcu_read_lock();
+	f = xa_load(&head->filters, handle);
+	if (f && !refcount_inc_not_zero(&f->refcnt))
+		f = NULL;
+	rcu_read_unlock();
 
-	return __fl_get(head, handle);
+	return f;
 }
 
 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
@@ -1663,7 +1657,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 		rhashtable_remove_fast(&fold->mask->ht,
 				       &fold->ht_node,
 				       fold->mask->filter_ht_params);
-		idr_replace(&head->handle_idr, fnew, fnew->handle);
+		xa_store(&head->filters, fnew->handle, fnew, 0);
 		list_replace_rcu(&fold->list, &fnew->list);
 		fold->deleted = true;
 
@@ -1681,8 +1675,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	} else {
 		if (handle) {
 			/* user specifies a handle and it doesn't exist */
-			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-					    handle, GFP_ATOMIC);
+			fnew->handle = handle;
+			err = xa_insert(&head->filters, handle, fnew,
+					GFP_ATOMIC);
 
 			/* Filter with specified handle was concurrently
 			 * inserted after initial check in cls_api. This is not
@@ -1690,18 +1685,16 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 			 * message flags. Returning EAGAIN will cause cls_api to
 			 * try to update concurrently inserted rule.
 			 */
-			if (err == -ENOSPC)
+			if (err == -EBUSY)
 				err = -EAGAIN;
 		} else {
-			handle = 1;
-			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-					    INT_MAX, GFP_ATOMIC);
+			err = xa_alloc(&head->filters, &fnew->handle, fnew,
+					xa_limit_31b, GFP_ATOMIC);
 		}
 		if (err)
 			goto errout_hw;
 
 		refcount_inc(&fnew->refcnt);
-		fnew->handle = handle;
 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
 		spin_unlock(&tp->lock);
 	}
@@ -1755,23 +1748,28 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 		    bool rtnl_held)
 {
 	struct cls_fl_head *head = fl_head_dereference(tp);
-	unsigned long id = arg->cookie, tmp;
+	unsigned long id;
 	struct cls_fl_filter *f;
 
 	arg->count = arg->skip;
 
-	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
+	rcu_read_lock();
+	xa_for_each_start(&head->filters, id, f, arg->cookie) {
 		/* don't return filters that are being deleted */
 		if (!refcount_inc_not_zero(&f->refcnt))
 			continue;
+		rcu_read_unlock();
 		if (arg->fn(tp, f, arg) < 0) {
 			__fl_put(f);
 			arg->stop = 1;
+			rcu_read_lock();
 			break;
 		}
 		__fl_put(f);
 		arg->count++;
+		rcu_read_lock();
 	}
+	rcu_read_unlock();
 	arg->cookie = id;
 }
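
[Editor's note, not part of the patch: the subtlest piece of the
conversion is fl_walk(), where arg->fn() may sleep and the RCU read
lock therefore has to be dropped around the callback.  A hypothetical
helper, reusing the example_filter sketch above, shows the pattern.]

/* Hypothetical walker mirroring the new fl_walk().  The RCU read
 * lock pins an entry only long enough to take a reference; it is
 * then dropped so @fn may sleep.  A real put would free the entry
 * when the count hits zero, as __fl_put() does.
 */
static unsigned long example_walk(unsigned long start,
				  int (*fn)(struct example_filter *f))
{
	struct example_filter *f;
	unsigned long id;

	rcu_read_lock();
	xa_for_each_start(&example_filters, id, f, start) {
		/* Skip entries already on their way out. */
		if (!refcount_inc_not_zero(&f->refcnt))
			continue;
		rcu_read_unlock();
		if (fn(f) < 0) {
			refcount_dec(&f->refcnt);
			rcu_read_lock();
			break;
		}
		refcount_dec(&f->refcnt);
		rcu_read_lock();
	}
	rcu_read_unlock();

	return id;	/* resume point, like arg->cookie */
}

Dropping the lock mid-walk is safe because xa_for_each_start() looks
up the next entry afresh from @id on every iteration, so no iterator
state is held across the unlocked region; the reference taken under
RCU is what keeps the current entry alive while @fn runs.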
 
-- 
2.23.0.rc1
