Message-Id: <20190820223259.22348-25-willy@infradead.org>
Date: Tue, 20 Aug 2019 15:32:45 -0700
From: Matthew Wilcox <willy@...radead.org>
To: netdev@...r.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Subject: [PATCH 24/38] cls_u32: Convert tc_u_common->handle_idr to XArray
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
There are two structures called 'handle_idr' in this module, which is
confusing. Convert this one to an XArray and rename it to ht_xa. Leave
the existing locking alone, which means we're covered by both the rtnl
lock and the XArray spinlock when accessing this XArray.
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
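For readers less familiar with the allocating XArray API, here is a minimal
standalone sketch of the cyclic-allocation pattern the patch adopts in
gen_new_htid() below. It assumes a kernel context; the demo_* names and the
wrapper struct are made up for illustration and are not part of the patch.

#include <linux/xarray.h>

struct demo_table {
        u32 next;               /* cursor for cyclic ID allocation */
        struct xarray ids;      /* maps allocated ID -> object */
};

static void demo_table_init(struct demo_table *t)
{
        /* XA_FLAGS_ALLOC1 keeps index 0 busy, so IDs start at 1 */
        xa_init_flags(&t->ids, XA_FLAGS_ALLOC1);
        t->next = 0;
}

/* Returns a new ID in [1, 0x7ff], or 0 on failure. */
static u32 demo_new_id(struct demo_table *t, void *obj)
{
        u32 id;
        int err;

        /*
         * xa_alloc_cyclic() returns 0 on success, 1 if the cursor
         * wrapped around, or a negative errno; only err < 0 is a
         * failure, which is why the patch checks "if (err < 0)".
         */
        err = xa_alloc_cyclic(&t->ids, &id, obj, XA_LIMIT(0, 0x7ff),
                              &t->next, GFP_KERNEL);
        if (err < 0)
                return 0;
        return id;
}

static void demo_free_id(struct demo_table *t, u32 id)
{
        xa_erase(&t->ids, id);  /* erasing the entry frees the ID */
}

The fixed-handle path in u32_change() instead uses xa_insert(), which stores
at a caller-chosen index and fails with -EBUSY if that index is already in
use.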
net/sched/cls_u32.c | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 8614088edd1b..18ef5f375976 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -86,7 +86,8 @@ struct tc_u_common {
struct tc_u_hnode __rcu *hlist;
void *ptr;
int refcnt;
- struct idr handle_idr;
+ u32 ht_next;
+ struct xarray ht_xa;
struct hlist_node hnode;
long knodes;
};
@@ -305,8 +306,12 @@ static void *u32_get(struct tcf_proto *tp, u32 handle)
/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
- int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);
- if (id < 0)
+ int err;
+ u32 id;
+
+ err = xa_alloc_cyclic(&tp_c->ht_xa, &id, ptr, XA_LIMIT(0, 0x7ff),
+ &tp_c->ht_next, GFP_KERNEL);
+ if (err < 0)
return 0;
return (id | 0x800U) << 20;
}
@@ -371,8 +376,7 @@ static int u32_init(struct tcf_proto *tp)
}
tp_c->ptr = key;
INIT_HLIST_NODE(&tp_c->hnode);
- idr_init(&tp_c->handle_idr);
-
+ xa_init_flags(&tp_c->ht_xa, XA_FLAGS_ALLOC1);
hlist_add_head(&tp_c->hnode, tc_u_hash(key));
}
@@ -608,7 +612,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
if (phn == ht) {
u32_clear_hw_hnode(tp, ht, extack);
idr_destroy(&ht->handle_idr);
- idr_remove(&tp_c->handle_idr, ht->handle);
+ xa_erase(&tp_c->ht_xa, ht->handle);
RCU_INIT_POINTER(*hn, ht->next);
kfree_rcu(ht, rcu);
return 0;
@@ -645,7 +649,6 @@ static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
kfree_rcu(ht, rcu);
}
- idr_destroy(&tp_c->handle_idr);
kfree(tp_c);
}
@@ -950,8 +953,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
return -ENOMEM;
}
} else {
- err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
- handle, GFP_KERNEL);
+ err = xa_insert(&tp_c->ht_xa, handle, ht, GFP_KERNEL);
if (err) {
kfree(ht);
return err;
@@ -966,7 +968,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
err = u32_replace_hw_hnode(tp, ht, flags, extack);
if (err) {
- idr_remove(&tp_c->handle_idr, handle);
+ xa_erase(&tp_c->ht_xa, handle);
kfree(ht);
return err;
}
--
2.23.0.rc1