[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Date: Wed, 5 May 2021 16:36:42 -0700
From: Cong Wang <xiyou.wangcong@...il.com>
To: netdev@...r.kernel.org
Cc: Cong Wang <cong.wang@...edance.com>,
syzbot+7d941e89dd48bcf42573@...kaller.appspotmail.com,
Taehee Yoo <ap420073@...il.com>
Subject: [Patch net] rtnetlink: use rwsem to protect rtnl_af_ops list
From: Cong Wang <cong.wang@...edance.com>
We use the RTNL lock and RCU read lock to protect the global
list rtnl_af_ops; however, this forces the af_ops readers
into atomic context while iterating this list, particularly
in af_ops->set_link_af(). This was not a problem until we
began to take a mutex lock down the path in
__ipv6_dev_mc_dec().
Convert the RTNL+RCU protection to an rwsem, so that readers
can block while still running in parallel with one another.
Reported-and-tested-by: syzbot+7d941e89dd48bcf42573@...kaller.appspotmail.com
Fixes: 63ed8de4be81 ("mld: add mc_lock for protecting per-interface mld data")
Cc: Taehee Yoo <ap420073@...il.com>
Signed-off-by: Cong Wang <cong.wang@...edance.com>
---
net/core/rtnetlink.c | 68 +++++++++++++++++++++-----------------------
1 file changed, 33 insertions(+), 35 deletions(-)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 714d5fa38546..624ee5ab4183 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -538,12 +538,13 @@ static size_t rtnl_link_get_size(const struct net_device *dev)
}
static LIST_HEAD(rtnl_af_ops);
+static DECLARE_RWSEM(af_ops_sem);
static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
const struct rtnl_af_ops *ops;
- list_for_each_entry_rcu(ops, &rtnl_af_ops, list) {
+ list_for_each_entry(ops, &rtnl_af_ops, list) {
if (ops->family == family)
return ops;
}
@@ -559,9 +560,9 @@ static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
*/
void rtnl_af_register(struct rtnl_af_ops *ops)
{
- rtnl_lock();
- list_add_tail_rcu(&ops->list, &rtnl_af_ops);
- rtnl_unlock();
+ down_write(&af_ops_sem);
+ list_add_tail(&ops->list, &rtnl_af_ops);
+ up_write(&af_ops_sem);
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
@@ -571,11 +572,9 @@ EXPORT_SYMBOL_GPL(rtnl_af_register);
*/
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
- rtnl_lock();
- list_del_rcu(&ops->list);
- rtnl_unlock();
-
- synchronize_rcu();
+ down_write(&af_ops_sem);
+ list_del(&ops->list);
+ up_write(&af_ops_sem);
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);
@@ -588,15 +587,15 @@ static size_t rtnl_link_get_af_size(const struct net_device *dev,
/* IFLA_AF_SPEC */
size = nla_total_size(sizeof(struct nlattr));
- rcu_read_lock();
- list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
+ down_read(&af_ops_sem);
+ list_for_each_entry(af_ops, &rtnl_af_ops, list) {
if (af_ops->get_link_af_size) {
/* AF_* + nested data */
size += nla_total_size(sizeof(struct nlattr)) +
af_ops->get_link_af_size(dev, ext_filter_mask);
}
}
- rcu_read_unlock();
+ up_read(&af_ops_sem);
return size;
}
@@ -1603,7 +1602,7 @@ static int rtnl_fill_link_af(struct sk_buff *skb,
if (!af_spec)
return -EMSGSIZE;
- list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
+ list_for_each_entry(af_ops, &rtnl_af_ops, list) {
struct nlattr *af;
int err;
@@ -1811,10 +1810,10 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
goto nla_put_failure;
- rcu_read_lock();
+ down_read(&af_ops_sem);
if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
- goto nla_put_failure_rcu;
- rcu_read_unlock();
+ goto nla_put_failure_sem;
+ up_read(&af_ops_sem);
if (rtnl_fill_prop_list(skb, dev))
goto nla_put_failure;
@@ -1822,8 +1821,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
nlmsg_end(skb, nlh);
return 0;
-nla_put_failure_rcu:
- rcu_read_unlock();
+nla_put_failure_sem:
+ up_read(&af_ops_sem);
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
@@ -2274,27 +2273,27 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
const struct rtnl_af_ops *af_ops;
- rcu_read_lock();
+ down_read(&af_ops_sem);
af_ops = rtnl_af_lookup(nla_type(af));
if (!af_ops) {
- rcu_read_unlock();
+ up_read(&af_ops_sem);
return -EAFNOSUPPORT;
}
if (!af_ops->set_link_af) {
- rcu_read_unlock();
+ up_read(&af_ops_sem);
return -EOPNOTSUPP;
}
if (af_ops->validate_link_af) {
err = af_ops->validate_link_af(dev, af);
if (err < 0) {
- rcu_read_unlock();
+ up_read(&af_ops_sem);
return err;
}
}
- rcu_read_unlock();
+ up_read(&af_ops_sem);
}
}
@@ -2868,17 +2867,16 @@ static int do_setlink(const struct sk_buff *skb,
nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
const struct rtnl_af_ops *af_ops;
- rcu_read_lock();
-
+ down_read(&af_ops_sem);
BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
err = af_ops->set_link_af(dev, af, extack);
if (err < 0) {
- rcu_read_unlock();
+ up_read(&af_ops_sem);
goto errout;
}
- rcu_read_unlock();
+ up_read(&af_ops_sem);
status |= DO_SETLINK_NOTIFY;
}
}
@@ -5204,8 +5202,8 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
if (!attr)
goto nla_put_failure;
- rcu_read_lock();
- list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
+ down_read(&af_ops_sem);
+ list_for_each_entry(af_ops, &rtnl_af_ops, list) {
if (af_ops->fill_stats_af) {
struct nlattr *af;
int err;
@@ -5213,7 +5211,7 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
af = nla_nest_start_noflag(skb,
af_ops->family);
if (!af) {
- rcu_read_unlock();
+ up_read(&af_ops_sem);
goto nla_put_failure;
}
err = af_ops->fill_stats_af(skb, dev);
@@ -5221,14 +5219,14 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
if (err == -ENODATA) {
nla_nest_cancel(skb, af);
} else if (err < 0) {
- rcu_read_unlock();
+ up_read(&af_ops_sem);
goto nla_put_failure;
}
nla_nest_end(skb, af);
}
}
- rcu_read_unlock();
+ up_read(&af_ops_sem);
nla_nest_end(skb, attr);
@@ -5297,8 +5295,8 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev,
/* for IFLA_STATS_AF_SPEC */
size += nla_total_size(0);
- rcu_read_lock();
- list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
+ down_read(&af_ops_sem);
+ list_for_each_entry(af_ops, &rtnl_af_ops, list) {
if (af_ops->get_stats_af_size) {
size += nla_total_size(
af_ops->get_stats_af_size(dev));
@@ -5307,7 +5305,7 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev,
size += nla_total_size(0);
}
}
- rcu_read_unlock();
+ up_read(&af_ops_sem);
}
return size;
--
2.25.1
Powered by blists - more mailing lists