Date: Mon, 06 Apr 2015 19:18:50 -0400
From: chas williams <3chas3@...il.com>
To: netdev@...r.kernel.org
Subject: [RFC] net: ipv6: hold locks around mrt->mfc6_cache_array[]

It appears to me that the locking of the mfc6_cache_array is a little
bit broken.  The list heads in this array appear to be protected by the
rtnl lock in most cases, but there are a few places where this isn't
the case.  Regardless, based on the comment at the head of the code,
that probably wasn't the author's intent:

/*
 *	We return to original Alan's scheme. Hash table of resolved
 *	entries is changed only in process context and protected
 *	with weak lock mrt_lock. Queue of unresolved entries is protected
 *	with strong spinlock mfc_unres_lock.
 *
 *	In this case data path is free of exclusive locks at all.
 */

setsockopt() -> ip6mr_mfc_add() -> ip6mr_cache_resolve() ->
ip6_mr_forward() needs some lock, since it is going to read the cache
array.

ip6mr_mfc_add() probably should hold the write lock to prevent races
when adding entries.

mroute_clean_tables() should also probably hold the write lock across
the entire iteration; otherwise readers have a small window in which
they can grab a reference to an entry while it is being removed.  (A
userspace sketch of this pattern follows the patch below.)

Comments?

diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index f9a3fd3..1210f5c 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1306,12 +1306,12 @@ static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
 
 	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
 
+	write_lock_bh(&mrt_lock);
 	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
 		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
 		    ipv6_addr_equal(&c->mf6c_mcastgrp,
 				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
 		    (parent == -1 || parent == c->mf6c_parent)) {
-			write_lock_bh(&mrt_lock);
 			list_del(&c->list);
 			write_unlock_bh(&mrt_lock);
 
@@ -1320,6 +1320,7 @@ static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
 			return 0;
 		}
 	}
+	write_unlock_bh(&mrt_lock);
 	return -ENOENT;
 }
 
@@ -1465,6 +1466,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
 
 	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
 
+	write_lock_bh(&mrt_lock);
 	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
 		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
 		    ipv6_addr_equal(&c->mf6c_mcastgrp,
@@ -1476,7 +1478,6 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
 	}
 
 	if (found) {
-		write_lock_bh(&mrt_lock);
 		c->mf6c_parent = mfc->mf6cc_parent;
 		ip6mr_update_thresholds(mrt, c, ttls);
 		if (!mrtsock)
@@ -1501,7 +1502,6 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
 	if (!mrtsock)
 		c->mfc_flags |= MFC_STATIC;
 
-	write_lock_bh(&mrt_lock);
 	list_add(&c->list, &mrt->mfc6_cache_array[line]);
 	write_unlock_bh(&mrt_lock);
 
@@ -1525,8 +1525,10 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
 	spin_unlock_bh(&mfc_unres_lock);
 
 	if (found) {
+		read_lock(&mrt_lock);
 		ip6mr_cache_resolve(net, mrt, uc, c);
 		ip6mr_cache_free(uc);
+		read_unlock(&mrt_lock);
 	}
 	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
 	return 0;
@@ -1554,18 +1556,18 @@ static void mroute_clean_tables(struct mr6_table *mrt)
 	/*
 	 *	Wipe the cache
 	 */
+	write_lock_bh(&mrt_lock);
 	for (i = 0; i < MFC6_LINES; i++) {
 		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
 			if (c->mfc_flags & MFC_STATIC)
 				continue;
-			write_lock_bh(&mrt_lock);
 			list_del(&c->list);
-			write_unlock_bh(&mrt_lock);
 			mr6_netlink_event(mrt, c, RTM_DELROUTE);
 			ip6mr_cache_free(c);
 		}
 	}
+	write_unlock_bh(&mrt_lock);
 
 	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
 		spin_lock_bh(&mfc_unres_lock);
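To make the mroute_clean_tables() window concrete, here is a minimal
userspace sketch of the locking shape the patch moves to.  This is
pthreads, not kernel code, and every identifier in it (cache_entry,
reader_lookup, clean_tables) is invented for illustration: the reader
stands in for the forwarding path walking mfc6_cache_array[] under
read_lock(&mrt_lock), and the cleaner holds the write lock across the
whole sweep instead of taking it per entry.

/* Userspace sketch only -- a pthread rwlock instead of mrt_lock, and
 * a toy singly-linked list instead of mfc6_cache_array[]. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cache_entry {
	int id;
	struct cache_entry *next;
};

static struct cache_entry *cache_head;
static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Reader: stands in for the forwarding path looking up an entry
 * under read_lock(&mrt_lock). */
static void reader_lookup(int id)
{
	pthread_rwlock_rdlock(&cache_lock);
	for (struct cache_entry *c = cache_head; c; c = c->next)
		if (c->id == id)
			printf("found %d\n", c->id);
	pthread_rwlock_unlock(&cache_lock);
}

/* Cleaner: stands in for mroute_clean_tables().  The write lock is
 * held across the entire sweep, so a reader can never begin a walk
 * between two removals and reach an entry that is about to be freed.
 * Taking and dropping the write lock once per entry, as the current
 * code does, opens exactly that window. */
static void clean_tables(void)
{
	pthread_rwlock_wrlock(&cache_lock);
	struct cache_entry *c = cache_head;
	cache_head = NULL;
	while (c) {
		struct cache_entry *next = c->next;
		free(c);	/* unlink and free under the lock */
		c = next;
	}
	pthread_rwlock_unlock(&cache_lock);
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct cache_entry *c = malloc(sizeof(*c));
		c->id = i;
		c->next = cache_head;
		cache_head = c;
	}
	reader_lookup(2);	/* run this from a second thread to
				 * actually exercise the rwlock */
	clean_tables();
	return 0;
}

Builds with gcc -pthread.  In the kernel the same shape falls out of
simply moving write_lock_bh(&mrt_lock) outside the for loop, as in the
last hunk above.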