Date:   Sat, 21 Jul 2018 22:03:09 -0700 (PDT)
From:   David Miller <davem@...emloft.net>
To:     karn@...q.net
Cc:     kuznet@....inr.ac.ru, yoshfuji@...ux-ipv6.org,
        netdev@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: hard-coded limit on unresolved multicast route cache in
 ipv4/ipmr.c causes slow, unreliable creation of multicast routes on busy
 networks

From: Phil Karn <karn@...q.net>
Date: Sat, 21 Jul 2018 18:31:22 -0700

> I'm running pimd (protocol independent multicast routing) and found that
> on busy networks with lots of unresolved multicast routing entries, the
> creation of new multicast group routes can be extremely slow and
> unreliable, especially when the group in question has little traffic.
> 
> A Google search revealed the following conversation about the problem
> from the fall of 2015:
> 
> https://github.com/troglobit/pimd/issues/58
> 
> Note especially the comment by kopren on Sep 13, 2016.
> 
> The writer traced the problem to function ipmr_cache_unresolved() in
> file net/ipv4/ipmr.c, in the following block of code:
> 
> 		/* Create a new entry if allowable */
> 		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
> 		    (c = ipmr_cache_alloc_unres()) == NULL) {
> 			spin_unlock_bh(&mfc_unres_lock);
> 
> 			kfree_skb(skb);
> 			return -ENOBUFS;
> 		}
 ...
> Does this hard-coded limit serve any purpose? Can it be safely increased
> to a much larger value, or better yet, removed altogether? If it can't
> be removed, can it at least be made configurable through a /proc entry?

Yeah, that limit is bogus for several reasons.

One, it's too low.

Two, it's not configurable.

There does have to be some limit, because we are depending upon a user
process (mrouted or whatever) to receive the netlink message, resolve
the cache entry, and update the kernel.

If the user process gets stuck, or processes entries very slowly, the
backlog could grow without bound.  So we do indeed need some kind of limit.
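
For illustration, the daemon side of that cycle is roughly the
following (a minimal sketch against the mroute API in
<linux/mroute.h>, not code taken from mrouted or pimd; error handling
omitted, MRT_INIT needs CAP_NET_ADMIN):

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute.h>

int main(void)
{
	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
	int on = 1;

	/* Become the multicast routing daemon for this namespace. */
	setsockopt(s, IPPROTO_IP, MRT_INIT, &on, sizeof(on));

	for (;;) {
		char buf[2048];
		struct igmpmsg *im = (struct igmpmsg *)buf;
		struct mfcctl mc;

		/* Kernel upcalls arrive on the mroute socket with the
		 * igmpmsg overlaid on the IP header; im_mbz == 0
		 * distinguishes them from real IGMP packets.
		 */
		if (read(s, buf, sizeof(buf)) <= 0)
			continue;
		if (im->im_mbz != 0 || im->im_msgtype != IGMPMSG_NOCACHE)
			continue;

		/* Resolve the (S,G) pair and install the route; the
		 * kernel then releases the skbs queued on the
		 * unresolved cache entry.
		 */
		memset(&mc, 0, sizeof(mc));
		mc.mfcc_origin = im->im_src;
		mc.mfcc_mcastgrp = im->im_dst;
		mc.mfcc_parent = im->im_vif;
		/* mc.mfcc_ttls[] would be filled from routing policy. */
		setsockopt(s, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
	}
}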

But we essentially already have such a limit: the socket receive
queue limit of the mrouted socket.  And indeed, we fail the cache
creation if we cannot queue up the netlink message to the user
process successfully.
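
Concretely, the existing error path in ipmr_cache_unresolved()
already does roughly this (paraphrased from the current code, so
treat it as a sketch):

	/* Reflect first query at mrouted; this queues an IGMPMSG_NOCACHE
	 * upcall on the mroute control socket.
	 */
	err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
	if (err < 0) {
		/* The daemon's receive queue is full (or nobody is
		 * listening): throw the half-built entry away and fail.
		 */
		spin_unlock_bh(&mfc_unres_lock);

		ipmr_cache_free(c);
		kfree_skb(skb);
		return err;
	}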

Therefore, it probably is safe and correct to remove this
cache_resolve_queue_len altogether.

Something like this:

diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index d633f737b3c6..b166465d7c05 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -234,7 +234,6 @@ struct mr_table_ops {
  * @mfc_hash: Hash table of all resolved routes for easy lookup
  * @mfc_cache_list: list of resovled routes for possible traversal
  * @maxvif: Identifier of highest value vif currently in use
- * @cache_resolve_queue_len: current size of unresolved queue
  * @mroute_do_assert: Whether to inform userspace on wrong ingress
  * @mroute_do_pim: Whether to receive IGMP PIMv1
  * @mroute_reg_vif_num: PIM-device vif index
@@ -251,7 +250,6 @@ struct mr_table {
 	struct rhltable		mfc_hash;
 	struct list_head	mfc_cache_list;
 	int			maxvif;
-	atomic_t		cache_resolve_queue_len;
 	bool			mroute_do_assert;
 	bool			mroute_do_pim;
 	int			mroute_reg_vif_num;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9f79b9803a16..c007cf9bfe82 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -747,8 +747,6 @@ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
 	struct sk_buff *skb;
 	struct nlmsgerr *e;
 
-	atomic_dec(&mrt->cache_resolve_queue_len);
-
 	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved))) {
 		if (ip_hdr(skb)->version == 0) {
 			struct nlmsghdr *nlh = skb_pull(skb,
@@ -1135,9 +1133,11 @@ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
 	}
 
 	if (!found) {
+		bool was_empty;
+
 		/* Create a new entry if allowable */
-		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
-		    (c = ipmr_cache_alloc_unres()) == NULL) {
+		c = ipmr_cache_alloc_unres();
+		if (!c) {
 			spin_unlock_bh(&mfc_unres_lock);
 
 			kfree_skb(skb);
@@ -1163,11 +1163,11 @@ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
 			return err;
 		}
 
-		atomic_inc(&mrt->cache_resolve_queue_len);
+		was_empty = list_empty(&mrt->mfc_unres_queue);
 		list_add(&c->_c.list, &mrt->mfc_unres_queue);
 		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
 
-		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
+		if (was_empty)
 			mod_timer(&mrt->ipmr_expire_timer,
 				  c->_c.mfc_un.unres.expires);
 	}
@@ -1274,7 +1274,6 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
 		if (uc->mfc_origin == c->mfc_origin &&
 		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
 			list_del(&_uc->list);
-			atomic_dec(&mrt->cache_resolve_queue_len);
 			found = true;
 			break;
 		}
@@ -1322,7 +1321,7 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
 		mr_cache_put(c);
 	}
 
-	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
+	if (!list_empty(&mrt->mfc_unres_queue)) {
 		spin_lock_bh(&mfc_unres_lock);
 		list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
 			list_del(&c->list);
@@ -2648,9 +2647,22 @@ static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
 		return ipmr_mfc_delete(tbl, &mfcc, parent);
 }
 
+static int queue_count(struct mr_table *mrt)
+{
+	struct list_head *pos;
+	int count = 0;
+
+	/* Snapshot the length of the unresolved queue under its lock. */
+	spin_lock_bh(&mfc_unres_lock);
+	list_for_each(pos, &mrt->mfc_unres_queue)
+		count++;
+	spin_unlock_bh(&mfc_unres_lock);
+	return count;
+}
+
 static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
 {
-	u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);
+	u32 queue_len = queue_count(mrt);
 
 	if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
 	    nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) ||
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 0d0f0053bb11..75e9c5a3e7ea 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -759,8 +759,6 @@ static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c)
 	struct net *net = read_pnet(&mrt->net);
 	struct sk_buff *skb;
 
-	atomic_dec(&mrt->cache_resolve_queue_len);
-
 	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
 		if (ipv6_hdr(skb)->version == 0) {
 			struct nlmsghdr *nlh = skb_pull(skb,
@@ -1139,8 +1137,8 @@ static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
 		 *	Create a new entry if allowable
 		 */
 
-		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
-		    (c = ip6mr_cache_alloc_unres()) == NULL) {
+		c = ip6mr_cache_alloc_unres();
+		if (!c) {
 			spin_unlock_bh(&mfc_unres_lock);
 
 			kfree_skb(skb);
@@ -1167,7 +1165,6 @@ static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
 			return err;
 		}
 
-		atomic_inc(&mrt->cache_resolve_queue_len);
 		list_add(&c->_c.list, &mrt->mfc_unres_queue);
 		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
 
@@ -1455,7 +1452,6 @@ static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
 		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
 		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
 			list_del(&_uc->list);
-			atomic_dec(&mrt->cache_resolve_queue_len);
 			found = true;
 			break;
 		}
@@ -1502,7 +1498,7 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
 		mr_cache_put(c);
 	}
 
-	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
+	if (!list_empty(&mrt->mfc_unres_queue)) {
 		spin_lock_bh(&mfc_unres_lock);
 		list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
 			list_del(&c->list);
