Message-Id: <047e723ea493be8cb6a72b1003e12001839f3b98.1560562631.git.sbrivio@redhat.com>
Date: Sat, 15 Jun 2019 03:38:17 +0200
From: Stefano Brivio <sbrivio@...hat.com>
To: David Miller <davem@...emloft.net>
Cc: Jianlin Shi <jishi@...hat.com>, Wei Wang <weiwan@...gle.com>,
David Ahern <dsahern@...il.com>,
Martin KaFai Lau <kafai@...com>,
Eric Dumazet <edumazet@...gle.com>,
Matti Vaittinen <matti.vaittinen@...rohmeurope.com>,
netdev@...r.kernel.org
Subject: [PATCH net-next 1/2] selftests: pmtu: Introduce list_flush_ipv4_exception test case
This test checks that IPv4 route exceptions can be successfully listed
and flushed using ip route {list,flush} cache.
Signed-off-by: Stefano Brivio <sbrivio@...hat.com>
---
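A quick note for reviewers, not meant for the commit message: the
operations the new test exercises boil down to the sketch below. The
namespace name and destination address are placeholders for this
example only (the test derives them from the usual pmtu.sh prefixes),
and the exact cache listing format depends on the iproute2 version:

  # Trigger a PMTU exception towards one destination behind R1, then
  # ping again so that a route lookup actually matches the exception
  # created by the ICMP error (with IPv4 the route is only cached on
  # that second lookup, hence -c 2 in the test)
  ip netns exec ns-A ping -q -M want -i 0.1 -c 2 -s 1800 192.168.2.100

  # Each cached exception should show up as two lines of output here
  ip netns exec ns-A ip route list cache

  # After flushing, the cache listing is expected to be empty
  ip netns exec ns-A ip route flush cache
  ip netns exec ns-A ip route list cache
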
tools/testing/selftests/net/pmtu.sh | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 60 insertions(+)
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index 269e839b747e..6c063b17d7d0 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -112,6 +112,10 @@
# - cleanup_ipv6_exception
# Same as above, but use IPv6 transport from A to B
#
+# - list_flush_ipv4_exception
+# Using the same topology as in pmtu_ipv4, create exceptions, and check
+# they are shown when listing exception caches, gone after flushing them
+#
# - list_flush_ipv6_exception
# Using the same topology as in pmtu_ipv6, create exceptions, and check
# they are shown when listing exception caches, gone after flushing them
@@ -156,6 +160,7 @@ tests="
pmtu_vti6_link_change_mtu vti6: MTU changes on link changes 0
cleanup_ipv4_exception ipv4: cleanup of cached exceptions 1
cleanup_ipv6_exception ipv6: cleanup of cached exceptions 1
+ list_flush_ipv4_exception ipv4: list and flush cached exceptions 1
list_flush_ipv6_exception ipv6: list and flush cached exceptions 1"
NS_A="ns-A"
@@ -1207,6 +1212,61 @@ run_test_nh() {
USE_NH=no
}
+test_list_flush_ipv4_exception() {
+ setup namespaces routing || return 2
+ trace "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \
+ "${ns_r1}" veth_R1-B "${ns_b}" veth_B-R1 \
+ "${ns_a}" veth_A-R2 "${ns_r2}" veth_R2-A \
+ "${ns_r2}" veth_R2-B "${ns_b}" veth_B-R2
+
+ dst_prefix1="${prefix4}.${b_r1}."
+ dst2="${prefix4}.${b_r2}.1"
+
+ # Set up initial MTU values
+ mtu "${ns_a}" veth_A-R1 2000
+ mtu "${ns_r1}" veth_R1-A 2000
+ mtu "${ns_r1}" veth_R1-B 1500
+ mtu "${ns_b}" veth_B-R1 1500
+
+ mtu "${ns_a}" veth_A-R2 2000
+ mtu "${ns_r2}" veth_R2-A 2000
+ mtu "${ns_r2}" veth_R2-B 1500
+ mtu "${ns_b}" veth_B-R2 1500
+
+ fail=0
+
+ # Add 100 addresses for veth endpoint on B reached by default A route
+ for i in $(seq 100 199); do
+ run_cmd ${ns_b} ip addr add "${dst_prefix1}${i}" dev veth_B-R1
+ done
+
+ # Create 100 cached route exceptions for path via R1, one via R2. Note
+ # that with IPv4 we need to actually cause a route lookup that matches
+ # the exception caused by ICMP, in order to actually have a cached
+ # route, so we need to ping each destination twice
+ for i in $(seq 100 199); do
+ run_cmd ${ns_a} ping -q -M want -i 0.1 -c 2 -s 1800 "${dst_prefix1}${i}"
+ done
+ run_cmd ${ns_a} ping -q -M want -i 0.1 -c 2 -s 1800 "${dst2}"
+
+ # Each exception is printed as two lines
+ if [ "$(${ns_a} ip route list cache | wc -l)" -ne 202 ]; then
+ err " can't list cached exceptions"
+ fail=1
+ fi
+
+ run_cmd ${ns_a} ip route flush cache
+ pmtu1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst_prefix1}100")"
+ pmtu2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}")"
+ if [ -n "${pmtu1}" ] || [ -n "${pmtu2}" ] || \
+ [ -n "$(${ns_a} ip route list cache)" ]; then
+ err " can't flush cached exceptions"
+ fail=1
+ fi
+
+ return ${fail}
+}
+
test_list_flush_ipv6_exception() {
setup namespaces routing || return 2
trace "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \
--
2.20.1