Message-ID: <0d442f373edefc1481275154a0291ba9325fbe41.1766433800.git.fmaurer@redhat.com>
Date: Mon, 22 Dec 2025 21:57:34 +0100
From: Felix Maurer <fmaurer@...hat.com>
To: netdev@...r.kernel.org
Cc: davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
horms@...nel.org,
jkarrenpalo@...il.com,
tglx@...utronix.de,
mingo@...nel.org,
allison.henderson@...cle.com,
matttbe@...nel.org,
petrm@...dia.com,
bigeasy@...utronix.de
Subject: [RFC net 4/6] selftests: hsr: Add tests for more link faults with PRP

Add tests where one link has different rates of packet loss or reorders
packets. PRP should still be able to recover from these link faults and
show no packet loss. However, it is acceptable to receive some level of
duplicate packets. This matches the current specification (IEC
62439-3:2021) of the duplicate discard algorithm, which requires it to be
"designed such that it never rejects a legitimate frame, while occasional
acceptance of a duplicate can be tolerated." The rate of acceptable
duplicates in this test is intentionally high (10%, i.e. 40 accepted
duplicates out of 400 pings) to make the test stable; the values I
observed in the worst test cases (20% loss) were around 5% duplicates.

Signed-off-by: Felix Maurer <fmaurer@...hat.com>
---
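
A note for reviewers on the check_ping() changes below: the duplicate
count comes from the "+N duplicates" field that iputils ping prints in
its -q summary line when it receives duplicate replies. A minimal sketch
of that kind of extraction (the summary line and variable handling below
are illustrative, not copied from the script or captured from a test run):

    # Illustrative ping -q summary line:
    out="400 packets transmitted, 400 received, +12 duplicates, 0% packet loss, time 4123ms"

    dups=0
    if [[ "$out" =~ \+([0-9]+)\ duplicates ]]; then
        dups="${BASH_REMATCH[1]}"    # "12" in the line above
    fi

    loss="0%"
    if [[ "$out" =~ ([0-9.]+%)\ packet\ loss ]]; then
        loss="${BASH_REMATCH[1]}"    # "0%" in the line above
    fi
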
.../testing/selftests/net/hsr/link_faults.sh | 79 +++++++++++++++++--
1 file changed, 74 insertions(+), 5 deletions(-)
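
For context on the reordering test: netem only reorders packets that are
actually held in its queue. With "reorder 20%", 20% of the packets are
sent immediately while the remainder get the configured delay, so a base
delay (plus enough traffic to keep the queue non-empty) is required for
any reordering to happen at all. A standalone sketch of the same qdisc
setup outside the selftest (namespace and device names are placeholders):

    # The 50ms base delay holds packets in the netem queue; the 20% of
    # packets that skip the delay overtake the queued ones, which
    # produces the reordering.
    ip netns exec node1 tc qdisc add dev vethB root netem delay 50ms reorder 20%

    # A sender faster than the delay, e.g. ping -i 0.01, keeps packets
    # queued so that reordering can actually occur.
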
diff --git a/tools/testing/selftests/net/hsr/link_faults.sh b/tools/testing/selftests/net/hsr/link_faults.sh
index b00fdba62f17..11a55ba5cd7d 100755
--- a/tools/testing/selftests/net/hsr/link_faults.sh
+++ b/tools/testing/selftests/net/hsr/link_faults.sh
@@ -10,10 +10,16 @@ ALL_TESTS="
test_cut_link_hsrv1
test_clean_prp
test_cut_link_prp
+ test_packet_loss_prp
+ test_high_packet_loss_prp
+ test_reordering_prp
"
-# The tests are running ping for 5sec with a relatively short interval with a
-# cut link, which should be recoverable by HSR/PRP.
+# The tests are running ping for 5sec with a relatively short interval in
+# different scenarios with faulty links (cut links, packet loss, delay,
+# reordering) that should be recoverable by HSR/PRP. The ping interval (10ms)
+# is short enough that the base delay (50ms) leads to a queue in the netem
+# qdiscs which is needed for reordering.
setup_hsr_topo()
{
@@ -152,6 +158,7 @@ check_ping()
{
local node="$1"
local dst="$2"
+ local accepted_dups="$3"
local ping_args="-q -i 0.01 -c 400"

log_info "Running ping $node -> $dst"
@@ -176,7 +183,9 @@ check_ping()
loss="${BASH_REMATCH[1]}"
fi

- check_err "$dups" "Unexpected duplicate packets (${dups})"
+ if [ "$dups" -gt "$accepted_dups" ]; then
+ check_err 1 "Unexpected duplicate packets (${dups})"
+ fi
if [ "$loss" != "0%" ]; then
check_err 1 "Unexpected packet loss (${loss})"
fi
@@ -195,7 +204,7 @@ test_clean()
return
fi

- check_ping "$node1" "100.64.0.2"
+ check_ping "$node1" "100.64.0.2" 0

log_test "${tname}"
}
@@ -235,7 +244,7 @@ test_cut_link()
log_info "Cutting link"
ip -net "$node1" link set vethB down
) &
- check_ping "$node1" "100.64.0.2"
+ check_ping "$node1" "100.64.0.2" 0
wait

log_test "${tname}"
@@ -257,6 +266,66 @@ test_cut_link_prp()
test_cut_link "PRP"
}
+test_packet_loss()
+{
+ local proto="$1"
+ local loss="$2"
+
+ RET=0
+ tname="${FUNCNAME} - ${proto}, ${loss}"
+
+ setup_topo "$proto"
+ if ((RET != ksft_pass)); then
+ log_test "${tname} setup"
+ return
+ fi
+
+ # Packet loss with lower delay makes sure the packets on the lossy link
+ # arrive first.
+ tc -net "$node1" qdisc add dev vethA root netem delay 50ms
+ tc -net "$node1" qdisc add dev vethB root netem delay 20ms loss "$loss"
+
+ check_ping "$node1" "100.64.0.2" 40
+
+ log_test "${tname}"
+}
+
+test_packet_loss_prp()
+{
+ test_packet_loss "PRP" "20%"
+}
+
+test_high_packet_loss_prp()
+{
+ test_packet_loss "PRP" "80%"
+}
+
+test_reordering()
+{
+ local proto="$1"
+
+ RET=0
+ tname="${FUNCNAME} - ${proto}"
+
+ setup_topo "$proto"
+ if ((RET != ksft_pass)); then
+ log_test "${tname} setup"
+ return
+ fi
+
+ tc -net "$node1" qdisc add dev vethA root netem delay 50ms
+ tc -net "$node1" qdisc add dev vethB root netem delay 50ms reorder 20%
+
+ check_ping "$node1" "100.64.0.2" 40
+
+ log_test "${tname}"
+}
+
+test_reordering_prp()
+{
+ test_reordering "PRP"
+}
+

cleanup()
{
cleanup_all_ns
--
2.52.0