Message-Id: <8B7BCD9B-7400-4D6A-A582-0A9A9E2A2A1C@bamaicloud.com>
Date: Mon, 12 May 2025 17:16:53 +0800
From: Tonghao Zhang <tonghao@...aicloud.com>
To: Hangbin Liu <liuhangbin@...il.com>
Cc: netdev@...r.kernel.org,
Jay Vosburgh <jv@...sburgh.net>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Simon Horman <horms@...nel.org>,
Jonathan Corbet <corbet@....net>,
Andrew Lunn <andrew+netdev@...n.ch>
Subject: Re: [PATCH net-next 3/4] net: bonding: send peer notify when failure
recovery
> On May 12, 2025, at 4:05 PM, Hangbin Liu <liuhangbin@...il.com> wrote:
>
> Hi Tonghao,
> On Sat, May 10, 2025 at 12:45:03PM +0800, tonghao@...aicloud.com wrote:
>> From: Tonghao Zhang <tonghao@...aicloud.com>
>>
>> Hardware failures in NICs, optical transceivers, or switches are
>> unavoidable, but the system can recover quickly once the fault is
>> repaired. For example, sending ARP/ND packets immediately after LACP
>> failure recovery lets the system resume normal operation promptly,
>> minimizing service downtime.
>>
>> Cc: Jay Vosburgh <jv@...sburgh.net>
>> Cc: "David S. Miller" <davem@...emloft.net>
>> Cc: Eric Dumazet <edumazet@...gle.com>
>> Cc: Jakub Kicinski <kuba@...nel.org>
>> Cc: Paolo Abeni <pabeni@...hat.com>
>> Cc: Simon Horman <horms@...nel.org>
>> Cc: Jonathan Corbet <corbet@....net>
>> Cc: Andrew Lunn <andrew+netdev@...n.ch>
>> Signed-off-by: Tonghao Zhang <tonghao@...aicloud.com>
>> ---
>> drivers/net/bonding/bond_3ad.c | 14 ++++++++++++++
>> 1 file changed, 14 insertions(+)
>>
>> diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
>> index c6807e473ab7..6577ce54d115 100644
>> --- a/drivers/net/bonding/bond_3ad.c
>> +++ b/drivers/net/bonding/bond_3ad.c
>> @@ -982,6 +982,19 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
>> return 0;
>> }
>>
>> +static void ad_peer_notif_send(struct port *port)
>> +{
>> + if (!port->aggregator->is_active)
>> + return;
>> +
>> + struct bonding *bond = port->slave->bond;
>> + if (bond->params.broadcast_neighbor && rtnl_trylock()) {
>> + bond->send_peer_notif = bond->params.num_peer_notif *
>> + max(1, bond->params.peer_notif_delay);
>> + rtnl_unlock();
>> + }
>> +}
>> +
>> /**
>> * ad_mux_machine - handle a port's mux state machine
>> * @port: the port we're looking at
>> @@ -1164,6 +1177,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
>> port->actor_oper_port_state |= LACP_STATE_COLLECTING;
>> port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING;
>> port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
>> + ad_peer_notif_send(port);
>> ad_enable_collecting_distributing(port,
>> update_slave_arr);
>> port->ntt = true;
>
> Maybe enable notify after collecting/distributing?
Yes, the same suggestion was made by Jay. Here is the V2:
diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst
index 14f7593d888d..f8f5766703d4 100644
--- a/Documentation/networking/bonding.rst
+++ b/Documentation/networking/bonding.rst
@@ -773,8 +773,9 @@ num_unsol_na
greater than 1.

The valid range is 0 - 255; the default value is 1. These options
- affect only the active-backup mode. These options were added for
- bonding versions 3.3.0 and 3.4.0 respectively.
+ affect the active-backup or 802.3ad (broadcast_neighbor enabled) mode.
+ These options were added for bonding versions 3.3.0 and 3.4.0
+ respectively.

From Linux 3.0 and bonding version 3.7.1, these notifications
are generated by the ipv4 and ipv6 code and the numbers of
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c6807e473ab7..d1c2d416ac87 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -982,6 +982,17 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
return 0;
}

+static void ad_cond_set_peer_notif(struct port *port)
+{
+ struct bonding *bond = port->slave->bond;
+
+ if (bond->params.broadcast_neighbor && rtnl_trylock()) {
+ bond->send_peer_notif = bond->params.num_peer_notif *
+ max(1, bond->params.peer_notif_delay);
+ rtnl_unlock();
+ }
+}
+
/**
* ad_mux_machine - handle a port's mux state machine
* @port: the port we're looking at
@@ -2061,6 +2072,8 @@ static void ad_enable_collecting_distributing(struct port *port,
__enable_port(port);
/* Slave array needs update */
*update_slave_arr = true;
+ /* Should notify peers if possible */
+ ad_cond_set_peer_notif(port);
}
}

diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 342f2dc64116..ce31445e85b6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1240,17 +1240,31 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
/* must be called in RCU critical section or with RTNL held */
static bool bond_should_notify_peers(struct bonding *bond)
{
- struct slave *slave = rcu_dereference_rtnl(bond->curr_active_slave);
+ struct bond_up_slave *slaves;
+ struct slave *slave = NULL;
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ if (!bond->params.broadcast_neighbor)
+ return false;
+
+ slaves = rtnl_dereference(bond->usable_slaves);
+ if (!slaves || !READ_ONCE(slaves->count))
+ return false;
+ } else {
+ slave = rcu_dereference_rtnl(bond->curr_active_slave);
+ if (!slave || test_bit(__LINK_STATE_LINKWATCH_PENDING,
+ &slave->dev->state))
+ return false;
+ }

- if (!slave || !bond->send_peer_notif ||
+ if (!bond->send_peer_notif ||
bond->send_peer_notif %
max(1, bond->params.peer_notif_delay) != 0 ||
- !netif_carrier_ok(bond->dev) ||
- test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
+ !netif_carrier_ok(bond->dev))
return false;

netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
- slave ? slave->dev->name : "NULL");
+ slave ? slave->dev->name : "all");

return true;
}
--
2.34.1
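
For reference, the counter set by ad_cond_set_peer_notif() is consumed
the same way the existing active-backup path consumes it:
bond_should_notify_peers() returns true only while send_peer_notif is a
non-zero multiple of max(1, peer_notif_delay) (plus the carrier/slave
checks above). With num_peer_notif = 2 and peer_notif_delay = 3, for
example, the counter starts at 6 and peers are notified twice, three
monitor intervals apart. Roughly, the consumer side looks like this
(a simplified sketch with an invented name, not the exact
bond_mii_monitor() code):

/* Illustrative sketch only: the periodic monitor drains send_peer_notif
 * and raises NETDEV_NOTIFY_PEERS, which makes the ipv4/ipv6 code emit
 * the gratuitous ARP / unsolicited NA packets.
 */
static void bond_monitor_tick_sketch(struct bonding *bond)
{
	bool notify;

	rcu_read_lock();
	/* True only when the counter is a non-zero multiple of the delay
	 * and the bond (or, for 802.3ad, the usable slave set) is up.
	 */
	notify = bond_should_notify_peers(bond);
	rcu_read_unlock();

	if (!rtnl_trylock())
		return;

	if (bond->send_peer_notif)
		bond->send_peer_notif--;

	if (notify)
		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);

	rtnl_unlock();
}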
>
> Also, please rebase to the latest net-next. There is another switch case,
> AD_MUX_DISTRIBUTING, that enables collecting/distributing and should
> also send the notification.
>
> Thanks
> Hangbin
>
>