Message-ID: <DB9PR05MB9078FE84F8244C627FDCFA2888B8A@DB9PR05MB9078.eurprd05.prod.outlook.com>
Date: Fri, 24 Nov 2023 08:28:56 +0000
From: Tung Quang Nguyen <tung.q.nguyen@...tech.com.au>
To: xu <xu.xin.sc@...il.com>
CC: "davem@...emloft.net" <davem@...emloft.net>,
"jmaloy@...hat.com" <jmaloy@...hat.com>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>,
"tipc-discussion@...ts.sourceforge.net"
<tipc-discussion@...ts.sourceforge.net>,
"xu.xin16@....com.cn" <xu.xin16@....com.cn>,
"yang.yang29@....com.cn" <yang.yang29@....com.cn>,
"ying.xue@...driver.com" <ying.xue@...driver.com>,
"zhang.yunkai@....com.cn" <zhang.yunkai@....com.cn>
Subject: RE: [RFC PATCH] net/tipc: reduce tipc_node lock holding time in
tipc_rcv
>Could we please solve the problem mentioned above by adding spinlock(&le->lock)?
>
No, you cannot do that. As I said before, the link status (including l->state) needs to be protected by the node lock.
What I showed you were just two use cases (link reset/delete). There are more use cases (netlink, the transmit path, etc.) that need proper locking.
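
To illustrate, with only your proposed le->lock changes applied, an
interleaving like the one below is still possible. This is just a rough
sketch: the tipc_node_write_lock()/unlock() pair on the right is what
tipc_node_link_down() already takes around this code today, outside the
lines shown in your diff.

CPU0: tipc_rcv()                        CPU1: tipc_node_link_down()
------------------------------------    ------------------------------------
tipc_node_read_lock(n);
/* n->state == SELF_UP_PEER_UP */
tipc_node_read_unlock(n);
                                        tipc_node_write_lock(n);
                                        spin_lock_bh(&le->lock);
                                        tipc_link_reset(l);
                                        spin_unlock_bh(&le->lock);
spin_lock_bh(&le->lock);
tipc_link_rcv(le->link, skb, &xmitq);   tipc_link_fsm_evt(l, LINK_RESET_EVT);
/* may read/update l->state */          /* changes l->state, no le->lock */
spin_unlock_bh(&le->lock);              tipc_node_write_unlock(n);

Once tipc_rcv() has dropped the node read lock before calling
tipc_link_rcv(), le->lock alone cannot keep l->state consistent against
paths that only hold the node lock.
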
>For example:
>
>(BTW, I have tested it, with this change, enabling RPS based on tipc port can improve 25% of general throughput)
>
>diff --git a/net/tipc/node.c b/net/tipc/node.c
>index 3105abe97bb9..470c272d798e 100644
>--- a/net/tipc/node.c
>+++ b/net/tipc/node.c
>@@ -1079,12 +1079,16 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
> __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
> } else {
> /* Defuse pending tipc_node_link_up() */
>+ spin_lock_bh(&le->lock);
> tipc_link_reset(l);
>+ spin_unlock_bh(&le->lock);
> tipc_link_fsm_evt(l, LINK_RESET_EVT);
> }
> if (delete) {
>+ spin_lock_bh(&le->lock);
> kfree(l);
> le->link = NULL;
>+ spin_unlock_bh(&le->lock);
> n->link_cnt--;
> }
> trace_tipc_node_link_down(n, true, "node link down or deleted!");
>@@ -2154,14 +2158,15 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
> /* Receive packet directly if conditions permit */
> tipc_node_read_lock(n);
> if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
>+ tipc_node_read_unlock(n);
> spin_lock_bh(&le->lock);
> if (le->link) {
> rc = tipc_link_rcv(le->link, skb, &xmitq);
> skb = NULL;
> }
> spin_unlock_bh(&le->lock);
>- }
>- tipc_node_read_unlock(n);
>+ } else
>+ tipc_node_read_unlock(n);
>
> /* Check/update node state before receiving */
> if (unlikely(skb)) {
>@@ -2169,12 +2174,13 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
> goto out_node_put;
> tipc_node_write_lock(n);
> if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
>+ tipc_node_write_unlock(n);
> if (le->link) {
> rc = tipc_link_rcv(le->link, skb, &xmitq);
> skb = NULL;
> }
>- }
>- tipc_node_write_unlock(n);
>+ } else
>+ tipc_node_write_unlock(n);
> }
>
> if (unlikely(rc & TIPC_LINK_UP_EVT))