Message-Id: <20231124094919.2043838-1-xu.xin16@zte.com.cn>
Date: Fri, 24 Nov 2023 09:49:19 +0000
From: xu <xu.xin.sc@...il.com>
To: tung.q.nguyen@...tech.com.au
Cc: davem@...emloft.net,
jmaloy@...hat.com,
linux-kernel@...r.kernel.org,
netdev@...r.kernel.org,
tipc-discussion@...ts.sourceforge.net,
xu.xin.sc@...il.com,
xu.xin16@....com.cn,
yang.yang29@....com.cn,
ying.xue@...driver.com,
zhang.yunkai@....com.cn
Subject: RE: [RFC PATCH] net/tipc: reduce tipc_node lock holding time in tipc_rcv
>>Could we please solve the problem mentioned above by adding spin_lock_bh(&le->lock)?
>>
>
>No, you cannot do that. As I said before, the link status (including l->state) needs to be protected by the node lock.
Why can't we use le->lock instead of the node lock to protect it in tipc_link_rcv?
>What I showed you were just 2 use cases (link reset/delete). There are more use cases (netlink, transmit path etc.) that need proper locks.
The same applies there. We can also add spin_lock_bh(&le->lock) to protect the link in the other
places that change the link status, in addition to 'reset/delete', because using the node lock to
protect the link in tipc_link_rcv really wastes CPU cycles.
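
For illustration, a minimal sketch of the writer-side pattern I have in
mind (the helper name tipc_node_link_reset_locked is made up just to
show the idea; le->lock and the structures follow net/tipc/node.c, and
this is untested):

/* Sketch: every path that mutates link state takes the per-link-entry
 * lock, so tipc_link_rcv() no longer needs the node lock to see a
 * stable link. Untested illustration, not a complete patch.
 */
static void tipc_node_link_reset_locked(struct tipc_node *n, int bearer_id)
{
	struct tipc_link_entry *le = &n->links[bearer_id];

	spin_lock_bh(&le->lock);
	if (le->link)
		tipc_link_reset(le->link);
	spin_unlock_bh(&le->lock);
}

The netlink and transmit paths would wrap their link-state updates in
the same le->lock critical section.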
>
>>For example:
>>
>>(BTW, I have tested it: with this change, enabling RPS based on the TIPC port improves overall throughput by about 25%)
>>
>>diff --git a/net/tipc/node.c b/net/tipc/node.c
>>index 3105abe97bb9..470c272d798e 100644
>>--- a/net/tipc/node.c
>>+++ b/net/tipc/node.c
>>@@ -1079,12 +1079,16 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
>> __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
>> } else {
>> /* Defuse pending tipc_node_link_up() */
>>+ spin_lock_bh(&le->lock);
>> tipc_link_reset(l);
>>+ spin_unlock_bh(&le->lock);
>> tipc_link_fsm_evt(l, LINK_RESET_EVT);
>> }
>> if (delete) {
>>+ spin_lock_bh(&le->lock);
>> kfree(l);
>> le->link = NULL;
>>+ spin_unlock_bh(&le->lock);
>> n->link_cnt--;
>> }
>> 	trace_tipc_node_link_down(n, true, "node link down or deleted!");
>>@@ -2154,14 +2158,15 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
>> /* Receive packet directly if conditions permit */
>> tipc_node_read_lock(n);
>> if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
>>+ tipc_node_read_unlock(n);
>> spin_lock_bh(&le->lock);
>> if (le->link) {
>> rc = tipc_link_rcv(le->link, skb, &xmitq);
>> skb = NULL;
>> }
>> spin_unlock_bh(&le->lock);
>>- }
>>- tipc_node_read_unlock(n);
>>+ } else
>>+ tipc_node_read_unlock(n);
>>
>> /* Check/update node state before receiving */
>> if (unlikely(skb)) {
>>@@ -2169,12 +2174,13 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
>> goto out_node_put;
>> tipc_node_write_lock(n);
>> if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
>>+ tipc_node_write_unlock(n);
>> if (le->link) {
>> rc = tipc_link_rcv(le->link, skb, &xmitq);
>> skb = NULL;
>> }
>>- }
>>- tipc_node_write_unlock(n);
>>+ } else
>>+ tipc_node_write_unlock(n);
>> }
>>
>> if (unlikely(rc & TIPC_LINK_UP_EVT))
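
To spell out why the early unlock in the fast path stays safe (this just
restates what the diff above already does, assuming every writer takes
le->lock as sketched earlier): a receiver racing with link deletion
re-checks le->link under le->lock, so it either sees a live link or
falls back to the slow path instead of touching freed memory.

	/* Reader side of the race with the delete path above: both
	 * sides hold le->lock, and the deleter clears le->link before
	 * unlocking, so this block never dereferences a freed link.
	 */
	spin_lock_bh(&le->lock);
	if (le->link) {
		rc = tipc_link_rcv(le->link, skb, &xmitq);
		skb = NULL;	/* consumed by the link layer */
	}
	spin_unlock_bh(&le->lock);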