Message-Id: <20250514135642.11203-4-chia-yu.chang@nokia-bell-labs.com>
Date: Wed, 14 May 2025 15:56:30 +0200
From: chia-yu.chang@...ia-bell-labs.com
To: linux-doc@...r.kernel.org,
corbet@....net,
horms@...nel.org,
dsahern@...nel.org,
kuniyu@...zon.com,
bpf@...r.kernel.org,
netdev@...r.kernel.org,
dave.taht@...il.com,
pabeni@...hat.com,
jhs@...atatu.com,
kuba@...nel.org,
stephen@...workplumber.org,
xiyou.wangcong@...il.com,
jiri@...nulli.us,
davem@...emloft.net,
edumazet@...gle.com,
andrew+netdev@...n.ch,
donald.hunter@...il.com,
ast@...erby.net,
liuhangbin@...il.com,
shuah@...nel.org,
linux-kselftest@...r.kernel.org,
ij@...nel.org,
ncardwell@...gle.com,
koen.de_schepper@...ia-bell-labs.com,
g.white@...lelabs.com,
ingemar.s.johansson@...csson.com,
mirja.kuehlewind@...csson.com,
cheshire@...le.com,
rs.ietf@....at,
Jason_Livingood@...cast.com,
vidhi_goel@...le.com
Cc: Chia-Yu Chang <chia-yu.chang@...ia-bell-labs.com>
Subject: [PATCH v7 net-next 03/15] tcp: reorganize tcp_sock_write_txrx group for variables later
From: Chia-Yu Chang <chia-yu.chang@...ia-bell-labs.com>

Use the first 3-byte hole at the beginning of the tcp_sock_write_txrx
group for 'nonagle' and 'rate_app_limited', so that the existing hole
can be filled by new members in later patches. As a result, the group
size of tcp_sock_write_txrx is reduced from 92 + 4 to 91 + 4. In
addition, the group size check of tcp_sock_write_rx is changed to 96 to
match the pahole outcome.
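
For context on what these size checks verify: each hot-path group in
tcp_sock is bracketed by zero-size marker members, and tcp_struct_check()
asserts at build time on the distance between the two markers, with a few
bytes of slack for architecture-dependent padding. Below is a minimal
user-space sketch of the same idea (made-up names, not the kernel's own
__cacheline_group_*() / CACHELINE_ASSERT_GROUP_SIZE() helpers); it relies
on the zero-length array extension, so it builds with GCC or Clang:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical group bracketed by zero-size markers. */
struct demo_sock {
	uint8_t  earlier_member;	/* something before the group */
	uint8_t  grp_begin[0];		/* start-of-group marker, offset 1 */
	uint32_t a;			/* offset 4 */
	uint64_t b;			/* offset 8 */
	uint8_t  grp_end[0];		/* end-of-group marker, offset 16 */
};

/* Stand-in for CACHELINE_ASSERT_GROUP_SIZE(): the marker-to-marker
 * distance may not exceed the budget. The slack term plays the role of
 * the "+ 4" below, which leaves room for padding that 32-bit arches may
 * insert before 8-byte-aligned u64 fields.
 */
#define GROUP_SIZE_AT_MOST(type, begin, end, size)			\
	_Static_assert(offsetof(type, end) - offsetof(type, begin) <= (size), \
		       "group grew past its size budget")

GROUP_SIZE_AT_MOST(struct demo_sock, grp_begin, grp_end, 12 + 4);

The individual members of a group are checked separately with
CACHELINE_ASSERT_GROUP_MEMBER(), as in the tcp.c hunks below.
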
Below are the trimmed pahole outcomes before and after this patch:

[BEFORE THIS PATCH]
struct tcp_sock {
	[...]
	__cacheline_group_begin__tcp_sock_write_txrx[0]; /* 2585 0 */
	/* XXX 3 bytes hole, try to pack */
	[...]
	struct tcp_options_received rx_opt; /* 2652 24 */
	u8 nonagle:4; /* 2676: 0 1 */
	u8 rate_app_limited:1; /* 2676: 4 1 */
	/* XXX 3 bits hole, try to pack */
	__cacheline_group_end__tcp_sock_write_txrx[0]; /* 2677 0 */
	/* XXX 3 bytes hole, try to pack */
	__cacheline_group_begin__tcp_sock_write_rx[0] __attribute__((__aligned__(8))); /* 2680 0 */
	[...]
	__cacheline_group_end__tcp_sock_write_rx[0]; /* 2776 0 */
	[...]
	/* size: 3264, cachelines: 51, members: 163 */
}

[AFTER THIS PATCH]
struct tcp_sock {
	[...]
	__cacheline_group_begin__tcp_sock_write_txrx[0]; /* 2585 0 */
	u8 nonagle:4; /* 2585: 0 1 */
	u8 rate_app_limited:1; /* 2585: 4 1 */
	/* XXX 3 bits hole, try to pack */
	/* XXX 2 bytes hole, try to pack */
	[...]
	struct tcp_options_received rx_opt; /* 2652 24 */
	__cacheline_group_end__tcp_sock_write_txrx[0]; /* 2676 0 */
	/* XXX 4 bytes hole, try to pack */
	__cacheline_group_begin__tcp_sock_write_rx[0] __attribute__((__aligned__(8))); /* 2680 0 */
	[...]
	__cacheline_group_end__tcp_sock_write_rx[0]; /* 2776 0 */
	[...]
	/* size: 3264, cachelines: 51, members: 163 */
}
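
The effect of the move can be reproduced in isolation. The toy structs
below (made-up names; a user-space sketch rather than tcp_sock itself,
again relying on GCC/Clang extensions for zero-length arrays and u8
bitfields) show the same mechanics as the dumps above: once the two flag
bits sit in the hole right behind the begin marker, the marker-to-marker
span shrinks by one byte, the same one-byte reduction as the
92 + 4 -> 91 + 4 change asserted in tcp_struct_check():

#include <stddef.h>
#include <stdint.h>

/* Before: the first real member of the group needs 4-byte alignment, so
 * a 3-byte hole follows the begin marker, and the flag bits occupy one
 * byte at the end of the group.
 */
struct grp_before {
	uint8_t  earlier;		/* pushes the marker to an odd offset */
	uint8_t  begin[0];		/* offset 1 */
	/* 3-byte hole: 'flags' below must be 4-byte aligned */
	uint32_t flags;			/* offset 4, stand-in for pred_flags */
	uint64_t clock_cache;		/* offset 8 */
	uint8_t  nonagle:4,		/* offset 16 */
		 rate_app_limited:1;
	uint8_t  end[0];		/* offset 17: the group spans 16 bytes */
};

/* After: the flag byte reuses the hole right behind the begin marker. */
struct grp_after {
	uint8_t  earlier;
	uint8_t  begin[0];		/* offset 1 */
	uint8_t  nonagle:4,		/* offset 1, fills part of the old hole */
		 rate_app_limited:1;
	/* 2-byte hole remains before 'flags' */
	uint32_t flags;			/* offset 4 */
	uint64_t clock_cache;		/* offset 8 */
	uint8_t  end[0];		/* offset 16: the group spans 15 bytes */
};

_Static_assert(offsetof(struct grp_before, end) -
	       offsetof(struct grp_before, begin) == 16, "before: 16 bytes");
_Static_assert(offsetof(struct grp_after, end) -
	       offsetof(struct grp_after, begin) == 15, "after: 15 bytes");

As in the dumps, the total structure size is unchanged: the byte freed at
the end of the group simply becomes part of the hole in front of the
tcp_sock_write_rx group (3 bytes before the patch, 4 bytes after).
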
Signed-off-by: Chia-Yu Chang <chia-yu.chang@...ia-bell-labs.com>
---
 include/linux/tcp.h | 4 ++--
 net/ipv4/tcp.c      | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index a8af71623ba7..0c82a8c942dc 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -286,6 +286,8 @@ struct tcp_sock {
 	 * Header prediction flags
 	 * 0x5?10 << 16 + snd_wnd in net byte order
 	 */
+	u8 nonagle : 4,/* Disable Nagle algorithm? */
+		rate_app_limited:1; /* rate_{delivered,interval_us} limited? */
 	__be32 pred_flags;
 	u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
 	u64 tcp_mstamp; /* most recent packet received/sent */
@@ -304,8 +306,6 @@ struct tcp_sock {
 	 * Options received (usually on last packet, some only on SYN packets).
 	 */
 	struct tcp_options_received rx_opt;
-	u8 nonagle : 4,/* Disable Nagle algorithm? */
-		rate_app_limited:1; /* rate_{delivered,interval_us} limited? */
 	__cacheline_group_end(tcp_sock_write_txrx);
 
 	/* RX read-write hotpath cache lines */
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0ae265d39184..bc9ab715fc2b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -5130,7 +5130,7 @@ static void __init tcp_struct_check(void)
 	/* 32bit arches with 8byte alignment on u64 fields might need padding
 	 * before tcp_clock_cache.
 	 */
-	CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 92 + 4);
+	CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 91 + 4);
 
 	/* RX read-write hotpath cache lines */
 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_received);
@@ -5147,7 +5147,7 @@ static void __init tcp_struct_check(void)
 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_acked);
 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_est);
 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcvq_space);
-	CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_rx, 99);
+	CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_rx, 96);
 }
 
 void __init tcp_init(void)
--
2.34.1