Message-ID: <20250918155532.751173-4-edumazet@google.com>
Date: Thu, 18 Sep 2025 15:55:28 +0000
From: Eric Dumazet <edumazet@...gle.com>
To: "David S . Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: Simon Horman <horms@...nel.org>, Neal Cardwell <ncardwell@...gle.com>,
Willem de Bruijn <willemb@...gle.com>, Kuniyuki Iwashima <kuniyu@...gle.com>, netdev@...r.kernel.org,
eric.dumazet@...il.com, Eric Dumazet <edumazet@...gle.com>
Subject: [PATCH net-next 3/7] tcp: move tcp->rcv_tstamp to tcp_sock_write_txrx group

tcp_ack() writes this field, so it belongs in the tcp_sock_write_txrx group.

Signed-off-by: Eric Dumazet <edumazet@...gle.com>
---
 Documentation/networking/net_cachelines/tcp_sock.rst | 2 +-
 include/linux/tcp.h                                   | 4 ++--
 net/ipv4/tcp.c                                        | 6 +++---
3 files changed, 6 insertions(+), 6 deletions(-)
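
For context, tcp_struct_check() enforces these groupings at build time:
zero-size marker arrays bracket each named group of fields, and
offsetof()-based asserts pin both group membership and total group size.
A minimal compile-and-run userspace sketch of the same technique follows;
the macro names mirror the kernel's (see include/linux/cache.h), but
struct demo_sock and its fields are made up for illustration.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Zero-size arrays are a GNU extension, as used in the kernel. */
#define __cacheline_group_begin(GROUP) \
	uint8_t __cacheline_group_begin__##GROUP[0]
#define __cacheline_group_end(GROUP) \
	uint8_t __cacheline_group_end__##GROUP[0]

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Build fails if MEMBER lies outside the [begin, end] markers of GROUP. */
#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER)		      \
	static_assert(offsetof(TYPE, MEMBER) >=				      \
		      offsetofend(TYPE, __cacheline_group_begin__##GROUP) &&  \
		      offsetofend(TYPE, MEMBER) <=			      \
		      offsetof(TYPE, __cacheline_group_end__##GROUP),	      \
		      #MEMBER " is not inside group " #GROUP)

/* Build fails if GROUP grows beyond SIZE bytes. */
#define CACHELINE_ASSERT_GROUP_SIZE(TYPE, GROUP, SIZE)			      \
	static_assert(offsetof(TYPE, __cacheline_group_end__##GROUP) -	      \
		      offsetofend(TYPE, __cacheline_group_begin__##GROUP) <=  \
		      (SIZE), "group " #GROUP " is larger than " #SIZE)

struct demo_sock {
	__cacheline_group_begin(read_rx);
	uint32_t copied_seq;
	uint32_t snd_wl1;
	__cacheline_group_end(read_rx);

	__cacheline_group_begin(write_txrx);
	uint32_t rcv_wnd;
	uint32_t rcv_tstamp;	/* written from the ACK path, as in this patch */
	__cacheline_group_end(write_txrx);
};

CACHELINE_ASSERT_GROUP_MEMBER(struct demo_sock, write_txrx, rcv_tstamp);
CACHELINE_ASSERT_GROUP_SIZE(struct demo_sock, read_rx, 8);

int main(void)
{
	printf("read_rx group spans %zu bytes\n",
	       offsetof(struct demo_sock, __cacheline_group_end__read_rx) -
	       offsetofend(struct demo_sock, __cacheline_group_begin__read_rx));
	return 0;
}

Because the checks run at compile time, moving a field out of its declared
group, as this patch does with rcv_tstamp, breaks the build unless the
matching CACHELINE_ASSERT_GROUP_* lines are updated in the same change.
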
diff --git a/Documentation/networking/net_cachelines/tcp_sock.rst b/Documentation/networking/net_cachelines/tcp_sock.rst
index d4dc018009451261c81a46dac2d6322005901c99..429df29fba8bc08bce519870e403815780a2182b 100644
--- a/Documentation/networking/net_cachelines/tcp_sock.rst
+++ b/Documentation/networking/net_cachelines/tcp_sock.rst
@@ -26,7 +26,7 @@ u64 bytes_acked read_w
u32 dsack_dups
u32 snd_una read_mostly read_write tcp_wnd_end,tcp_urg_mode,tcp_minshall_check,tcp_cwnd_validate(tx);tcp_ack,tcp_may_update_window,tcp_clean_rtx_queue(write),tcp_ack_tstamp(rx)
u32 snd_sml read_write tcp_minshall_check,tcp_minshall_update
-u32 rcv_tstamp read_mostly tcp_ack
+u32 rcv_tstamp read_write read_write tcp_ack
void * tcp_clean_acked read_mostly tcp_ack
u32 lsndtime read_write tcp_slow_start_after_idle_check,tcp_event_data_sent
u32 last_oow_ack_time
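
(The two access columns in this table record fast path tx and rx access;
after the move, rcv_tstamp is listed as read_write on both paths instead
of read_mostly.)
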
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 3ca5ed02de6d48e64c26744f117d72675f84a3f3..1e6c2ded22c985134bd48b7bf5fd464e01e2fd51 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -238,7 +238,6 @@ struct tcp_sock {
/* RX read-mostly hotpath cache lines */
__cacheline_group_begin(tcp_sock_read_rx);
u32 copied_seq; /* Head of yet unread data */
- u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
u32 snd_wl1; /* Sequence for window update */
u32 tlp_high_seq; /* snd_nxt at the time of TLP */
u32 rttvar_us; /* smoothed mdev_max */
@@ -246,13 +245,13 @@ struct tcp_sock {
u16 advmss; /* Advertised MSS */
u16 urg_data; /* Saved octet of OOB data and control flags */
u32 lost; /* Total data packets lost incl. rexmits */
+ u32 snd_ssthresh; /* Slow start size threshold */
struct minmax rtt_min;
/* OOO segments go in this rbtree. Socket lock must be held. */
struct rb_root out_of_order_queue;
#if defined(CONFIG_TLS_DEVICE)
void (*tcp_clean_acked)(struct sock *sk, u32 acked_seq);
#endif
- u32 snd_ssthresh; /* Slow start size threshold */
u8 recvmsg_inq : 1;/* Indicate # of bytes in queue upon recvmsg */
__cacheline_group_end(tcp_sock_read_rx);
@@ -319,6 +318,7 @@ struct tcp_sock {
*/
u32 app_limited; /* limited until "delivered" reaches this val */
u32 rcv_wnd; /* Current receiver window */
+ u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
/*
* Options received (usually on last packet, some only on SYN packets).
*/
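
Note that the tcp.h hunks also move snd_ssthresh up within
tcp_sock_read_rx, presumably to keep the u32 fields packed together;
only rcv_tstamp actually leaves the group, which is why the size asserts
below shrink by exactly sizeof(u32).
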
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index d6d0d970e014d4352050115e84d3b5a56f8bed26..24787d2b04aa3d442175df41e0f507ad60398120 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -5111,7 +5111,6 @@ static void __init tcp_struct_check(void)
/* RX read-mostly hotpath cache lines */
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, copied_seq);
- CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rcv_tstamp);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_wl1);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tlp_high_seq);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rttvar_us);
@@ -5124,9 +5123,9 @@ static void __init tcp_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_ssthresh);
#if IS_ENABLED(CONFIG_TLS_DEVICE)
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tcp_clean_acked);
- CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 77);
+ CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 73);
#else
- CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 69);
+ CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 65);
#endif
/* TX read-write hotpath cache lines */
@@ -5165,6 +5164,7 @@ static void __init tcp_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, received_ecn_bytes);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, app_limited);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_wnd);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_tstamp);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rx_opt);
/* 32bit arches with 8byte alignment on u64 fields might need padding
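
For reference, the updated CACHELINE_ASSERT_GROUP_SIZE values follow
directly from removing one u32 from tcp_sock_read_rx: 77 - 4 = 73 with
CONFIG_TLS_DEVICE, and 69 - 4 = 65 without.
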
--
2.51.0.384.g4c02a37b29-goog