Message-ID: <20240429134025.1233626-2-edumazet@google.com>
Date: Mon, 29 Apr 2024 13:40:21 +0000
From: Eric Dumazet <edumazet@...gle.com>
To: "David S . Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: netdev@...r.kernel.org, eric.dumazet@...il.com,
Eric Dumazet <edumazet@...gle.com>
Subject: [PATCH net-next 1/5] net: move sysctl_max_skb_frags to net_hotdata
sysctl_max_skb_frags is used in TCP and MPTCP fast paths;
move it to net_hotdata for better cache locality.
Signed-off-by: Eric Dumazet <edumazet@...gle.com>
---
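Note (illustration only, not part of the change): below is a minimal userspace
sketch of the net_hotdata pattern this patch relies on. The names
example_hotdata, example_frag_limit and EXAMPLE_MAX_SKB_FRAGS are hypothetical
stand-ins, not kernel identifiers; aligned(64) and the volatile load stand in
for the kernel's __cacheline_aligned and READ_ONCE(). The point is that
read-mostly fast-path tunables live in one cacheline-aligned structure, so the
TCP and MPTCP transmit paths touch a single hot cache line while sysctl writers
may update a field concurrently.

/* Hypothetical userspace sketch of the net_hotdata idea (simplified names,
 * not the real definitions from include/net/hotdata.h / net/core/hotdata.c).
 */
#include <stdio.h>

#define EXAMPLE_MAX_SKB_FRAGS 17	/* stand-in for CONFIG_MAX_SKB_FRAGS */

struct example_hotdata {
	int max_backlog;
	int dev_tx_weight;
	int dev_rx_weight;
	int sysctl_max_skb_frags;	/* the field this patch relocates */
};

/* aligned(64) plays the role of __cacheline_aligned: one cache line holds
 * all of these read-mostly fields.
 */
static struct example_hotdata example_hotdata __attribute__((aligned(64))) = {
	.max_backlog		= 1000,
	.dev_tx_weight		= 64,
	.dev_rx_weight		= 64,
	.sysctl_max_skb_frags	= EXAMPLE_MAX_SKB_FRAGS,
};

/* Fast-path readers use a marked (volatile) load, standing in for READ_ONCE(),
 * because a sysctl write may change the value at any time.
 */
static inline int example_frag_limit(void)
{
	return *(volatile int *)&example_hotdata.sysctl_max_skb_frags;
}

int main(void)
{
	printf("frag limit: %d\n", example_frag_limit());
	return 0;
}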
include/linux/skbuff.h | 2 --
include/net/hotdata.h | 1 +
net/core/hotdata.c | 1 +
net/core/skbuff.c | 5 +----
net/core/sysctl_net_core.c | 2 +-
net/ipv4/tcp.c | 3 ++-
net/mptcp/protocol.c | 3 ++-
7 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f76825e5b92a334f7726d7f7c99aa60ec69a8e07..4f99a39db2eb2868d9a14334070d4a574ba1f9a3 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -353,8 +353,6 @@ struct sk_buff;
#define MAX_SKB_FRAGS CONFIG_MAX_SKB_FRAGS
-extern int sysctl_max_skb_frags;
-
/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
* segment using its current segmentation instead.
*/
diff --git a/include/net/hotdata.h b/include/net/hotdata.h
index 003667a1efd6b63fc0f0d7cadd2c8472281331b0..a6cff65904267f338fbd258d23be79d46a062f9e 100644
--- a/include/net/hotdata.h
+++ b/include/net/hotdata.h
@@ -38,6 +38,7 @@ struct net_hotdata {
int max_backlog;
int dev_tx_weight;
int dev_rx_weight;
+ int sysctl_max_skb_frags;
};
#define inet_ehash_secret net_hotdata.tcp_protocol.secret
diff --git a/net/core/hotdata.c b/net/core/hotdata.c
index c8a7a451c18a383d091e413a510d84d163473f2f..f17cbb4807b99937b272be12953f790c66cc2cd1 100644
--- a/net/core/hotdata.c
+++ b/net/core/hotdata.c
@@ -18,5 +18,6 @@ struct net_hotdata net_hotdata __cacheline_aligned = {
.max_backlog = 1000,
.dev_tx_weight = 64,
.dev_rx_weight = 64,
+ .sysctl_max_skb_frags = MAX_SKB_FRAGS,
};
EXPORT_SYMBOL(net_hotdata);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0c8b82750000f63b097cb4b7b990c647c81019df..65779b8f0b126a1c039cf24a47474c0cb80ff6ae 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -109,9 +109,6 @@ static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#define SKB_SMALL_HEAD_HEADROOM \
SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)
-int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
-EXPORT_SYMBOL(sysctl_max_skb_frags);
-
/* kcm_write_msgs() relies on casting paged frags to bio_vec to use
* iov_iter_bvec(). These static asserts ensure the cast is valid is long as the
* netmem is a page.
@@ -7040,7 +7037,7 @@ static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
ssize_t maxsize, gfp_t gfp)
{
- size_t frag_limit = READ_ONCE(sysctl_max_skb_frags);
+ size_t frag_limit = READ_ONCE(net_hotdata.sysctl_max_skb_frags);
struct page *pages[8], **ppages = pages;
ssize_t spliced = 0, ret = 0;
unsigned int i;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 903ab4a51c178a4705b7eb610958c81c5ebcfdf5..e75375d54b9e50673af28f6d6b3bb83fc74cb1f8 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -595,7 +595,7 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "max_skb_frags",
- .data = &sysctl_max_skb_frags,
+ .data = &net_hotdata.sysctl_max_skb_frags,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4ec0f4feee003d91fa5ae37ed4dd50e09c4c874a..388f6e115bf168e6f70b762096a984a2cacfa5c9 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -280,6 +280,7 @@
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>
+#include <net/hotdata.h>
#include <net/rps.h>
/* Track pending CMSGs. */
@@ -1188,7 +1189,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
- if (i >= READ_ONCE(sysctl_max_skb_frags)) {
+ if (i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) {
tcp_mark_push(tp, skb);
goto new_segment;
}
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 4b13ca362efa395a2ef7a92e553a8c49feebec79..aff17597e6a71ecc4b9aef4d02f039318fa1fe7f 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -20,6 +20,7 @@
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
+#include <net/hotdata.h>
#include <net/xfrm.h>
#include <asm/ioctls.h>
#include "protocol.h"
@@ -1272,7 +1273,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
i = skb_shinfo(skb)->nr_frags;
can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset);
- if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) {
+ if (!can_coalesce && i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) {
tcp_mark_push(tcp_sk(ssk), skb);
goto alloc_skb;
}
--
2.44.0.769.g3c40516874-goog