Message-ID: <20240305160413.2231423-9-edumazet@google.com>
Date: Tue, 5 Mar 2024 16:04:03 +0000
From: Eric Dumazet <edumazet@...gle.com>
To: "David S . Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: netdev@...r.kernel.org, David Ahern <dsahern@...nel.org>,
Willem de Bruijn <willemb@...gle.com>, Soheil Hassas Yeganeh <soheil@...gle.com>,
Neal Cardwell <ncardwell@...gle.com>, eric.dumazet@...il.com,
Eric Dumazet <edumazet@...gle.com>
Subject: [PATCH net-next 08/18] net: move dev_tx_weight to net_hotdata

dev_tx_weight is used in the tx fast path.
Move it to net_hotdata for better cache locality.

Signed-off-by: Eric Dumazet <edumazet@...gle.com>
---
 include/linux/netdevice.h  | 1 -
 include/net/hotdata.h      | 1 +
 net/core/dev.c             | 1 -
 net/core/hotdata.c         | 1 +
 net/core/sysctl_net_core.c | 2 +-
 net/sched/sch_generic.c    | 3 ++-
 6 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 044d6f5b2ace3e2decd4296e01c8d3e200c6c7dc..c2a735edc44be95fbd4bbd1e234d883582bfde10 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4788,7 +4788,6 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);

 extern int dev_rx_weight;
-extern int dev_tx_weight;

 enum {
 	NESTED_SYNC_IMM_BIT,
diff --git a/include/net/hotdata.h b/include/net/hotdata.h
index a4a8df3bc0dea1b4c9589bd70f7ac457ebc5b634..2b0eb6b7f1f2c9b1273b07e06ba0b5c12a2934bf 100644
--- a/include/net/hotdata.h
+++ b/include/net/hotdata.h
@@ -23,6 +23,7 @@ struct net_hotdata {
 	int netdev_budget_usecs;
 	int tstamp_prequeue;
 	int max_backlog;
+	int dev_tx_weight;
 };

 extern struct net_hotdata net_hotdata;
diff --git a/net/core/dev.c b/net/core/dev.c
index 1b112c4db983c2d7cd280bc8c2ebc621ea3c6145..3f8d451f42fb9ba621f1ea19e784c7f0be0bf2d8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4409,7 +4409,6 @@ int weight_p __read_mostly = 64; /* old backlog weight */
 int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
 int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
 int dev_rx_weight __read_mostly = 64;
-int dev_tx_weight __read_mostly = 64;

 /* Called with irq disabled */
 static inline void ____napi_schedule(struct softnet_data *sd,
diff --git a/net/core/hotdata.c b/net/core/hotdata.c
index 35ed5a83ecc7ebda513fe4fafc596e053f0252c5..ec8c3b48e8fea57491c5870055cffb44c779db44 100644
--- a/net/core/hotdata.c
+++ b/net/core/hotdata.c
@@ -16,5 +16,6 @@ struct net_hotdata net_hotdata __cacheline_aligned = {
 	.tstamp_prequeue = 1,
 	.max_backlog = 1000,
+	.dev_tx_weight = 64,
 };
 EXPORT_SYMBOL(net_hotdata);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 8eaeeb289914258f90cf940e906d5c6be0cc0cd6..a30016a8660e09db89b3153e4103c185a800a2ef 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -302,7 +302,7 @@ static int proc_do_dev_weight(struct ctl_table *table, int write,
 	if (!ret && write) {
 		weight = READ_ONCE(weight_p);
 		WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
-		WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
+		WRITE_ONCE(net_hotdata.dev_tx_weight, weight * dev_weight_tx_bias);
 	}
 	mutex_unlock(&dev_weight_mutex);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 9b3e9262040b6ef6516752c558c8997bf4054123..ff5336493777507242320d7e9214c637663f0734 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -27,6 +27,7 @@
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
+#include <net/hotdata.h>
 #include <trace/events/qdisc.h>
 #include <trace/events/net.h>
 #include <net/xfrm.h>
@@ -409,7 +410,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets)

 void __qdisc_run(struct Qdisc *q)
 {
-	int quota = READ_ONCE(dev_tx_weight);
+	int quota = READ_ONCE(net_hotdata.dev_tx_weight);
 	int packets;

 	while (qdisc_restart(q, &packets)) {
--
2.44.0.278.ge034bb2e1d-goog