Message-Id: <20220609063412.2205738-5-eric.dumazet@gmail.com>
Date: Wed, 8 Jun 2022 23:34:09 -0700
From: Eric Dumazet <eric.dumazet@...il.com>
To: "David S . Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: netdev <netdev@...r.kernel.org>,
Soheil Hassas Yeganeh <soheil@...gle.com>,
Wei Wang <weiwan@...gle.com>,
Shakeel Butt <shakeelb@...gle.com>,
Neal Cardwell <ncardwell@...gle.com>,
Eric Dumazet <edumazet@...gle.com>,
Eric Dumazet <eric.dumazet@...il.com>
Subject: [PATCH net-next 4/7] net: implement per-cpu reserves for memory_allocated
From: Eric Dumazet <edumazet@...gle.com>
We plan to keep sk->sk_forward_alloc as small as possible
in future patches.
This means we are going to call sk_memory_allocated_add()
and sk_memory_allocated_sub() more often.
Implement a per-cpu cache of +1/-1 MB, to reduce the number
of changes to sk->sk_prot->memory_allocated, which
would otherwise be a source of false sharing.
Signed-off-by: Eric Dumazet <edumazet@...gle.com>
---
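Note: below is a minimal, userspace-only sketch of the batching idea,
for illustration only (it is not the kernel code, and the names and the
256-page threshold are made up for the example): small +/- updates
accumulate in a thread-local reserve and are flushed to the shared
atomic only when the local imbalance crosses a threshold, so the shared
cache line is dirtied far less often.

	#include <stdatomic.h>
	#include <stdio.h>

	#define PCPU_RESERVE 256	/* flush threshold, in page units */

	static atomic_long memory_allocated;	 /* shared, contended counter */
	static _Thread_local long local_reserve; /* per-thread +/- cache */

	static void memory_allocated_add(long amt)
	{
		local_reserve += amt;
		if (local_reserve >= PCPU_RESERVE ||
		    local_reserve <= -PCPU_RESERVE) {
			/* Flush the whole local imbalance in one shot. */
			atomic_fetch_add(&memory_allocated, local_reserve);
			local_reserve = 0;
		}
	}

	int main(void)
	{
		/* Most of these calls never touch the shared counter. */
		for (int i = 0; i < 1000; i++)
			memory_allocated_add(1);
		printf("flushed: %ld, cached: %ld\n",
		       atomic_load(&memory_allocated), local_reserve);
		return 0;
	}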
include/net/sock.h | 38 +++++++++++++++++++++++++++++---------
1 file changed, 29 insertions(+), 9 deletions(-)
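For reference, with 4 KB pages (PAGE_SHIFT == 12) the reserve below
evaluates to

	SK_MEMORY_PCPU_RESERVE = 1 << (20 - 12) = 256 pages = 1 MB per cpu

i.e. each cpu may accumulate up to 1 MB of slack in either direction
before touching the shared atomic.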
diff --git a/include/net/sock.h b/include/net/sock.h
index 825f8cbf791f02d798f17dd4f7a2659cebb0e98a..59040fee74e7de8d63fbf719f46e172906c134bb 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1397,22 +1397,48 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
return !!*sk->sk_prot->memory_pressure;
}
+static inline long
+proto_memory_allocated(const struct proto *prot)
+{
+ return max(0L, atomic_long_read(prot->memory_allocated));
+}
+
static inline long
sk_memory_allocated(const struct sock *sk)
{
- return atomic_long_read(sk->sk_prot->memory_allocated);
+ return proto_memory_allocated(sk->sk_prot);
}
+/* 1 MB per cpu, in page units */
+#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
+
static inline long
sk_memory_allocated_add(struct sock *sk, int amt)
{
- return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
+ int local_reserve;
+
+ preempt_disable();
+ local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
+ if (local_reserve >= SK_MEMORY_PCPU_RESERVE) {
+ __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
+ atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
+ }
+ preempt_enable();
+ return sk_memory_allocated(sk);
}
static inline void
sk_memory_allocated_sub(struct sock *sk, int amt)
{
- atomic_long_sub(amt, sk->sk_prot->memory_allocated);
+ int local_reserve;
+
+ preempt_disable();
+ local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
+ if (local_reserve <= -SK_MEMORY_PCPU_RESERVE) {
+ __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
+ atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
+ }
+ preempt_enable();
}
#define SK_ALLOC_PERCPU_COUNTER_BATCH 16
@@ -1441,12 +1467,6 @@ proto_sockets_allocated_sum_positive(struct proto *prot)
return percpu_counter_sum_positive(prot->sockets_allocated);
}
-static inline long
-proto_memory_allocated(struct proto *prot)
-{
- return atomic_long_read(prot->memory_allocated);
-}
-
static inline bool
proto_memory_pressure(struct proto *prot)
{
--
2.36.1.255.ge46751e96f-goog