Message-ID: <Y4+rNYF9WZyJyBQp@cmpxchg.org>
Date: Tue, 6 Dec 2022 21:51:01 +0100
From: Johannes Weiner <hannes@...xchg.org>
To: Eric Dumazet <edumazet@...gle.com>
Cc: Ivan Babrou <ivan@...udflare.com>, Linux MM <linux-mm@...ck.org>,
Linux Kernel Network Developers <netdev@...r.kernel.org>,
linux-kernel <linux-kernel@...r.kernel.org>,
Michal Hocko <mhocko@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Shakeel Butt <shakeelb@...gle.com>,
Muchun Song <songmuchun@...edance.com>,
Andrew Morton <akpm@...ux-foundation.org>,
"David S. Miller" <davem@...emloft.net>,
Hideaki YOSHIFUJI <yoshfuji@...ux-ipv6.org>,
David Ahern <dsahern@...nel.org>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, cgroups@...r.kernel.org,
kernel-team <kernel-team@...udflare.com>
Subject: Re: Low TCP throughput due to vmpressure with swap enabled
On Tue, Dec 06, 2022 at 08:13:50PM +0100, Eric Dumazet wrote:
> On Tue, Dec 6, 2022 at 8:00 PM Johannes Weiner <hannes@...xchg.org> wrote:
> > @@ -1701,10 +1701,10 @@ void mem_cgroup_sk_alloc(struct sock *sk);
> >  void mem_cgroup_sk_free(struct sock *sk);
> >  static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
> >  {
> > -	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
> > +	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->socket_pressure)
>
> && READ_ONCE(memcg->socket_pressure))
>
> >  		return true;
> >  	do {
> > -		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
> > +		if (memcg->socket_pressure)
>
> if (READ_ONCE(...))
Good point, I'll add those.
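For reference, the flag is flipped locklessly from the charge path while
the socket code polls it, so the accesses pair up roughly like this
(illustrative sketch, not a hunk from the patch):

	/* Charge path (writer), no lock held: */
	WRITE_ONCE(memcg->socket_pressure, true);

	/* Socket code (reader), e.g. mem_cgroup_under_socket_pressure(): */
	if (READ_ONCE(memcg->socket_pressure))
		return true;

The _ONCE annotations document the intentional data race and keep the
compiler from tearing, fusing, or caching the accesses.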
> > @@ -7195,10 +7194,10 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
> >  		struct page_counter *fail;
> > 
> >  		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
> > -			memcg->tcpmem_pressure = 0;
>
> Orthogonal to your patch, but:
>
> Maybe avoid touching this cache line too often and use READ/WRITE_ONCE() ?
>
> if (READ_ONCE(memcg->socket_pressure))
> WRITE_ONCE(memcg->socket_pressure, false);
Ah, that's a good idea.
I think it'll be fine in the failure case, since that's associated
with OOM and total performance breakdown anyway.
But certainly, in the common case of the charge succeeding, we should
not keep hammering false into that variable over and over.
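In code, the idea is a check-before-write on the success path (sketch
only, isolating the pattern):

	/*
	 * Only dirty the shared cache line when the flag actually
	 * flips, instead of writing false on every successful charge.
	 */
	if (unlikely(READ_ONCE(memcg->socket_pressure)))
		WRITE_ONCE(memcg->socket_pressure, false);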
How about the delta below? I also flipped the branches around to keep
the common path at the first indentation level, hopefully making that
a bit clearer too.
Thanks for taking a look, Eric!
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ef1c388be5b3..13ae10116895 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1701,10 +1701,11 @@ void mem_cgroup_sk_alloc(struct sock *sk);
 void mem_cgroup_sk_free(struct sock *sk);
 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
-	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->socket_pressure)
+	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
+	    READ_ONCE(memcg->socket_pressure))
 		return true;
 	do {
-		if (memcg->socket_pressure)
+		if (READ_ONCE(memcg->socket_pressure))
 			return true;
 	} while ((memcg = parent_mem_cgroup(memcg)));
 	return false;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0d4b9dbe775a..96c4ec0f11ca 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7193,31 +7193,29 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
 		struct page_counter *fail;
 
-		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
-			memcg->socket_pressure = false;
-			return true;
+		if (!page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
+			WRITE_ONCE(memcg->socket_pressure, true);
+			if (gfp_mask & __GFP_NOFAIL) {
+				page_counter_charge(&memcg->tcpmem, nr_pages);
+				return true;
+			}
+			return false;
 		}
-		memcg->socket_pressure = true;
+		if (unlikely(READ_ONCE(memcg->socket_pressure)))
+			WRITE_ONCE(memcg->socket_pressure, false);
+	}
+
+	if (try_charge(memcg, gfp_mask & ~__GFP_NOFAIL, nr_pages) < 0) {
+		WRITE_ONCE(memcg->socket_pressure, true);
 		if (gfp_mask & __GFP_NOFAIL) {
-			page_counter_charge(&memcg->tcpmem, nr_pages);
+			try_charge(memcg, gfp_mask, nr_pages);
+			mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
 			return true;
 		}
 		return false;
 	}
-
-	if (try_charge(memcg, gfp_mask & ~__GFP_NOFAIL, nr_pages) == 0) {
-		memcg->socket_pressure = false;
-		goto success;
-	}
-	memcg->socket_pressure = true;
-	if (gfp_mask & __GFP_NOFAIL) {
-		try_charge(memcg, gfp_mask, nr_pages);
-		goto success;
-	}
-
-	return false;
-
-success:
+	if (unlikely(READ_ONCE(memcg->socket_pressure)))
+		WRITE_ONCE(memcg->socket_pressure, false);
 	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
 	return true;
 }
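For easier review, here is roughly what mem_cgroup_charge_skmem() ends
up looking like with the delta applied on top of the previous patch
(hand-reconstructed from the diffs, so double-check against the tree):

	bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
				     gfp_t gfp_mask)
	{
		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
			struct page_counter *fail;

			/* cgroup1: charge the separate tcpmem counter */
			if (!page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
				WRITE_ONCE(memcg->socket_pressure, true);
				if (gfp_mask & __GFP_NOFAIL) {
					page_counter_charge(&memcg->tcpmem, nr_pages);
					return true;
				}
				return false;
			}
			/* Only dirty the cache line when the flag flips */
			if (unlikely(READ_ONCE(memcg->socket_pressure)))
				WRITE_ONCE(memcg->socket_pressure, false);
		}

		if (try_charge(memcg, gfp_mask & ~__GFP_NOFAIL, nr_pages) < 0) {
			WRITE_ONCE(memcg->socket_pressure, true);
			if (gfp_mask & __GFP_NOFAIL) {
				/* Forced charge, can't fail */
				try_charge(memcg, gfp_mask, nr_pages);
				mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
				return true;
			}
			return false;
		}
		if (unlikely(READ_ONCE(memcg->socket_pressure)))
			WRITE_ONCE(memcg->socket_pressure, false);
		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
		return true;
	}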