Message-ID: <20200316145630.GN11482@dhcp22.suse.cz>
Date: Mon, 16 Mar 2020 15:56:30 +0100
From: Michal Hocko <mhocko@...nel.org>
To: Chris Down <chris@...isdown.name>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Johannes Weiner <hannes@...xchg.org>,
Tejun Heo <tj@...nel.org>, Roman Gushchin <guro@...com>,
linux-mm@...ck.org, cgroups@...r.kernel.org,
linux-kernel@...r.kernel.org, kernel-team@...com
Subject: Re: [PATCH 2/6] mm, memcg: Prevent memory.max load tearing
On Thu 12-03-20 17:32:56, Chris Down wrote:
> This one is a bit more nuanced because we have memcg_max_mutex, which is
> mostly just used for enforcing invariants, but we still need READ_ONCE()
> since (despite its name) the mutex doesn't actually serialise reads of
> memory.max.
>
> On the write side (page_counter_set_max() and memory_max_write()) we use
> xchg(), which implies a full memory barrier, so writes are already fine.
>
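For readers unfamiliar with the problem: load tearing is the compiler
splitting one plain load into several narrower loads (or re-reading the
location between uses), so a concurrent writer can be observed
half-updated. Below is a minimal userspace sketch of the pattern, not
the actual mm/memcontrol.c code; the READ_ONCE() definition is a rough
approximation of the kernel macro, and names like limit_read() and
limit_write() are illustrative only.

/*
 * Userspace approximation of the kernel primitives; the real
 * READ_ONCE()/xchg() live in <linux/compiler.h> and the atomic
 * headers and are more elaborate. All names here are illustrative.
 */
#include <stdio.h>

#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))

struct limit {
	unsigned long max;	/* stand-in for page_counter::max */
};

/* Reader: one single-copy-atomic load; the compiler may neither
 * tear it into narrower loads nor re-read it later. */
static unsigned long limit_read(const struct limit *l)
{
	return READ_ONCE(l->max);
}

/* Writer: the kernel updates ->max with xchg(), an atomic
 * read-modify-write that implies a full memory barrier; the
 * GCC/Clang builtin below is a rough userspace equivalent. */
static unsigned long limit_write(struct limit *l, unsigned long new)
{
	return __atomic_exchange_n(&l->max, new, __ATOMIC_SEQ_CST);
}

int main(void)
{
	struct limit l = { .max = 1024 };

	limit_write(&l, 2048);
	printf("max=%lu\n", limit_read(&l));
	return 0;
}

The patch below is exactly the reader half of this pattern, applied to
each plain load of memory.max.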
> Signed-off-by: Chris Down <chris@...isdown.name>
> Cc: Andrew Morton <akpm@...ux-foundation.org>
> Cc: Johannes Weiner <hannes@...xchg.org>
> Cc: Roman Gushchin <guro@...com>
> Cc: Tejun Heo <tj@...nel.org>
> Cc: linux-mm@...ck.org
> Cc: cgroups@...r.kernel.org
> Cc: linux-kernel@...r.kernel.org
> Cc: kernel-team@...com
Acked-by: Michal Hocko <mhocko@...e.com>
> ---
> mm/memcontrol.c | 12 ++++++------
> 1 file changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index d32d3c0a16d4..aca2964ea494 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -1507,7 +1507,7 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
>
> pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
> K((u64)page_counter_read(&memcg->memory)),
> - K((u64)memcg->memory.max), memcg->memory.failcnt);
> + K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
> if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
> pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
> K((u64)page_counter_read(&memcg->swap)),
> @@ -1538,7 +1538,7 @@ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
> {
> unsigned long max;
>
> - max = memcg->memory.max;
> + max = READ_ONCE(memcg->memory.max);
> if (mem_cgroup_swappiness(memcg)) {
> unsigned long memsw_max;
> unsigned long swap_max;
> @@ -3006,7 +3006,7 @@ static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
> * Make sure that the new limit (memsw or memory limit) doesn't
> * break our basic invariant rule memory.max <= memsw.max.
> */
> - limits_invariant = memsw ? max >= memcg->memory.max :
> + limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
> max <= memcg->memsw.max;
> if (!limits_invariant) {
> mutex_unlock(&memcg_max_mutex);
> @@ -3753,8 +3753,8 @@ static int memcg_stat_show(struct seq_file *m, void *v)
> /* Hierarchical information */
> memory = memsw = PAGE_COUNTER_MAX;
> for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
> - memory = min(memory, mi->memory.max);
> - memsw = min(memsw, mi->memsw.max);
> + memory = min(memory, READ_ONCE(mi->memory.max));
> + memsw = min(memsw, READ_ONCE(mi->memsw.max));
> }
> seq_printf(m, "hierarchical_memory_limit %llu\n",
> (u64)memory * PAGE_SIZE);
> @@ -4257,7 +4257,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
> *pheadroom = PAGE_COUNTER_MAX;
>
> while ((parent = parent_mem_cgroup(memcg))) {
> - unsigned long ceiling = min(memcg->memory.max,
> + unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
> READ_ONCE(memcg->high));
> unsigned long used = page_counter_read(&memcg->memory);
>
> --
> 2.25.1
--
Michal Hocko
SUSE Labs