[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <E0A7CC1A-B02C-4210-A1DF-0600E027D5D0@linux.dev>
Date: Thu, 18 Jul 2024 19:20:14 +0800
From: Muchun Song <muchun.song@...ux.dev>
To: Vlastimil Babka <vbabka@...e.cz>
Cc: Muchun Song <songmuchun@...edance.com>, akpm@...ux-foundation.org,
hannes@...xchg.org, nphamcs@...il.com, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, Michal Hocko <mhocko@...nel.org>
Subject: Re: [PATCH] mm: list_lru: fix UAF for memory cgroup
> On Jul 18, 2024, at 18:30, Vlastimil Babka <vbabka@...e.cz> wrote:
>
> On 7/18/24 10:36 AM, Muchun Song wrote:
>> The mem_cgroup_from_slab_obj() is supposed to be called under rcu
>> lock or cgroup_mutex or others which could prevent returned memcg
>> from being freed. Fix it by adding missing rcu read lock.
>
> Was the UAF ever observed, or is this due to code review?
Just code review.
Thanks.
> Should there be some lockdep_assert somewhere?
>
It’s a good option to improve this. Maybe mem_cgroup_from_slab_obj() would be a good place for the assertion.
>> Fixes: 0a97c01cd20bb ("list_lru: allow explicit memcg and NUMA node selection")
>> Signed-off-by: Muchun Song <songmuchun@...edance.com>
>> ---
>> mm/list_lru.c | 24 ++++++++++++++++++------
>> 1 file changed, 18 insertions(+), 6 deletions(-)
>>
>> diff --git a/mm/list_lru.c b/mm/list_lru.c
>> index 3fd64736bc458..225da0778a3be 100644
>> --- a/mm/list_lru.c
>> +++ b/mm/list_lru.c
>> @@ -85,6 +85,7 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
>> }
>> #endif /* CONFIG_MEMCG_KMEM */
>>
>> +/* The caller must ensure the memcg lifetime. */
>> bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
>> struct mem_cgroup *memcg)
>> {
>> @@ -109,14 +110,20 @@ EXPORT_SYMBOL_GPL(list_lru_add);
>>
>> bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
>> {
>> + bool ret;
>> int nid = page_to_nid(virt_to_page(item));
>> - struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
>> - mem_cgroup_from_slab_obj(item) : NULL;
>> + struct mem_cgroup *memcg;
>>
>> - return list_lru_add(lru, item, nid, memcg);
>> + rcu_read_lock();
>> + memcg = list_lru_memcg_aware(lru) ? mem_cgroup_from_slab_obj(item) : NULL;
>> + ret = list_lru_add(lru, item, nid, memcg);
>> + rcu_read_unlock();
>> +
>> + return ret;
>> }
>> EXPORT_SYMBOL_GPL(list_lru_add_obj);
>>
>> +/* The caller must ensure the memcg lifetime. */
>> bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
>> struct mem_cgroup *memcg)
>> {
>> @@ -139,11 +146,16 @@ EXPORT_SYMBOL_GPL(list_lru_del);
>>
>> bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
>> {
>> + bool ret;
>> int nid = page_to_nid(virt_to_page(item));
>> - struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
>> - mem_cgroup_from_slab_obj(item) : NULL;
>> + struct mem_cgroup *memcg;
>>
>> - return list_lru_del(lru, item, nid, memcg);
>> + rcu_read_lock();
>> + memcg = list_lru_memcg_aware(lru) ? mem_cgroup_from_slab_obj(item) : NULL;
>> + ret = list_lru_del(lru, item, nid, memcg);
>> + rcu_read_unlock();
>> +
>> + return ret;
>> }
>> EXPORT_SYMBOL_GPL(list_lru_del_obj);
>>
>
Powered by blists - more mailing lists