[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <1502351536-9108-1-git-send-email-shuwang@redhat.com>
Date: Thu, 10 Aug 2017 15:52:16 +0800
From: shuwang@...hat.com
To: mingo@...hat.com, peterz@...radead.org
Cc: linux-kernel@...r.kernel.org, chuhu@...hat.com, liwang@...hat.com,
Shu Wang <shuwang@...hat.com>
Subject: [PATCH 1/1] sched/topology: fix memleak in __sdt_alloc()
From: Shu Wang <shuwang@...hat.com>
Found this issue with kmemleak. The sg and sgc allocated in __sdt_alloc()
may be leaked: each domain holds references to many groups, but
destroy_sched_domain() only drops the reference of the first group.
Onlining and offlining a CPU can trigger this leak and eventually cause OOM.
Reproducer on my 6-CPU machine:
while true
do
echo 0 > /sys/devices/system/cpu/cpu5/online;
echo 1 > /sys/devices/system/cpu/cpu5/online;
done
unreferenced object 0xffff88007d772a80 (size 64):
comm "cpuhp/5", pid 39, jiffies 4294719962 (age 35.251s)
hex dump (first 32 bytes):
c0 22 77 7d 00 88 ff ff 02 00 00 00 01 00 00 00 ."w}............
40 2a 77 7d 00 88 ff ff 00 00 00 00 00 00 00 00 @*w}............
backtrace:
[<ffffffff8176525a>] kmemleak_alloc+0x4a/0xa0
[<ffffffff8121efe1>] __kmalloc_node+0xf1/0x280
[<ffffffff810d94a8>] build_sched_domains+0x1e8/0xf20
[<ffffffff810da674>] partition_sched_domains+0x304/0x360
[<ffffffff81139557>] cpuset_update_active_cpus+0x17/0x40
[<ffffffff810bdb2e>] sched_cpu_activate+0xae/0xc0
[<ffffffff810900e0>] cpuhp_invoke_callback+0x90/0x400
[<ffffffff81090597>] cpuhp_up_callbacks+0x37/0xb0
[<ffffffff81090887>] cpuhp_thread_fun+0xd7/0xf0
[<ffffffff810b37e0>] smpboot_thread_fn+0x110/0x160
[<ffffffff810af5d9>] kthread+0x109/0x140
[<ffffffff81770e45>] ret_from_fork+0x25/0x30
[<ffffffffffffffff>] 0xffffffffffffffff
unreferenced object 0xffff88007d772a40 (size 64):
comm "cpuhp/5", pid 39, jiffies 4294719962 (age 35.251s)
hex dump (first 32 bytes):
03 00 00 00 00 00 00 00 00 04 00 00 00 00 00 00 ................
00 04 00 00 00 00 00 00 4f 3c fc ff 00 00 00 00 ........O<......
backtrace:
[<ffffffff8176525a>] kmemleak_alloc+0x4a/0xa0
[<ffffffff8121efe1>] __kmalloc_node+0xf1/0x280
[<ffffffff810da16d>] build_sched_domains+0xead/0xf20
[<ffffffff810da674>] partition_sched_domains+0x304/0x360
[<ffffffff81139557>] cpuset_update_active_cpus+0x17/0x40
[<ffffffff810bdb2e>] sched_cpu_activate+0xae/0xc0
[<ffffffff810900e0>] cpuhp_invoke_callback+0x90/0x400
[<ffffffff81090597>] cpuhp_up_callbacks+0x37/0xb0
[<ffffffff81090887>] cpuhp_thread_fun+0xd7/0xf0
[<ffffffff810b37e0>] smpboot_thread_fn+0x110/0x160
[<ffffffff810af5d9>] kthread+0x109/0x140
[<ffffffff81770e45>] ret_from_fork+0x25/0x30
[<ffffffffffffffff>] 0xffffffffffffffff
Reported-by: Chunyu Hu <chuhu@...hat.com>
Signed-off-by: Chunyu Hu <chuhu@...hat.com>
Signed-off-by: Shu Wang <shuwang@...hat.com>
---
kernel/sched/topology.c | 16 +++++++---------
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 79895ae..35c3c4d 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -337,7 +337,8 @@ static void free_sched_groups(struct sched_group *sg, int free_sgc)
if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
kfree(sg->sgc);
- kfree(sg);
+ if (atomic_dec_and_test(&sg->ref))
+ kfree(sg);
sg = tmp;
} while (sg != first);
}
@@ -345,15 +346,11 @@ static void free_sched_groups(struct sched_group *sg, int free_sgc)
static void destroy_sched_domain(struct sched_domain *sd)
{
/*
- * If its an overlapping domain it has private groups, iterate and
- * nuke them all.
+	 * A sched domain holds references to many groups, and an overlapping
+	 * domain has private groups; iterate and nuke them all.
*/
- if (sd->flags & SD_OVERLAP) {
- free_sched_groups(sd->groups, 1);
- } else if (atomic_dec_and_test(&sd->groups->ref)) {
- kfree(sd->groups->sgc);
- kfree(sd->groups);
- }
+ free_sched_groups(sd->groups, 1);
+
if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
kfree(sd->shared);
kfree(sd);
@@ -670,6 +667,7 @@ build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
else
cpumask_copy(sg_span, sched_domain_span(sd));
+ atomic_inc(&sg->ref);
return sg;
}
--
2.5.0
Powered by blists - more mailing lists