Message-Id: <201810220758.w9M7wojE016890@www262.sakura.ne.jp>
Date: Mon, 22 Oct 2018 16:58:50 +0900
From: Tetsuo Handa <penguin-kernel@...ove.sakura.ne.jp>
To: Michal Hocko <mhocko@...nel.org>
Cc: linux-mm@...ck.org, Johannes Weiner <hannes@...xchg.org>,
Tetsuo Handa <penguin-kernel@...ove.SAKURA.ne.jp>,
David Rientjes <rientjes@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
LKML <linux-kernel@...r.kernel.org>,
Michal Hocko <mhocko@...e.com>
Subject: Re: [RFC PATCH 1/2] mm, oom: marks all killed tasks as oom victims
Michal Hocko wrote:
> --- a/mm/oom_kill.c
> +++ b/mm/oom_kill.c
> @@ -898,6 +898,7 @@ static void __oom_kill_process(struct task_struct *victim)
> if (unlikely(p->flags & PF_KTHREAD))
> continue;
> do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, PIDTYPE_TGID);
> + mark_oom_victim(p);
> }
> rcu_read_unlock();
>
> --
Wrong. mark_oom_victim() needs a thread whose ->mm is non-NULL and stable, but the group leader handed back by for_each_process() may already have exited and dropped its ->mm even though other threads still share the victim's mm. Either
---
mm/oom_kill.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f10aa53..99b36ff 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -879,6 +879,8 @@ static void __oom_kill_process(struct task_struct *victim)
*/
rcu_read_lock();
for_each_process(p) {
+ struct task_struct *t;
+
if (!process_shares_mm(p, mm))
continue;
if (same_thread_group(p, victim))
@@ -898,6 +900,11 @@ static void __oom_kill_process(struct task_struct *victim)
if (unlikely(p->flags & PF_KTHREAD))
continue;
do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, PIDTYPE_TGID);
+ t = find_lock_task_mm(p);
+ if (!t)
+ continue;
+ mark_oom_victim(t);
+ task_unlock(t);
}
rcu_read_unlock();
--
1.8.3.1
or
---
mm/oom_kill.c | 32 +++++++++++++++++++-------------
1 file changed, 19 insertions(+), 13 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f10aa53..7fa9b7c 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -854,13 +854,6 @@ static void __oom_kill_process(struct task_struct *victim)
count_vm_event(OOM_KILL);
memcg_memory_event_mm(mm, MEMCG_OOM_KILL);
- /*
- * We should send SIGKILL before granting access to memory reserves
- * in order to prevent the OOM victim from depleting the memory
- * reserves from the user space under its control.
- */
- do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, PIDTYPE_TGID);
- mark_oom_victim(victim);
pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
K(get_mm_counter(victim->mm, MM_ANONPAGES)),
@@ -879,11 +872,23 @@ static void __oom_kill_process(struct task_struct *victim)
*/
rcu_read_lock();
for_each_process(p) {
- if (!process_shares_mm(p, mm))
+ struct task_struct *t;
+
+ /*
+ * No use_mm() user needs to read from the userspace so we are
+ * ok to reap it.
+ */
+ if (unlikely(p->flags & PF_KTHREAD))
+ continue;
+ t = find_lock_task_mm(p);
+ if (!t)
continue;
- if (same_thread_group(p, victim))
+ if (likely(t->mm != mm)) {
+ task_unlock(t);
continue;
+ }
if (is_global_init(p)) {
+ task_unlock(t);
can_oom_reap = false;
set_bit(MMF_OOM_SKIP, &mm->flags);
pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
@@ -892,12 +897,13 @@ static void __oom_kill_process(struct task_struct *victim)
continue;
}
/*
- * No use_mm() user needs to read from the userspace so we are
- * ok to reap it.
+ * We should send SIGKILL before granting access to memory
+ * reserves in order to prevent the OOM victim from depleting
+ * the memory reserves from the user space under its control.
*/
- if (unlikely(p->flags & PF_KTHREAD))
- continue;
do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, PIDTYPE_TGID);
+ mark_oom_victim(t);
+ task_unlock(t);
}
rcu_read_unlock();
--
1.8.3.1
will be needed.
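
In other words (just a sketch of the constraint the two alternatives above deal with, not something to apply; mark_oom_victim_for() is a made-up name and it would have to live next to mark_oom_victim() in mm/oom_kill.c, which is static there): find_lock_task_mm() walks the thread group and returns a thread which still has a non-NULL ->mm with task_lock() held, which is exactly what mark_oom_victim() requires. The second alternative simply reuses that lookup for the "does this process share the victim's mm" test instead of calling find_lock_task_mm() in addition to process_shares_mm().

/*
 * Sketch only: what both alternatives effectively do for each process
 * that shares the victim's mm.
 */
static void mark_oom_victim_for(struct task_struct *p)
{
	struct task_struct *t = find_lock_task_mm(p);

	if (!t)
		return;		/* every thread in the group already lost its ->mm */
	mark_oom_victim(t);	/* t->mm is stable while task_lock(t) is held */
	task_unlock(t);
}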