[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <97a04805-120c-abc1-4e34-01cfdc5ef8fb@redhat.com>
Date: Wed, 13 Apr 2022 12:00:25 -0400
From: Nico Pache <npache@...hat.com>
To: Michal Hocko <mhocko@...e.com>
Cc: Joel Savitz <jsavitz@...hat.com>,
Peter Zijlstra <peterz@...radead.org>, linux-mm@...ck.org,
linux-kernel <linux-kernel@...r.kernel.org>,
Rafael Aquini <aquini@...hat.com>,
Waiman Long <longman@...hat.com>, Baoquan He <bhe@...hat.com>,
Christoph von Recklinghausen <crecklin@...hat.com>,
Don Dutile <ddutile@...hat.com>,
"Herton R . Krzesinski" <herton@...hat.com>,
David Rientjes <rientjes@...gle.com>,
Andrea Arcangeli <aarcange@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Davidlohr Bueso <dave@...olabs.net>,
Ingo Molnar <mingo@...hat.com>,
Darren Hart <dvhart@...radead.org>, stable@...nel.org,
Thomas Gleixner <tglx@...utronix.de>
Subject: Re: [PATCH v8] oom_kill.c: futex: Don't OOM reap the VMA containing
the robust_list_head
> diff --git a/mm/oom_kill.c b/mm/oom_kill.c
> index 7ec38194f8e1..be6d65ead7ec 100644
> --- a/mm/oom_kill.c
> +++ b/mm/oom_kill.c
> @@ -632,7 +632,7 @@ static void oom_reap_task(struct task_struct *tsk)
> */
> set_bit(MMF_OOM_SKIP, &mm->flags);
>
> - /* Drop a reference taken by wake_oom_reaper */
> + /* Drop a reference taken by queue_oom_reaper */
> put_task_struct(tsk);
> }
>
> @@ -644,12 +644,12 @@ static int oom_reaper(void *unused)
> struct task_struct *tsk = NULL;
>
> wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
> - spin_lock(&oom_reaper_lock);
> + spin_lock_irq(&oom_reaper_lock);
> if (oom_reaper_list != NULL) {
> tsk = oom_reaper_list;
> oom_reaper_list = tsk->oom_reaper_list;
> }
> - spin_unlock(&oom_reaper_lock);
> + spin_unlock_irq(&oom_reaper_lock);
>
> if (tsk)
> oom_reap_task(tsk);
> @@ -658,22 +658,50 @@ static int oom_reaper(void *unused)
> return 0;
> }
>
> -static void wake_oom_reaper(struct task_struct *tsk)
> +static void wake_oom_reaper_fn(struct timer_list *timer)
> {
> - /* mm is already queued? */
> - if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
> - return;
> + struct task_struct *tsk = container_of(timer, struct task_struct, oom_reaper_timer);
> + struct mm_struct *mm = tsk->signal->oom_mm;
> + unsigned long flags;
>
> - get_task_struct(tsk);
> + /* The victim managed to terminate on its own - see exit_mmap */
> + if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
> + put_task_struct(tsk);
> + return;
> + }
>
> - spin_lock(&oom_reaper_lock);
> + spin_lock_irqsave(&oom_reaper_lock, flags);
> tsk->oom_reaper_list = oom_reaper_list;
> oom_reaper_list = tsk;
> - spin_unlock(&oom_reaper_lock);
> + spin_unlock_irqrestore(&oom_reaper_lock, flags);
> trace_wake_reaper(tsk->pid);
> wake_up(&oom_reaper_wait);
> }
>
> +/*
> + * Give OOM victims some head room to exit themselves. If they do not exit
> + * on their own the oom reaper is invoked.
> + * The timeout is basically arbitrary and there is no best value to use.
> + * The longer it will be the longer the worst case scenario OOM can
> + * take. The smaller the timeout the more likely the oom_reaper can get
> + * into the way and release resources which could be needed during the
> + * exit path - e.g. futex robust lists can sit in the anonymous memory
> + * which could be reaped and the exit path won't be able to let waiters
> + * know the holding task has terminated.
> + */
> +#define OOM_REAPER_DELAY (2*HZ)
> +static void queue_oom_reaper(struct task_struct *tsk)
> +{
> + /* mm is already queued? */
> + if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
> + return;
> +
> + get_task_struct(tsk);
> + timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper_fn, 0);
> + tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
> + add_timer(&tsk->oom_reaper_timer);
> +}
> +
> static int __init oom_init(void)
> {
> oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
> @@ -681,7 +709,7 @@ static int __init oom_init(void)
> }
> subsys_initcall(oom_init)
> #else
> -static inline void wake_oom_reaper(struct task_struct *tsk)
> +static inline void queue_oom_reaper(struct task_struct *tsk)
> {
> }
> #endif /* CONFIG_MMU */
> @@ -932,7 +960,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
> rcu_read_unlock();
>
> if (can_oom_reap)
> - wake_oom_reaper(victim);
> + queue_oom_reaper(victim);
>
> mmdrop(mm);
> put_task_struct(victim);
> @@ -968,7 +996,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
> task_lock(victim);
> if (task_will_free_mem(victim)) {
> mark_oom_victim(victim);
> - wake_oom_reaper(victim);
> + queue_oom_reaper(victim);
> task_unlock(victim);
> put_task_struct(victim);
> return;
> @@ -1067,7 +1095,7 @@ bool out_of_memory(struct oom_control *oc)
> */
> if (task_will_free_mem(current)) {
> mark_oom_victim(current);
> + queue_oom_reaper(current);
> return true;
> }
Thanks for the code, Michal — it does seem to fix our issue! I will post it after
I finish running it through a few more test cases and our internal testing suites.
Cheers,
-- Nico
Powered by blists - more mailing lists