Message-ID: <20191107150323.GA24042@redhat.com>
Date: Thu, 7 Nov 2019 16:03:23 +0100
From: Oleg Nesterov <oleg@...hat.com>
To: Thomas Gleixner <tglx@...utronix.de>
Cc: LKML <linux-kernel@...r.kernel.org>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
Darren Hart <darren@...art.com>,
Yi Wang <wang.yi59@....com.cn>,
Yang Tao <yang.tao172@....com.cn>,
Florian Weimer <fweimer@...hat.com>,
Carlos O'Donell <carlos@...hat.com>,
Alexander Viro <viro@...iv.linux.org.uk>
Subject: Re: [patch 00/12] futex: Cure robust/PI futex exit races
On 11/06, Thomas Gleixner wrote:
>
> fs/exec.c | 2
> include/linux/compat.h | 2
> include/linux/futex.h | 38 +++--
> include/linux/sched.h | 3
> include/linux/sched/mm.h | 6
> kernel/exit.c | 30 ----
> kernel/fork.c | 40 ++---
> kernel/futex.c | 324 ++++++++++++++++++++++++++++++++++++++++-------
> 8 files changed, 330 insertions(+), 115 deletions(-)
The whole series looks good to me.
But I am just curious: what do you all think about the patch below
instead of 10/12 and 12/12?
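
The idea: give the exiting task a wake_q_head in task_struct, let a waiter
which sees that the alleged owner has not yet reached FUTEX_STATE_DEAD add
itself via wake_q_add() and then block in wait_for_owner_exiting() until
futex_exit_recursive() / futex_cleanup_end() run wake_up_q(). To illustrate
the generic wake_q handoff this relies on (just a sketch, the function names
below are made up, this is not part of the patch; a note on the wake_up_q()
internals follows the patch):

/* waker side: queue the sleeper, wake it later in one go */
static void example_waker(struct task_struct *sleeper)
{
	DEFINE_WAKE_Q(wq);

	wake_q_add(&wq, sleeper);	/* sets sleeper->wake_q.next */
	/* ... do the work the sleeper waits for ... */
	wake_up_q(&wq);			/* clears ->next, then wakes the task */
}

/* sleeper side: ->next being cleared is the wakeup condition */
static void example_sleeper(void)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(current->wake_q.next))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
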
Oleg.
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9e0de08..ad18433 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -621,6 +621,11 @@ struct wake_q_node {
struct wake_q_node *next;
};
+struct wake_q_head {
+ struct wake_q_node *first;
+ struct wake_q_node **lastp;
+};
+
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
@@ -1055,6 +1060,7 @@ struct task_struct {
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
unsigned int futex_state;
+ struct wake_q_head futex_exit_q;
#endif
#ifdef CONFIG_PERF_EVENTS
struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 26a2013..62805b5 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -35,11 +35,6 @@
#include <linux/sched.h>
-struct wake_q_head {
- struct wake_q_node *first;
- struct wake_q_node **lastp;
-};
-
#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
#define DEFINE_WAKE_Q(name) \
diff --git a/kernel/futex.c b/kernel/futex.c
index 4b36bc8..87763c7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1176,6 +1176,24 @@ static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
return ret;
}
+static void wait_for_owner_exiting(int ret)
+{
+ struct wake_q_node *node = &current->wake_q;
+
+ if (ret != -EBUSY) {
+ WARN_ON_ONCE(node->next); // XXX not really correct ...
+ return;
+ }
+
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (!READ_ONCE(node->next))
+ break;
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+}
+
static int handle_exit_race(u32 __user *uaddr, u32 uval,
struct task_struct *tsk)
{
@@ -1185,8 +1203,10 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval,
* If the futex exit state is not yet FUTEX_STATE_DEAD, tell the
* caller that the alleged owner is busy.
*/
- if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)
+ if (tsk && tsk->futex_state != FUTEX_STATE_DEAD) {
+ wake_q_add(&tsk->futex_exit_q, current);
return -EBUSY;
+ }
/*
* Reread the user space value to handle the following situation:
@@ -2104,6 +2124,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
hb_waiters_dec(hb2);
put_futex_key(&key2);
put_futex_key(&key1);
+ wait_for_owner_exiting(ret);
cond_resched();
goto retry;
default:
@@ -2855,6 +2876,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
queue_unlock(hb);
put_futex_key(&q.key);
cond_resched();
+ wait_for_owner_exiting(ret);
goto retry;
default:
goto out_unlock_put_key;
@@ -3701,6 +3723,7 @@ static void futex_cleanup(struct task_struct *tsk)
void futex_exit_recursive(struct task_struct *tsk)
{
tsk->futex_state = FUTEX_STATE_DEAD;
+ wake_up_q(&tsk->futex_exit_q);
}
static void futex_cleanup_begin(struct task_struct *tsk)
@@ -3718,16 +3741,17 @@ static void futex_cleanup_begin(struct task_struct *tsk)
*/
raw_spin_lock_irq(&tsk->pi_lock);
tsk->futex_state = FUTEX_STATE_EXITING;
+ wake_q_init(&tsk->futex_exit_q);
raw_spin_unlock_irq(&tsk->pi_lock);
}
static void futex_cleanup_end(struct task_struct *tsk, int state)
{
- /*
- * Lockless store. The only side effect is that an observer might
- * take another loop until it becomes visible.
- */
+ raw_spin_lock_irq(&tsk->pi_lock);
tsk->futex_state = state;
+ raw_spin_unlock_irq(&tsk->pi_lock);
+
+ wake_up_q(&tsk->futex_exit_q);
}
void futex_exec_release(struct task_struct *tsk)
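
For reference, the exit condition in wait_for_owner_exiting() works because
wake_up_q() clears ->next before it wakes each queued task. Roughly what it
does in kernels of this vintage (paraphrased from kernel/sched/core.c,
details elided, not part of the patch):

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		/* the task can be re-queued from here on */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() implies a full barrier which pairs with
		 * the queueing in wake_q_add(), so wakeups are not lost.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}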