Message-ID: <20250225183531.682556-5-andrealmeid@igalia.com>
Date: Tue, 25 Feb 2025 15:35:30 -0300
From: André Almeida <andrealmeid@...lia.com>
To: Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Darren Hart <dvhart@...radead.org>,
Davidlohr Bueso <dave@...olabs.net>,
Arnd Bergmann <arnd@...db.de>,
sonicadvance1@...il.com
Cc: linux-kernel@...r.kernel.org,
kernel-dev@...lia.com,
linux-api@...r.kernel.org,
Vinicius Peixoto <vpeixoto@...amp.dev>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
André Almeida <andrealmeid@...lia.com>
Subject: [PATCH v4 4/5] futex: Remove the limit of elements for sys_set_robust_list2 lists
Remove the ROBUST_LIST_LIMIT cap on the number of elements a robust list
can have, for lists created with the new interface. This is done by
overwriting each list entry as it is processed, which prevents circular
lists from looping forever.
For the old interface, we keep the limited behavior to avoid changing
the API.
Signed-off-by: André Almeida <andrealmeid@...lia.com>
---
kernel/futex/core.c | 50 ++++++++++++++++++++++++++++++++-------------
1 file changed, 36 insertions(+), 14 deletions(-)
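
Not part of the patch: a minimal user-space sketch of why the
"destroyable" walk no longer needs ROBUST_LIST_LIMIT. It uses a
simplified node layout (no futex words, no handle_futex_death()) and
only mimics the pointer rewriting done by the new put_user() in the
exit path: once an entry has been handled, its ->next is redirected
back at the head sentinel, so even a circular list collapses and the
walk terminates on its own.

#include <stdio.h>

struct node {
	struct node *next;
	int id;
};

/*
 * Walk the list the way the patched exit path does for destroyable
 * (set_robust_list2) lists: after an entry is handled, its ->next is
 * rewritten to point back at the head sentinel.
 */
static int walk_destroyable(struct node *head)
{
	struct node *entry = head->next;
	int handled = 0;

	while (entry != head) {
		struct node *next = entry->next;

		handled++;		/* kernel: handle_futex_death() */
		entry->next = head;	/* kernel: put_user(&head->list, &entry->next) */
		entry = next;
	}
	return handled;
}

int main(void)
{
	struct node head = { .id = -1 };
	struct node a = { .id = 0 }, b = { .id = 1 }, c = { .id = 2 };

	/* Build a circular list: head -> a -> b -> c -> a, never back to head. */
	head.next = &a;
	a.next = &b;
	b.next = &c;
	c.next = &a;

	/*
	 * The old walk only stops on such a list because of
	 * ROBUST_LIST_LIMIT. Here the cycle collapses on its own:
	 * 'a' is revisited once, but its rewritten link already points
	 * at the head, so the walk ends after four iterations.
	 */
	printf("handled %d entries\n", walk_destroyable(&head));
	return 0;
}
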
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index 07a7e5e9bc8d..bfd4443208ea 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -777,7 +777,8 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
* We silently return on any sign of list-walking problem.
*/
static void exit_robust_list64(struct task_struct *curr,
- struct robust_list_head __user *head)
+ struct robust_list_head __user *head,
+ bool destroyable)
{
struct robust_list __user *entry, *next_entry, *pending;
unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
@@ -821,13 +822,17 @@ static void exit_robust_list64(struct task_struct *curr,
}
if (rc)
return;
- entry = next_entry;
- pi = next_pi;
+
/*
* Avoid excessively long or circular lists:
*/
- if (!--limit)
+ if (!destroyable && !--limit)
break;
+ else
+ put_user(&head->list, &entry->next);
+
+ entry = next_entry;
+ pi = next_pi;
cond_resched();
}
@@ -839,7 +844,8 @@ static void exit_robust_list64(struct task_struct *curr,
}
#else
static void exit_robust_list64(struct task_struct *curr,
- struct robust_list_head __user *head)
+ struct robust_list_head __user *head,
+ bool destroyable)
{
pr_warn("32bit kernel should not allow ROBUST_LIST_64BIT");
}
@@ -877,7 +883,8 @@ fetch_robust_entry32(u32 *uentry, struct robust_list __user **entry,
* We silently return on any sign of list-walking problem.
*/
static void exit_robust_list32(struct task_struct *curr,
- struct robust_list_head32 __user *head)
+ struct robust_list_head32 __user *head,
+ bool destroyable)
{
struct robust_list __user *entry, *next_entry, *pending;
unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
@@ -926,14 +933,17 @@ static void exit_robust_list32(struct task_struct *curr,
}
if (rc)
return;
- uentry = next_uentry;
- entry = next_entry;
- pi = next_pi;
/*
* Avoid excessively long or circular lists:
*/
- if (!--limit)
+ if (!destroyable && !--limit)
break;
+ else
+ put_user((struct robust_list __user *) &head->list, &entry->next);
+
+ uentry = next_uentry;
+ entry = next_entry;
+ pi = next_pi;
cond_resched();
}
@@ -1087,26 +1097,38 @@ static void exit_pi_state_list(struct task_struct *curr)
static inline void exit_pi_state_list(struct task_struct *curr) { }
#endif
+/*
+ * futex_cleanup - After the task exits, process its robust lists
+ *
+ * Walk through the linked list, parsing robust lists and freeing the
+ * allocated entries. Lists created with set_robust_list2 have no size
+ * limit and are destroyed as we walk them, to avoid circular lists.
+ */
static void futex_cleanup(struct task_struct *tsk)
{
struct robust_list2_entry *curr, *n;
struct list_head *list2 = &tsk->robust_list2;
+ bool destroyable = true;
+ int i = 0;
/*
- * Walk through the linked list, parsing robust lists and freeing the
- * allocated lists
*/
if (unlikely(!list_empty(list2))) {
list_for_each_entry_safe(curr, n, list2, list) {
+ destroyable = true;
+ if (tsk->robust_list_index == i)
+ destroyable = false;
+
if (curr->head != NULL) {
if (curr->list_type == ROBUST_LIST_64BIT)
- exit_robust_list64(tsk, curr->head);
+ exit_robust_list64(tsk, curr->head, destroyable);
else if (curr->list_type == ROBUST_LIST_32BIT)
- exit_robust_list32(tsk, curr->head);
+ exit_robust_list32(tsk, curr->head, destroyable);
curr->head = NULL;
}
list_del_init(&curr->list);
kfree(curr);
+ i++;
}
}
--
2.48.1