Message-Id: <a66d7a7a9001424d432f52a9fc3931a1f345464f.1718703669.git.viresh.kumar@linaro.org>
Date: Tue, 18 Jun 2024 15:12:28 +0530
From: Viresh Kumar <viresh.kumar@...aro.org>
To: Juergen Gross <jgross@...e.com>,
Stefano Stabellini <sstabellini@...nel.org>,
Oleksandr Tyshchenko <oleksandr_tyshchenko@...m.com>
Cc: Viresh Kumar <viresh.kumar@...aro.org>,
Vincent Guittot <vincent.guittot@...aro.org>,
Alex Bennée <alex.bennee@...aro.org>,
Manos Pitsidianakis <manos.pitsidianakis@...aro.org>,
Paolo Bonzini <pbonzini@...hat.com>,
Al Viro <viro@...iv.linux.org.uk>,
xen-devel@...ts.xenproject.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 1/2] xen: privcmd: Switch from mutex to spinlock for irqfds
irqfd_wakeup() gets EPOLLHUP when it is called by eventfd_release(),
by way of wake_up_poll(&ctx->wqh, EPOLLHUP), which gets called under
spin_lock_irqsave(). We can't use a mutex here, as mutex_lock() may
sleep and sleeping isn't allowed while a spin lock is held; it would
lead to a deadlock.

Fix it by switching over to a spin lock.
Reported-by: Al Viro <viro@...iv.linux.org.uk>
Signed-off-by: Viresh Kumar <viresh.kumar@...aro.org>
---
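A minimal sketch for reviewers (not part of the patch) of why the wait
queue callback must use a spin lock: wake_up_poll() invokes the
callbacks with the wait queue head's lock held via spin_lock_irqsave(),
so the callback runs in atomic context and must not sleep. The
example_wakeup()/example_lock names below are hypothetical stand-ins
for irqfd_wakeup()/irqfds_lock:

#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(example_lock);	/* stand-in for irqfds_lock */

static int example_wakeup(wait_queue_entry_t *wait, unsigned int mode,
			  int sync, void *key)
{
	__poll_t events = key_to_poll(key);

	if (events & EPOLLHUP) {
		unsigned long flags;

		/*
		 * We are already running under the wait queue head's
		 * spin_lock_irqsave(), so sleeping locks (mutexes) are
		 * off limits here; a spin lock is the only safe choice.
		 */
		spin_lock_irqsave(&example_lock, flags);
		/* ... unhook from the global list, as irqfd_deactivate() does ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}

	return 0;
}

(The sketch names the poll mask 'events' to avoid the variable
shadowing of 'flags' that the real irqfd_wakeup() tolerates.)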
drivers/xen/privcmd.c | 26 +++++++++++++++-----------
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 67dfa4778864..5ceb6c56cf3e 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -13,7 +13,6 @@
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -845,7 +844,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
#ifdef CONFIG_XEN_PRIVCMD_EVENTFD
/* Irqfd support */
static struct workqueue_struct *irqfd_cleanup_wq;
-static DEFINE_MUTEX(irqfds_lock);
+static DEFINE_SPINLOCK(irqfds_lock);
static LIST_HEAD(irqfds_list);
struct privcmd_kernel_irqfd {
@@ -909,9 +908,11 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
irqfd_inject(kirqfd);
if (flags & EPOLLHUP) {
- mutex_lock(&irqfds_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&irqfds_lock, flags);
irqfd_deactivate(kirqfd);
- mutex_unlock(&irqfds_lock);
+ spin_unlock_irqrestore(&irqfds_lock, flags);
}
return 0;
@@ -929,6 +930,7 @@ irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
{
struct privcmd_kernel_irqfd *kirqfd, *tmp;
+ unsigned long flags;
__poll_t events;
struct fd f;
void *dm_op;
@@ -968,18 +970,18 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);
- mutex_lock(&irqfds_lock);
+ spin_lock_irqsave(&irqfds_lock, flags);
list_for_each_entry(tmp, &irqfds_list, list) {
if (kirqfd->eventfd == tmp->eventfd) {
ret = -EBUSY;
- mutex_unlock(&irqfds_lock);
+ spin_unlock_irqrestore(&irqfds_lock, flags);
goto error_eventfd;
}
}
list_add_tail(&kirqfd->list, &irqfds_list);
- mutex_unlock(&irqfds_lock);
+ spin_unlock_irqrestore(&irqfds_lock, flags);
/*
* Check if there was an event already pending on the eventfd before we
@@ -1011,12 +1013,13 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
{
struct privcmd_kernel_irqfd *kirqfd;
struct eventfd_ctx *eventfd;
+ unsigned long flags;
eventfd = eventfd_ctx_fdget(irqfd->fd);
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
- mutex_lock(&irqfds_lock);
+ spin_lock_irqsave(&irqfds_lock, flags);
list_for_each_entry(kirqfd, &irqfds_list, list) {
if (kirqfd->eventfd == eventfd) {
@@ -1025,7 +1028,7 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
}
}
- mutex_unlock(&irqfds_lock);
+ spin_unlock_irqrestore(&irqfds_lock, flags);
eventfd_ctx_put(eventfd);
@@ -1073,13 +1076,14 @@ static int privcmd_irqfd_init(void)
static void privcmd_irqfd_exit(void)
{
struct privcmd_kernel_irqfd *kirqfd, *tmp;
+ unsigned long flags;
- mutex_lock(&irqfds_lock);
+ spin_lock_irqsave(&irqfds_lock, flags);
list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
irqfd_deactivate(kirqfd);
- mutex_unlock(&irqfds_lock);
+ spin_unlock_irqrestore(&irqfds_lock, flags);
destroy_workqueue(irqfd_cleanup_wq);
}
--
2.31.1.272.g89b43f80a514