Message-ID: <20240728004934.1706375-6-sashal@kernel.org>
Date: Sat, 27 Jul 2024 20:49:26 -0400
From: Sasha Levin <sashal@...nel.org>
To: linux-kernel@...r.kernel.org,
stable@...r.kernel.org
Cc: Viresh Kumar <viresh.kumar@...aro.org>,
Al Viro <viro@...iv.linux.org.uk>,
Juergen Gross <jgross@...e.com>,
Sasha Levin <sashal@...nel.org>,
sstabellini@...nel.org,
xen-devel@...ts.xenproject.org
Subject: [PATCH AUTOSEL 6.10 6/9] xen: privcmd: Switch from mutex to spinlock for irqfds

From: Viresh Kumar <viresh.kumar@...aro.org>

[ Upstream commit 1c682593096a487fd9aebc079a307ff7a6d054a3 ]

irqfd_wakeup() gets EPOLLHUP when it is called by eventfd_release() by
way of wake_up_poll(&ctx->wqh, EPOLLHUP), which itself runs under
spin_lock_irqsave(). We can't use a mutex here, as a mutex may sleep
and taking one in that context will lead to a deadlock.

Fix it by switching over to a spin lock.
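
For reference, a minimal sketch of the locking pattern this change relies
on (illustrative only; the example_* names below are hypothetical and are
not part of drivers/xen/privcmd.c):

#include <linux/list.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct example_item {
	struct list_head list;
	wait_queue_entry_t wait;
};

/* Shared list protected by a spinlock, never by a sleeping lock. */
static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

/* Process context: add an item to the shared list. */
static void example_add(struct example_item *item)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	list_add_tail(&item->list, &example_list);
	spin_unlock_irqrestore(&example_lock, flags);
}

/*
 * Wakeup callback: may be invoked with another spinlock held and
 * interrupts disabled (e.g. via wake_up_poll()), so it must not sleep.
 */
static int example_wakeup(wait_queue_entry_t *wait, unsigned int mode,
			  int sync, void *key)
{
	struct example_item *item = container_of(wait, struct example_item, wait);
	unsigned long flags;

	if (key_to_poll(key) & EPOLLHUP) {
		spin_lock_irqsave(&example_lock, flags);
		list_del_init(&item->list);
		spin_unlock_irqrestore(&example_lock, flags);
	}

	return 0;
}

spin_lock_irqsave()/spin_unlock_irqrestore() is used rather than plain
spin_lock() because the same lock is taken both from ordinary process
context and from a wakeup callback that may already run with interrupts
disabled.
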
Reported-by: Al Viro <viro@...iv.linux.org.uk>
Signed-off-by: Viresh Kumar <viresh.kumar@...aro.org>
Reviewed-by: Juergen Gross <jgross@...e.com>
Link: https://lore.kernel.org/r/a66d7a7a9001424d432f52a9fc3931a1f345464f.1718703669.git.viresh.kumar@linaro.org
Signed-off-by: Juergen Gross <jgross@...e.com>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
 drivers/xen/privcmd.c | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 67dfa47788649..c9c620e32fa8b 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -845,7 +845,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
 #ifdef CONFIG_XEN_PRIVCMD_EVENTFD
 /* Irqfd support */
 static struct workqueue_struct *irqfd_cleanup_wq;
-static DEFINE_MUTEX(irqfds_lock);
+static DEFINE_SPINLOCK(irqfds_lock);
 static LIST_HEAD(irqfds_list);
 
 struct privcmd_kernel_irqfd {
@@ -909,9 +909,11 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
 		irqfd_inject(kirqfd);
 
 	if (flags & EPOLLHUP) {
-		mutex_lock(&irqfds_lock);
+		unsigned long flags;
+
+		spin_lock_irqsave(&irqfds_lock, flags);
 		irqfd_deactivate(kirqfd);
-		mutex_unlock(&irqfds_lock);
+		spin_unlock_irqrestore(&irqfds_lock, flags);
 	}
 
 	return 0;
@@ -929,6 +931,7 @@ irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
 static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
 {
 	struct privcmd_kernel_irqfd *kirqfd, *tmp;
+	unsigned long flags;
 	__poll_t events;
 	struct fd f;
 	void *dm_op;
@@ -968,18 +971,18 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
 	init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
 	init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);
 
-	mutex_lock(&irqfds_lock);
+	spin_lock_irqsave(&irqfds_lock, flags);
 
 	list_for_each_entry(tmp, &irqfds_list, list) {
 		if (kirqfd->eventfd == tmp->eventfd) {
 			ret = -EBUSY;
-			mutex_unlock(&irqfds_lock);
+			spin_unlock_irqrestore(&irqfds_lock, flags);
 			goto error_eventfd;
 		}
 	}
 
 	list_add_tail(&kirqfd->list, &irqfds_list);
-	mutex_unlock(&irqfds_lock);
+	spin_unlock_irqrestore(&irqfds_lock, flags);
 
 	/*
 	 * Check if there was an event already pending on the eventfd before we
@@ -1011,12 +1014,13 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
 {
 	struct privcmd_kernel_irqfd *kirqfd;
 	struct eventfd_ctx *eventfd;
+	unsigned long flags;
 
 	eventfd = eventfd_ctx_fdget(irqfd->fd);
 	if (IS_ERR(eventfd))
 		return PTR_ERR(eventfd);
 
-	mutex_lock(&irqfds_lock);
+	spin_lock_irqsave(&irqfds_lock, flags);
 
 	list_for_each_entry(kirqfd, &irqfds_list, list) {
 		if (kirqfd->eventfd == eventfd) {
@@ -1025,7 +1029,7 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
 		}
 	}
 
-	mutex_unlock(&irqfds_lock);
+	spin_unlock_irqrestore(&irqfds_lock, flags);
 
 	eventfd_ctx_put(eventfd);
 
@@ -1073,13 +1077,14 @@ static int privcmd_irqfd_init(void)
 static void privcmd_irqfd_exit(void)
 {
 	struct privcmd_kernel_irqfd *kirqfd, *tmp;
+	unsigned long flags;
 
-	mutex_lock(&irqfds_lock);
+	spin_lock_irqsave(&irqfds_lock, flags);
 
 	list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
 		irqfd_deactivate(kirqfd);
 
-	mutex_unlock(&irqfds_lock);
+	spin_unlock_irqrestore(&irqfds_lock, flags);
 
 	destroy_workqueue(irqfd_cleanup_wq);
 }
--
2.43.0