Message-Id: <20180720172956.2883-3-dave@stgolabs.net>
Date: Fri, 20 Jul 2018 10:29:56 -0700
From: Davidlohr Bueso <dave@...olabs.net>
To: akpm@...ux-foundation.org
Cc: jbaron@...mai.com, viro@...iv.linux.org.uk,
linux-kernel@...r.kernel.org, dave@...olabs.net,
Davidlohr Bueso <dbueso@...e.de>
Subject: [PATCH 2/2] fs/epoll: loosen irq safety in ep_insert() and ep_remove()

Both functions run in contexts similar to that of ep_modify(), i.e. they are
called via epoll_ctl(2). Just as in ep_modify(), saving and restoring the
interrupt state is overkill here, as neither path is ever entered with irqs
disabled. While ep_remove() can be called directly for EPOLL_CTL_DEL, it can
also be called when the file is released, and that path likewise runs with
irqs enabled.
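
For reference, here is a minimal sketch (not part of the patch; the lock and
function names below are purely illustrative) contrasting the two locking
forms, assuming the lock is only ever taken from process context with irqs
enabled, which is the property relied upon for ep_insert() and ep_remove():

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* illustrative lock, not ep->wq.lock */

	/* Conservative form: correct even if the caller already has irqs disabled. */
	static void example_irqsave(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);	/* save irq state, disable irqs */
		/* ... touch data also reachable from irq context ... */
		spin_unlock_irqrestore(&example_lock, flags);	/* restore saved irq state */
	}

	/* Cheaper form: only valid when irqs are known to be enabled on entry. */
	static void example_irq(void)
	{
		spin_lock_irq(&example_lock);		/* unconditionally disable irqs */
		/* ... */
		spin_unlock_irq(&example_lock);		/* unconditionally re-enable irqs */
	}

Should that assumption ever be violated and the _irq form taken with irqs
already disabled, the unlock would spuriously re-enable them, which is why
this change rests on the call-path analysis above.
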
Signed-off-by: Davidlohr Bueso <dbueso@...e.de>
---
fs/eventpoll.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 2247769eb941..1b1abc461fc0 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -762,7 +762,6 @@ static void epi_rcu_free(struct rcu_head *head)
*/
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
- unsigned long flags;
struct file *file = epi->ffd.file;
/*
@@ -777,10 +776,10 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
rb_erase_cached(&epi->rbn, &ep->rbr);
- spin_lock_irqsave(&ep->wq.lock, flags);
+ spin_lock_irq(&ep->wq.lock);
if (ep_is_linked(&epi->rdllink))
list_del_init(&epi->rdllink);
- spin_unlock_irqrestore(&ep->wq.lock, flags);
+ spin_unlock_irq(&ep->wq.lock);
wakeup_source_unregister(ep_wakeup_source(epi));
/*
@@ -1409,7 +1408,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
{
int error, pwake = 0;
__poll_t revents;
- unsigned long flags;
long user_watches;
struct epitem *epi;
struct ep_pqueue epq;
@@ -1476,7 +1474,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
goto error_remove_epi;
/* We have to drop the new item inside our item list to keep track of it */
- spin_lock_irqsave(&ep->wq.lock, flags);
+ spin_lock_irq(&ep->wq.lock);
/* record NAPI ID of new item if present */
ep_set_busy_poll_napi_id(epi);
@@ -1493,7 +1491,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
pwake++;
}
- spin_unlock_irqrestore(&ep->wq.lock, flags);
+ spin_unlock_irq(&ep->wq.lock);
atomic_long_inc(&ep->user->epoll_watches);
@@ -1519,10 +1517,10 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
* list, since that is used/cleaned only inside a section bound by "mtx".
* And ep_insert() is called with "mtx" held.
*/
- spin_lock_irqsave(&ep->wq.lock, flags);
+ spin_lock_irq(&ep->wq.lock);
if (ep_is_linked(&epi->rdllink))
list_del_init(&epi->rdllink);
- spin_unlock_irqrestore(&ep->wq.lock, flags);
+ spin_unlock_irq(&ep->wq.lock);
wakeup_source_unregister(ep_wakeup_source(epi));
--
2.16.4