Message-Id: <20201004023929.2740074-22-viro@ZenIV.linux.org.uk>
Date:   Sun,  4 Oct 2020 03:39:24 +0100
From:   Al Viro <viro@...IV.linux.org.uk>
To:     Linus Torvalds <torvalds@...ux-foundation.org>
Cc:     linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
        Marc Zyngier <maz@...nel.org>
Subject: [RFC PATCH 22/27] fold ep_read_events_proc() into the only caller

From: Al Viro <viro@...iv.linux.org.uk>

ep_read_events_proc() has only one caller left - __ep_eventpoll_poll();
expand it there.  The forward declaration near the top of the file is now
needed for ep_item_poll() instead.

Signed-off-by: Al Viro <viro@...iv.linux.org.uk>
---
 fs/eventpoll.c | 49 ++++++++++++++++++++-----------------------------
 1 file changed, 20 insertions(+), 29 deletions(-)

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index a50b48d26c55..1efe8a1a022a 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -729,14 +729,17 @@ static int ep_eventpoll_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
-			       int depth);
+static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt, int depth);
 
 static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int depth)
 {
 	struct eventpoll *ep = file->private_data;
 	LIST_HEAD(txlist);
-	__poll_t res;
+	struct epitem *epi, *tmp;
+	poll_table pt;
+	__poll_t res = 0;
+
+	init_poll_funcptr(&pt, NULL);
 
 	/* Insert inside our poll wait queue */
 	poll_wait(file, &ep->poll_wait, wait);
@@ -747,7 +750,20 @@ static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int dep
 	 */
 	mutex_lock_nested(&ep->mtx, depth);
 	ep_start_scan(ep, &txlist);
-	res = ep_read_events_proc(ep, &txlist, depth + 1);
+	list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {
+		if (ep_item_poll(epi, &pt, depth + 1)) {
+			res = EPOLLIN | EPOLLRDNORM;
+			break;
+		} else {
+			/*
+			 * Item has been dropped into the ready list by the poll
+			 * callback, but it's not actually ready as far as the
+			 * events requested by the caller go. We can remove it here.
+			 */
+			__pm_relax(ep_wakeup_source(epi));
+			list_del_init(&epi->rdllink);
+		}
+	}
 	ep_done_scan(ep, &txlist);
 	mutex_unlock(&ep->mtx);
 	return res;
@@ -772,31 +788,6 @@ static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
 	return res & epi->event.events;
 }
 
-static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
-			       int depth)
-{
-	struct epitem *epi, *tmp;
-	poll_table pt;
-
-	init_poll_funcptr(&pt, NULL);
-
-	list_for_each_entry_safe(epi, tmp, head, rdllink) {
-		if (ep_item_poll(epi, &pt, depth)) {
-			return EPOLLIN | EPOLLRDNORM;
-		} else {
-			/*
-			 * Item has been dropped into the ready list by the poll
-			 * callback, but it's not actually ready, as far as
-			 * caller requested events goes. We can remove it here.
-			 */
-			__pm_relax(ep_wakeup_source(epi));
-			list_del_init(&epi->rdllink);
-		}
-	}
-
-	return 0;
-}
-
 static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
 {
 	return __ep_eventpoll_poll(file, wait, 0);
-- 
2.11.0
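
For context (not part of the patch): __ep_eventpoll_poll() is what runs when
an epoll file descriptor is itself polled, most commonly when one epoll
instance is registered inside another. In that case, deciding whether the
inner instance counts as readable ends up in the loop this patch open-codes.
The userspace program below is a minimal sketch of that situation; it uses
only the documented epoll(7) and pipe(2) interfaces.

/*
 * Sketch only, not part of the patch: one epoll instance registered
 * inside another, so that waiting on the outer instance has to poll
 * the inner epoll fd itself.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
	int pipefd[2], inner, outer;
	struct epoll_event ev = { .events = EPOLLIN }, out;

	if (pipe(pipefd) < 0)
		return 1;
	inner = epoll_create1(0);
	outer = epoll_create1(0);
	if (inner < 0 || outer < 0)
		return 1;

	/* the inner instance watches the read end of the pipe */
	ev.data.fd = pipefd[0];
	if (epoll_ctl(inner, EPOLL_CTL_ADD, pipefd[0], &ev) < 0)
		return 1;

	/* the outer instance watches the inner epoll fd */
	ev.data.fd = inner;
	if (epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev) < 0)
		return 1;

	/* make the pipe readable, so the inner instance has a ready item */
	if (write(pipefd[1], "x", 1) != 1)
		return 1;

	/* expected to report the inner epoll fd as readable */
	if (epoll_wait(outer, &out, 1, 1000) == 1)
		printf("inner epoll fd reported ready, events=0x%x\n", out.events);
	return 0;
}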
