[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20190121201456.28338-5-rpenyaev@suse.de>
Date: Mon, 21 Jan 2019 21:14:47 +0100
From: Roman Penyaev <rpenyaev@...e.de>
To: unlisted-recipients:; (no To-header on input)
Cc: Roman Penyaev <rpenyaev@...e.de>,
Andrew Morton <akpm@...ux-foundation.org>,
Davidlohr Bueso <dbueso@...e.de>,
Jason Baron <jbaron@...mai.com>,
Al Viro <viro@...iv.linux.org.uk>,
"Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Andrea Parri <andrea.parri@...rulasolutions.com>,
linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [RFC PATCH v2 04/13] epoll: some sanity flags checks for epoll syscalls for polling from userspace
There are several limitations if epfd is polled from userspace:
1. The EPOLLET flag is always expected (edge-triggered behavior)
2. No support for EPOLLWAKEUP
events are consumed from userspace, thus there is no way to call __pm_relax()
3. No support for EPOLLEXCLUSIVE
If device does not pass pollflags to wake_up() there is no way to
call poll() from the context under spinlock, thus special work is
scheduled to offload polling. In this specific case we can't
support exclusive wakeups, because we do not know actual result
of scheduled work.
4. epoll_wait() for epfd, created with EPOLL_USERPOLL flag, accepts events
as NULL and maxevents as 0. No other values are accepted.
Signed-off-by: Roman Penyaev <rpenyaev@...e.de>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Davidlohr Bueso <dbueso@...e.de>
Cc: Jason Baron <jbaron@...mai.com>
Cc: Al Viro <viro@...iv.linux.org.uk>
Cc: "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Andrea Parri <andrea.parri@...rulasolutions.com>
Cc: linux-fsdevel@...r.kernel.org
Cc: linux-kernel@...r.kernel.org
---
fs/eventpoll.c | 68 ++++++++++++++++++++++++++++++++++----------------
1 file changed, 46 insertions(+), 22 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index a73c077a552c..9c9283e4a073 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -423,6 +423,11 @@ static inline unsigned int ep_to_items_bm_length(unsigned int nr)
return PAGE_ALIGN(ALIGN(nr, 8) >> 3);
}
+static inline bool ep_polled_by_user(struct eventpoll *ep)
+{
+ return !!ep->user_header;
+}
+
/**
* ep_events_available - Checks if ready events might be available.
*
@@ -518,13 +523,17 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_PM_SLEEP
-static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
+static inline void ep_take_care_of_epollwakeup(struct eventpoll *ep,
+ struct epoll_event *epev)
{
- if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
- epev->events &= ~EPOLLWAKEUP;
+ if (epev->events & EPOLLWAKEUP) {
+ if (!capable(CAP_BLOCK_SUSPEND) || ep_polled_by_user(ep))
+ epev->events &= ~EPOLLWAKEUP;
+ }
}
#else
-static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
+static inline void ep_take_care_of_epollwakeup(struct eventpoll *ep,
+ struct epoll_event *epev)
{
epev->events &= ~EPOLLWAKEUP;
}
@@ -2274,10 +2283,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
if (!file_can_poll(tf.file))
goto error_tgt_fput;
- /* Check if EPOLLWAKEUP is allowed */
- if (ep_op_has_event(op))
- ep_take_care_of_epollwakeup(&epds);
-
/*
* We have to check that the file structure underneath the file descriptor
* the user passed to us _is_ an eventpoll file. And also we do not permit
@@ -2287,10 +2292,18 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
if (f.file == tf.file || !is_file_epoll(f.file))
goto error_tgt_fput;
+ /*
+ * At this point it is safe to assume that the "private_data" contains
+ * our own data structure.
+ */
+ ep = f.file->private_data;
+
/*
* epoll adds to the wakeup queue at EPOLL_CTL_ADD time only,
* so EPOLLEXCLUSIVE is not allowed for a EPOLL_CTL_MOD operation.
- * Also, we do not currently supported nested exclusive wakeups.
+ * Also, we do not currently supported nested exclusive wakeups
+ * and EPOLLEXCLUSIVE is not supported for epoll which is polled
+ * from userspace.
*/
if (ep_op_has_event(op) && (epds.events & EPOLLEXCLUSIVE)) {
if (op == EPOLL_CTL_MOD)
@@ -2298,13 +2311,18 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
(epds.events & ~EPOLLEXCLUSIVE_OK_BITS)))
goto error_tgt_fput;
+ if (ep_polled_by_user(ep))
+ goto error_tgt_fput;
}
- /*
- * At this point it is safe to assume that the "private_data" contains
- * our own data structure.
- */
- ep = f.file->private_data;
+ if (ep_op_has_event(op)) {
+ if (ep_polled_by_user(ep) && !(epds.events & EPOLLET))
+ /* Polled by user has only edge triggered behaviour */
+ goto error_tgt_fput;
+
+ /* Check if EPOLLWAKEUP is allowed */
+ ep_take_care_of_epollwakeup(ep, &epds);
+ }
/*
* When we insert an epoll file descriptor, inside another epoll file
@@ -2406,14 +2424,6 @@ static int do_epoll_wait(int epfd, struct epoll_event __user *events,
struct fd f;
struct eventpoll *ep;
- /* The maximum number of event must be greater than zero */
- if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
- return -EINVAL;
-
- /* Verify that the area passed by the user is writeable */
- if (!access_ok(events, maxevents * sizeof(struct epoll_event)))
- return -EFAULT;
-
/* Get the "struct file *" for the eventpoll file */
f = fdget(epfd);
if (!f.file)
@@ -2432,6 +2442,20 @@ static int do_epoll_wait(int epfd, struct epoll_event __user *events,
* our own data structure.
*/
ep = f.file->private_data;
+ if (!ep_polled_by_user(ep)) {
+ /* The maximum number of event must be greater than zero */
+ if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
+ goto error_fput;
+
+ /* Verify that the area passed by the user is writeable */
+ error = -EFAULT;
+ if (!access_ok(events, maxevents * sizeof(struct epoll_event)))
+ goto error_fput;
+ } else {
+ /* Use ring instead */
+ if (maxevents != 0 || events != NULL)
+ goto error_fput;
+ }
/* Time to fish for events ... */
error = ep_poll(ep, events, maxevents, timeout);
--
2.19.1
Powered by blists - more mailing lists