From 289caf5d8f6c61c6d2b7fd752a7f483cd153f182 Mon Sep 17 00:00:00 2001
From: Soheil Hassas Yeganeh
Date: Fri, 18 Dec 2020 14:01:44 -0800
Subject: epoll: check for events when removing a timed out thread from the wait queue

Patch series "simplify ep_poll".

This patch series is a followup based on the suggestions and feedback by Linus: https://lkml.kernel.org/r/CAHk-=wizk=OxUyQPbO8MS41w2Pag1kniUV5WdD5qWL-gq1kjDA@mail.gmail.com

The first patch in the series is a fix for the epoll race in the presence of timeouts, so that it can be cleanly backported to all affected stable kernels. The rest of the patch series simplifies the ep_poll() implementation. Some of these simplifications result in minor performance enhancements as well. We have kept these changes under selftests and internal benchmarks for a few days, and they show minor (1-2%) performance enhancements.

This patch (of 8):

After abc610e01c66 ("fs/epoll: avoid barrier after an epoll_wait(2) timeout"), we break out of the ep_poll loop upon timeout without checking whether there are any new events available. Prior to that patch series, we always called ep_events_available() after exiting the loop. This can cause races and missed wakeups. For example, consider the following scenario reported by Guantao Liu:

Suppose we have an eventfd added using EPOLLET to an epollfd.

Thread 1: Sleeps for just below 5ms and then writes to an eventfd.
Thread 2: Calls epoll_wait with a timeout of 5ms. If it sees an event on the eventfd, it will write back to that fd.
Thread 3: Calls epoll_wait with a negative timeout.

Prior to abc610e01c66, it is guaranteed that Thread 3 will be woken up by either Thread 1 or Thread 2. After abc610e01c66, Thread 3 can be blocked indefinitely if Thread 2 times out right before the write to the eventfd by Thread 1: Thread 2 is woken up from schedule_hrtimeout_range and, with eavail == 0, does not call ep_send_events().

To fix this issue: 1) Simplify the timed_out case as suggested by Linus. 2) While holding the lock, recheck whether the thread was woken up after its timeout expired.

Note that (2) is different from Linus' original suggestion: it does not set "eavail = ep_events_available(ep)", to avoid unnecessary contention (when there are too many timed-out threads and a small number of events), as well as the races mentioned in the discussion thread.

This is the first patch in the series so that the backport to stable releases is straightforward.
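For concreteness, the scenario can be sketched as a small userspace program (an illustrative reproducer with hypothetical names and timings, not Guantao Liu's original test; error handling omitted). On an affected kernel the final epoll_wait() can block forever; on a fixed kernel it always returns:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <unistd.h>

static int epfd, efd;

static void *thread1(void *arg)		/* writes just below 5ms */
{
	uint64_t v = 1;

	usleep(4900);
	write(efd, &v, sizeof(v));
	return NULL;
}

static void *thread2(void *arg)		/* 5ms timeout, writes back */
{
	struct epoll_event ev;
	uint64_t v = 1;

	if (epoll_wait(epfd, &ev, 1, 5) > 0)
		write(efd, &v, sizeof(v));
	return NULL;
}

int main(void)
{
	struct epoll_event ev = { .events = EPOLLIN | EPOLLET };
	pthread_t t1, t2;

	epfd = epoll_create1(0);
	efd = eventfd(0, EFD_NONBLOCK);
	ev.data.fd = efd;
	epoll_ctl(epfd, EPOLL_CTL_ADD, efd, &ev);

	pthread_create(&t1, NULL, thread1, NULL);
	pthread_create(&t2, NULL, thread2, NULL);

	/* Thread 3: negative timeout; hangs on affected kernels. */
	epoll_wait(epfd, &ev, 1, -1);
	puts("thread 3 woken");
	return 0;
}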
Link: https://lkml.kernel.org/r/20201106231635.3528496-1-soheil.kdev@gmail.com Link: https://lkml.kernel.org/r/CAHk-=wizk=OxUyQPbO8MS41w2Pag1kniUV5WdD5qWL-gq1kjDA@mail.gmail.com Link: https://lkml.kernel.org/r/20201106231635.3528496-2-soheil.kdev@gmail.com Fixes: abc610e01c66 ("fs/epoll: avoid barrier after an epoll_wait(2) timeout") Signed-off-by: Soheil Hassas Yeganeh Tested-by: Guantao Liu Suggested-by: Linus Torvalds Reported-by: Guantao Liu Reviewed-by: Eric Dumazet Reviewed-by: Willem de Bruijn Reviewed-by: Khazhismel Kumykov Reviewed-by: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 10b81e69db74..935c66c809ba 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1817,23 +1817,30 @@ fetch_events: } write_unlock_irq(&ep->lock); - if (eavail || res) - break; + if (!eavail && !res) + timed_out = !schedule_hrtimeout_range(to, slack, + HRTIMER_MODE_ABS); - if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) { - timed_out = 1; - break; - } - - /* We were woken up, thus go and try to harvest some events */ + /* + * We were woken up, thus go and try to harvest some events. + * If timed out and still on the wait queue, recheck eavail + * carefully under lock, below. + */ eavail = 1; - } while (0); __set_current_state(TASK_RUNNING); if (!list_empty_careful(&wait.entry)) { write_lock_irq(&ep->lock); + /* + * If the thread timed out and is not on the wait queue, it + * means that the thread was woken up after its timeout expired + * before it could reacquire the lock. Thus, when wait.entry is + * empty, it needs to harvest events. + */ + if (timed_out) + eavail = list_empty(&wait.entry); __remove_wait_queue(&ep->wq, &wait); write_unlock_irq(&ep->lock); } -- cgit v1.2.3 From 2efdaf7660c408d57721cc6dacb0167f866cb451 Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Fri, 18 Dec 2020 14:01:48 -0800 Subject: epoll: simplify signal handling Check signals before locking ep->lock, and immediately return -EINTR if there is any signal pending. This saves a few loads, stores, and branches from the hot path and simplifies the loop structure for follow up patches. Link: https://lkml.kernel.org/r/20201106231635.3528496-3-soheil.kdev@gmail.com Signed-off-by: Soheil Hassas Yeganeh Suggested-by: Linus Torvalds Reviewed-by: Eric Dumazet Reviewed-by: Willem de Bruijn Reviewed-by: Khazhismel Kumykov Cc: Guantao Liu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 935c66c809ba..4f460be723d0 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1733,7 +1733,7 @@ static inline struct timespec64 ep_set_mstimeout(long ms) static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, int maxevents, long timeout) { - int res = 0, eavail, timed_out = 0; + int res, eavail, timed_out = 0; u64 slack = 0; wait_queue_entry_t wait; ktime_t expires, *to = NULL; @@ -1780,6 +1780,9 @@ fetch_events: ep_reset_busy_poll_napi_id(ep); do { + if (signal_pending(current)) + return -EINTR; + /* * Internally init_wait() uses autoremove_wake_function(), * thus wait entry is removed from the wait queue on each @@ -1809,15 +1812,12 @@ fetch_events: * important. 
*/ eavail = ep_events_available(ep); - if (!eavail) { - if (signal_pending(current)) - res = -EINTR; - else - __add_wait_queue_exclusive(&ep->wq, &wait); - } + if (!eavail) + __add_wait_queue_exclusive(&ep->wq, &wait); + write_unlock_irq(&ep->lock); - if (!eavail && !res) + if (!eavail) timed_out = !schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS); @@ -1853,14 +1853,14 @@ send_events: * finding more events available and fetching * repeatedly. */ - res = -EINTR; + return -EINTR; } /* * Try to transfer events to user space. In case we get 0 events and * there's still timeout left over, we go trying again in search of * more luck. */ - if (!res && eavail && + if (eavail && !(res = ep_send_events(ep, events, maxevents)) && !timed_out) goto fetch_events; -- cgit v1.2.3 From cccd29bf0823bdfeb087b7661b06856b1b73bced Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Fri, 18 Dec 2020 14:01:51 -0800 Subject: epoll: pull fatal signal checks into ep_send_events() To simplify the code, pull the fatal signal check into ep_send_events(). ep_send_events() is called only from ep_poll(). Note that, previously, we always checked for fatal signals; now the check happens only if eavail is true. This should be fine because the goal of that check is to quickly return from epoll_wait() when there is a pending fatal signal. Link: https://lkml.kernel.org/r/20201106231635.3528496-4-soheil.kdev@gmail.com Signed-off-by: Soheil Hassas Yeganeh Suggested-by: Willem de Bruijn Reviewed-by: Eric Dumazet Reviewed-by: Willem de Bruijn Reviewed-by: Khazhismel Kumykov Cc: Guantao Liu Cc: Linus Torvalds Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 4f460be723d0..b9d87745fae6 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1625,6 +1625,14 @@ static int ep_send_events(struct eventpoll *ep, poll_table pt; int res = 0; + /* + * Always short-circuit for fatal signals to allow threads to make a + * timely exit without the chance of finding more events available and + * fetching repeatedly. + */ + if (fatal_signal_pending(current)) + return -EINTR; + init_poll_funcptr(&pt, NULL); mutex_lock(&ep->mtx); @@ -1846,15 +1854,6 @@ fetch_events: } send_events: - if (fatal_signal_pending(current)) { - /* - * Always short-circuit for fatal signals to allow - * threads to make a timely exit without the chance of - * finding more events available and fetching - * repeatedly. - */ - return -EINTR; - } /* * Try to transfer events to user space. In case we get 0 events and * there's still timeout left over, we go trying again in search of -- cgit v1.2.3 From e411596d48b5b77632deb91afcbc3185b9b658cb Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Fri, 18 Dec 2020 14:01:54 -0800 Subject: epoll: move eavail next to the list_empty_careful check This is a no-op change that simply makes the code more coherent.
Link: https://lkml.kernel.org/r/20201106231635.3528496-5-soheil.kdev@gmail.com Signed-off-by: Soheil Hassas Yeganeh Suggested-by: Linus Torvalds Reviewed-by: Eric Dumazet Reviewed-by: Willem de Bruijn Reviewed-by: Khazhismel Kumykov Cc: Guantao Liu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index b9d87745fae6..d8eef2157587 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1828,6 +1828,7 @@ fetch_events: if (!eavail) timed_out = !schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS); + __set_current_state(TASK_RUNNING); /* * We were woken up, thus go and try to harvest some events. @@ -1837,8 +1838,6 @@ fetch_events: eavail = 1; } while (0); - __set_current_state(TASK_RUNNING); - if (!list_empty_careful(&wait.entry)) { write_lock_irq(&ep->lock); /* -- cgit v1.2.3 From 1493c47fb140ddd9e5c291f0c0da3fb03741c766 Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Fri, 18 Dec 2020 14:01:57 -0800 Subject: epoll: simplify and optimize busy loop logic ep_events_available() is called multiple times around the busy loop logic, even though the logic is generally not used. ep_reset_busy_poll_napi_id() is similarly always called, even when the busy loop is not used. Eliminate ep_reset_busy_poll_napi_id() and inline it inside ep_busy_loop(). Make ep_busy_loop() return whether there are any events available after the busy loop. This eliminates unnecessary loads and branches, and simplifies the loop. Link: https://lkml.kernel.org/r/20201106231635.3528496-6-soheil.kdev@gmail.com Signed-off-by: Soheil Hassas Yeganeh Reviewed-by: Eric Dumazet Reviewed-by: Willem de Bruijn Reviewed-by: Khazhismel Kumykov Cc: Guantao Liu Cc: Linus Torvalds Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 40 +++++++++++++++++----------------------- 1 file changed, 17 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index d8eef2157587..03d0ac076a16 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -389,19 +389,24 @@ static bool ep_busy_loop_end(void *p, unsigned long start_time) * * we must do our busy polling with irqs enabled */ -static void ep_busy_loop(struct eventpoll *ep, int nonblock) +static bool ep_busy_loop(struct eventpoll *ep, int nonblock) { unsigned int napi_id = READ_ONCE(ep->napi_id); - if ((napi_id >= MIN_NAPI_ID) && net_busy_loop_on()) + if ((napi_id >= MIN_NAPI_ID) && net_busy_loop_on()) { napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end, ep, false, BUSY_POLL_BUDGET); -} - -static inline void ep_reset_busy_poll_napi_id(struct eventpoll *ep) -{ - if (ep->napi_id) + if (ep_events_available(ep)) + return true; + /* + * Busy poll timed out. Drop NAPI ID for now, we can add + * it back in when we have moved a socket with a valid NAPI + * ID onto the ready list.
+ */ ep->napi_id = 0; + return false; + } + return false; } /* @@ -441,12 +446,9 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi) #else -static inline void ep_busy_loop(struct eventpoll *ep, int nonblock) -{ -} - -static inline void ep_reset_busy_poll_napi_id(struct eventpoll *ep) +static inline bool ep_busy_loop(struct eventpoll *ep, int nonblock) { + return false; } static inline void ep_set_busy_poll_napi_id(struct epitem *epi) @@ -1772,21 +1774,13 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, } fetch_events: - - if (!ep_events_available(ep)) - ep_busy_loop(ep, timed_out); - eavail = ep_events_available(ep); + if (!eavail) + eavail = ep_busy_loop(ep, timed_out); + if (eavail) goto send_events; - /* - * Busy poll timed out. Drop NAPI ID for now, we can add - * it back in when we have moved a socket with a valid NAPI - * ID onto the ready list. - */ - ep_reset_busy_poll_napi_id(ep); - do { if (signal_pending(current)) return -EINTR; -- cgit v1.2.3 From e8c85328b1e88f4ee7f84a1fdbff2f2c7965e026 Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Fri, 18 Dec 2020 14:02:00 -0800 Subject: epoll: pull all code between fetch_events and send_event into the loop This is a no-op change which simplifies the follow up patches. Link: https://lkml.kernel.org/r/20201106231635.3528496-7-soheil.kdev@gmail.com Signed-off-by: Soheil Hassas Yeganeh Suggested-by: Linus Torvalds Reviewed-by: Eric Dumazet Reviewed-by: Willem de Bruijn Reviewed-by: Khazhismel Kumykov Cc: Guantao Liu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 03d0ac076a16..63a7a6e13dfc 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1774,14 +1774,14 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, } fetch_events: - eavail = ep_events_available(ep); - if (!eavail) - eavail = ep_busy_loop(ep, timed_out); + do { + eavail = ep_events_available(ep); + if (!eavail) + eavail = ep_busy_loop(ep, timed_out); - if (eavail) - goto send_events; + if (eavail) + goto send_events; - do { if (signal_pending(current)) return -EINTR; @@ -1830,21 +1830,22 @@ fetch_events: * carefully under lock, below. */ eavail = 1; - } while (0); - if (!list_empty_careful(&wait.entry)) { - write_lock_irq(&ep->lock); - /* - * If the thread timed out and is not on the wait queue, it - * means that the thread was woken up after its timeout expired - * before it could reacquire the lock. Thus, when wait.entry is - * empty, it needs to harvest events. - */ - if (timed_out) - eavail = list_empty(&wait.entry); - __remove_wait_queue(&ep->wq, &wait); - write_unlock_irq(&ep->lock); - } + if (!list_empty_careful(&wait.entry)) { + write_lock_irq(&ep->lock); + /* + * If the thread timed out and is not on the wait queue, + * it means that the thread was woken up after its + * timeout expired before it could reacquire the lock. + * Thus, when wait.entry is empty, it needs to harvest + * events. 
+ */ + if (timed_out) + eavail = list_empty(&wait.entry); + __remove_wait_queue(&ep->wq, &wait); + write_unlock_irq(&ep->lock); + } + } while (0); send_events: /* -- cgit v1.2.3 From 00b27634bc471e0198f93d48694171121af2e159 Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Fri, 18 Dec 2020 14:02:03 -0800 Subject: epoll: replace gotos with a proper loop The existing loop is pointless, and the labels make it really hard to follow the structure. Replace that control structure with a simple loop that returns when there are new events, there is a signal, or the thread has timed out. Link: https://lkml.kernel.org/r/20201106231635.3528496-8-soheil.kdev@gmail.com Signed-off-by: Soheil Hassas Yeganeh Suggested-by: Linus Torvalds Reviewed-by: Eric Dumazet Reviewed-by: Willem de Bruijn Reviewed-by: Khazhismel Kumykov Cc: Guantao Liu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 63a7a6e13dfc..1e0030cb805b 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1743,7 +1743,7 @@ static inline struct timespec64 ep_set_mstimeout(long ms) static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, int maxevents, long timeout) { - int res, eavail, timed_out = 0; + int res, eavail = 0, timed_out = 0; u64 slack = 0; wait_queue_entry_t wait; ktime_t expires, *to = NULL; @@ -1769,18 +1769,30 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, write_lock_irq(&ep->lock); eavail = ep_events_available(ep); write_unlock_irq(&ep->lock); - - goto send_events; } -fetch_events: - do { + while (1) { + if (eavail) { + /* + * Try to transfer events to user space. In case we get + * 0 events and there's still timeout left over, we go + * trying again in search of more luck. + */ + res = ep_send_events(ep, events, maxevents); + if (res) + return res; + } + + if (timed_out) + return 0; + eavail = ep_events_available(ep); - if (!eavail) - eavail = ep_busy_loop(ep, timed_out); + if (eavail) + continue; + eavail = ep_busy_loop(ep, timed_out); if (eavail) - goto send_events; + continue; if (signal_pending(current)) return -EINTR; @@ -1845,19 +1857,7 @@ fetch_events: __remove_wait_queue(&ep->wq, &wait); write_unlock_irq(&ep->lock); } - } while (0); - -send_events: - /* - * Try to transfer events to user space. In case we get 0 events and - * there's still timeout left over, we go trying again in search of - * more luck. - */ - if (eavail && - !(res = ep_send_events(ep, events, maxevents)) && !timed_out) - goto fetch_events; - - return res; + } } /** -- cgit v1.2.3 From e59d3c64cba69b57263dff1d62838bc6a819ae37 Mon Sep 17 00:00:00 2001 From: Soheil Hassas Yeganeh Date: Fri, 18 Dec 2020 14:02:06 -0800 Subject: epoll: eliminate unnecessary lock for zero timeout We call ep_events_available() under lock when timeout is 0, and then call it without locks in the loop for the other cases. Instead, call ep_events_available() without lock for all cases. For non-zero timeouts, we will recheck after adding the thread to the wait queue. For zero timeout cases, by definition, the user is opportunistically polling and will have to call epoll_wait again in the future. Note that this lock was kept in c5a282e9635e9 because the whole loop was historically under lock. This patch results in a 1% CPU/RPC reduction in RPC benchmarks.
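To make the user-visible contract concrete, the following is a minimal sketch of the opportunistic zero-timeout polling pattern referred to above (illustrative only; handle_event() and do_other_work() are hypothetical stubs):

#include <sys/epoll.h>

static void handle_event(struct epoll_event *ev) { (void)ev; }	/* hypothetical */
static void do_other_work(void) { }				/* hypothetical */

void poll_loop(int epfd)
{
	struct epoll_event evs[64];
	int i, n;

	for (;;) {
		/*
		 * timeout == 0: report only events already on the ready
		 * list. An event queued concurrently (e.g. from an IRQ)
		 * may be missed here; the caller simply picks it up on
		 * the next iteration.
		 */
		n = epoll_wait(epfd, evs, 64, 0);
		for (i = 0; i < n; i++)
			handle_event(&evs[i]);
		do_other_work();
	}
}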
Link: https://lkml.kernel.org/r/20201106231635.3528496-9-soheil.kdev@gmail.com Signed-off-by: Soheil Hassas Yeganeh Suggested-by: Eric Dumazet Reviewed-by: Eric Dumazet Reviewed-by: Willem de Bruijn Reviewed-by: Khazhismel Kumykov Cc: Guantao Liu Cc: Linus Torvalds Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 1e0030cb805b..9efb553b2b2b 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1743,7 +1743,7 @@ static inline struct timespec64 ep_set_mstimeout(long ms) static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, int maxevents, long timeout) { - int res, eavail = 0, timed_out = 0; + int res, eavail, timed_out = 0; u64 slack = 0; wait_queue_entry_t wait; ktime_t expires, *to = NULL; @@ -1759,18 +1759,21 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, } else if (timeout == 0) { /* * Avoid the unnecessary trip to the wait queue loop, if the - * caller specified a non blocking operation. We still need - * lock because we could race and not see an epi being added - * to the ready list while in irq callback. Thus incorrectly - * returning 0 back to userspace. + * caller specified a non blocking operation. */ timed_out = 1; - - write_lock_irq(&ep->lock); - eavail = ep_events_available(ep); - write_unlock_irq(&ep->lock); } + /* + * This call is racy: We may or may not see events that are being added + * to the ready list under the lock (e.g., in IRQ callbacks). For cases + * with a non-zero timeout, this thread will check the ready list under + * lock and will add itself to the wait queue. For cases with a zero + * timeout, the user by definition should not care and will have to + * recheck again. + */ + eavail = ep_events_available(ep); + while (1) { if (eavail) { /* @@ -1786,10 +1789,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, if (timed_out) return 0; - eavail = ep_events_available(ep); - if (eavail) - continue; - eavail = ep_busy_loop(ep, timed_out); if (eavail) continue; -- cgit v1.2.3 From 7cdf7c20e97141eadb05121cc521c8eff47e7d93 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 18 Dec 2020 14:05:35 -0800 Subject: epoll: convert internal api to timespec64 Patch series "add epoll_pwait2 syscall", v4. Enable nanosecond timeouts for epoll. Analogous to pselect and ppoll, introduce an epoll_wait syscall variant that takes a struct timespec instead of int timeout. This patch (of 4): Make epoll more consistent with select/poll: pass along the timeout as a timespec64 pointer. In anticipation of additional changes affecting all three polling mechanisms: - add epoll_pwait2 syscall with timespec semantics, and share poll_select_set_timeout implementation. - compute slack before conversion to absolute time, to save one ktime_get_ts64 call.
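As a rough userspace analogue of the mapping this patch introduces (a sketch under stated assumptions, not the kernel helper itself; the in-kernel ep_timeout_to_timespec() additionally adds the current time from ktime_get_ts64(), since the hrtimer is programmed with an absolute expiry):

#include <stddef.h>
#include <time.h>

/*
 * Illustrative analogue of the new internal convention: a negative ms
 * maps to a NULL timespec pointer ("block indefinitely"), zero maps to
 * a zero timespec ("don't block"), and a positive ms maps to a relative
 * timespec.
 */
static struct timespec *ms_to_timespec(struct timespec *to, long ms)
{
	if (ms < 0)
		return NULL;

	to->tv_sec = ms / 1000;
	to->tv_nsec = (ms % 1000) * 1000000L;
	return to;
}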
Link: https://lkml.kernel.org/r/20201121144401.3727659-1-willemdebruijn.kernel@gmail.com Link: https://lkml.kernel.org/r/20201121144401.3727659-2-willemdebruijn.kernel@gmail.com Signed-off-by: Willem de Bruijn Cc: Al Viro Cc: Matthew Wilcox (Oracle) Cc: Arnd Bergmann Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 57 +++++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 9efb553b2b2b..1ba406615991 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1712,15 +1712,25 @@ static int ep_send_events(struct eventpoll *ep, return res; } -static inline struct timespec64 ep_set_mstimeout(long ms) +static struct timespec64 *ep_timeout_to_timespec(struct timespec64 *to, long ms) { - struct timespec64 now, ts = { - .tv_sec = ms / MSEC_PER_SEC, - .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC), - }; + struct timespec64 now; + + if (ms < 0) + return NULL; + + if (!ms) { + to->tv_sec = 0; + to->tv_nsec = 0; + return to; + } + + to->tv_sec = ms / MSEC_PER_SEC; + to->tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC); ktime_get_ts64(&now); - return timespec64_add_safe(now, ts); + *to = timespec64_add_safe(now, *to); + return to; } /** @@ -1732,8 +1742,8 @@ static inline struct timespec64 ep_set_mstimeout(long ms) * stored. * @maxevents: Size (in terms of number of events) of the caller event buffer. * @timeout: Maximum timeout for the ready events fetch operation, in - * milliseconds. If the @timeout is zero, the function will not block, - * while if the @timeout is less than zero, the function will block + * timespec. If the timeout is zero, the function will not block, + * while if the @timeout ptr is NULL, the function will block * until at least one event has been retrieved (or an error * occurred). * @@ -1741,7 +1751,7 @@ static inline struct timespec64 ep_set_mstimeout(long ms) * error code, in case of error. */ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, - int maxevents, long timeout) + int maxevents, struct timespec64 *timeout) { int res, eavail, timed_out = 0; u64 slack = 0; @@ -1750,13 +1760,11 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, lockdep_assert_irqs_enabled(); - if (timeout > 0) { - struct timespec64 end_time = ep_set_mstimeout(timeout); - - slack = select_estimate_accuracy(&end_time); + if (timeout && (timeout->tv_sec | timeout->tv_nsec)) { + slack = select_estimate_accuracy(timeout); to = &expires; - *to = timespec64_to_ktime(end_time); - } else if (timeout == 0) { + *to = timespec64_to_ktime(*timeout); + } else if (timeout) { /* * Avoid the unnecessary trip to the wait queue loop, if the * caller specified a non blocking operation. @@ -2175,7 +2183,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, * part of the user space epoll_wait(2). */ static int do_epoll_wait(int epfd, struct epoll_event __user *events, - int maxevents, int timeout) + int maxevents, struct timespec64 *to) { int error; struct fd f; @@ -2209,7 +2217,7 @@ static int do_epoll_wait(int epfd, struct epoll_event __user *events, ep = f.file->private_data; /* Time to fish for events ... 
*/ - error = ep_poll(ep, events, maxevents, timeout); + error = ep_poll(ep, events, maxevents, to); error_fput: fdput(f); @@ -2219,7 +2227,10 @@ error_fput: SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout) { - return do_epoll_wait(epfd, events, maxevents, timeout); + struct timespec64 to; + + return do_epoll_wait(epfd, events, maxevents, + ep_timeout_to_timespec(&to, timeout)); } /* @@ -2230,6 +2241,7 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout, const sigset_t __user *, sigmask, size_t, sigsetsize) { + struct timespec64 to; int error; /* @@ -2240,7 +2252,9 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, if (error) return error; - error = do_epoll_wait(epfd, events, maxevents, timeout); + error = do_epoll_wait(epfd, events, maxevents, + ep_timeout_to_timespec(&to, timeout)); + restore_saved_sigmask_unless(error == -EINTR); return error; @@ -2253,6 +2267,7 @@ COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd, const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize) { + struct timespec64 to; long err; /* @@ -2263,7 +2278,9 @@ COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd, if (err) return err; - err = do_epoll_wait(epfd, events, maxevents, timeout); + err = do_epoll_wait(epfd, events, maxevents, + ep_timeout_to_timespec(&to, timeout)); + restore_saved_sigmask_unless(err == -EINTR); return err; -- cgit v1.2.3 From 58169a52ebc9a733aeb5bea857bc5daa71a301bb Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 18 Dec 2020 14:05:38 -0800 Subject: epoll: add syscall epoll_pwait2 Add syscall epoll_pwait2, an epoll_wait variant with nsec resolution that replaces int timeout with struct timespec. It is equivalent otherwise. int epoll_pwait2(int fd, struct epoll_event *events, int maxevents, const struct timespec *timeout, const sigset_t *sigset); The underlying hrtimer is already programmed with nsec resolution. pselect and ppoll also set nsec resolution timeout with timespec. The sigset_t in epoll_pwait has a compat variant. epoll_pwait2 needs the same. For timespec, only support this new interface on 2038 aware platforms that define __kernel_timespec_t. So no CONFIG_COMPAT_32BIT_TIME. Link: https://lkml.kernel.org/r/20201121144401.3727659-3-willemdebruijn.kernel@gmail.com Signed-off-by: Willem de Bruijn Cc: Al Viro Cc: Arnd Bergmann Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 73 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 1ba406615991..a829af074eb5 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -2237,11 +2237,10 @@ SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events, * Implement the event wait interface for the eventpoll file. It is the kernel * part of the user space epoll_pwait(2). 
*/ -SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, - int, maxevents, int, timeout, const sigset_t __user *, sigmask, - size_t, sigsetsize) +static int do_epoll_pwait(int epfd, struct epoll_event __user *events, + int maxevents, struct timespec64 *to, + const sigset_t __user *sigmask, size_t sigsetsize) { - struct timespec64 to; int error; /* @@ -2252,22 +2251,48 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, if (error) return error; - error = do_epoll_wait(epfd, events, maxevents, - ep_timeout_to_timespec(&to, timeout)); + error = do_epoll_wait(epfd, events, maxevents, to); restore_saved_sigmask_unless(error == -EINTR); return error; } -#ifdef CONFIG_COMPAT -COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd, - struct epoll_event __user *, events, - int, maxevents, int, timeout, - const compat_sigset_t __user *, sigmask, - compat_size_t, sigsetsize) +SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, + int, maxevents, int, timeout, const sigset_t __user *, sigmask, + size_t, sigsetsize) { struct timespec64 to; + + return do_epoll_pwait(epfd, events, maxevents, + ep_timeout_to_timespec(&to, timeout), + sigmask, sigsetsize); +} + +SYSCALL_DEFINE6(epoll_pwait2, int, epfd, struct epoll_event __user *, events, + int, maxevents, const struct __kernel_timespec __user *, timeout, + const sigset_t __user *, sigmask, size_t, sigsetsize) +{ + struct timespec64 ts, *to = NULL; + + if (timeout) { + if (get_timespec64(&ts, timeout)) + return -EFAULT; + to = &ts; + if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) + return -EINVAL; + } + + return do_epoll_pwait(epfd, events, maxevents, to, + sigmask, sigsetsize); +} + +#ifdef CONFIG_COMPAT +static int do_compat_epoll_pwait(int epfd, struct epoll_event __user *events, + int maxevents, struct timespec64 *timeout, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize) +{ long err; /* @@ -2278,13 +2303,47 @@ COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd, if (err) return err; - err = do_epoll_wait(epfd, events, maxevents, - ep_timeout_to_timespec(&to, timeout)); + err = do_epoll_wait(epfd, events, maxevents, timeout); restore_saved_sigmask_unless(err == -EINTR); return err; } + +COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd, + struct epoll_event __user *, events, + int, maxevents, int, timeout, + const compat_sigset_t __user *, sigmask, + compat_size_t, sigsetsize) +{ + struct timespec64 to; + + return do_compat_epoll_pwait(epfd, events, maxevents, + ep_timeout_to_timespec(&to, timeout), + sigmask, sigsetsize); +} + +COMPAT_SYSCALL_DEFINE6(epoll_pwait2, int, epfd, + struct epoll_event __user *, events, + int, maxevents, + const struct __kernel_timespec __user *, timeout, + const compat_sigset_t __user *, sigmask, + compat_size_t, sigsetsize) +{ + struct timespec64 ts, *to = NULL; + + if (timeout) { + if (get_timespec64(&ts, timeout)) + return -EFAULT; + to = &ts; + if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) + return -EINVAL; + } + + return do_compat_epoll_pwait(epfd, events, maxevents, to, + sigmask, sigsetsize); +} + #endif static int __init eventpoll_init(void) -- cgit v1.2.3
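For reference, a hedged usage sketch of the new syscall from userspace (assumes headers new enough to define SYS_epoll_pwait2; glibc 2.35 and later also ship an epoll_pwait2() wrapper, which is outside the scope of this series):

#define _GNU_SOURCE
#include <signal.h>
#include <sys/epoll.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static int xepoll_pwait2(int epfd, struct epoll_event *events, int maxevents,
			 const struct timespec *timeout, const sigset_t *sigmask)
{
	/*
	 * Note: the raw syscall expects the size of the kernel's sigset_t
	 * (8 bytes on most ABIs), not glibc's much larger sigset_t.
	 */
	return syscall(SYS_epoll_pwait2, epfd, events, maxevents, timeout,
		       sigmask, sigmask ? 8UL : 0UL);
}

int wait_half_a_millisecond(int epfd, struct epoll_event *evs, int maxevents)
{
	/* 500us: finer than the 1ms granularity of epoll_wait(2). */
	struct timespec to = { .tv_sec = 0, .tv_nsec = 500 * 1000 };

	return xepoll_pwait2(epfd, evs, maxevents, &to, NULL);
}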