author     Davidlohr Bueso <dave@stgolabs.net>             2019-01-03 15:27:26 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-01-04 13:13:46 -0800
commit     86c051793b4c941ee4481725d57cf2a27f6b3aaf (patch)
tree       f971e2b0f3dd64fd2fb6f46707c10cdf549de09d
parent     35cff1a6e0236500584a8ae227fe08120d9b5ee2 (diff)
fs/epoll: deal with wait_queue only once
There is no reason why we rearm the waitqueue upon every fetch_events
retry (for when events are found yet send_events() fails). If nothing
else, this saves four lock operations per retry (the spin_lock/unlock
pairs around both removing and re-adding the wait entry), and it
further reduces the scope of the lock.
[akpm@linux-foundation.org: restore code to original position, fix and reflow comment]
Link: http://lkml.kernel.org/r/20181114182532.27981-2-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Jason Baron <jbaron@akamai.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 fs/eventpoll.c | 29 ++++++++++++++++++-----------
 1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 752dbc41c941..2329f96469e2 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1749,6 +1749,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 {
 	int res = 0, eavail, timed_out = 0;
 	u64 slack = 0;
+	bool waiter = false;
 	wait_queue_entry_t wait;
 	ktime_t expires, *to = NULL;
 
@@ -1794,14 +1795,18 @@ fetch_events:
 	ep_reset_busy_poll_napi_id(ep);
 
 	/*
-	 * We don't have any available event to return to the caller.
-	 * We need to sleep here, and we will be wake up by
-	 * ep_poll_callback() when events will become available.
+	 * We don't have any available event to return to the caller. We need
+	 * to sleep here, and we will be woken by ep_poll_callback() when events
+	 * become available.
 	 */
-	init_waitqueue_entry(&wait, current);
-	spin_lock_irq(&ep->wq.lock);
-	__add_wait_queue_exclusive(&ep->wq, &wait);
-	spin_unlock_irq(&ep->wq.lock);
+	if (!waiter) {
+		waiter = true;
+		init_waitqueue_entry(&wait, current);
+
+		spin_lock_irq(&ep->wq.lock);
+		__add_wait_queue_exclusive(&ep->wq, &wait);
+		spin_unlock_irq(&ep->wq.lock);
+	}
 
 	for (;;) {
 		/*
@@ -1837,10 +1842,6 @@ fetch_events:
 
 	__set_current_state(TASK_RUNNING);
 
-	spin_lock_irq(&ep->wq.lock);
-	__remove_wait_queue(&ep->wq, &wait);
-	spin_unlock_irq(&ep->wq.lock);
-
 send_events:
 	/*
 	 * Try to transfer events to user space. In case we get 0 events and
@@ -1851,6 +1852,12 @@ send_events:
 	    !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
 		goto fetch_events;
 
+	if (waiter) {
+		spin_lock_irq(&ep->wq.lock);
+		__remove_wait_queue(&ep->wq, &wait);
+		spin_unlock_irq(&ep->wq.lock);
+	}
+
 	return res;
 }
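
For illustration, the same arm-once idea can be sketched in userspace. Everything below (toy_waitqueue, poll_like, the fake event helpers) is a hypothetical stand-in rather than the epoll internals, with a pthread mutex playing the role of ep->wq.lock:

/* Toy model of the "add the waiter once, remove it once" pattern.
 * All names here are hypothetical stand-ins, not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_waitqueue {
	pthread_mutex_t lock;	/* plays the role of ep->wq.lock */
	int waiters;		/* stand-in for the wait-entry list */
};

static void wq_add(struct toy_waitqueue *wq)
{
	pthread_mutex_lock(&wq->lock);
	wq->waiters++;
	pthread_mutex_unlock(&wq->lock);
}

static void wq_remove(struct toy_waitqueue *wq)
{
	pthread_mutex_lock(&wq->lock);
	wq->waiters--;
	pthread_mutex_unlock(&wq->lock);
}

/* Fake event sources: events show up on the 2nd attempt, but the
 * first delivery "fails" (returns 0), forcing one retry. */
static bool events_available(int attempt) { return attempt >= 2; }
static int deliver_events(int attempt) { return attempt >= 3 ? 5 : 0; }

static int poll_like(struct toy_waitqueue *wq)
{
	bool waiter = false;	/* mirrors the new flag in ep_poll() */
	int res, attempt = 0;

fetch_events:
	attempt++;
	if (!waiter) {
		/* Arm the wait entry exactly once; before the patch this
		 * lock/unlock pair ran again on every retry. */
		waiter = true;
		wq_add(wq);
	}
	while (!events_available(attempt))
		attempt++;	/* the real code sleeps here instead */

	/* A failed delivery retries without re-arming the wait entry. */
	if (!(res = deliver_events(attempt)))
		goto fetch_events;

	if (waiter)
		wq_remove(wq);	/* disarm once, on the way out */
	return res;
}

int main(void)
{
	struct toy_waitqueue wq = { PTHREAD_MUTEX_INITIALIZER, 0 };
	int res = poll_like(&wq);
	printf("delivered %d events, waiters left: %d\n", res, wq.waiters);
	return 0;
}

As in the patch, a retry of the delivery path reuses the already-queued wait entry, so the remove+add lock/unlock pairs (the four lock operations the commit message counts) run once per ep_poll() call instead of once per retry.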