path: root/net/socket.c
author     Christoph Hellwig <hch@lst.de>   2018-04-09 15:25:28 +0200
committer  Christoph Hellwig <hch@lst.de>   2018-05-26 09:16:44 +0200
commit     152524231023c76b3b7b3e71017c7f951812868d (patch)
tree       96a821e8d4d5ac0577363cc9c5e7bc6d5ddcc051 /net/socket.c
parent     3cafb37633a1230011c5415e6f394b05260d21ad (diff)
download   linux-152524231023c76b3b7b3e71017c7f951812868d.tar.bz2
net: add support for ->poll_mask in proto_ops
The socket file operations still implement ->poll until all protocols are switched over.

Signed-off-by: Christoph Hellwig <hch@lst.de>
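To illustrate what the protocol-side conversion looks like once this infrastructure is in place, the sketch below shows a hypothetical proto_ops that supplies ->poll_mask instead of ->poll. The identifiers example_poll_mask, example_release and PF_EXAMPLE, as well as the specific readiness checks, are illustrative assumptions and are not part of this commit.

/*
 * Illustrative only: a protocol implementing ->poll_mask instead of ->poll.
 * All identifiers prefixed "example_" (and PF_EXAMPLE) are placeholders.
 */
static __poll_t example_poll_mask(struct socket *sock, __poll_t events)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	/* readable: data queued on the receive queue */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	/* writable: enough space left in the send buffer */
	if (sock_writeable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM;
	/* pending socket error */
	if (sk->sk_err)
		mask |= EPOLLERR;

	return mask;
}

static const struct proto_ops example_proto_ops = {
	.family		= PF_EXAMPLE,		/* placeholder family */
	.owner		= THIS_MODULE,
	.release	= example_release,	/* hypothetical */
	.poll_mask	= example_poll_mask,	/* replaces .poll after conversion */
	/* ... remaining ops unchanged ... */
};

With ->poll_mask present and ->poll left NULL, the sock_poll() added below takes the new branch: it registers the waiter on sk_sleep(sock->sk) via sock_get_poll_head() and then calls ->poll_mask to compute the event mask.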
Diffstat (limited to 'net/socket.c')
-rw-r--r--   net/socket.c   48
1 file changed, 43 insertions(+), 5 deletions(-)
diff --git a/net/socket.c b/net/socket.c
index 571ee4005192..2d752e9eb3f9 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -117,8 +117,10 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
static int sock_mmap(struct file *file, struct vm_area_struct *vma);
static int sock_close(struct inode *inode, struct file *file);
-static __poll_t sock_poll(struct file *file,
- struct poll_table_struct *wait);
+static struct wait_queue_head *sock_get_poll_head(struct file *file,
+ __poll_t events);
+static __poll_t sock_poll_mask(struct file *file, __poll_t);
+static __poll_t sock_poll(struct file *file, struct poll_table_struct *wait);
static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
static long compat_sock_ioctl(struct file *file,
@@ -141,6 +143,8 @@ static const struct file_operations socket_file_ops = {
.llseek = no_llseek,
.read_iter = sock_read_iter,
.write_iter = sock_write_iter,
+ .get_poll_head = sock_get_poll_head,
+ .poll_mask = sock_poll_mask,
.poll = sock_poll,
.unlocked_ioctl = sock_ioctl,
#ifdef CONFIG_COMPAT
@@ -1114,14 +1118,48 @@ out_release:
}
EXPORT_SYMBOL(sock_create_lite);
+static struct wait_queue_head *sock_get_poll_head(struct file *file,
+ __poll_t events)
+{
+ struct socket *sock = file->private_data;
+
+ if (!sock->ops->poll_mask)
+ return NULL;
+ sock_poll_busy_loop(sock, events);
+ return sk_sleep(sock->sk);
+}
+
+static __poll_t sock_poll_mask(struct file *file, __poll_t events)
+{
+ struct socket *sock = file->private_data;
+
+ /*
+ * We need to be sure we are in sync with the socket flags modification.
+ *
+ * This memory barrier is paired in the wq_has_sleeper.
+ */
+ smp_mb();
+
+ /* this socket can poll_ll so tell the system call */
+ return sock->ops->poll_mask(sock, events) |
+ (sk_can_busy_loop(sock->sk) ? POLL_BUSY_LOOP : 0);
+}
+
/* No kernel lock held - perfect */
static __poll_t sock_poll(struct file *file, poll_table *wait)
{
struct socket *sock = file->private_data;
- __poll_t events = poll_requested_events(wait);
+ __poll_t events = poll_requested_events(wait), mask = 0;
- sock_poll_busy_loop(sock, events);
- return sock->ops->poll(file, sock, wait) | sock_poll_busy_flag(sock);
+ if (sock->ops->poll) {
+ sock_poll_busy_loop(sock, events);
+ mask = sock->ops->poll(file, sock, wait);
+ } else if (sock->ops->poll_mask) {
+ sock_poll_wait(file, sock_get_poll_head(file, events), wait);
+ mask = sock->ops->poll_mask(sock, events);
+ }
+
+ return mask | sock_poll_busy_flag(sock);
}
static int sock_mmap(struct file *file, struct vm_area_struct *vma)
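
The smp_mb() in sock_poll_mask() above pairs with the barrier inside wq_has_sleeper() on the wake-up side. As a rough sketch of that other half (adapted from the pattern used by the default socket wakeup callbacks in net/core/sock.c; the function name example_data_ready is a placeholder and this code is not part of the patch):

/*
 * Sketch of the wake-up side that sock_poll_mask()'s smp_mb() pairs with.
 * Adapted from the sock_def_readable() pattern; not part of this commit.
 */
static void example_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	/*
	 * skwq_has_sleeper() -> wq_has_sleeper() issues smp_mb() before
	 * checking waitqueue_active(), pairing with the smp_mb() the poller
	 * executes after it has been added to sk_sleep(sk).
	 */
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait,
						EPOLLIN | EPOLLRDNORM);
	rcu_read_unlock();
}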