Diffstat (limited to 'net/netlink/af_netlink.c')
-rw-r--r--  net/netlink/af_netlink.c  900
1 file changed, 804 insertions(+), 96 deletions(-)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1e3fd5bfcd86..12ac6b47a35c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -3,6 +3,7 @@
*
* Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+ * Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -55,87 +56,45 @@
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>
-#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
-#define NLGRPLONGS(x) (NLGRPSZ(x)/sizeof(unsigned long))
-
-struct netlink_sock {
- /* struct sock has to be the first member of netlink_sock */
- struct sock sk;
- u32 portid;
- u32 dst_portid;
- u32 dst_group;
- u32 flags;
- u32 subscriptions;
- u32 ngroups;
- unsigned long *groups;
- unsigned long state;
- wait_queue_head_t wait;
- struct netlink_callback *cb;
- struct mutex *cb_mutex;
- struct mutex cb_def_mutex;
- void (*netlink_rcv)(struct sk_buff *skb);
- void (*netlink_bind)(int group);
- struct module *module;
-};
+#include "af_netlink.h"
struct listeners {
struct rcu_head rcu;
unsigned long masks[0];
};
+/* state bits: bit numbers in nlk->state, used with test_bit()/set_bit() */
+#define NETLINK_CONGESTED 0x0
+
+/* flags */
#define NETLINK_KERNEL_SOCKET 0x1
#define NETLINK_RECV_PKTINFO 0x2
#define NETLINK_BROADCAST_SEND_ERROR 0x4
#define NETLINK_RECV_NO_ENOBUFS 0x8
-static inline struct netlink_sock *nlk_sk(struct sock *sk)
-{
- return container_of(sk, struct netlink_sock, sk);
-}
-
static inline int netlink_is_kernel(struct sock *sk)
{
return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}
-struct nl_portid_hash {
- struct hlist_head *table;
- unsigned long rehash_time;
-
- unsigned int mask;
- unsigned int shift;
-
- unsigned int entries;
- unsigned int max_shift;
-
- u32 rnd;
-};
-
-struct netlink_table {
- struct nl_portid_hash hash;
- struct hlist_head mc_list;
- struct listeners __rcu *listeners;
- unsigned int flags;
- unsigned int groups;
- struct mutex *cb_mutex;
- struct module *module;
- void (*bind)(int group);
- int registered;
-};
-
-static struct netlink_table *nl_table;
+struct netlink_table *nl_table;
+EXPORT_SYMBOL_GPL(nl_table);
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
static int netlink_dump(struct sock *sk);
+static void netlink_skb_destructor(struct sk_buff *skb);
-static DEFINE_RWLOCK(nl_table_lock);
+DEFINE_RWLOCK(nl_table_lock);
+EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);
#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
@@ -152,6 +111,599 @@ static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
}
+static void netlink_overrun(struct sock *sk)
+{
+ struct netlink_sock *nlk = nlk_sk(sk);
+
+ if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
+ if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
+ sk->sk_err = ENOBUFS;
+ sk->sk_error_report(sk);
+ }
+ }
+ atomic_inc(&sk->sk_drops);
+}
+
+static void netlink_rcv_wake(struct sock *sk)
+{
+ struct netlink_sock *nlk = nlk_sk(sk);
+
+ if (skb_queue_empty(&sk->sk_receive_queue))
+ clear_bit(NETLINK_CONGESTED, &nlk->state);
+ if (!test_bit(NETLINK_CONGESTED, &nlk->state))
+ wake_up_interruptible(&nlk->wait);
+}
+
+#ifdef CONFIG_NETLINK_MMAP
+static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
+{
+ return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
+}
+
+static bool netlink_rx_is_mmaped(struct sock *sk)
+{
+ return nlk_sk(sk)->rx_ring.pg_vec != NULL;
+}
+
+static bool netlink_tx_is_mmaped(struct sock *sk)
+{
+ return nlk_sk(sk)->tx_ring.pg_vec != NULL;
+}
+
+static __pure struct page *pgvec_to_page(const void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ return vmalloc_to_page(addr);
+ else
+ return virt_to_page(addr);
+}
+
+static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
+{
+ unsigned int i;
+
+ for (i = 0; i < len; i++) {
+ if (pg_vec[i] != NULL) {
+ if (is_vmalloc_addr(pg_vec[i]))
+ vfree(pg_vec[i]);
+ else
+ free_pages((unsigned long)pg_vec[i], order);
+ }
+ }
+ kfree(pg_vec);
+}
+
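+/* Try a physically contiguous allocation first, without warnings or
+ * retries, then fall back to vmalloc, and finally retry the page
+ * allocator with reclaim enabled.
+ */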
+static void *alloc_one_pg_vec_page(unsigned long order)
+{
+ void *buffer;
+ gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
+ __GFP_NOWARN | __GFP_NORETRY;
+
+ buffer = (void *)__get_free_pages(gfp_flags, order);
+ if (buffer != NULL)
+ return buffer;
+
+ buffer = vzalloc((1 << order) * PAGE_SIZE);
+ if (buffer != NULL)
+ return buffer;
+
+ gfp_flags &= ~__GFP_NORETRY;
+ return (void *)__get_free_pages(gfp_flags, order);
+}
+
+static void **alloc_pg_vec(struct netlink_sock *nlk,
+ struct nl_mmap_req *req, unsigned int order)
+{
+ unsigned int block_nr = req->nm_block_nr;
+ unsigned int i;
+ void **pg_vec, *ptr;
+
+ pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
+ if (pg_vec == NULL)
+ return NULL;
+
+ for (i = 0; i < block_nr; i++) {
+ pg_vec[i] = ptr = alloc_one_pg_vec_page(order);
+ if (pg_vec[i] == NULL)
+ goto err1;
+ }
+
+ return pg_vec;
+err1:
+ free_pg_vec(pg_vec, order, block_nr);
+ return NULL;
+}
+
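+/* Install (or, when closing, tear down) an rx or tx ring. The block
+ * size must be a positive multiple of PAGE_SIZE, frames must be large
+ * enough for the frame header and aligned to NL_MMAP_MSG_ALIGNMENT,
+ * and nm_frame_nr must account for all frames in all blocks.
+ */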
+static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
+ bool closing, bool tx_ring)
+{
+ struct netlink_sock *nlk = nlk_sk(sk);
+ struct netlink_ring *ring;
+ struct sk_buff_head *queue;
+ void **pg_vec = NULL;
+ unsigned int order = 0;
+ int err;
+
+ ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
+ queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
+
+ if (!closing) {
+ if (atomic_read(&nlk->mapped))
+ return -EBUSY;
+ if (atomic_read(&ring->pending))
+ return -EBUSY;
+ }
+
+ if (req->nm_block_nr) {
+ if (ring->pg_vec != NULL)
+ return -EBUSY;
+
+ if ((int)req->nm_block_size <= 0)
+ return -EINVAL;
+ if (!IS_ALIGNED(req->nm_block_size, PAGE_SIZE))
+ return -EINVAL;
+ if (req->nm_frame_size < NL_MMAP_HDRLEN)
+ return -EINVAL;
+ if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
+ return -EINVAL;
+
+ ring->frames_per_block = req->nm_block_size /
+ req->nm_frame_size;
+ if (ring->frames_per_block == 0)
+ return -EINVAL;
+ if (ring->frames_per_block * req->nm_block_nr !=
+ req->nm_frame_nr)
+ return -EINVAL;
+
+ order = get_order(req->nm_block_size);
+ pg_vec = alloc_pg_vec(nlk, req, order);
+ if (pg_vec == NULL)
+ return -ENOMEM;
+ } else {
+ if (req->nm_frame_nr)
+ return -EINVAL;
+ }
+
+ err = -EBUSY;
+ mutex_lock(&nlk->pg_vec_lock);
+ if (closing || atomic_read(&nlk->mapped) == 0) {
+ err = 0;
+ spin_lock_bh(&queue->lock);
+
+ ring->frame_max = req->nm_frame_nr - 1;
+ ring->head = 0;
+ ring->frame_size = req->nm_frame_size;
+ ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
+
+ swap(ring->pg_vec_len, req->nm_block_nr);
+ swap(ring->pg_vec_order, order);
+ swap(ring->pg_vec, pg_vec);
+
+ __skb_queue_purge(queue);
+ spin_unlock_bh(&queue->lock);
+
+ WARN_ON(atomic_read(&nlk->mapped));
+ }
+ mutex_unlock(&nlk->pg_vec_lock);
+
+ if (pg_vec)
+ free_pg_vec(pg_vec, order, req->nm_block_nr);
+ return err;
+}
+
+static void netlink_mm_open(struct vm_area_struct *vma)
+{
+ struct file *file = vma->vm_file;
+ struct socket *sock = file->private_data;
+ struct sock *sk = sock->sk;
+
+ if (sk)
+ atomic_inc(&nlk_sk(sk)->mapped);
+}
+
+static void netlink_mm_close(struct vm_area_struct *vma)
+{
+ struct file *file = vma->vm_file;
+ struct socket *sock = file->private_data;
+ struct sock *sk = sock->sk;
+
+ if (sk)
+ atomic_dec(&nlk_sk(sk)->mapped);
+}
+
+static const struct vm_operations_struct netlink_mmap_ops = {
+ .open = netlink_mm_open,
+ .close = netlink_mm_close,
+};
+
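+/* Map the configured rings into one contiguous VMA, rx ring first;
+ * the VMA size must match the configured rings exactly.
+ */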
+static int netlink_mmap(struct file *file, struct socket *sock,
+ struct vm_area_struct *vma)
+{
+ struct sock *sk = sock->sk;
+ struct netlink_sock *nlk = nlk_sk(sk);
+ struct netlink_ring *ring;
+ unsigned long start, size, expected;
+ unsigned int i;
+ int err = -EINVAL;
+
+ if (vma->vm_pgoff)
+ return -EINVAL;
+
+ mutex_lock(&nlk->pg_vec_lock);
+
+ expected = 0;
+ for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
+ if (ring->pg_vec == NULL)
+ continue;
+ expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
+ }
+
+ if (expected == 0)
+ goto out;
+
+ size = vma->vm_end - vma->vm_start;
+ if (size != expected)
+ goto out;
+
+ start = vma->vm_start;
+ for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
+ if (ring->pg_vec == NULL)
+ continue;
+
+ for (i = 0; i < ring->pg_vec_len; i++) {
+ struct page *page;
+ void *kaddr = ring->pg_vec[i];
+ unsigned int pg_num;
+
+ for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
+ page = pgvec_to_page(kaddr);
+ err = vm_insert_page(vma, start, page);
+ if (err < 0)
+ goto out;
+ start += PAGE_SIZE;
+ kaddr += PAGE_SIZE;
+ }
+ }
+ }
+
+ atomic_inc(&nlk->mapped);
+ vma->vm_ops = &netlink_mmap_ops;
+ err = 0;
+out:
+ mutex_unlock(&nlk->pg_vec_lock);
+ return err;
+}
+
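+/* Frame ownership is passed between kernel and userspace through
+ * nm_status: it is read behind a read barrier and written before a
+ * write barrier, with an explicit D-cache flush of the header page
+ * for non-coherent architectures.
+ */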
+static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
+{
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
+ struct page *p_start, *p_end;
+
+ /* First page is flushed through netlink_{get,set}_status */
+ p_start = pgvec_to_page(hdr + PAGE_SIZE);
+ p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
+ while (p_start <= p_end) {
+ flush_dcache_page(p_start);
+ p_start++;
+ }
+#endif
+}
+
+static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
+{
+ smp_rmb();
+ flush_dcache_page(pgvec_to_page(hdr));
+ return hdr->nm_status;
+}
+
+static void netlink_set_status(struct nl_mmap_hdr *hdr,
+ enum nl_mmap_status status)
+{
+ hdr->nm_status = status;
+ flush_dcache_page(pgvec_to_page(hdr));
+ smp_wmb();
+}
+
+static struct nl_mmap_hdr *
+__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
+{
+ unsigned int pg_vec_pos, frame_off;
+
+ pg_vec_pos = pos / ring->frames_per_block;
+ frame_off = pos % ring->frames_per_block;
+
+ return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
+}
+
+static struct nl_mmap_hdr *
+netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
+ enum nl_mmap_status status)
+{
+ struct nl_mmap_hdr *hdr;
+
+ hdr = __netlink_lookup_frame(ring, pos);
+ if (netlink_get_status(hdr) != status)
+ return NULL;
+
+ return hdr;
+}
+
+static struct nl_mmap_hdr *
+netlink_current_frame(const struct netlink_ring *ring,
+ enum nl_mmap_status status)
+{
+ return netlink_lookup_frame(ring, ring->head, status);
+}
+
+static struct nl_mmap_hdr *
+netlink_previous_frame(const struct netlink_ring *ring,
+ enum nl_mmap_status status)
+{
+ unsigned int prev;
+
+ prev = ring->head ? ring->head - 1 : ring->frame_max;
+ return netlink_lookup_frame(ring, prev, status);
+}
+
+static void netlink_increment_head(struct netlink_ring *ring)
+{
+ ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
+}
+
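+/* Advance head past frames userspace marked SKIP, stopping at the
+ * first frame in any other state or after one full lap of the ring.
+ */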
+static void netlink_forward_ring(struct netlink_ring *ring)
+{
+ unsigned int head = ring->head, pos = head;
+ const struct nl_mmap_hdr *hdr;
+
+ do {
+ hdr = __netlink_lookup_frame(ring, pos);
+ if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
+ break;
+ if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
+ break;
+ netlink_increment_head(ring);
+ } while (ring->head != head);
+}
+
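+/* A dump may continue only while at least half the rx ring is unused:
+ * both the current frame and the frame half a ring ahead must be
+ * unused.
+ */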
+static bool netlink_dump_space(struct netlink_sock *nlk)
+{
+ struct netlink_ring *ring = &nlk->rx_ring;
+ struct nl_mmap_hdr *hdr;
+ unsigned int n;
+
+ hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
+ if (hdr == NULL)
+ return false;
+
+ n = ring->head + ring->frame_max / 2;
+ if (n > ring->frame_max)
+ n -= ring->frame_max;
+
+ hdr = __netlink_lookup_frame(ring, n);
+
+ return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
+}
+
+static unsigned int netlink_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ struct netlink_sock *nlk = nlk_sk(sk);
+ unsigned int mask;
+ int err;
+
+ if (nlk->rx_ring.pg_vec != NULL) {
+ /* Memory mapped sockets don't call recvmsg(), so flow control
+ * for dumps is performed here. A dump is allowed to continue
+ * if at least half the ring is unused.
+ */
+ while (nlk->cb != NULL && netlink_dump_space(nlk)) {
+ err = netlink_dump(sk);
+ if (err < 0) {
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
+ break;
+ }
+ }
+ netlink_rcv_wake(sk);
+ }
+
+ mask = datagram_poll(file, sock, wait);
+
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ if (nlk->rx_ring.pg_vec) {
+ netlink_forward_ring(&nlk->rx_ring);
+ if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
+ mask |= POLLIN | POLLRDNORM;
+ }
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+
+ spin_lock_bh(&sk->sk_write_queue.lock);
+ if (nlk->tx_ring.pg_vec) {
+ if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
+ mask |= POLLOUT | POLLWRNORM;
+ }
+ spin_unlock_bh(&sk->sk_write_queue.lock);
+
+ return mask;
+}
+
+static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
+{
+ return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
+}
+
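+/* Attach an skb allocated with alloc_skb_head() to the data area of a
+ * ring frame; the destructor returns the frame to the ring when the
+ * skb is freed.
+ */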
+static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
+ struct netlink_ring *ring,
+ struct nl_mmap_hdr *hdr)
+{
+ unsigned int size;
+ void *data;
+
+ size = ring->frame_size - NL_MMAP_HDRLEN;
+ data = (void *)hdr + NL_MMAP_HDRLEN;
+
+ skb->head = data;
+ skb->data = data;
+ skb_reset_tail_pointer(skb);
+ skb->end = skb->tail + size;
+ skb->len = 0;
+
+ skb->destructor = netlink_skb_destructor;
+ NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
+ NETLINK_CB(skb).sk = sk;
+}
+
+static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
+ u32 dst_portid, u32 dst_group,
+ struct sock_iocb *siocb)
+{
+ struct netlink_sock *nlk = nlk_sk(sk);
+ struct netlink_ring *ring;
+ struct nl_mmap_hdr *hdr;
+ struct sk_buff *skb;
+ unsigned int maxlen;
+ bool excl = true;
+ int err = 0, len = 0;
+
+ /* Netlink messages are validated by the receiver before processing.
+ * In order to avoid userspace changing the contents of the message
+ * after validation, the socket and the ring may only be used by a
+ * single process, otherwise we fall back to copying.
+ */
+ if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
+ atomic_read(&nlk->mapped) > 1)
+ excl = false;
+
+ mutex_lock(&nlk->pg_vec_lock);
+
+ ring = &nlk->tx_ring;
+ maxlen = ring->frame_size - NL_MMAP_HDRLEN;
+
+ do {
+ hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
+ if (hdr == NULL) {
+ if (!(msg->msg_flags & MSG_DONTWAIT) &&
+ atomic_read(&nlk->tx_ring.pending))
+ schedule();
+ continue;
+ }
+ if (hdr->nm_len > maxlen) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ netlink_frame_flush_dcache(hdr);
+
+ if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
+ skb = alloc_skb_head(GFP_KERNEL);
+ if (skb == NULL) {
+ err = -ENOBUFS;
+ goto out;
+ }
+ sock_hold(sk);
+ netlink_ring_setup_skb(skb, sk, ring, hdr);
+ NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
+ __skb_put(skb, hdr->nm_len);
+ netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
+ atomic_inc(&ring->pending);
+ } else {
+ skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
+ if (skb == NULL) {
+ err = -ENOBUFS;
+ goto out;
+ }
+ __skb_put(skb, hdr->nm_len);
+ memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
+ netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
+ }
+
+ netlink_increment_head(ring);
+
+ NETLINK_CB(skb).portid = nlk->portid;
+ NETLINK_CB(skb).dst_group = dst_group;
+ NETLINK_CB(skb).creds = siocb->scm->creds;
+
+ err = security_netlink_send(sk, skb);
+ if (err) {
+ kfree_skb(skb);
+ goto out;
+ }
+
+ if (unlikely(dst_group)) {
+ atomic_inc(&skb->users);
+ netlink_broadcast(sk, skb, dst_portid, dst_group,
+ GFP_KERNEL);
+ }
+ err = netlink_unicast(sk, skb, dst_portid,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (err < 0)
+ goto out;
+ len += err;
+
+ } while (hdr != NULL ||
+ (!(msg->msg_flags & MSG_DONTWAIT) &&
+ atomic_read(&nlk->tx_ring.pending)));
+
+ if (len > 0)
+ err = len;
+out:
+ mutex_unlock(&nlk->pg_vec_lock);
+ return err;
+}
+
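+/* Complete delivery of an skb whose data already lives in an rx ring
+ * frame: fill in the frame header and pass ownership to userspace by
+ * setting the status to VALID.
+ */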
+static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
+{
+ struct nl_mmap_hdr *hdr;
+
+ hdr = netlink_mmap_hdr(skb);
+ hdr->nm_len = skb->len;
+ hdr->nm_group = NETLINK_CB(skb).dst_group;
+ hdr->nm_pid = NETLINK_CB(skb).creds.pid;
+ hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
+ hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
+ netlink_frame_flush_dcache(hdr);
+ netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
+
+ NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
+ kfree_skb(skb);
+}
+
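+/* Fallback for skbs not backed by the ring: reserve a frame, queue the
+ * skb and mark the frame COPY so userspace fetches the message through
+ * recvmsg().
+ */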
+static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
+{
+ struct netlink_sock *nlk = nlk_sk(sk);
+ struct netlink_ring *ring = &nlk->rx_ring;
+ struct nl_mmap_hdr *hdr;
+
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
+ if (hdr == NULL) {
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+ kfree_skb(skb);
+ netlink_overrun(sk);
+ return;
+ }
+ netlink_increment_head(ring);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+
+ hdr->nm_len = skb->len;
+ hdr->nm_group = NETLINK_CB(skb).dst_group;
+ hdr->nm_pid = NETLINK_CB(skb).creds.pid;
+ hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
+ hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
+ netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
+}
+
+#else /* CONFIG_NETLINK_MMAP */
+#define netlink_skb_is_mmaped(skb) false
+#define netlink_rx_is_mmaped(sk) false
+#define netlink_tx_is_mmaped(sk) false
+#define netlink_mmap sock_no_mmap
+#define netlink_poll datagram_poll
+#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb) 0
+#endif /* CONFIG_NETLINK_MMAP */
+
static void netlink_destroy_callback(struct netlink_callback *cb)
{
kfree_skb(cb->skb);
@@ -164,6 +716,53 @@ static void netlink_consume_callback(struct netlink_callback *cb)
kfree(cb);
}
+static void netlink_skb_destructor(struct sk_buff *skb)
+{
+#ifdef CONFIG_NETLINK_MMAP
+ struct nl_mmap_hdr *hdr;
+ struct netlink_ring *ring;
+ struct sock *sk;
+
+ /* If a packet from the kernel to userspace was freed because of an
+ * error without being delivered to userspace, the kernel must reset
+ * the status. In the direction userspace to kernel, the status is
+ * always reset here after the packet was processed and freed.
+ */
+ if (netlink_skb_is_mmaped(skb)) {
+ hdr = netlink_mmap_hdr(skb);
+ sk = NETLINK_CB(skb).sk;
+
+ if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
+ netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
+ ring = &nlk_sk(sk)->tx_ring;
+ } else {
+ if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
+ hdr->nm_len = 0;
+ netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
+ }
+ ring = &nlk_sk(sk)->rx_ring;
+ }
+
+ WARN_ON(atomic_read(&ring->pending) == 0);
+ atomic_dec(&ring->pending);
+ sock_put(sk);
+
+ skb->head = NULL;
+ }
+#endif
+ if (skb->sk != NULL)
+ sock_rfree(skb);
+}
+
+static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+{
+ WARN_ON(skb->sk != NULL);
+ skb->sk = sk;
+ skb->destructor = netlink_skb_destructor;
+ atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+ sk_mem_charge(sk, skb->truesize);
+}
+
static void netlink_sock_destruct(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
@@ -177,6 +776,18 @@ static void netlink_sock_destruct(struct sock *sk)
}
skb_queue_purge(&sk->sk_receive_queue);
+#ifdef CONFIG_NETLINK_MMAP
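+ /* "if (1)" opens a scope for req; tear down any rings still installed */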
+ if (1) {
+ struct nl_mmap_req req;
+
+ memset(&req, 0, sizeof(req));
+ if (nlk->rx_ring.pg_vec)
+ netlink_set_ring(sk, &req, true, false);
+ memset(&req, 0, sizeof(req));
+ if (nlk->tx_ring.pg_vec)
+ netlink_set_ring(sk, &req, true, true);
+ }
+#endif /* CONFIG_NETLINK_MMAP */
if (!sock_flag(sk, SOCK_DEAD)) {
printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
@@ -440,6 +1051,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
mutex_init(nlk->cb_mutex);
}
init_waitqueue_head(&nlk->wait);
+#ifdef CONFIG_NETLINK_MMAP
+ mutex_init(&nlk->pg_vec_lock);
+#endif
sk->sk_destruct = netlink_sock_destruct;
sk->sk_protocol = protocol;
@@ -771,19 +1385,6 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
return 0;
}
-static void netlink_overrun(struct sock *sk)
-{
- struct netlink_sock *nlk = nlk_sk(sk);
-
- if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
- if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
- sk->sk_err = ENOBUFS;
- sk->sk_error_report(sk);
- }
- }
- atomic_inc(&sk->sk_drops);
-}
-
static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
struct sock *sock;
@@ -836,8 +1437,9 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
nlk = nlk_sk(sk);
- if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
- test_bit(0, &nlk->state)) {
+ if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+ test_bit(NETLINK_CONGESTED, &nlk->state)) &&
+ !netlink_skb_is_mmaped(skb)) {
DECLARE_WAITQUEUE(wait, current);
if (!*timeo) {
if (!ssk || netlink_is_kernel(ssk))
@@ -851,7 +1453,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
add_wait_queue(&nlk->wait, &wait);
if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
- test_bit(0, &nlk->state)) &&
+ test_bit(NETLINK_CONGESTED, &nlk->state)) &&
!sock_flag(sk, SOCK_DEAD))
*timeo = schedule_timeout(*timeo);
@@ -865,7 +1467,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
}
return 1;
}
- skb_set_owner_r(skb, sk);
+ netlink_skb_set_owner_r(skb, sk);
return 0;
}
@@ -873,7 +1475,14 @@ static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
int len = skb->len;
- skb_queue_tail(&sk->sk_receive_queue, skb);
+#ifdef CONFIG_NETLINK_MMAP
+ if (netlink_skb_is_mmaped(skb))
+ netlink_queue_mmaped_skb(sk, skb);
+ else if (netlink_rx_is_mmaped(sk))
+ netlink_ring_set_copied(sk, skb);
+ else
+#endif /* CONFIG_NETLINK_MMAP */
+ skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, len);
return len;
}
@@ -896,7 +1505,9 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
int delta;
- skb_orphan(skb);
+ WARN_ON(skb->sk != NULL);
+ if (netlink_skb_is_mmaped(skb))
+ return skb;
delta = skb->end - skb->tail;
if (delta * 2 < skb->truesize)
@@ -916,16 +1527,6 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
return skb;
}
-static void netlink_rcv_wake(struct sock *sk)
-{
- struct netlink_sock *nlk = nlk_sk(sk);
-
- if (skb_queue_empty(&sk->sk_receive_queue))
- clear_bit(0, &nlk->state);
- if (!test_bit(0, &nlk->state))
- wake_up_interruptible(&nlk->wait);
-}
-
static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
struct sock *ssk)
{
@@ -935,8 +1536,8 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
ret = -ECONNREFUSED;
if (nlk->netlink_rcv != NULL) {
ret = skb->len;
- skb_set_owner_r(skb, sk);
- NETLINK_CB(skb).ssk = ssk;
+ netlink_skb_set_owner_r(skb, sk);
+ NETLINK_CB(skb).sk = ssk;
nlk->netlink_rcv(skb);
consume_skb(skb);
} else {
@@ -982,6 +1583,69 @@ retry:
}
EXPORT_SYMBOL(netlink_unicast);
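+/* Allocate an skb for a kernel->userspace message. If the receiver has
+ * an rx ring mapped and the message fits into a frame, reserve a frame
+ * and point the skb at it; otherwise fall back to a normal allocation.
+ */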
+struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
+ u32 dst_portid, gfp_t gfp_mask)
+{
+#ifdef CONFIG_NETLINK_MMAP
+ struct sock *sk = NULL;
+ struct sk_buff *skb;
+ struct netlink_ring *ring;
+ struct nl_mmap_hdr *hdr;
+ unsigned int maxlen;
+
+ sk = netlink_getsockbyportid(ssk, dst_portid);
+ if (IS_ERR(sk))
+ goto out;
+
+ ring = &nlk_sk(sk)->rx_ring;
+ /* fast-path without atomic ops for common case: non-mmaped receiver */
+ if (ring->pg_vec == NULL)
+ goto out_put;
+
+ skb = alloc_skb_head(gfp_mask);
+ if (skb == NULL)
+ goto err1;
+
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ /* check again under lock */
+ if (ring->pg_vec == NULL)
+ goto out_free;
+
+ maxlen = ring->frame_size - NL_MMAP_HDRLEN;
+ if (maxlen < size)
+ goto out_free;
+
+ netlink_forward_ring(ring);
+ hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
+ if (hdr == NULL)
+ goto err2;
+ netlink_ring_setup_skb(skb, sk, ring, hdr);
+ netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
+ atomic_inc(&ring->pending);
+ netlink_increment_head(ring);
+
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+ return skb;
+
+err2:
+ kfree_skb(skb);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+ netlink_overrun(sk);
+err1:
+ sock_put(sk);
+ return NULL;
+
+out_free:
+ kfree_skb(skb);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+out_put:
+ sock_put(sk);
+out:
+#endif
+ return alloc_skb(size, gfp_mask);
+}
+EXPORT_SYMBOL_GPL(netlink_alloc_skb);
+
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
int res = 0;
@@ -1006,8 +1670,8 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
struct netlink_sock *nlk = nlk_sk(sk);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
- !test_bit(0, &nlk->state)) {
- skb_set_owner_r(skb, sk);
+ !test_bit(NETLINK_CONGESTED, &nlk->state)) {
+ netlink_skb_set_owner_r(skb, sk);
__netlink_sendskb(sk, skb);
return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
}
@@ -1242,7 +1906,8 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
if (level != SOL_NETLINK)
return -ENOPROTOOPT;
- if (optlen >= sizeof(int) &&
+ if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
+ optlen >= sizeof(int) &&
get_user(val, (unsigned int __user *)optval))
return -EFAULT;
@@ -1284,13 +1949,32 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
case NETLINK_NO_ENOBUFS:
if (val) {
nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
- clear_bit(0, &nlk->state);
+ clear_bit(NETLINK_CONGESTED, &nlk->state);
wake_up_interruptible(&nlk->wait);
} else {
nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
}
err = 0;
break;
+#ifdef CONFIG_NETLINK_MMAP
+ case NETLINK_RX_RING:
+ case NETLINK_TX_RING: {
+ struct nl_mmap_req req;
+
+ /* Rings might consume more memory than queue limits, so require
+ * CAP_NET_ADMIN.
+ */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (optlen < sizeof(req))
+ return -EINVAL;
+ if (copy_from_user(&req, optval, sizeof(req)))
+ return -EFAULT;
+ err = netlink_set_ring(sk, &req, false,
+ optname == NETLINK_TX_RING);
+ break;
+ }
+#endif /* CONFIG_NETLINK_MMAP */
default:
err = -ENOPROTOOPT;
}
@@ -1401,6 +2085,13 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto out;
}
+ if (netlink_tx_is_mmaped(sk) &&
+ msg->msg_iov->iov_base == NULL) {
+ err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
+ siocb);
+ goto out;
+ }
+
err = -EMSGSIZE;
if (len > sk->sk_sndbuf - 32)
goto out;
@@ -1695,7 +2386,7 @@ struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{
struct nlmsghdr *nlh;
- int size = NLMSG_LENGTH(len);
+ int size = nlmsg_msg_size(len);
nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
nlh->nlmsg_type = type;
@@ -1704,7 +2395,7 @@ __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int fla
nlh->nlmsg_pid = portid;
nlh->nlmsg_seq = seq;
if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
- memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
+ memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);
@@ -1733,9 +2424,13 @@ static int netlink_dump(struct sock *sk)
alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
- skb = sock_rmalloc(sk, alloc_size, 0, GFP_KERNEL);
+ if (!netlink_rx_is_mmaped(sk) &&
+ atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
+ goto errout_skb;
+ skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL);
if (!skb)
goto errout_skb;
+ netlink_skb_set_owner_r(skb, sk);
len = cb->dump(skb, cb);
@@ -1790,13 +2485,25 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
if (cb == NULL)
return -ENOBUFS;
+ /* Memory mapped dump requests need to be copied to avoid looping
+ * on the pending state in netlink_mmap_sendmsg() while the CB holds
+ * a reference to the skb.
+ */
+ if (netlink_skb_is_mmaped(skb)) {
+ skb = skb_copy(skb, GFP_KERNEL);
+ if (skb == NULL) {
+ kfree(cb);
+ return -ENOBUFS;
+ }
+ } else
+ atomic_inc(&skb->users);
+
cb->dump = control->dump;
cb->done = control->done;
cb->nlh = nlh;
cb->data = control->data;
cb->module = control->module;
cb->min_dump_alloc = control->min_dump_alloc;
- atomic_inc(&skb->users);
cb->skb = skb;
sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
@@ -1850,7 +2557,8 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
if (err)
payload += nlmsg_len(nlh);
- skb = nlmsg_new(payload, GFP_KERNEL);
+ skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
+ NETLINK_CB(in_skb).portid, GFP_KERNEL);
if (!skb) {
struct sock *sk;
@@ -2116,7 +2824,7 @@ static const struct proto_ops netlink_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = netlink_getname,
- .poll = datagram_poll,
+ .poll = netlink_poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
@@ -2124,7 +2832,7 @@ static const struct proto_ops netlink_ops = {
.getsockopt = netlink_getsockopt,
.sendmsg = netlink_sendmsg,
.recvmsg = netlink_recvmsg,
- .mmap = sock_no_mmap,
+ .mmap = netlink_mmap,
.sendpage = sock_no_sendpage,
};
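
For reference, a minimal userspace sketch of the ring setup and receive loop this file now supports. It assumes the nl_mmap_req/nl_mmap_hdr layout and the NETLINK_RX_RING/NETLINK_TX_RING socket options added to <linux/netlink.h> elsewhere in this patch series; the frame geometry is made up for illustration, and error handling and the memory barriers a real consumer needs around nm_status are omitted:

#include <sys/socket.h>
#include <sys/mman.h>
#include <poll.h>
#include <unistd.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

int main(void)
{
	/* hypothetical geometry: 64 one-page blocks, two frames per block */
	struct nl_mmap_req req = {
		.nm_block_size	= 4096,
		.nm_block_nr	= 64,
		.nm_frame_size	= 2048,
		.nm_frame_nr	= 2 * 64,	/* frames_per_block * nm_block_nr */
	};
	unsigned int ring_size = req.nm_block_size * req.nm_block_nr;
	unsigned int frame = 0;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	char *rx_ring;

	/* configure both rings before mapping; requires CAP_NET_ADMIN */
	setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
	setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req));

	/* one VMA covering the rx ring followed by the tx ring; the size
	 * must match the configured rings exactly (see netlink_mmap()) */
	rx_ring = mmap(NULL, 2 * ring_size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);

	for (;;) {
		/* frame addressing is contiguous here because nm_block_size
		 * is an exact multiple of nm_frame_size */
		struct nl_mmap_hdr *hdr = (struct nl_mmap_hdr *)
			(rx_ring + frame * req.nm_frame_size);
		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		if (hdr->nm_status != NL_MMAP_STATUS_VALID &&
		    hdr->nm_status != NL_MMAP_STATUS_COPY) {
			poll(&pfd, 1, -1);
			continue;
		}
		if (hdr->nm_status == NL_MMAP_STATUS_COPY) {
			/* message didn't fit a frame: the skb was queued,
			 * fetch it through the normal receive path */
			char buf[8192];
			recv(fd, buf, sizeof(buf), 0);
		} else {
			/* payload follows the frame header */
			char *msg = (char *)hdr + NL_MMAP_HDRLEN;
			/* ... process hdr->nm_len bytes at msg ... */
			(void)msg;
		}
		/* hand the frame back to the kernel and advance */
		hdr->nm_status = NL_MMAP_STATUS_UNUSED;
		frame = (frame + 1) % req.nm_frame_nr;
	}
}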