Diffstat (limited to 'net/core/sock.c')
-rw-r--r--	net/core/sock.c	151
1 file changed, 134 insertions, 17 deletions
diff --git a/net/core/sock.c b/net/core/sock.c
index 7a0567b4b2c9..54c836a2216b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -372,7 +372,7 @@ static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
 {
 	int ret = -ENOPROTOOPT;
 #ifdef CONFIG_NETDEVICES
-	struct net *net = sk->sk_net;
+	struct net *net = sock_net(sk);
 	char devname[IFNAMSIZ];
 	int index;
 
@@ -958,7 +958,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
 		 */
 		sk->sk_prot = sk->sk_prot_creator = prot;
 		sock_lock_init(sk);
-		sk->sk_net = get_net(net);
+		sock_net_set(sk, get_net(net));
 	}
 
 	return sk;
@@ -981,12 +981,32 @@ void sk_free(struct sock *sk)
 
 	if (atomic_read(&sk->sk_omem_alloc))
 		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
-		       __FUNCTION__, atomic_read(&sk->sk_omem_alloc));
+		       __func__, atomic_read(&sk->sk_omem_alloc));
 
-	put_net(sk->sk_net);
+	put_net(sock_net(sk));
 	sk_prot_free(sk->sk_prot_creator, sk);
 }
 
+/*
+ * Last sock_put should drop referrence to sk->sk_net. It has already
+ * been dropped in sk_change_net. Taking referrence to stopping namespace
+ * is not an option.
+ * Take referrence to a socket to remove it from hash _alive_ and after that
+ * destroy it in the context of init_net.
+ */
+void sk_release_kernel(struct sock *sk)
+{
+	if (sk == NULL || sk->sk_socket == NULL)
+		return;
+
+	sock_hold(sk);
+	sock_release(sk->sk_socket);
+	release_net(sock_net(sk));
+	sock_net_set(sk, get_net(&init_net));
+	sock_put(sk);
+}
+EXPORT_SYMBOL(sk_release_kernel);
+
 struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 {
 	struct sock *newsk;
@@ -998,7 +1018,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 		sock_copy(newsk, sk);
 
 		/* SANITY */
-		get_net(newsk->sk_net);
+		get_net(sock_net(newsk));
 		sk_node_init(&newsk->sk_node);
 		sock_lock_init(newsk);
 		bh_lock_sock(newsk);
@@ -1076,10 +1096,12 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 	if (sk->sk_route_caps & NETIF_F_GSO)
 		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
 	if (sk_can_gso(sk)) {
-		if (dst->header_len)
+		if (dst->header_len) {
 			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
-		else
+		} else {
 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
+			sk->sk_gso_max_size = dst->dev->gso_max_size;
+		}
 	}
 }
 EXPORT_SYMBOL_GPL(sk_setup_caps);
@@ -1919,16 +1941,113 @@ EXPORT_SYMBOL(sk_common_release);
 static DEFINE_RWLOCK(proto_list_lock);
 static LIST_HEAD(proto_list);
 
+#ifdef CONFIG_PROC_FS
+#define PROTO_INUSE_NR	64	/* should be enough for the first time */
+struct prot_inuse {
+	int val[PROTO_INUSE_NR];
+};
+
+static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
+
+#ifdef CONFIG_NET_NS
+void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
+{
+	int cpu = smp_processor_id();
+	per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val;
+}
+EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
+
+int sock_prot_inuse_get(struct net *net, struct proto *prot)
+{
+	int cpu, idx = prot->inuse_idx;
+	int res = 0;
+
+	for_each_possible_cpu(cpu)
+		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
+
+	return res >= 0 ? res : 0;
+}
+EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
+
+static int sock_inuse_init_net(struct net *net)
+{
+	net->core.inuse = alloc_percpu(struct prot_inuse);
+	return net->core.inuse ? 0 : -ENOMEM;
+}
+
+static void sock_inuse_exit_net(struct net *net)
+{
+	free_percpu(net->core.inuse);
+}
+
+static struct pernet_operations net_inuse_ops = {
+	.init = sock_inuse_init_net,
+	.exit = sock_inuse_exit_net,
+};
+
+static __init int net_inuse_init(void)
+{
+	if (register_pernet_subsys(&net_inuse_ops))
+		panic("Cannot initialize net inuse counters");
+
+	return 0;
+}
+
+core_initcall(net_inuse_init);
+#else
+static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
+
+void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
+{
+	__get_cpu_var(prot_inuse).val[prot->inuse_idx] += val;
+}
+EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
+
+int sock_prot_inuse_get(struct net *net, struct proto *prot)
+{
+	int cpu, idx = prot->inuse_idx;
+	int res = 0;
+
+	for_each_possible_cpu(cpu)
+		res += per_cpu(prot_inuse, cpu).val[idx];
+
+	return res >= 0 ? res : 0;
+}
+EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
+#endif
+
+static void assign_proto_idx(struct proto *prot)
+{
+	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
+
+	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
+		printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
+		return;
+	}
+
+	set_bit(prot->inuse_idx, proto_inuse_idx);
+}
+
+static void release_proto_idx(struct proto *prot)
+{
+	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
+		clear_bit(prot->inuse_idx, proto_inuse_idx);
+}
+#else
+static inline void assign_proto_idx(struct proto *prot)
+{
+}
+
+static inline void release_proto_idx(struct proto *prot)
+{
+}
+#endif
+
 int proto_register(struct proto *prot, int alloc_slab)
 {
 	char *request_sock_slab_name = NULL;
 	char *timewait_sock_slab_name;
 
-	if (sock_prot_inuse_init(prot) != 0) {
-		printk(KERN_CRIT "%s: Can't alloc inuse counters!\n", prot->name);
-		goto out;
-	}
-
 	if (alloc_slab) {
 		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
 					SLAB_HWCACHE_ALIGN, NULL);
@@ -1936,7 +2055,7 @@ int proto_register(struct proto *prot, int alloc_slab)
 		if (prot->slab == NULL) {
 			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
 			       prot->name);
-			goto out_free_inuse;
+			goto out;
 		}
 
 		if (prot->rsk_prot != NULL) {
@@ -1979,6 +2098,7 @@ int proto_register(struct proto *prot, int alloc_slab)
 
 	write_lock(&proto_list_lock);
 	list_add(&prot->node, &proto_list);
+	assign_proto_idx(prot);
 	write_unlock(&proto_list_lock);
 	return 0;
 
@@ -1994,8 +2114,6 @@ out_free_request_sock_slab_name:
 out_free_sock_slab:
 	kmem_cache_destroy(prot->slab);
 	prot->slab = NULL;
-out_free_inuse:
-	sock_prot_inuse_free(prot);
 out:
 	return -ENOBUFS;
 }
@@ -2005,11 +2123,10 @@ EXPORT_SYMBOL(proto_register);
 void proto_unregister(struct proto *prot)
 {
 	write_lock(&proto_list_lock);
+	release_proto_idx(prot);
 	list_del(&prot->node);
 	write_unlock(&proto_list_lock);
 
-	sock_prot_inuse_free(prot);
-
 	if (prot->slab != NULL) {
 		kmem_cache_destroy(prot->slab);
 		prot->slab = NULL;
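
Editorial note: the new sk_release_kernel() lets a kernel-internal socket whose namespace is being dismantled be unhashed while still "alive" and then finish its destruction against init_net, so the dying namespace is never pinned by the socket. The sketch below shows the intended call pattern under stated assumptions: sock_create_kern() is written in its 2.6.25-era four-argument form, sk_change_net() is the companion net/sock.h helper referred to by the comment in the diff (it is not part of this file), and all my_* names are hypothetical. This is a sketch, not the in-tree usage.

/* Illustrative only: a per-namespace kernel socket that must not pin its
 * namespace.  sk_change_net() and the my_* names are assumptions made for
 * this sketch; sk_release_kernel() comes from the diff above.
 */
#include <linux/in.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/net_namespace.h>

static struct socket *my_sock;	/* a real user would keep this per namespace */

static int my_pernet_init(struct net *net)
{
	int err;

	err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, &my_sock);
	if (err)
		return err;

	/* Re-home the socket: drop the init_net reference taken in sk_alloc()
	 * and point sk_net at "net" without a full get_net(), so the socket
	 * does not keep the namespace alive.
	 */
	sk_change_net(my_sock->sk, net);
	return 0;
}

static void my_pernet_exit(struct net *net)
{
	/* Unhash the still-"alive" socket, then let its destruction complete
	 * against init_net, exactly as sk_release_kernel() above arranges.
	 */
	sk_release_kernel(my_sock->sk);
}

The key point is that the socket's namespace reference is dropped before the namespace is gone, while the final sock_put() and protocol teardown run against init_net.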
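The other large block replaces the old per-proto inuse allocation (sock_prot_inuse_init()/sock_prot_inuse_free()) with one per-namespace percpu array indexed by prot->inuse_idx. A minimal sketch of how a protocol of that era would feed and read these counters follows; my_proto, my_hash()/my_unhash() and the /proc show routine are hypothetical, and only sock_prot_inuse_add(), sock_prot_inuse_get() and sock_net() come from the code above.

/* Illustrative only: a protocol keeping its per-namespace socket count. */
#include <linux/seq_file.h>
#include <net/sock.h>

static struct proto my_proto;	/* registered via proto_register() elsewhere */

/* Bump the namespace-local counter when a socket enters the hash table.
 * In-tree callers do this under the protocol hash lock, so preemption is
 * already disabled around the smp_processor_id() use in the diff above.
 */
static void my_hash(struct sock *sk)
{
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

/* ...and drop it again when the socket is unhashed. */
static void my_unhash(struct sock *sk)
{
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}

/* A /proc seq_file handler then reports the count for one namespace only. */
static int my_proc_show(struct seq_file *seq, void *v)
{
	struct net *net = seq->private;	/* assumes the open routine stashed it here */

	seq_printf(seq, "MYPROTO: inuse %d\n",
		   sock_prot_inuse_get(net, &my_proto));
	return 0;
}

Since proto_register() now assigns prot->inuse_idx from the shared bitmap, a protocol needs no counter setup of its own beyond registering.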