author      Arjan van de Ven <arjan@infradead.org>    2006-03-20 22:33:17 -0800
committer   David S. Miller <davem@davemloft.net>     2006-03-20 22:33:17 -0800
commit      4a3e2f711a00a1feb72ae12fdc749da10179d185
tree        76ced9d3270dea4b864da71fa1d4415d2e3c8b11
parent      d4ccd08cdfa8d34f4d25b62041343c52fc79385f
download    linux-4a3e2f711a00a1feb72ae12fdc749da10179d185.tar.bz2
[NET] sem2mutex: net/
Semaphore to mutex conversion.
The conversion was generated via scripts, and the result was validated
automatically via a script as well.
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
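The change is the same mechanical substitution in every file: a DECLARE_MUTEX()/down()/up() semaphore that is only ever used for mutual exclusion becomes a DEFINE_MUTEX()/mutex_lock()/mutex_unlock() struct mutex. A minimal sketch of the idiom follows; the demo_* identifiers are hypothetical and only illustrate the pattern, they are not code from the patch.

#include <linux/list.h>
#include <linux/mutex.h>

/* Hypothetical registry, used only to illustrate the sem2mutex idiom. */
struct demo_handler {
	struct list_head list;
};

/* Before this patch such a lock would be DECLARE_MUTEX(demo_sem), i.e. a
 * semaphore with count 1, taken with down() and released with up().
 * After the conversion it is a real struct mutex: no counting, must be
 * released by the owner, and with better debugging support. */
static DEFINE_MUTEX(demo_mutex);
static LIST_HEAD(demo_list);

void demo_register(struct demo_handler *h)
{
	mutex_lock(&demo_mutex);		/* was: down(&demo_sem); */
	list_add_tail(&h->list, &demo_list);
	mutex_unlock(&demo_mutex);		/* was: up(&demo_sem); */
}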
-rw-r--r--  include/net/xfrm.h              |  3
-rw-r--r--  net/atm/ioctl.c                 | 15
-rw-r--r--  net/bluetooth/rfcomm/core.c     |  8
-rw-r--r--  net/core/dev.c                  |  7
-rw-r--r--  net/core/flow.c                 |  7
-rw-r--r--  net/ipv4/ipcomp.c               | 17
-rw-r--r--  net/ipv4/netfilter/ip_queue.c   | 11
-rw-r--r--  net/ipv4/xfrm4_tunnel.c         | 11
-rw-r--r--  net/ipv6/ipcomp6.c              | 15
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c  | 11
-rw-r--r--  net/ipv6/xfrm6_tunnel.c         | 11
-rw-r--r--  net/key/af_key.c                |  4
-rw-r--r--  net/netfilter/nf_sockopt.c      | 25
-rw-r--r--  net/socket.c                    | 31
-rw-r--r--  net/sunrpc/cache.c              | 17
-rw-r--r--  net/sunrpc/sched.c              | 11
-rw-r--r--  net/unix/garbage.c              |  7
-rw-r--r--  net/xfrm/xfrm_policy.c          |  4
-rw-r--r--  net/xfrm/xfrm_user.c            |  4
19 files changed, 118 insertions, 101 deletions
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 156f52ef8a91..786371365f2b 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -11,6 +11,7 @@
 #include <linux/crypto.h>
 #include <linux/pfkeyv2.h>
 #include <linux/in6.h>
+#include <linux/mutex.h>
 #include <net/sock.h>
 #include <net/dst.h>
@@ -24,7 +25,7 @@ extern struct sock *xfrm_nl;
 extern u32 sysctl_xfrm_aevent_etime;
 extern u32 sysctl_xfrm_aevent_rseqth;
-extern struct semaphore xfrm_cfg_sem;
+extern struct mutex xfrm_cfg_mutex;
 /* Organization of SPD aka "XFRM rules"
    ------------------------------------
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index eb109af7eb4a..851cfa6312af 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -18,6 +18,7 @@
 #include <linux/atmmpc.h>
 #include <net/atmclip.h>
 #include <linux/atmlec.h>
+#include <linux/mutex.h>
 #include <asm/ioctls.h>
 #include "resources.h"
@@ -25,22 +26,22 @@
 #include "common.h"
-static DECLARE_MUTEX(ioctl_mutex);
+static DEFINE_MUTEX(ioctl_mutex);
 static LIST_HEAD(ioctl_list);
 void register_atm_ioctl(struct atm_ioctl *ioctl)
 {
-	down(&ioctl_mutex);
+	mutex_lock(&ioctl_mutex);
 	list_add_tail(&ioctl->list, &ioctl_list);
-	up(&ioctl_mutex);
+	mutex_unlock(&ioctl_mutex);
 }
 void deregister_atm_ioctl(struct atm_ioctl *ioctl)
 {
-	down(&ioctl_mutex);
+	mutex_lock(&ioctl_mutex);
 	list_del(&ioctl->list);
-	up(&ioctl_mutex);
+	mutex_unlock(&ioctl_mutex);
 }
 EXPORT_SYMBOL(register_atm_ioctl);
@@ -137,7 +138,7 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 	error = -ENOIOCTLCMD;
-	down(&ioctl_mutex);
+	mutex_lock(&ioctl_mutex);
 	list_for_each(pos, &ioctl_list) {
 		struct atm_ioctl * ic = list_entry(pos, struct atm_ioctl, list);
 		if (try_module_get(ic->owner)) {
@@ -147,7 +148,7 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 			break;
 		}
 	}
-	up(&ioctl_mutex);
+	mutex_unlock(&ioctl_mutex);
 	if (error != -ENOIOCTLCMD)
 		goto done;
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 5b4253c61f62..e99010ce8bb2 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -37,6 +37,8 @@
 #include <linux/wait.h>
 #include <linux/device.h>
 #include <linux/net.h>
+#include <linux/mutex.h>
+
 #include <net/sock.h>
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
@@ -57,9 +59,9 @@ static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
 static struct task_struct *rfcomm_thread;
-static DECLARE_MUTEX(rfcomm_sem);
-#define rfcomm_lock()	down(&rfcomm_sem);
-#define rfcomm_unlock()	up(&rfcomm_sem);
+static DEFINE_MUTEX(rfcomm_mutex);
+#define rfcomm_lock()	mutex_lock(&rfcomm_mutex)
+#define rfcomm_unlock()	mutex_unlock(&rfcomm_mutex)
 static unsigned long rfcomm_event;
diff --git a/net/core/dev.c b/net/core/dev.c
index ee044097f7f2..08dec6eb922b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -81,6 +81,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/mutex.h>
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/socket.h>
@@ -2931,7 +2932,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
 *	2) Since we run with the RTNL semaphore not held, we can sleep
 *	   safely in order to wait for the netdev refcnt to drop to zero.
 */
-static DECLARE_MUTEX(net_todo_run_mutex);
+static DEFINE_MUTEX(net_todo_run_mutex);
 void netdev_run_todo(void)
 {
 	struct list_head list = LIST_HEAD_INIT(list);
@@ -2939,7 +2940,7 @@ void netdev_run_todo(void)
 	/* Need to guard against multiple cpu's getting out of order.
 	 */
-	down(&net_todo_run_mutex);
+	mutex_lock(&net_todo_run_mutex);
 	/* Not safe to do outside the semaphore.  We must not return
 	 * until all unregister events invoked by the local processor
@@ -2996,7 +2997,7 @@ void netdev_run_todo(void)
 	}
 out:
-	up(&net_todo_run_mutex);
+	mutex_unlock(&net_todo_run_mutex);
 }
 /**
diff --git a/net/core/flow.c b/net/core/flow.c
index c4f25385029f..55789f832eda 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -20,6 +20,7 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/mutex.h>
 #include <net/flow.h>
 #include <asm/atomic.h>
 #include <asm/semaphore.h>
@@ -287,11 +288,11 @@ static void flow_cache_flush_per_cpu(void *data)
 void flow_cache_flush(void)
 {
 	struct flow_flush_info info;
-	static DECLARE_MUTEX(flow_flush_sem);
+	static DEFINE_MUTEX(flow_flush_sem);
 	/* Don't want cpus going down or up during this. */
 	lock_cpu_hotplug();
-	down(&flow_flush_sem);
+	mutex_lock(&flow_flush_sem);
 	atomic_set(&info.cpuleft, num_online_cpus());
 	init_completion(&info.completion);
@@ -301,7 +302,7 @@ void flow_cache_flush(void)
 	local_bh_enable();
 	wait_for_completion(&info.completion);
-	up(&flow_flush_sem);
+	mutex_unlock(&flow_flush_sem);
 	unlock_cpu_hotplug();
 }
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index d64e2ec8da7b..c95020f7c81e 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -24,6 +24,7 @@
 #include <linux/list.h>
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
+#include <linux/mutex.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/icmp.h>
@@ -36,7 +37,7 @@ struct ipcomp_tfms {
 	int users;
 };
-static DECLARE_MUTEX(ipcomp_resource_sem);
+static DEFINE_MUTEX(ipcomp_resource_mutex);
 static void **ipcomp_scratches;
 static int ipcomp_scratch_users;
 static LIST_HEAD(ipcomp_tfms_list);
@@ -253,7 +254,7 @@ error:
 }
 /*
- * Must be protected by xfrm_cfg_sem.  State and tunnel user references are
+ * Must be protected by xfrm_cfg_mutex.  State and tunnel user references are
  * always incremented on success.
  */
 static int ipcomp_tunnel_attach(struct xfrm_state *x)
@@ -411,9 +412,9 @@ static void ipcomp_destroy(struct xfrm_state *x)
 	if (!ipcd)
 		return;
 	xfrm_state_delete_tunnel(x);
-	down(&ipcomp_resource_sem);
+	mutex_lock(&ipcomp_resource_mutex);
 	ipcomp_free_data(ipcd);
-	up(&ipcomp_resource_sem);
+	mutex_unlock(&ipcomp_resource_mutex);
 	kfree(ipcd);
 }
@@ -440,14 +441,14 @@ static int ipcomp_init_state(struct xfrm_state *x)
 	if (x->props.mode)
 		x->props.header_len += sizeof(struct iphdr);
-	down(&ipcomp_resource_sem);
+	mutex_lock(&ipcomp_resource_mutex);
 	if (!ipcomp_alloc_scratches())
 		goto error;
 	ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
 	if (!ipcd->tfms)
 		goto error;
-	up(&ipcomp_resource_sem);
+	mutex_unlock(&ipcomp_resource_mutex);
 	if (x->props.mode) {
 		err = ipcomp_tunnel_attach(x);
@@ -464,10 +465,10 @@ out:
 	return err;
 error_tunnel:
-	down(&ipcomp_resource_sem);
+	mutex_lock(&ipcomp_resource_mutex);
 error:
 	ipcomp_free_data(ipcd);
-	up(&ipcomp_resource_sem);
+	mutex_unlock(&ipcomp_resource_mutex);
 	kfree(ipcd);
 	goto out;
 }
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 08f80e2ea2aa..1655866c55b9 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -35,6 +35,7 @@
 #include <linux/sysctl.h>
 #include <linux/proc_fs.h>
 #include <linux/security.h>
+#include <linux/mutex.h>
 #include <net/sock.h>
 #include <net/route.h>
@@ -61,7 +62,7 @@ static unsigned int queue_dropped = 0;
 static unsigned int queue_user_dropped = 0;
 static struct sock *ipqnl;
 static LIST_HEAD(queue_list);
-static DECLARE_MUTEX(ipqnl_sem);
+static DEFINE_MUTEX(ipqnl_mutex);
 static void
 ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
@@ -539,7 +540,7 @@ ipq_rcv_sk(struct sock *sk, int len)
 	struct sk_buff *skb;
 	unsigned int qlen;
-	down(&ipqnl_sem);
+	mutex_lock(&ipqnl_mutex);
 	for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
 		skb = skb_dequeue(&sk->sk_receive_queue);
@@ -547,7 +548,7 @@ ipq_rcv_sk(struct sock *sk, int len)
 		kfree_skb(skb);
 	}
-	up(&ipqnl_sem);
+	mutex_unlock(&ipqnl_mutex);
 }
 static int
@@ -708,8 +709,8 @@ cleanup_sysctl:
 cleanup_ipqnl:
 	sock_release(ipqnl->sk_socket);
-	down(&ipqnl_sem);
-	up(&ipqnl_sem);
+	mutex_lock(&ipqnl_mutex);
+	mutex_unlock(&ipqnl_mutex);
 cleanup_netlink_notifier:
 	netlink_unregister_notifier(&ipq_nl_notifier);
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index afbb0d4cc305..b08d56b117f8 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -5,6 +5,7 @@
 #include <linux/skbuff.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
 #include <net/protocol.h>
@@ -26,19 +27,19 @@ static int ipip_xfrm_rcv(struct xfrm_state *x, struct xfrm_decap_state *decap, s
 }
 static struct xfrm_tunnel *ipip_handler;
-static DECLARE_MUTEX(xfrm4_tunnel_sem);
+static DEFINE_MUTEX(xfrm4_tunnel_mutex);
 int xfrm4_tunnel_register(struct xfrm_tunnel *handler)
 {
 	int ret;
-	down(&xfrm4_tunnel_sem);
+	mutex_lock(&xfrm4_tunnel_mutex);
 	ret = 0;
 	if (ipip_handler != NULL)
 		ret = -EINVAL;
 	if (!ret)
 		ipip_handler = handler;
-	up(&xfrm4_tunnel_sem);
+	mutex_unlock(&xfrm4_tunnel_mutex);
 	return ret;
 }
@@ -49,13 +50,13 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler)
 {
 	int ret;
-	down(&xfrm4_tunnel_sem);
+	mutex_lock(&xfrm4_tunnel_mutex);
 	ret = 0;
 	if (ipip_handler != handler)
 		ret = -EINVAL;
 	if (!ret)
 		ipip_handler = NULL;
-	up(&xfrm4_tunnel_sem);
+	mutex_unlock(&xfrm4_tunnel_mutex);
 	synchronize_net();
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 6107592fbd8c..3c7b324cd20c 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -50,6 +50,7 @@
 #include <net/protocol.h>
 #include <linux/ipv6.h>
 #include <linux/icmpv6.h>
+#include <linux/mutex.h>
 struct ipcomp6_tfms {
 	struct list_head list;
@@ -57,7 +58,7 @@ struct ipcomp6_tfms {
 	int users;
 };
-static DECLARE_MUTEX(ipcomp6_resource_sem);
+static DEFINE_MUTEX(ipcomp6_resource_mutex);
 static void **ipcomp6_scratches;
 static int ipcomp6_scratch_users;
 static LIST_HEAD(ipcomp6_tfms_list);
@@ -405,9 +406,9 @@ static void ipcomp6_destroy(struct xfrm_state *x)
 	if (!ipcd)
 		return;
 	xfrm_state_delete_tunnel(x);
-	down(&ipcomp6_resource_sem);
+	mutex_lock(&ipcomp6_resource_mutex);
 	ipcomp6_free_data(ipcd);
-	up(&ipcomp6_resource_sem);
+	mutex_unlock(&ipcomp6_resource_mutex);
 	kfree(ipcd);
 	xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr);
@@ -436,14 +437,14 @@ static int ipcomp6_init_state(struct xfrm_state *x)
 	if (x->props.mode)
 		x->props.header_len += sizeof(struct ipv6hdr);
-	down(&ipcomp6_resource_sem);
+	mutex_lock(&ipcomp6_resource_mutex);
 	if (!ipcomp6_alloc_scratches())
 		goto error;
 	ipcd->tfms = ipcomp6_alloc_tfms(x->calg->alg_name);
 	if (!ipcd->tfms)
 		goto error;
-	up(&ipcomp6_resource_sem);
+	mutex_unlock(&ipcomp6_resource_mutex);
 	if (x->props.mode) {
 		err = ipcomp6_tunnel_attach(x);
@@ -459,10 +460,10 @@ static int ipcomp6_init_state(struct xfrm_state *x)
 out:
 	return err;
 error_tunnel:
-	down(&ipcomp6_resource_sem);
+	mutex_lock(&ipcomp6_resource_mutex);
 error:
 	ipcomp6_free_data(ipcd);
-	up(&ipcomp6_resource_sem);
+	mutex_unlock(&ipcomp6_resource_mutex);
 	kfree(ipcd);
 	goto out;
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index af0635084df8..344eab3b5da8 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -35,6 +35,7 @@
 #include <linux/spinlock.h>
 #include <linux/sysctl.h>
 #include <linux/proc_fs.h>
+#include <linux/mutex.h>
 #include <net/sock.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
@@ -65,7 +66,7 @@ static unsigned int queue_dropped = 0;
 static unsigned int queue_user_dropped = 0;
 static struct sock *ipqnl;
 static LIST_HEAD(queue_list);
-static DECLARE_MUTEX(ipqnl_sem);
+static DEFINE_MUTEX(ipqnl_mutex);
 static void
 ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
@@ -537,7 +538,7 @@ ipq_rcv_sk(struct sock *sk, int len)
 	struct sk_buff *skb;
 	unsigned int qlen;
-	down(&ipqnl_sem);
+	mutex_lock(&ipqnl_mutex);
 	for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
 		skb = skb_dequeue(&sk->sk_receive_queue);
@@ -545,7 +546,7 @@ ipq_rcv_sk(struct sock *sk, int len)
 		kfree_skb(skb);
 	}
-	up(&ipqnl_sem);
+	mutex_unlock(&ipqnl_mutex);
 }
 static int
@@ -704,8 +705,8 @@ cleanup_sysctl:
 cleanup_ipqnl:
 	sock_release(ipqnl->sk_socket);
-	down(&ipqnl_sem);
-	up(&ipqnl_sem);
+	mutex_lock(&ipqnl_mutex);
+	mutex_unlock(&ipqnl_mutex);
 cleanup_netlink_notifier:
 	netlink_unregister_notifier(&ipq_nl_notifier);
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 8cfc58b96fc2..08f9abbdf1d7 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -31,6 +31,7 @@
 #include <net/protocol.h>
 #include <linux/ipv6.h>
 #include <linux/icmpv6.h>
+#include <linux/mutex.h>
 #ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
 # define X6TDEBUG	3
@@ -357,19 +358,19 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct xfrm_decap_state *dec
 }
 static struct xfrm6_tunnel *xfrm6_tunnel_handler;
-static DECLARE_MUTEX(xfrm6_tunnel_sem);
+static DEFINE_MUTEX(xfrm6_tunnel_mutex);
 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler)
 {
 	int ret;
-	down(&xfrm6_tunnel_sem);
+	mutex_lock(&xfrm6_tunnel_mutex);
 	ret = 0;
 	if (xfrm6_tunnel_handler != NULL)
 		ret = -EINVAL;
 	if (!ret)
 		xfrm6_tunnel_handler = handler;
-	up(&xfrm6_tunnel_sem);
+	mutex_unlock(&xfrm6_tunnel_mutex);
 	return ret;
 }
@@ -380,13 +381,13 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler)
 {
 	int ret;
-	down(&xfrm6_tunnel_sem);
+	mutex_lock(&xfrm6_tunnel_mutex);
 	ret = 0;
 	if (xfrm6_tunnel_handler != handler)
 		ret = -EINVAL;
 	if (!ret)
 		xfrm6_tunnel_handler = NULL;
-	up(&xfrm6_tunnel_sem);
+	mutex_unlock(&xfrm6_tunnel_mutex);
 	synchronize_net();
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 520fe70d0993..859582275cab 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3080,9 +3080,9 @@ static int pfkey_sendmsg(struct kiocb *kiocb,
 	if (!hdr)
 		goto out;
-	down(&xfrm_cfg_sem);
+	mutex_lock(&xfrm_cfg_mutex);
 	err = pfkey_process(sk, skb, hdr);
-	up(&xfrm_cfg_sem);
+	mutex_unlock(&xfrm_cfg_mutex);
 out:
 	if (err && hdr && pfkey_error(hdr, err, sk) == 0)
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
index 61a833a9caa6..0e5c5e204799 100644
--- a/net/netfilter/nf_sockopt.c
+++ b/net/netfilter/nf_sockopt.c
@@ -4,6 +4,7 @@
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/netfilter.h>
+#include <linux/mutex.h>
 #include <net/sock.h>
 #include "nf_internals.h"
@@ -11,7 +12,7 @@
 /* Sockopts only registered and called from user context, so
    net locking would be overkill. Also, [gs]etsockopt calls may sleep. */
-static DECLARE_MUTEX(nf_sockopt_mutex);
+static DEFINE_MUTEX(nf_sockopt_mutex);
 static LIST_HEAD(nf_sockopts);
 /* Do exclusive ranges overlap? */
@@ -26,7 +27,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
 	struct list_head *i;
 	int ret = 0;
-	if (down_interruptible(&nf_sockopt_mutex) != 0)
+	if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
 		return -EINTR;
 	list_for_each(i, &nf_sockopts) {
@@ -48,7 +49,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
 	list_add(&reg->list, &nf_sockopts);
 out:
-	up(&nf_sockopt_mutex);
+	mutex_unlock(&nf_sockopt_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(nf_register_sockopt);
@@ -57,18 +58,18 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
 {
 	/* No point being interruptible: we're probably in cleanup_module() */
 restart:
-	down(&nf_sockopt_mutex);
+	mutex_lock(&nf_sockopt_mutex);
 	if (reg->use != 0) {
 		/* To be woken by nf_sockopt call... */
 		/* FIXME: Stuart Young's name appears gratuitously. */
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		reg->cleanup_task = current;
-		up(&nf_sockopt_mutex);
+		mutex_unlock(&nf_sockopt_mutex);
 		schedule();
 		goto restart;
 	}
 	list_del(&reg->list);
-	up(&nf_sockopt_mutex);
+	mutex_unlock(&nf_sockopt_mutex);
 }
 EXPORT_SYMBOL(nf_unregister_sockopt);
@@ -80,7 +81,7 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
 	struct nf_sockopt_ops *ops;
 	int ret;
-	if (down_interruptible(&nf_sockopt_mutex) != 0)
+	if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
 		return -EINTR;
 	list_for_each(i, &nf_sockopts) {
@@ -90,7 +91,7 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
 			if (val >= ops->get_optmin && val < ops->get_optmax) {
 				ops->use++;
-				up(&nf_sockopt_mutex);
+				mutex_unlock(&nf_sockopt_mutex);
 				ret = ops->get(sk, val, opt, len);
 				goto out;
 			}
@@ -98,22 +99,22 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
 			if (val >= ops->set_optmin && val < ops->set_optmax) {
 				ops->use++;
-				up(&nf_sockopt_mutex);
+				mutex_unlock(&nf_sockopt_mutex);
 				ret = ops->set(sk, val, opt, *len);
 				goto out;
 			}
 		}
 	}
-	up(&nf_sockopt_mutex);
+	mutex_unlock(&nf_sockopt_mutex);
 	return -ENOPROTOOPT;
 out:
-	down(&nf_sockopt_mutex);
+	mutex_lock(&nf_sockopt_mutex);
 	ops->use--;
 	if (ops->cleanup_task)
 		wake_up_process(ops->cleanup_task);
-	up(&nf_sockopt_mutex);
+	mutex_unlock(&nf_sockopt_mutex);
 	return ret;
 }
diff --git a/net/socket.c b/net/socket.c
index 510ae18d220a..e3c21d5ec288 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -68,6 +68,7 @@
 #include <linux/netdevice.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/mutex.h>
 #include <linux/wanrouter.h>
 #include <linux/if_bridge.h>
 #include <linux/if_frad.h>
@@ -826,36 +827,36 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const char __user *ubuf,
 *	with module unload.
 */
-static DECLARE_MUTEX(br_ioctl_mutex);
+static DEFINE_MUTEX(br_ioctl_mutex);
 static int (*br_ioctl_hook)(unsigned int cmd, void __user *arg) = NULL;
 void brioctl_set(int (*hook)(unsigned int, void __user *))
 {
-	down(&br_ioctl_mutex);
+	mutex_lock(&br_ioctl_mutex);
 	br_ioctl_hook = hook;
-	up(&br_ioctl_mutex);
+	mutex_unlock(&br_ioctl_mutex);
 }
 EXPORT_SYMBOL(brioctl_set);
-static DECLARE_MUTEX(vlan_ioctl_mutex);
+static DEFINE_MUTEX(vlan_ioctl_mutex);
 static int (*vlan_ioctl_hook)(void __user *arg);
 void vlan_ioctl_set(int (*hook)(void __user *))
 {
-	down(&vlan_ioctl_mutex);
+	mutex_lock(&vlan_ioctl_mutex);
 	vlan_ioctl_hook = hook;
-	up(&vlan_ioctl_mutex);
+	mutex_unlock(&vlan_ioctl_mutex);
 }
 EXPORT_SYMBOL(vlan_ioctl_set);
-static DECLARE_MUTEX(dlci_ioctl_mutex);
+static DEFINE_MUTEX(dlci_ioctl_mutex);
 static int (*dlci_ioctl_hook)(unsigned int, void __user *);
 void dlci_ioctl_set(int (*hook)(unsigned int, void __user *))
 {
-	down(&dlci_ioctl_mutex);
+	mutex_lock(&dlci_ioctl_mutex);
 	dlci_ioctl_hook = hook;
-	up(&dlci_ioctl_mutex);
+	mutex_unlock(&dlci_ioctl_mutex);
 }
 EXPORT_SYMBOL(dlci_ioctl_set);
@@ -899,10 +900,10 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 		if (!br_ioctl_hook)
 			request_module("bridge");
-		down(&br_ioctl_mutex);
+		mutex_lock(&br_ioctl_mutex);
 		if (br_ioctl_hook)
 			err = br_ioctl_hook(cmd, argp);
-		up(&br_ioctl_mutex);
+		mutex_unlock(&br_ioctl_mutex);
 		break;
 	case SIOCGIFVLAN:
 	case SIOCSIFVLAN:
@@ -910,10 +911,10 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 		if (!vlan_ioctl_hook)
 			request_module("8021q");
-		down(&vlan_ioctl_mutex);
+		mutex_lock(&vlan_ioctl_mutex);
 		if (vlan_ioctl_hook)
 			err = vlan_ioctl_hook(argp);
-		up(&vlan_ioctl_mutex);
+		mutex_unlock(&vlan_ioctl_mutex);
 		break;
 	case SIOCGIFDIVERT:
 	case SIOCSIFDIVERT:
@@ -927,9 +928,9 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 			request_module("dlci");
 		if (dlci_ioctl_hook) {
-			down(&dlci_ioctl_mutex);
+			mutex_lock(&dlci_ioctl_mutex);
 			err = dlci_ioctl_hook(cmd, argp);
-			up(&dlci_ioctl_mutex);
+			mutex_unlock(&dlci_ioctl_mutex);
 		}
 		break;
 	default:
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index dcaa0c4453ff..0acccfeeb284 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -26,6 +26,7 @@
 #include <linux/proc_fs.h>
 #include <linux/net.h>
 #include <linux/workqueue.h>
+#include <linux/mutex.h>
 #include <asm/ioctls.h>
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/cache.h>
@@ -532,7 +533,7 @@ void cache_clean_deferred(void *owner)
 */
 static DEFINE_SPINLOCK(queue_lock);
-static DECLARE_MUTEX(queue_io_sem);
+static DEFINE_MUTEX(queue_io_mutex);
 struct cache_queue {
 	struct list_head list;
@@ -561,7 +562,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
 	if (count == 0)
 		return 0;
-	down(&queue_io_sem); /* protect against multiple concurrent
+	mutex_lock(&queue_io_mutex); /* protect against multiple concurrent
			      * readers on this file */
 again:
 	spin_lock(&queue_lock);
@@ -574,7 +575,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
 	}
 	if (rp->q.list.next == &cd->queue) {
 		spin_unlock(&queue_lock);
-		up(&queue_io_sem);
+		mutex_unlock(&queue_io_mutex);
 		BUG_ON(rp->offset);
 		return 0;
 	}
@@ -621,11 +622,11 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
 	}
 	if (err == -EAGAIN)
 		goto again;
-	up(&queue_io_sem);
+	mutex_unlock(&queue_io_mutex);
 	return err ? err : count;
 }
-static char write_buf[8192]; /* protected by queue_io_sem */
+static char write_buf[8192]; /* protected by queue_io_mutex */
 static ssize_t
 cache_write(struct file *filp, const char __user *buf, size_t count,
@@ -639,10 +640,10 @@ cache_write(struct file *filp, const char __user *buf, size_t count,
 	if (count >= sizeof(write_buf))
 		return -EINVAL;
-	down(&queue_io_sem);
+	mutex_lock(&queue_io_mutex);
 	if (copy_from_user(write_buf, buf, count)) {
-		up(&queue_io_sem);
+		mutex_unlock(&queue_io_mutex);
 		return -EFAULT;
 	}
 	write_buf[count] = '\0';
@@ -651,7 +652,7 @@ cache_write(struct file *filp, const char __user *buf, size_t count,
 	else
 		err = -EINVAL;
-	up(&queue_io_sem);
+	mutex_unlock(&queue_io_mutex);
 	return err ? err : count;
 }
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index e838d042f7f5..dff07795bd16 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -18,6 +18,7 @@
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/xprt.h>
@@ -62,7 +63,7 @@ static LIST_HEAD(all_tasks);
 /*
  * rpciod-related stuff
  */
-static DECLARE_MUTEX(rpciod_sema);
+static DEFINE_MUTEX(rpciod_mutex);
 static unsigned int rpciod_users;
 static struct workqueue_struct *rpciod_workqueue;
@@ -1047,7 +1048,7 @@ rpciod_up(void)
 	struct workqueue_struct *wq;
 	int error = 0;
-	down(&rpciod_sema);
+	mutex_lock(&rpciod_mutex);
 	dprintk("rpciod_up: users %d\n", rpciod_users);
 	rpciod_users++;
 	if (rpciod_workqueue)
@@ -1070,14 +1071,14 @@ rpciod_up(void)
 	rpciod_workqueue = wq;
 	error = 0;
 out:
-	up(&rpciod_sema);
+	mutex_unlock(&rpciod_mutex);
 	return error;
 }
 void
 rpciod_down(void)
 {
-	down(&rpciod_sema);
+	mutex_lock(&rpciod_mutex);
 	dprintk("rpciod_down sema %d\n", rpciod_users);
 	if (rpciod_users) {
 		if (--rpciod_users)
@@ -1094,7 +1095,7 @@ rpciod_down(void)
 	destroy_workqueue(rpciod_workqueue);
 	rpciod_workqueue = NULL;
 out:
-	up(&rpciod_sema);
+	mutex_unlock(&rpciod_mutex);
 }
 #ifdef RPC_DEBUG
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 411802bd4d37..746c2f4a5fa6 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -76,6 +76,7 @@
 #include <linux/netdevice.h>
 #include <linux/file.h>
 #include <linux/proc_fs.h>
+#include <linux/mutex.h>
 #include <net/sock.h>
 #include <net/af_unix.h>
@@ -169,7 +170,7 @@ static void maybe_unmark_and_push(struct sock *x)
 void unix_gc(void)
 {
-	static DECLARE_MUTEX(unix_gc_sem);
+	static DEFINE_MUTEX(unix_gc_sem);
 	int i;
 	struct sock *s;
 	struct sk_buff_head hitlist;
@@ -179,7 +180,7 @@ void unix_gc(void)
 	 *	Avoid a recursive GC.
 	 */
-	if (down_trylock(&unix_gc_sem))
+	if (!mutex_trylock(&unix_gc_sem))
 		return;
 	spin_lock(&unix_table_lock);
@@ -308,5 +309,5 @@ void unix_gc(void)
 	 */
 	__skb_queue_purge(&hitlist);
-	up(&unix_gc_sem);
+	mutex_unlock(&unix_gc_sem);
 }
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b46079263e8b..f5eae9febd26 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -26,8 +26,8 @@
 #include <net/xfrm.h>
 #include <net/ip.h>
-DECLARE_MUTEX(xfrm_cfg_sem);
-EXPORT_SYMBOL(xfrm_cfg_sem);
+DEFINE_MUTEX(xfrm_cfg_mutex);
+EXPORT_SYMBOL(xfrm_cfg_mutex);
 static DEFINE_RWLOCK(xfrm_policy_lock);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 7b1acd995168..4a7120a7e10f 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1486,9 +1486,9 @@ static void xfrm_netlink_rcv(struct sock *sk, int len)
 	unsigned int qlen = 0;
 	do {
-		down(&xfrm_cfg_sem);
+		mutex_lock(&xfrm_cfg_mutex);
 		netlink_run_queue(sk, &qlen, &xfrm_user_rcv_msg);
-		up(&xfrm_cfg_sem);
+		mutex_unlock(&xfrm_cfg_mutex);
 	} while (qlen);
 }