Diffstat (limited to 'net')
-rw-r--r--  net/atm/lec.c | 9
-rw-r--r--  net/atm/lec.h | 2
-rw-r--r--  net/bluetooth/hci_sysfs.c | 12
-rw-r--r--  net/bridge/br_fdb.c | 2
-rw-r--r--  net/bridge/br_if.c | 10
-rw-r--r--  net/bridge/br_private.h | 2
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/core/dst.c | 2
-rw-r--r--  net/core/flow.c | 6
-rw-r--r--  net/core/link_watch.c | 13
-rw-r--r--  net/core/neighbour.c | 2
-rw-r--r--  net/core/netpoll.c | 11
-rw-r--r--  net/core/skbuff.c | 20
-rw-r--r--  net/core/sock.c | 25
-rw-r--r--  net/dccp/ackvec.c | 4
-rw-r--r--  net/dccp/ccid.c | 6
-rw-r--r--  net/dccp/ccid.h | 4
-rw-r--r--  net/dccp/ccids/ccid3.c | 6
-rw-r--r--  net/dccp/ccids/lib/loss_interval.c | 2
-rw-r--r--  net/dccp/ccids/lib/loss_interval.h | 2
-rw-r--r--  net/dccp/ccids/lib/packet_history.h | 4
-rw-r--r--  net/dccp/minisocks.c | 3
-rw-r--r--  net/decnet/dn_table.c | 4
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_assoc.c | 18
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_auth.c | 23
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_event.c | 12
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_module.c | 4
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_priv.h | 13
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_scan.c | 13
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_wx.c | 6
-rw-r--r--  net/ipv4/fib_hash.c | 8
-rw-r--r--  net/ipv4/fib_trie.c | 6
-rw-r--r--  net/ipv4/inet_hashtables.c | 6
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 7
-rw-r--r--  net/ipv4/inetpeer.c | 2
-rw-r--r--  net/ipv4/ipmr.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_conn.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c | 6
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_core.c | 4
-rw-r--r--  net/ipv4/tcp_minisocks.c | 3
-rw-r--r--  net/ipv6/af_inet6.c | 6
-rw-r--r--  net/ipv6/ip6_fib.c | 4
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 4
-rw-r--r--  net/irda/ircomm/ircomm_tty.c | 11
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 2
-rw-r--r--  net/netfilter/xt_hashlimit.c | 2
-rw-r--r--  net/rxrpc/krxiod.c | 1
-rw-r--r--  net/rxrpc/krxsecd.c | 1
-rw-r--r--  net/rxrpc/krxtimod.c | 1
-rw-r--r--  net/sctp/associola.c | 11
-rw-r--r--  net/sctp/endpointola.c | 10
-rw-r--r--  net/sctp/inqueue.c | 9
-rw-r--r--  net/sctp/protocol.c | 4
-rw-r--r--  net/sctp/sm_make_chunk.c | 4
-rw-r--r--  net/sctp/socket.c | 4
-rw-r--r--  net/socket.c | 25
-rw-r--r--  net/sunrpc/cache.c | 8
-rw-r--r--  net/sunrpc/rpc_pipe.c | 14
-rw-r--r--  net/sunrpc/sched.c | 12
-rw-r--r--  net/sunrpc/svcauth.c | 3
-rw-r--r--  net/sunrpc/svcsock.c | 32
-rw-r--r--  net/sunrpc/xprt.c | 7
-rw-r--r--  net/sunrpc/xprtsock.c | 51
-rw-r--r--  net/tipc/handler.c | 2
-rw-r--r--  net/xfrm/xfrm_input.c | 4
-rw-r--r--  net/xfrm/xfrm_policy.c | 10
-rw-r--r--  net/xfrm/xfrm_state.c | 8
68 files changed, 315 insertions, 229 deletions
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5946ec63724f..3fc0abeeaf34 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1454,7 +1454,7 @@ static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr,
#define LEC_ARP_REFRESH_INTERVAL (3*HZ)
-static void lec_arp_check_expire(void *data);
+static void lec_arp_check_expire(struct work_struct *work);
static void lec_arp_expire_arp(unsigned long data);
/*
@@ -1477,7 +1477,7 @@ static void lec_arp_init(struct lec_priv *priv)
INIT_HLIST_HEAD(&priv->lec_no_forward);
INIT_HLIST_HEAD(&priv->mcast_fwds);
spin_lock_init(&priv->lec_arp_lock);
- INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv);
+ INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
}
@@ -1875,10 +1875,11 @@ static void lec_arp_expire_vcc(unsigned long data)
* to ESI_FORWARD_DIRECT. This causes the flush period to end
* regardless of the progress of the flush protocol.
*/
-static void lec_arp_check_expire(void *data)
+static void lec_arp_check_expire(struct work_struct *work)
{
unsigned long flags;
- struct lec_priv *priv = data;
+ struct lec_priv *priv =
+ container_of(work, struct lec_priv, lec_arp_work.work);
struct hlist_node *node, *next;
struct lec_arp_table *entry;
unsigned long now;
diff --git a/net/atm/lec.h b/net/atm/lec.h
index 24cc95f86741..99136babd535 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -92,7 +92,7 @@ struct lec_priv {
spinlock_t lec_arp_lock;
struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */
struct atm_vcc *lecd;
- struct work_struct lec_arp_work; /* C10 */
+ struct delayed_work lec_arp_work; /* C10 */
unsigned int maximum_unknown_frame_count;
/*
* Within the period of time defined by this variable, the client will send
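The lec.c/lec.h hunks above show the conversion pattern that repeats across the rest of this series: a work callback now receives its own struct work_struct * and recovers the object that embeds it with container_of(), and work that used to re-arm itself via schedule_delayed_work() becomes a struct delayed_work. A minimal sketch of the resulting shape, using hypothetical my_priv/my_refresh names rather than anything from this tree:

struct my_priv {
	struct delayed_work refresh;	/* was: struct work_struct, re-armed with a delay */
	unsigned long interval;
};

/* New-style handler: gets the work_struct itself, not a void *cookie. */
static void my_refresh(struct work_struct *work)
{
	struct my_priv *priv =
		container_of(work, struct my_priv, refresh.work);

	/* ... do the periodic work using priv ... */
	schedule_delayed_work(&priv->refresh, priv->interval);	/* re-arm */
}

static void my_init(struct my_priv *priv)
{
	/* old API: INIT_WORK(&priv->refresh, my_refresh, priv); */
	INIT_DELAYED_WORK(&priv->refresh, my_refresh);
	schedule_delayed_work(&priv->refresh, priv->interval);
}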
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 3eeeb7a86e75..d4c935692ccf 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -237,9 +237,9 @@ static void bt_release(struct device *dev)
kfree(data);
}
-static void add_conn(void *data)
+static void add_conn(struct work_struct *work)
{
- struct hci_conn *conn = data;
+ struct hci_conn *conn = container_of(work, struct hci_conn, work);
int i;
if (device_register(&conn->dev) < 0) {
@@ -272,14 +272,14 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
dev_set_drvdata(&conn->dev, conn);
- INIT_WORK(&conn->work, add_conn, (void *) conn);
+ INIT_WORK(&conn->work, add_conn);
schedule_work(&conn->work);
}
-static void del_conn(void *data)
+static void del_conn(struct work_struct *work)
{
- struct hci_conn *conn = data;
+ struct hci_conn *conn = container_of(work, struct hci_conn, work);
device_del(&conn->dev);
}
@@ -287,7 +287,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
{
BT_DBG("conn %p", conn);
- INIT_WORK(&conn->work, del_conn, (void *) conn);
+ INIT_WORK(&conn->work, del_conn);
schedule_work(&conn->work);
}
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index d9f04864d15d..8ca448db7a0d 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -23,7 +23,7 @@
#include <asm/atomic.h>
#include "br_private.h"
-static kmem_cache_t *br_fdb_cache __read_mostly;
+static struct kmem_cache *br_fdb_cache __read_mostly;
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f753c40c11d2..55bb2634c088 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -77,12 +77,16 @@ static int port_cost(struct net_device *dev)
* Called from work queue to allow for calling functions that
* might sleep (such as speed check), and to debounce.
*/
-static void port_carrier_check(void *arg)
+static void port_carrier_check(struct work_struct *work)
{
- struct net_device *dev = arg;
struct net_bridge_port *p;
+ struct net_device *dev;
struct net_bridge *br;
+ dev = container_of(work, struct net_bridge_port,
+ carrier_check.work)->dev;
+ work_release(work);
+
rtnl_lock();
p = dev->br_port;
if (!p)
@@ -276,7 +280,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
p->port_no = index;
br_init_port(p);
p->state = BR_STATE_DISABLED;
- INIT_WORK(&p->carrier_check, port_carrier_check, dev);
+ INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check);
br_stp_port_timer_init(p);
kobject_init(&p->kobj);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 74258d86f256..3a534e94c7f3 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,7 +82,7 @@ struct net_bridge_port
struct timer_list hold_timer;
struct timer_list message_age_timer;
struct kobject kobj;
- struct work_struct carrier_check;
+ struct delayed_work carrier_check;
struct rcu_head rcu;
};
diff --git a/net/core/dev.c b/net/core/dev.c
index 59d058a3b504..e660cb57e42a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3340,7 +3340,6 @@ void unregister_netdev(struct net_device *dev)
EXPORT_SYMBOL(unregister_netdev);
-#ifdef CONFIG_HOTPLUG_CPU
static int dev_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *ocpu)
@@ -3384,7 +3383,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_NET_DMA
/**
diff --git a/net/core/dst.c b/net/core/dst.c
index 1a5e49da0e77..836ec6606925 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -125,7 +125,7 @@ void * dst_alloc(struct dst_ops * ops)
if (ops->gc())
return NULL;
}
- dst = kmem_cache_alloc(ops->kmem_cachep, SLAB_ATOMIC);
+ dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
if (!dst)
return NULL;
memset(dst, 0, ops->entry_size);
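Alongside the workqueue changes, the series drops the kmem_cache_t typedef in favour of struct kmem_cache and replaces the SLAB_ATOMIC/SLAB_KERNEL aliases with the plain GFP flags, as in the br_fdb.c and dst.c hunks above. A rough sketch of the resulting slab usage; the cache and entry names are made up for illustration:

struct example_entry {
	struct hlist_node node;
	u32 key;
};

/* was: static kmem_cache_t *example_cachep;
 * created with kmem_cache_create() at init time as before */
static struct kmem_cache *example_cachep __read_mostly;

static struct example_entry *example_alloc_atomic(void)
{
	/* GFP_ATOMIC replaces SLAB_ATOMIC; use GFP_KERNEL where sleeping is allowed */
	return kmem_cache_alloc(example_cachep, GFP_ATOMIC);
}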
diff --git a/net/core/flow.c b/net/core/flow.c
index b16d31ae5e54..d137f971f97d 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -44,7 +44,7 @@ static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
#define flow_table(cpu) (per_cpu(flow_tables, cpu))
-static kmem_cache_t *flow_cachep __read_mostly;
+static struct kmem_cache *flow_cachep __read_mostly;
static int flow_lwm, flow_hwm;
@@ -211,7 +211,7 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
if (flow_count(cpu) > flow_hwm)
flow_cache_shrink(cpu);
- fle = kmem_cache_alloc(flow_cachep, SLAB_ATOMIC);
+ fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
if (fle) {
fle->next = *head;
*head = fle;
@@ -340,7 +340,6 @@ static void __devinit flow_cache_cpu_prepare(int cpu)
tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
}
-#ifdef CONFIG_HOTPLUG_CPU
static int flow_cache_cpu(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
@@ -349,7 +348,6 @@ static int flow_cache_cpu(struct notifier_block *nfb,
__flow_cache_shrink((unsigned long)hcpu, 0);
return NOTIFY_OK;
}
-#endif /* CONFIG_HOTPLUG_CPU */
static int __init flow_cache_init(void)
{
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 4b36114744c5..549a2ce951b0 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -34,8 +34,8 @@ enum lw_bits {
static unsigned long linkwatch_flags;
static unsigned long linkwatch_nextevent;
-static void linkwatch_event(void *dummy);
-static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
+static void linkwatch_event(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);
@@ -127,7 +127,7 @@ void linkwatch_run_queue(void)
}
-static void linkwatch_event(void *dummy)
+static void linkwatch_event(struct work_struct *dummy)
{
/* Limit the number of linkwatch events to one
* per second so that a runaway driver does not
@@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev)
unsigned long delay = linkwatch_nextevent - jiffies;
/* If we wrap around we'll delay it by at most HZ. */
- if (!delay || delay > HZ)
- schedule_work(&linkwatch_work);
- else
- schedule_delayed_work(&linkwatch_work, delay);
+ if (delay > HZ)
+ delay = 0;
+ schedule_delayed_work(&linkwatch_work, delay);
}
}
}
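The link_watch change illustrates the idiom for statically declared work under the new API: a single DECLARE_DELAYED_WORK object covers both the immediate and the delayed case, with an immediate run expressed as a zero delay. Roughly, under illustrative names not taken from this patch:

static void example_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(example_work, example_event);

static void example_fire(unsigned long next_event)
{
	unsigned long delay = next_event - jiffies;

	/* if the deadline wrapped, run right away (delayed by at most HZ) */
	if (delay > HZ)
		delay = 0;
	schedule_delayed_work(&example_work, delay);
}

static void example_event(struct work_struct *dummy)
{
	/* ... process the queued events ... */
}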
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ba509a4a8e92..0ab1987b9348 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -251,7 +251,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
goto out_entries;
}
- n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
+ n = kmem_cache_alloc(tbl->kmem_cachep, GFP_ATOMIC);
if (!n)
goto out_entries;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 3c58846fcaa5..b3c559b9ac35 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -50,9 +50,10 @@ static atomic_t trapped;
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);
-static void queue_process(void *p)
+static void queue_process(struct work_struct *work)
{
- struct netpoll_info *npinfo = p;
+ struct netpoll_info *npinfo =
+ container_of(work, struct netpoll_info, tx_work.work);
struct sk_buff *skb;
while ((skb = skb_dequeue(&npinfo->txq))) {
@@ -72,8 +73,6 @@ static void queue_process(void *p)
schedule_delayed_work(&npinfo->tx_work, HZ/10);
return;
}
-
- netif_tx_unlock_bh(dev);
}
}
@@ -263,7 +262,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
if (status != NETDEV_TX_OK) {
skb_queue_tail(&npinfo->txq, skb);
- schedule_work(&npinfo->tx_work);
+ schedule_delayed_work(&npinfo->tx_work,0);
}
}
@@ -628,7 +627,7 @@ int netpoll_setup(struct netpoll *np)
spin_lock_init(&npinfo->rx_lock);
skb_queue_head_init(&npinfo->arp_tx);
skb_queue_head_init(&npinfo->txq);
- INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
+ INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
atomic_set(&npinfo->refcnt, 1);
} else {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8e1c385e5ba9..de7801d589e7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -68,8 +68,8 @@
#include "kmap_skb.h"
-static kmem_cache_t *skbuff_head_cache __read_mostly;
-static kmem_cache_t *skbuff_fclone_cache __read_mostly;
+static struct kmem_cache *skbuff_head_cache __read_mostly;
+static struct kmem_cache *skbuff_fclone_cache __read_mostly;
/*
* Keep out-of-line to prevent kernel bloat.
@@ -132,6 +132,7 @@ EXPORT_SYMBOL(skb_truesize_bug);
* @gfp_mask: allocation mask
* @fclone: allocate from fclone cache instead of head cache
* and allocate a cloned (child) skb
+ * @node: numa node to allocate memory on
*
* Allocate a new &sk_buff. The returned buffer has no headroom and a
* tail room of size bytes. The object has a reference count of one.
@@ -141,9 +142,9 @@ EXPORT_SYMBOL(skb_truesize_bug);
* %GFP_ATOMIC.
*/
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
- int fclone)
+ int fclone, int node)
{
- kmem_cache_t *cache;
+ struct kmem_cache *cache;
struct skb_shared_info *shinfo;
struct sk_buff *skb;
u8 *data;
@@ -151,14 +152,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
/* Get the HEAD */
- skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
+ skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
if (!skb)
goto out;
/* Get the DATA. Size must match skb_add_mtu(). */
size = SKB_DATA_ALIGN(size);
- data = kmalloc_track_caller(size + sizeof(struct skb_shared_info),
- gfp_mask);
+ data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
+ gfp_mask, node);
if (!data)
goto nodata;
@@ -210,7 +211,7 @@ nodata:
* Buffers may only be allocated from interrupts using a @gfp_mask of
* %GFP_ATOMIC.
*/
-struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
unsigned int size,
gfp_t gfp_mask)
{
@@ -267,9 +268,10 @@ nodata:
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
unsigned int length, gfp_t gfp_mask)
{
+ int node = dev->class_dev.dev ? dev_to_node(dev->class_dev.dev) : -1;
struct sk_buff *skb;
- skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+ skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
if (likely(skb)) {
skb_reserve(skb, NET_SKB_PAD);
skb->dev = dev;
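The skbuff.c hunks also make __alloc_skb()/__netdev_alloc_skb() NUMA-aware: the skb head and data are allocated on the node of the device that will use the buffer, with -1 meaning no node preference. A short sketch of that idiom; example_alloc_for_dev() is hypothetical and not part of this patch:

static void *example_alloc_for_dev(struct device *dev, size_t size, gfp_t gfp)
{
	int node = dev ? dev_to_node(dev) : -1;	/* -1: any node */

	/* node-aware counterpart of kmalloc_track_caller(), as used above */
	return kmalloc_node_track_caller(size, gfp, node);
}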
diff --git a/net/core/sock.c b/net/core/sock.c
index 419c7d3289c7..0ed5b4f0bc40 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -810,24 +810,11 @@ lenout:
*/
static void inline sock_lock_init(struct sock *sk)
{
- spin_lock_init(&sk->sk_lock.slock);
- sk->sk_lock.owner = NULL;
- init_waitqueue_head(&sk->sk_lock.wq);
- /*
- * Make sure we are not reinitializing a held lock:
- */
- debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));
-
- /*
- * Mark both the sk_lock and the sk_lock.slock as a
- * per-address-family lock class:
- */
- lockdep_set_class_and_name(&sk->sk_lock.slock,
- af_family_slock_keys + sk->sk_family,
- af_family_slock_key_strings[sk->sk_family]);
- lockdep_init_map(&sk->sk_lock.dep_map,
- af_family_key_strings[sk->sk_family],
- af_family_keys + sk->sk_family, 0);
+ sock_lock_init_class_and_name(sk,
+ af_family_slock_key_strings[sk->sk_family],
+ af_family_slock_keys + sk->sk_family,
+ af_family_key_strings[sk->sk_family],
+ af_family_keys + sk->sk_family);
}
/**
@@ -841,7 +828,7 @@ struct sock *sk_alloc(int family, gfp_t priority,
struct proto *prot, int zero_it)
{
struct sock *sk = NULL;
- kmem_cache_t *slab = prot->slab;
+ struct kmem_cache *slab = prot->slab;
if (slab != NULL)
sk = kmem_cache_alloc(slab, priority);
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index bdf1bb7a82c0..1f4727ddbdbf 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -21,8 +21,8 @@
#include <net/sock.h>
-static kmem_cache_t *dccp_ackvec_slab;
-static kmem_cache_t *dccp_ackvec_record_slab;
+static struct kmem_cache *dccp_ackvec_slab;
+static struct kmem_cache *dccp_ackvec_record_slab;
static struct dccp_ackvec_record *dccp_ackvec_record_new(void)
{
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index ff05e59043cd..d8cf92f09e68 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -55,9 +55,9 @@ static inline void ccids_read_unlock(void)
#define ccids_read_unlock() do { } while(0)
#endif
-static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
+static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
{
- kmem_cache_t *slab;
+ struct kmem_cache *slab;
char slab_name_fmt[32], *slab_name;
va_list args;
@@ -75,7 +75,7 @@ static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
return slab;
}
-static void ccid_kmem_cache_destroy(kmem_cache_t *slab)
+static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
{
if (slab != NULL) {
const char *name = kmem_cache_name(slab);
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index c7c29514dce8..bcc2d12ae81c 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -27,9 +27,9 @@ struct ccid_operations {
unsigned char ccid_id;
const char *ccid_name;
struct module *ccid_owner;
- kmem_cache_t *ccid_hc_rx_slab;
+ struct kmem_cache *ccid_hc_rx_slab;
__u32 ccid_hc_rx_obj_size;
- kmem_cache_t *ccid_hc_tx_slab;
+ struct kmem_cache *ccid_hc_tx_slab;
__u32 ccid_hc_tx_obj_size;
int (*ccid_hc_rx_init)(struct ccid *ccid, struct sock *sk);
int (*ccid_hc_tx_init)(struct ccid *ccid, struct sock *sk);
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index cf8c07b2704f..66a27b9688ca 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -295,7 +295,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
if (new_packet == NULL || new_packet->dccphtx_sent) {
new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
- SLAB_ATOMIC);
+ GFP_ATOMIC);
if (unlikely(new_packet == NULL)) {
DCCP_WARN("%s, sk=%p, not enough mem to add to history,"
@@ -889,7 +889,7 @@ static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
/* new loss event detected */
/* calculate last interval length */
seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
- entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC);
+ entry = dccp_li_hist_entry_new(ccid3_li_hist, GFP_ATOMIC);
if (entry == NULL) {
DCCP_BUG("out of memory - can not allocate entry");
@@ -1011,7 +1011,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
}
packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
- skb, SLAB_ATOMIC);
+ skb, GFP_ATOMIC);
if (unlikely(packet == NULL)) {
DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet "
"to history, consider it lost!\n", dccp_role(sk), sk);
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 48b9b93f8acb..0a0baef16b3e 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -125,7 +125,7 @@ int dccp_li_hist_interval_new(struct dccp_li_hist *hist,
int i;
for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) {
- entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC);
+ entry = dccp_li_hist_entry_new(hist, GFP_ATOMIC);
if (entry == NULL) {
dccp_li_hist_purge(hist, list);
DCCP_BUG("loss interval list entry is NULL");
diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h
index 0ae85f0340b2..eb257014dd74 100644
--- a/net/dccp/ccids/lib/loss_interval.h
+++ b/net/dccp/ccids/lib/loss_interval.h
@@ -20,7 +20,7 @@
#define DCCP_LI_HIST_IVAL_F_LENGTH 8
struct dccp_li_hist {
- kmem_cache_t *dccplih_slab;
+ struct kmem_cache *dccplih_slab;
};
extern struct dccp_li_hist *dccp_li_hist_new(const char *name);
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 067cf1c85a37..9a8bcf224aa7 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -68,14 +68,14 @@ struct dccp_rx_hist_entry {
};
struct dccp_tx_hist {
- kmem_cache_t *dccptxh_slab;
+ struct kmem_cache *dccptxh_slab;
};
extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name);
extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist);
struct dccp_rx_hist {
- kmem_cache_t *dccprxh_slab;
+ struct kmem_cache *dccprxh_slab;
};
extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 7b52f2a03eef..4c9e26775f72 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -32,8 +32,7 @@ struct inet_timewait_death_row dccp_death_row = {
.tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
(unsigned long)&dccp_death_row),
.twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work,
- inet_twdr_twkill_work,
- &dccp_death_row),
+ inet_twdr_twkill_work),
/* Short-time timewait calendar */
.twcal_hand = -1,
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index bdbc3f431668..13b2421991ba 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -79,7 +79,7 @@ for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_n
static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ];
static DEFINE_RWLOCK(dn_fib_tables_lock);
-static kmem_cache_t *dn_hash_kmem __read_mostly;
+static struct kmem_cache *dn_hash_kmem __read_mostly;
static int dn_fib_hash_zombies;
static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
@@ -590,7 +590,7 @@ create:
replace:
err = -ENOBUFS;
- new_f = kmem_cache_alloc(dn_hash_kmem, SLAB_KERNEL);
+ new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL);
if (new_f == NULL)
goto out;
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index cf51c87a971d..08386c102954 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -58,9 +58,11 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft
}
void
-ieee80211softmac_assoc_timeout(void *d)
+ieee80211softmac_assoc_timeout(struct work_struct *work)
{
- struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+ struct ieee80211softmac_device *mac =
+ container_of(work, struct ieee80211softmac_device,
+ associnfo.timeout.work);
struct ieee80211softmac_network *n;
mutex_lock(&mac->associnfo.mutex);
@@ -186,9 +188,11 @@ ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void
/* This function is called to handle userspace requests (asynchronously) */
void
-ieee80211softmac_assoc_work(void *d)
+ieee80211softmac_assoc_work(struct work_struct *work)
{
- struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+ struct ieee80211softmac_device *mac =
+ container_of(work, struct ieee80211softmac_device,
+ associnfo.work.work);
struct ieee80211softmac_network *found = NULL;
struct ieee80211_network *net = NULL, *best = NULL;
int bssvalid;
@@ -412,7 +416,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
network->authenticated = 0;
/* we don't want to do this more than once ... */
network->auth_desynced_once = 1;
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
break;
}
default:
@@ -446,7 +450,7 @@ ieee80211softmac_handle_disassoc(struct net_device * dev,
ieee80211softmac_disassoc(mac);
/* try to reassociate */
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
return 0;
}
@@ -466,7 +470,7 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev,
dprintkl(KERN_INFO PFX "reassoc request from unknown network\n");
return 0;
}
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
return 0;
}
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 0612015f1c78..6012705aa4f8 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -26,7 +26,7 @@
#include "ieee80211softmac_priv.h"
-static void ieee80211softmac_auth_queue(void *data);
+static void ieee80211softmac_auth_queue(struct work_struct *work);
/* Queues an auth request to the desired AP */
int
@@ -54,14 +54,14 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
auth->mac = mac;
auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT;
auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST;
- INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth);
+ INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue);
/* Lock (for list) */
spin_lock_irqsave(&mac->lock, flags);
/* add to list */
list_add_tail(&auth->list, &mac->auth_queue);
- schedule_work(&auth->work);
+ schedule_delayed_work(&auth->work, 0);
spin_unlock_irqrestore(&mac->lock, flags);
return 0;
@@ -70,14 +70,15 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
/* Sends an auth request to the desired AP and handles timeouts */
static void
-ieee80211softmac_auth_queue(void *data)
+ieee80211softmac_auth_queue(struct work_struct *work)
{
struct ieee80211softmac_device *mac;
struct ieee80211softmac_auth_queue_item *auth;
struct ieee80211softmac_network *net;
unsigned long flags;
- auth = (struct ieee80211softmac_auth_queue_item *)data;
+ auth = container_of(work, struct ieee80211softmac_auth_queue_item,
+ work.work);
net = auth->net;
mac = auth->mac;
@@ -118,9 +119,11 @@ ieee80211softmac_auth_queue(void *data)
/* Sends a response to an auth challenge (for shared key auth). */
static void
-ieee80211softmac_auth_challenge_response(void *_aq)
+ieee80211softmac_auth_challenge_response(struct work_struct *work)
{
- struct ieee80211softmac_auth_queue_item *aq = _aq;
+ struct ieee80211softmac_auth_queue_item *aq =
+ container_of(work, struct ieee80211softmac_auth_queue_item,
+ work.work);
/* Send our response */
ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
@@ -234,8 +237,8 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
* we have obviously already sent the initial auth
* request. */
cancel_delayed_work(&aq->work);
- INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
- schedule_work(&aq->work);
+ INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response);
+ schedule_delayed_work(&aq->work, 0);
spin_unlock_irqrestore(&mac->lock, flags);
return 0;
case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
@@ -398,6 +401,6 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de
ieee80211softmac_deauth_from_net(mac, net);
/* let's try to re-associate */
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
return 0;
}
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c
index f34fa2ef666b..b9015656cfb3 100644
--- a/net/ieee80211/softmac/ieee80211softmac_event.c
+++ b/net/ieee80211/softmac/ieee80211softmac_event.c
@@ -73,10 +73,12 @@ static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = {
static void
-ieee80211softmac_notify_callback(void *d)
+ieee80211softmac_notify_callback(struct work_struct *work)
{
- struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d;
- kfree(d);
+ struct ieee80211softmac_event *pevent =
+ container_of(work, struct ieee80211softmac_event, work.work);
+ struct ieee80211softmac_event event = *pevent;
+ kfree(pevent);
event.fun(event.mac->dev, event.event_type, event.context);
}
@@ -99,7 +101,7 @@ ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac,
return -ENOMEM;
eventptr->event_type = event;
- INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr);
+ INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback);
eventptr->fun = fun;
eventptr->context = context;
eventptr->mac = mac;
@@ -170,7 +172,7 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve
/* User may have subscribed to ANY event, so
* we tell them which event triggered it. */
eventptr->event_type = event;
- schedule_work(&eventptr->work);
+ schedule_delayed_work(&eventptr->work, 0);
}
}
}
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index 33aff4f4a471..256207b71dc9 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -58,8 +58,8 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
INIT_LIST_HEAD(&softmac->events);
mutex_init(&softmac->associnfo.mutex);
- INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac);
- INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac);
+ INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work);
+ INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout);
softmac->start_scan = ieee80211softmac_start_scan_implementation;
softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation;
softmac->stop_scan = ieee80211softmac_stop_scan_implementation;
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
index 0642e090b8a7..c0dbe070e548 100644
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ b/net/ieee80211/softmac/ieee80211softmac_priv.h
@@ -78,7 +78,7 @@
/* private definitions and prototypes */
/*** prototypes from _scan.c */
-void ieee80211softmac_scan(void *sm);
+void ieee80211softmac_scan(struct work_struct *work);
/* for internal use if scanning is needed */
int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac);
void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac);
@@ -149,7 +149,7 @@ int ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *au
int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth);
/*** prototypes from _assoc.c */
-void ieee80211softmac_assoc_work(void *d);
+void ieee80211softmac_assoc_work(struct work_struct *work);
int ieee80211softmac_handle_assoc_response(struct net_device * dev,
struct ieee80211_assoc_response * resp,
struct ieee80211_network * network);
@@ -157,7 +157,7 @@ int ieee80211softmac_handle_disassoc(struct net_device * dev,
struct ieee80211_disassoc * disassoc);
int ieee80211softmac_handle_reassoc_req(struct net_device * dev,
struct ieee80211_reassoc_request * reassoc);
-void ieee80211softmac_assoc_timeout(void *d);
+void ieee80211softmac_assoc_timeout(struct work_struct *work);
void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason);
void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
@@ -207,7 +207,7 @@ struct ieee80211softmac_auth_queue_item {
struct ieee80211softmac_device *mac; /* SoftMAC device */
u8 retry; /* Retry limit */
u8 state; /* Auth State */
- struct work_struct work; /* Work queue */
+ struct delayed_work work; /* Work queue */
};
/* scanning information */
@@ -219,7 +219,8 @@ struct ieee80211softmac_scaninfo {
stop:1;
u8 skip_flags;
struct completion finished;
- struct work_struct softmac_scan;
+ struct delayed_work softmac_scan;
+ struct ieee80211softmac_device *mac;
};
/* private event struct */
@@ -227,7 +228,7 @@ struct ieee80211softmac_event {
struct list_head list;
int event_type;
void *event_context;
- struct work_struct work;
+ struct delayed_work work;
notify_function_ptr fun;
void *context;
struct ieee80211softmac_device *mac;
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c
index 5507feab32de..0c85d6c24cdb 100644
--- a/net/ieee80211/softmac/ieee80211softmac_scan.c
+++ b/net/ieee80211/softmac/ieee80211softmac_scan.c
@@ -90,12 +90,14 @@ ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *sm)
/* internal scanning implementation follows */
-void ieee80211softmac_scan(void *d)
+void ieee80211softmac_scan(struct work_struct *work)
{
int invalid_channel;
u8 current_channel_idx;
- struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d;
- struct ieee80211softmac_scaninfo *si = sm->scaninfo;
+ struct ieee80211softmac_scaninfo *si =
+ container_of(work, struct ieee80211softmac_scaninfo,
+ softmac_scan.work);
+ struct ieee80211softmac_device *sm = si->mac;
unsigned long flags;
while (!(si->stop) && (si->current_channel_idx < si->number_channels)) {
@@ -146,7 +148,8 @@ static inline struct ieee80211softmac_scaninfo *allocate_scaninfo(struct ieee802
struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC);
if (unlikely(!info))
return NULL;
- INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac);
+ INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan);
+ info->mac = mac;
init_completion(&info->finished);
return info;
}
@@ -187,7 +190,7 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev)
sm->scaninfo->started = 1;
sm->scaninfo->stop = 0;
INIT_COMPLETION(sm->scaninfo->finished);
- schedule_work(&sm->scaninfo->softmac_scan);
+ schedule_delayed_work(&sm->scaninfo->softmac_scan, 0);
spin_unlock_irqrestore(&sm->lock, flags);
return 0;
}
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index 23068a830f7d..2ffaebd21c53 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -122,7 +122,7 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
sm->associnfo.associating = 1;
/* queue lower level code to do work (if necessary) */
- schedule_work(&sm->associnfo.work);
+ schedule_delayed_work(&sm->associnfo.work, 0);
out:
mutex_unlock(&sm->associnfo.mutex);
@@ -356,7 +356,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
/* force reassociation */
mac->associnfo.bssvalid = 0;
if (mac->associnfo.associated)
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
} else if (is_zero_ether_addr(data->ap_addr.sa_data)) {
/* the bssid we have is no longer fixed */
mac->associnfo.bssfixed = 0;
@@ -373,7 +373,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
/* tell the other code that this bssid should be used no matter what */
mac->associnfo.bssfixed = 1;
/* queue associate if new bssid or (old one again and not associated) */
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
}
out:
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 107bb6cbb0b3..648f47c1c399 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -45,8 +45,8 @@
#include "fib_lookup.h"
-static kmem_cache_t *fn_hash_kmem __read_mostly;
-static kmem_cache_t *fn_alias_kmem __read_mostly;
+static struct kmem_cache *fn_hash_kmem __read_mostly;
+static struct kmem_cache *fn_alias_kmem __read_mostly;
struct fib_node {
struct hlist_node fn_hash;
@@ -485,13 +485,13 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
goto out;
err = -ENOBUFS;
- new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+ new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
if (new_fa == NULL)
goto out;
new_f = NULL;
if (!f) {
- new_f = kmem_cache_alloc(fn_hash_kmem, SLAB_KERNEL);
+ new_f = kmem_cache_alloc(fn_hash_kmem, GFP_KERNEL);
if (new_f == NULL)
goto out_free_new_fa;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index d17990ec724f..cfb249cc0a58 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -172,7 +172,7 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn);
static struct tnode *halve(struct trie *t, struct tnode *tn);
static void tnode_free(struct tnode *tn);
-static kmem_cache_t *fn_alias_kmem __read_mostly;
+static struct kmem_cache *fn_alias_kmem __read_mostly;
static struct trie *trie_local = NULL, *trie_main = NULL;
@@ -1187,7 +1187,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
u8 state;
err = -ENOBUFS;
- new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+ new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
if (new_fa == NULL)
goto out;
@@ -1232,7 +1232,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
goto out;
err = -ENOBUFS;
- new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+ new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
if (new_fa == NULL)
goto out;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 244c4f445c7d..8c79c8a4ea5c 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -27,11 +27,11 @@
* Allocate and initialize a new local port bind bucket.
* The bindhash mutex for snum's hash chain must be held here.
*/
-struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
+struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
struct inet_bind_hashbucket *head,
const unsigned short snum)
{
- struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, SLAB_ATOMIC);
+ struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
if (tb != NULL) {
tb->port = snum;
@@ -45,7 +45,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
/*
* Caller must hold hashbucket lock for this tb with local BH disabled
*/
-void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb)
+void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
if (hlist_empty(&tb->owners)) {
__hlist_del(&tb->node);
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index cdd805344c61..e28330aa4139 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -91,7 +91,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
{
struct inet_timewait_sock *tw =
kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
- SLAB_ATOMIC);
+ GFP_ATOMIC);
if (tw != NULL) {
const struct inet_sock *inet = inet_sk(sk);
@@ -197,9 +197,10 @@ EXPORT_SYMBOL_GPL(inet_twdr_hangman);
extern void twkill_slots_invalid(void);
-void inet_twdr_twkill_work(void *data)
+void inet_twdr_twkill_work(struct work_struct *work)
{
- struct inet_timewait_death_row *twdr = data;
+ struct inet_timewait_death_row *twdr =
+ container_of(work, struct inet_timewait_death_row, twkill_work);
int i;
if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index f072f3875af8..711eb6d0285a 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -73,7 +73,7 @@
/* Exported for inet_getid inline function. */
DEFINE_SPINLOCK(inet_peer_idlock);
-static kmem_cache_t *peer_cachep __read_mostly;
+static struct kmem_cache *peer_cachep __read_mostly;
#define node_height(x) x->avl_height
static struct inet_peer peer_fake_node = {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index efcf45ecc818..ecb5422ea237 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -105,7 +105,7 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
In this case data path is free of exclusive locks at all.
*/
-static kmem_cache_t *mrt_cachep __read_mostly;
+static struct kmem_cache *mrt_cachep __read_mostly;
static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 8832eb517d52..8086787a2c51 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -44,7 +44,7 @@
static struct list_head *ip_vs_conn_tab;
/* SLAB cache for IPVS connections */
-static kmem_cache_t *ip_vs_conn_cachep __read_mostly;
+static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
/* counter for current IPVS connections */
static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index f261616e4602..9b933381ebbe 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -221,10 +221,10 @@ static void update_defense_level(void)
* Timer for checking the defense
*/
#define DEFENSE_TIMER_PERIOD 1*HZ
-static void defense_work_handler(void *data);
-static DECLARE_WORK(defense_work, defense_work_handler, NULL);
+static void defense_work_handler(struct work_struct *work);
+static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
-static void defense_work_handler(void *data)
+static void defense_work_handler(struct work_struct *work)
{
update_defense_level();
if (atomic_read(&ip_vs_dropentry))
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index f4b0e68a16d2..8556a4f4f60a 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -65,8 +65,8 @@ static LIST_HEAD(helpers);
unsigned int ip_conntrack_htable_size __read_mostly = 0;
int ip_conntrack_max __read_mostly;
struct list_head *ip_conntrack_hash __read_mostly;
-static kmem_cache_t *ip_conntrack_cachep __read_mostly;
-static kmem_cache_t *ip_conntrack_expect_cachep __read_mostly;
+static struct kmem_cache *ip_conntrack_cachep __read_mostly;
+static struct kmem_cache *ip_conntrack_expect_cachep __read_mostly;
struct ip_conntrack ip_conntrack_untracked;
unsigned int ip_ct_log_invalid __read_mostly;
static LIST_HEAD(unconfirmed);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6dddf59c1fb9..4a3889dd1943 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -45,8 +45,7 @@ struct inet_timewait_death_row tcp_death_row = {
.tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
(unsigned long)&tcp_death_row),
.twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work,
- inet_twdr_twkill_work,
- &tcp_death_row),
+ inet_twdr_twkill_work),
/* Short-time timewait calendar */
.twcal_hand = -1,
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 87c8f54872b7..e5cd83b2205d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -720,10 +720,8 @@ snmp6_mib_free(void *ptr[2])
{
if (ptr == NULL)
return;
- if (ptr[0])
- free_percpu(ptr[0]);
- if (ptr[1])
- free_percpu(ptr[1]);
+ free_percpu(ptr[0]);
+ free_percpu(ptr[1]);
ptr[0] = ptr[1] = NULL;
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index bf526115e518..96d8310ae9c8 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -50,7 +50,7 @@
struct rt6_statistics rt6_stats;
-static kmem_cache_t * fib6_node_kmem __read_mostly;
+static struct kmem_cache * fib6_node_kmem __read_mostly;
enum fib_walk_state_t
{
@@ -150,7 +150,7 @@ static __inline__ struct fib6_node * node_alloc(void)
{
struct fib6_node *fn;
- if ((fn = kmem_cache_alloc(fib6_node_kmem, SLAB_ATOMIC)) != NULL)
+ if ((fn = kmem_cache_alloc(fib6_node_kmem, GFP_ATOMIC)) != NULL)
memset(fn, 0, sizeof(struct fib6_node));
return fn;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 01a5c52a2be3..12e426b9aacd 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -50,7 +50,7 @@ static u32 xfrm6_tunnel_spi;
#define XFRM6_TUNNEL_SPI_MIN 1
#define XFRM6_TUNNEL_SPI_MAX 0xffffffff
-static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly;
+static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
@@ -180,7 +180,7 @@ try_next_2:;
spi = 0;
goto out;
alloc_spi:
- x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC);
+ x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
if (!x6spi)
goto out;
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index d50a02030ad7..262bda808d96 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -61,7 +61,7 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty);
static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch);
static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout);
static void ircomm_tty_hangup(struct tty_struct *tty);
-static void ircomm_tty_do_softint(void *private_);
+static void ircomm_tty_do_softint(struct work_struct *work);
static void ircomm_tty_shutdown(struct ircomm_tty_cb *self);
static void ircomm_tty_stop(struct tty_struct *tty);
@@ -389,7 +389,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
self->flow = FLOW_STOP;
self->line = line;
- INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self);
+ INIT_WORK(&self->tqueue, ircomm_tty_do_softint);
self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED;
self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED;
self->close_delay = 5*HZ/10;
@@ -594,15 +594,16 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty)
}
/*
- * Function ircomm_tty_do_softint (private_)
+ * Function ircomm_tty_do_softint (work)
*
* We use this routine to give the write wakeup to the user at at a
* safe time (as fast as possible after write have completed). This
* can be compared to the Tx interrupt.
*/
-static void ircomm_tty_do_softint(void *private_)
+static void ircomm_tty_do_softint(struct work_struct *work)
{
- struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_;
+ struct ircomm_tty_cb *self =
+ container_of(work, struct ircomm_tty_cb, tqueue);
struct tty_struct *tty;
unsigned long flags;
struct sk_buff *skb, *ctrl_skb;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index eaa0f8a1adb6..a9638ff52a72 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -108,7 +108,7 @@ static struct {
size_t size;
/* slab cache pointer */
- kmem_cache_t *cachep;
+ struct kmem_cache *cachep;
/* allocated slab cache + modules which uses this slab cache */
int use;
@@ -147,7 +147,7 @@ int nf_conntrack_register_cache(u_int32_t features, const char *name,
{
int ret = 0;
char *cache_name;
- kmem_cache_t *cachep;
+ struct kmem_cache *cachep;
DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n",
features, name, size);
@@ -226,7 +226,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_register_cache);
/* FIXME: In the current, only nf_conntrack_cleanup() can call this function. */
void nf_conntrack_unregister_cache(u_int32_t features)
{
- kmem_cache_t *cachep;
+ struct kmem_cache *cachep;
char *name;
/*
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 588d37937046..c20f901fa177 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -29,7 +29,7 @@
LIST_HEAD(nf_conntrack_expect_list);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_list);
-kmem_cache_t *nf_conntrack_expect_cachep __read_mostly;
+struct kmem_cache *nf_conntrack_expect_cachep __read_mostly;
static unsigned int nf_conntrack_expect_next_id;
/* nf_conntrack_expect helper functions */
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index a98de0b54d65..a5a6e192ac2d 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -92,7 +92,7 @@ struct xt_hashlimit_htable {
static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */
static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */
static HLIST_HEAD(hashlimit_htables);
-static kmem_cache_t *hashlimit_cachep __read_mostly;
+static struct kmem_cache *hashlimit_cachep __read_mostly;
static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b)
{
diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c
index dada34a77b21..49effd92144e 100644
--- a/net/rxrpc/krxiod.c
+++ b/net/rxrpc/krxiod.c
@@ -13,6 +13,7 @@
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/init.h>
+#include <linux/freezer.h>
#include <rxrpc/krxiod.h>
#include <rxrpc/transport.h>
#include <rxrpc/peer.h>
diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c
index cea4eb5e2497..3ab0f77409f4 100644
--- a/net/rxrpc/krxsecd.c
+++ b/net/rxrpc/krxsecd.c
@@ -27,6 +27,7 @@
#include <rxrpc/call.h>
#include <linux/udp.h>
#include <linux/ip.h>
+#include <linux/freezer.h>
#include <net/sock.h>
#include "internal.h"
diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c
index 3e7466900bd4..9a9b6132dba4 100644
--- a/net/rxrpc/krxtimod.c
+++ b/net/rxrpc/krxtimod.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/completion.h>
+#include <linux/freezer.h>
#include <rxrpc/rxrpc.h>
#include <rxrpc/krxtimod.h>
#include <asm/errno.h>
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 39471d3b31b9..ad0057db0f91 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -61,7 +61,7 @@
#include <net/sctp/sm.h>
/* Forward declarations for internal functions. */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc);
+static void sctp_assoc_bh_rcv(struct work_struct *work);
/* 1st Level Abstractions. */
@@ -269,9 +269,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
/* Create an input queue. */
sctp_inq_init(&asoc->base.inqueue);
- sctp_inq_set_th_handler(&asoc->base.inqueue,
- (void (*)(void *))sctp_assoc_bh_rcv,
- asoc);
+ sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
/* Create an output queue. */
sctp_outq_init(asoc, &asoc->outqueue);
@@ -946,8 +944,11 @@ out:
}
/* Do delayed input processing. This is scheduled by sctp_rcv(). */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc)
+static void sctp_assoc_bh_rcv(struct work_struct *work)
{
+ struct sctp_association *asoc =
+ container_of(work, struct sctp_association,
+ base.inqueue.immediate);
struct sctp_endpoint *ep;
struct sctp_chunk *chunk;
struct sock *sk;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 33a42e90c32f..129756908da4 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -61,7 +61,7 @@
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers. */
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep);
+static void sctp_endpoint_bh_rcv(struct work_struct *work);
/*
* Initialize the base fields of the endpoint structure.
@@ -89,8 +89,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
sctp_inq_init(&ep->base.inqueue);
/* Set its top-half handler */
- sctp_inq_set_th_handler(&ep->base.inqueue,
- (void (*)(void *))sctp_endpoint_bh_rcv, ep);
+ sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
/* Initialize the bind addr area */
sctp_bind_addr_init(&ep->base.bind_addr, 0);
@@ -318,8 +317,11 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
/* Do delayed input processing. This is scheduled by sctp_rcv().
* This may be called on BH or task time.
*/
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep)
+static void sctp_endpoint_bh_rcv(struct work_struct *work)
{
+ struct sctp_endpoint *ep =
+ container_of(work, struct sctp_endpoint,
+ base.inqueue.immediate);
struct sctp_association *asoc;
struct sock *sk;
struct sctp_transport *transport;
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index cf6deed7e849..71b07466e880 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -54,7 +54,7 @@ void sctp_inq_init(struct sctp_inq *queue)
queue->in_progress = NULL;
/* Create a task for delivering data. */
- INIT_WORK(&queue->immediate, NULL, NULL);
+ INIT_WORK(&queue->immediate, NULL);
queue->malloced = 0;
}
@@ -97,7 +97,7 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
* on the BH related data structures.
*/
list_add_tail(&chunk->list, &q->in_chunk_list);
- q->immediate.func(q->immediate.data);
+ q->immediate.func(&q->immediate);
}
/* Extract a chunk from an SCTP inqueue.
@@ -205,9 +205,8 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
* The intent is that this routine will pull stuff out of the
* inqueue and process it.
*/
-void sctp_inq_set_th_handler(struct sctp_inq *q,
- void (*callback)(void *), void *arg)
+void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
{
- INIT_WORK(&q->immediate, callback, arg);
+ INIT_WORK(&q->immediate, callback);
}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 11f3b549f4a4..f2ba8615895b 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -79,8 +79,8 @@ static struct sctp_pf *sctp_pf_inet_specific;
static struct sctp_af *sctp_af_v4_specific;
static struct sctp_af *sctp_af_v6_specific;
-kmem_cache_t *sctp_chunk_cachep __read_mostly;
-kmem_cache_t *sctp_bucket_cachep __read_mostly;
+struct kmem_cache *sctp_chunk_cachep __read_mostly;
+struct kmem_cache *sctp_bucket_cachep __read_mostly;
/* Return the address of the control sock. */
struct sock *sctp_get_ctl_sock(void)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 04954e5f6846..30927d3a597f 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -65,7 +65,7 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
-extern kmem_cache_t *sctp_chunk_cachep;
+extern struct kmem_cache *sctp_chunk_cachep;
SCTP_STATIC
struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
@@ -979,7 +979,7 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
{
struct sctp_chunk *retval;
- retval = kmem_cache_alloc(sctp_chunk_cachep, SLAB_ATOMIC);
+ retval = kmem_cache_alloc(sctp_chunk_cachep, GFP_ATOMIC);
if (!retval)
goto nodata;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 02b27145b279..1e8132b8c4d9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -107,7 +107,7 @@ static void sctp_sock_migrate(struct sock *, struct sock *,
struct sctp_association *, sctp_socket_type_t);
static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
-extern kmem_cache_t *sctp_bucket_cachep;
+extern struct kmem_cache *sctp_bucket_cachep;
/* Get the sndbuf space available at the time on the association. */
static inline int sctp_wspace(struct sctp_association *asoc)
@@ -4989,7 +4989,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
{
struct sctp_bind_bucket *pp;
- pp = kmem_cache_alloc(sctp_bucket_cachep, SLAB_ATOMIC);
+ pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
SCTP_DBG_OBJCNT_INC(bind_bucket);
if (pp) {
pp->port = snum;
diff --git a/net/socket.c b/net/socket.c
index e8db54702a69..29ea1de43ecb 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -230,13 +230,13 @@ int move_addr_to_user(void *kaddr, int klen, void __user *uaddr,
#define SOCKFS_MAGIC 0x534F434B
-static kmem_cache_t *sock_inode_cachep __read_mostly;
+static struct kmem_cache *sock_inode_cachep __read_mostly;
static struct inode *sock_alloc_inode(struct super_block *sb)
{
struct socket_alloc *ei;
- ei = kmem_cache_alloc(sock_inode_cachep, SLAB_KERNEL);
+ ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
init_waitqueue_head(&ei->socket.wait);
@@ -257,7 +257,7 @@ static void sock_destroy_inode(struct inode *inode)
container_of(inode, struct socket_alloc, vfs_inode));
}
-static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
struct socket_alloc *ei = (struct socket_alloc *)foo;
@@ -305,7 +305,14 @@ static struct file_system_type sock_fs_type = {
static int sockfs_delete_dentry(struct dentry *dentry)
{
- return 1;
+ /*
+ * At creation time, we pretended this dentry was hashed
+ * (by clearing DCACHE_UNHASHED bit in d_flags)
+ * At delete time, we restore the truth : not hashed.
+ * (so that dput() can proceed correctly)
+ */
+ dentry->d_flags |= DCACHE_UNHASHED;
+ return 0;
}
static struct dentry_operations sockfs_dentry_operations = {
.d_delete = sockfs_delete_dentry,
@@ -353,14 +360,20 @@ static int sock_attach_fd(struct socket *sock, struct file *file)
this.len = sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino);
this.name = name;
- this.hash = SOCK_INODE(sock)->i_ino;
+ this.hash = 0;
file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this);
if (unlikely(!file->f_dentry))
return -ENOMEM;
file->f_dentry->d_op = &sockfs_dentry_operations;
- d_add(file->f_dentry, SOCK_INODE(sock));
+ /*
+ * We dont want to push this dentry into global dentry hash table.
+ * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED
+ * This permits a working /proc/$pid/fd/XXX on sockets
+ */
+ file->f_dentry->d_flags &= ~DCACHE_UNHASHED;
+ d_instantiate(file->f_dentry, SOCK_INODE(sock));
file->f_vfsmnt = mntget(sock_mnt);
file->f_mapping = file->f_dentry->d_inode->i_mapping;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 00cb388ece03..d96fd466a9a4 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -284,8 +284,8 @@ static struct file_operations cache_file_operations;
static struct file_operations content_file_operations;
static struct file_operations cache_flush_operations;
-static void do_cache_clean(void *data);
-static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
+static void do_cache_clean(struct work_struct *work);
+static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);
void cache_register(struct cache_detail *cd)
{
@@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd)
spin_unlock(&cache_list_lock);
/* start the cleaning process */
- schedule_work(&cache_cleaner);
+ schedule_delayed_work(&cache_cleaner, 0);
}
int cache_unregister(struct cache_detail *cd)
@@ -461,7 +461,7 @@ static int cache_clean(void)
/*
* We want to regularly clean the cache, so we need to schedule some work ...
*/
-static void do_cache_clean(void *data)
+static void do_cache_clean(struct work_struct *work)
{
int delay = 5;
if (cache_clean() == -1)
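For reference, the self-rescheduling shape the cache cleaner takes after this conversion, as a minimal sketch with invented names (delays are in jiffies):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void example_clean(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_cleaner, example_clean);

static void example_clean(struct work_struct *work)
{
	/* ... do one pass of cleaning ... */

	/* re-arm ourselves for the next pass */
	schedule_delayed_work(&example_cleaner, 5 * HZ);
}

static void example_start(void)
{
	/* a delay of 0 runs the first pass as soon as possible,
	 * which is what schedule_delayed_work(..., 0) does above */
	schedule_delayed_work(&example_cleaner, 0);
}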
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9a0b41a97f90..19703aa9659e 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -33,7 +33,7 @@ static int rpc_mount_count;
static struct file_system_type rpc_pipe_fs_type;
-static kmem_cache_t *rpc_inode_cachep __read_mostly;
+static struct kmem_cache *rpc_inode_cachep __read_mostly;
#define RPC_UPCALL_TIMEOUT (30*HZ)
@@ -54,10 +54,11 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
}
static void
-rpc_timeout_upcall_queue(void *data)
+rpc_timeout_upcall_queue(struct work_struct *work)
{
LIST_HEAD(free_list);
- struct rpc_inode *rpci = (struct rpc_inode *)data;
+ struct rpc_inode *rpci =
+ container_of(work, struct rpc_inode, queue_timeout.work);
struct inode *inode = &rpci->vfs_inode;
void (*destroy_msg)(struct rpc_pipe_msg *);
@@ -142,7 +143,7 @@ static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
struct rpc_inode *rpci;
- rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, SLAB_KERNEL);
+ rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
if (!rpci)
return NULL;
return &rpci->vfs_inode;
@@ -823,7 +824,7 @@ static struct file_system_type rpc_pipe_fs_type = {
};
static void
-init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct rpc_inode *rpci = (struct rpc_inode *) foo;
@@ -837,7 +838,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
INIT_LIST_HEAD(&rpci->pipe);
rpci->pipelen = 0;
init_waitqueue_head(&rpci->waitq);
- INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
+ INIT_DELAYED_WORK(&rpci->queue_timeout,
+ rpc_timeout_upcall_queue);
rpci->ops = NULL;
}
}
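The container_of() dance above is the general replacement for the old void *data cookie; a hedged, self-contained sketch of the pattern (invented names) for a delayed-work member, where the work_struct sits at .work inside struct delayed_work:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_ctx {
	int pending;
	struct delayed_work timeout;	/* embeds a work_struct at .work */
};

static void example_timeout_fn(struct work_struct *work)
{
	/* no void * argument any more: recover the owning object
	 * from the embedded work_struct via container_of() */
	struct example_ctx *ctx =
		container_of(work, struct example_ctx, timeout.work);

	ctx->pending = 0;
}

static void example_setup(struct example_ctx *ctx)
{
	ctx->pending = 1;
	INIT_DELAYED_WORK(&ctx->timeout, example_timeout_fn);
	schedule_delayed_work(&ctx->timeout, HZ);
}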
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index a1ab4eed41f4..225e6510b523 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -34,14 +34,14 @@ static int rpc_task_id;
#define RPC_BUFFER_MAXSIZE (2048)
#define RPC_BUFFER_POOLSIZE (8)
#define RPC_TASK_POOLSIZE (8)
-static kmem_cache_t *rpc_task_slabp __read_mostly;
-static kmem_cache_t *rpc_buffer_slabp __read_mostly;
+static struct kmem_cache *rpc_task_slabp __read_mostly;
+static struct kmem_cache *rpc_buffer_slabp __read_mostly;
static mempool_t *rpc_task_mempool __read_mostly;
static mempool_t *rpc_buffer_mempool __read_mostly;
static void __rpc_default_timer(struct rpc_task *task);
static void rpciod_killall(void);
-static void rpc_async_schedule(void *);
+static void rpc_async_schedule(struct work_struct *);
/*
* RPC tasks sit here while waiting for conditions to improve.
@@ -305,7 +305,7 @@ static void rpc_make_runnable(struct rpc_task *task)
if (RPC_IS_ASYNC(task)) {
int status;
- INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
+ INIT_WORK(&task->u.tk_work, rpc_async_schedule);
status = queue_work(task->tk_workqueue, &task->u.tk_work);
if (status < 0) {
printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
@@ -695,9 +695,9 @@ rpc_execute(struct rpc_task *task)
return __rpc_execute(task);
}
-static void rpc_async_schedule(void *arg)
+static void rpc_async_schedule(struct work_struct *work)
{
- __rpc_execute((struct rpc_task *)arg);
+ __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}
/**
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index ee9bb1522d5e..c7bb5f7f21a5 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -119,7 +119,8 @@ EXPORT_SYMBOL(svc_auth_unregister);
#define DN_HASHMASK (DN_HASHMAX-1)
static struct hlist_head auth_domain_table[DN_HASHMAX];
-static spinlock_t auth_domain_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t auth_domain_lock =
+ __SPIN_LOCK_UNLOCKED(auth_domain_lock);
void auth_domain_put(struct auth_domain *dom)
{
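A quick hedged note on the initializer swap above: the bare SPIN_LOCK_UNLOCKED initializer gives every such lock the same lockdep class, so it was being phased out in favour of initializers that name the lock. Sketch with invented names:

#include <linux/spinlock.h>

/* old form, shared lockdep class for every user:
 *	static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;
 * new form, the lock gets its own class keyed on its name: */
static spinlock_t example_lock = __SPIN_LOCK_UNLOCKED(example_lock);

/* DEFINE_SPINLOCK() is the usual shorthand for the same thing */
static DEFINE_SPINLOCK(example_other_lock);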
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 64ca1f61dd94..99f54fb6d669 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -32,6 +32,7 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
+#include <linux/freezer.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
@@ -84,6 +85,35 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req);
*/
static int svc_conn_age_period = 6*60;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key svc_key[2];
+static struct lock_class_key svc_slock_key[2];
+
+static inline void svc_reclassify_socket(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ BUG_ON(sk->sk_lock.owner != NULL);
+ switch (sk->sk_family) {
+ case AF_INET:
+ sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
+ &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
+ break;
+
+ case AF_INET6:
+ sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
+ &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
+ break;
+
+ default:
+ BUG();
+ }
+}
+#else
+static inline void svc_reclassify_socket(struct socket *sock)
+{
+}
+#endif
+
/*
* Queue up an idle server thread. Must have pool->sp_lock held.
* Note: this is really a stack rather than a queue, so that we only
@@ -1556,6 +1586,8 @@ svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
return error;
+ svc_reclassify_socket(sock);
+
if (type == SOCK_STREAM)
sock->sk->sk_reuse = 1; /* allow address reuse */
error = kernel_bind(sock, (struct sockaddr *) sin,
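The reclassification helper added above rests on the same lockdep primitive that reclassifies any individual lock instance; a hedged, generic sketch of that underlying idea (invented names, CONFIG_DEBUG_LOCK_ALLOC assumed):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

/* Give one particular lock instance its own lockdep class and name,
 * so nesting it under a lock of the default class is not reported as
 * recursive locking; this is what sock_lock_init_class_and_name()
 * arranges for sk_lock and slock in the hunk above. */
static struct lock_class_key example_nested_key;

static void example_reclassify(spinlock_t *lock)
{
	lockdep_set_class_and_name(lock, &example_nested_key,
				   "example-nested-lock");
}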
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 80857470dc11..4f9a5d9791fb 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -479,9 +479,10 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
return status;
}
-static void xprt_autoclose(void *args)
+static void xprt_autoclose(struct work_struct *work)
{
- struct rpc_xprt *xprt = (struct rpc_xprt *)args;
+ struct rpc_xprt *xprt =
+ container_of(work, struct rpc_xprt, task_cleanup);
xprt_disconnect(xprt);
xprt->ops->close(xprt);
@@ -932,7 +933,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si
INIT_LIST_HEAD(&xprt->free);
INIT_LIST_HEAD(&xprt->recv);
- INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt);
+ INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
init_timer(&xprt->timer);
xprt->timer.function = xprt_init_autodisconnect;
xprt->timer.data = (unsigned long) xprt;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 757fc91ef25d..2fc4a3123261 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1058,15 +1058,45 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
return err;
}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key xs_key[2];
+static struct lock_class_key xs_slock_key[2];
+
+static inline void xs_reclassify_socket(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ BUG_ON(sk->sk_lock.owner != NULL);
+ switch (sk->sk_family) {
+ case AF_INET:
+ sock_lock_init_class_and_name(sk, "slock-AF_INET-NFS",
+ &xs_slock_key[0], "sk_lock-AF_INET-NFS", &xs_key[0]);
+ break;
+
+ case AF_INET6:
+ sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFS",
+ &xs_slock_key[1], "sk_lock-AF_INET6-NFS", &xs_key[1]);
+ break;
+
+ default:
+ BUG();
+ }
+}
+#else
+static inline void xs_reclassify_socket(struct socket *sock)
+{
+}
+#endif
+
/**
* xs_udp_connect_worker - set up a UDP socket
- * @args: RPC transport to connect
+ * @work: RPC transport to connect
*
* Invoked by a work queue tasklet.
*/
-static void xs_udp_connect_worker(void *args)
+static void xs_udp_connect_worker(struct work_struct *work)
{
- struct rpc_xprt *xprt = (struct rpc_xprt *) args;
+ struct rpc_xprt *xprt =
+ container_of(work, struct rpc_xprt, connect_worker.work);
struct socket *sock = xprt->sock;
int err, status = -EIO;
@@ -1080,6 +1110,7 @@ static void xs_udp_connect_worker(void *args)
dprintk("RPC: can't create UDP transport socket (%d).\n", -err);
goto out;
}
+ xs_reclassify_socket(sock);
if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
sock_release(sock);
@@ -1144,13 +1175,14 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
/**
* xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
- * @args: RPC transport to connect
+ * @work: RPC transport to connect
*
* Invoked by a work queue tasklet.
*/
-static void xs_tcp_connect_worker(void *args)
+static void xs_tcp_connect_worker(struct work_struct *work)
{
- struct rpc_xprt *xprt = (struct rpc_xprt *)args;
+ struct rpc_xprt *xprt =
+ container_of(work, struct rpc_xprt, connect_worker.work);
struct socket *sock = xprt->sock;
int err, status = -EIO;
@@ -1163,6 +1195,7 @@ static void xs_tcp_connect_worker(void *args)
dprintk("RPC: can't create TCP transport socket (%d).\n", -err);
goto out;
}
+ xs_reclassify_socket(sock);
if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
sock_release(sock);
@@ -1262,7 +1295,7 @@ static void xs_connect(struct rpc_task *task)
xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
} else {
dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
- schedule_work(&xprt->connect_worker);
+ schedule_delayed_work(&xprt->connect_worker, 0);
/* flush_scheduled_work can sleep... */
if (!RPC_IS_ASYNC(task))
@@ -1375,7 +1408,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
/* XXX: header size can vary due to auth type, IPv6, etc. */
xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
- INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+ INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker);
xprt->bind_timeout = XS_BIND_TO;
xprt->connect_timeout = XS_UDP_CONN_TO;
xprt->reestablish_timeout = XS_UDP_REEST_TO;
@@ -1420,7 +1453,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
- INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+ INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker);
xprt->bind_timeout = XS_BIND_TO;
xprt->connect_timeout = XS_TCP_CONN_TO;
xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index ae6ddf00a1aa..eb80778d6d9c 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -42,7 +42,7 @@ struct queue_item {
unsigned long data;
};
-static kmem_cache_t *tipc_queue_item_cache;
+static struct kmem_cache *tipc_queue_item_cache;
static struct list_head signal_queue_head;
static DEFINE_SPINLOCK(qitem_lock);
static int handler_enabled = 0;
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index e8198a2c785d..414f89070380 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -12,7 +12,7 @@
#include <net/ip.h>
#include <net/xfrm.h>
-static kmem_cache_t *secpath_cachep __read_mostly;
+static struct kmem_cache *secpath_cachep __read_mostly;
void __secpath_destroy(struct sec_path *sp)
{
@@ -27,7 +27,7 @@ struct sec_path *secpath_dup(struct sec_path *src)
{
struct sec_path *sp;
- sp = kmem_cache_alloc(secpath_cachep, SLAB_ATOMIC);
+ sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
if (!sp)
return NULL;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 64d3938f74c4..3f3f563eb4ab 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -39,7 +39,7 @@ EXPORT_SYMBOL(xfrm_policy_count);
static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
-static kmem_cache_t *xfrm_dst_cache __read_mostly;
+static struct kmem_cache *xfrm_dst_cache __read_mostly;
static struct work_struct xfrm_policy_gc_work;
static HLIST_HEAD(xfrm_policy_gc_list);
@@ -392,7 +392,7 @@ static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
xfrm_pol_put(policy);
}
-static void xfrm_policy_gc_task(void *data)
+static void xfrm_policy_gc_task(struct work_struct *work)
{
struct xfrm_policy *policy;
struct hlist_node *entry, *tmp;
@@ -580,7 +580,7 @@ static inline int xfrm_byidx_should_resize(int total)
static DEFINE_MUTEX(hash_resize_mutex);
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
{
int dir, total;
@@ -597,7 +597,7 @@ static void xfrm_hash_resize(void *__unused)
mutex_unlock(&hash_resize_mutex);
}
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
/* Generate new index... KAME seems to generate them ordered by cost
* of an absolute inpredictability of ordering of rules. This will not pass. */
@@ -2116,7 +2116,7 @@ static void __init xfrm_policy_init(void)
panic("XFRM: failed to allocate bydst hash\n");
}
- INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
+ INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
register_netdevice_notifier(&xfrm_dev_notifier);
}
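Like the policy GC task converted above, the data-less DECLARE_WORK() form means the handler finds its state through file-scope variables rather than a void * cookie. A rough, hedged sketch (invented names, 2.6-era hlist iterators) of the steal-the-list-then-free shape such GC tasks use:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_obj {
	struct hlist_node gc_node;
};

static HLIST_HEAD(example_gc_list);
static DEFINE_SPINLOCK(example_gc_lock);

static void example_gc_task(struct work_struct *unused)
{
	struct example_obj *obj;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	/* detach the pending entries under the lock ... */
	spin_lock_bh(&example_gc_lock);
	gc_list.first = example_gc_list.first;
	INIT_HLIST_HEAD(&example_gc_list);
	spin_unlock_bh(&example_gc_lock);

	/* ... then free them without holding it */
	hlist_for_each_entry_safe(obj, entry, tmp, &gc_list, gc_node)
		kfree(obj);
}

static DECLARE_WORK(example_gc_work, example_gc_task);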
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 864962bbda90..da54a64ccfa3 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -115,7 +115,7 @@ static unsigned long xfrm_hash_new_size(void)
static DEFINE_MUTEX(hash_resize_mutex);
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
{
struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
unsigned long nsize, osize;
@@ -168,7 +168,7 @@ out_unlock:
mutex_unlock(&hash_resize_mutex);
}
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);
@@ -207,7 +207,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
kfree(x);
}
-static void xfrm_state_gc_task(void *data)
+static void xfrm_state_gc_task(struct work_struct *data)
{
struct xfrm_state *x;
struct hlist_node *entry, *tmp;
@@ -1568,6 +1568,6 @@ void __init xfrm_state_init(void)
panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
- INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
+ INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}