author    Linus Torvalds <torvalds@linux-foundation.org>  2009-07-09 20:33:18 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-07-09 20:33:18 -0700
commit    e864561c12bc743789eab76242b8314d394daa52 (patch)
tree      5f9bf951fd8e713b224062a37b2a0b24c14f17ce /include
parent    42359da44112565e12a5209befb36dc6b6d6cd9c (diff)
parent    e594e96e8a14101a6decabf6746bd5186287debc (diff)
download  linux-e864561c12bc743789eab76242b8314d394daa52.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (29 commits)
  cxgb3: Fix crash caused by stashing wrong netdev_queue
  ixgbe: Fix coexistence of FCoE and Flow Director in 82599
  memory barrier: adding smp_mb__after_lock
  net: adding memory barrier to the poll and receive callbacks
  netpoll: Fix carrier detection for drivers that are using phylib
  includecheck fix: include/linux, rfkill.h
  p54: tx refused but queue active
  Atheros Kconfig needs to be dependent on WLAN_80211
  mac80211: fix docbook
  mac80211_hwsim: avoid NULL access
  ssb: Add support for 4318E
  b43: Add support for 4318E
  zd1211rw: adding SONY IFU-WLM2 (054c:0257) as a zd1211b device
  zd1211rw: 07b8:6001 is a ZD1211B
  r6040: bump driver version to 0.24 and date to 08 July 2009
  r6040: restore MIER register correctly when IRQ line is shared
  ipv4: Fix fib_trie rebalancing, part 4 (root thresholds)
  davinci_emac: fix kernel oops when changing MAC address while interface is down
  igb: set lan id prior to configuring phy
  mac80211: minstrel: avoid accessing negative indices in rix_to_ndx()
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/linux/rfkill.h     1
-rw-r--r--  include/linux/spinlock.h   5
-rw-r--r--  include/net/sock.h        69
3 files changed, 74 insertions, 1 deletions
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index e73e2429a1b1..2ce29831feb6 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -99,7 +99,6 @@ enum rfkill_user_states {
#undef RFKILL_STATE_UNBLOCKED
#undef RFKILL_STATE_HARD_BLOCKED
-#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 252b245cfcf4..4be57ab03478 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -132,6 +132,11 @@ do { \
#endif /*__raw_spin_is_contended*/
#endif
+/* The lock does not imply a full memory barrier. */
+#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
+static inline void smp_mb__after_lock(void) { smp_mb(); }
+#endif
+
/**
* spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
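
The smp_mb__after_lock() helper added above is intended to sit directly after a lock acquisition when the lock itself is not guaranteed to act as a full memory barrier; an architecture whose locks already are full barriers can define ARCH_HAS_SMP_MB_AFTER_LOCK and provide a lighter version. A minimal sketch of the intended usage, assuming a caller that checks a wait queue after taking a reader lock (the function and parameter names are placeholders, not part of this patch):

	/* Placeholder example, not part of the patch: wake sleepers after
	 * taking a lock that may not order the subsequent loads. */
	static void example_wake_readers(rwlock_t *lock, wait_queue_head_t *wq)
	{
		read_lock(lock);
		smp_mb__after_lock();	/* pairs with an smp_mb() on the sleeping side */
		if (waitqueue_active(wq))
			wake_up_interruptible(wq);
		read_unlock(lock);
	}
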
diff --git a/include/net/sock.h b/include/net/sock.h
index 352f06bbd7a9..2c0da9239b95 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -54,6 +54,7 @@
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
+#include <linux/poll.h>
#include <asm/atomic.h>
#include <net/dst.h>
@@ -1241,6 +1242,74 @@ static inline int sk_has_allocations(const struct sock *sk)
return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}
+/**
+ * sk_has_sleeper - check if there are any waiting processes
+ * @sk: socket
+ *
+ * Returns true if socket has waiting processes
+ *
+ * The purpose of sk_has_sleeper and sock_poll_wait is to wrap the memory
+ * barrier call. They were added because of a race found in the tcp code.
+ *
+ * Consider the following tcp code paths:
+ *
+ * CPU1                             CPU2
+ *
+ * sys_select                       receive packet
+ *   ...                            ...
+ *   __add_wait_queue               update tp->rcv_nxt
+ *   ...                            ...
+ *   tp->rcv_nxt check              sock_def_readable
+ *   ...                            {
+ *   schedule                         ...
+ *                                    if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ *                                        wake_up_interruptible(sk->sk_sleep)
+ *                                    ...
+ *                                  }
+ *
+ * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
+ * in its cache and the tp->rcv_nxt update stays in CPU2's cache. CPU1 could
+ * then end up calling schedule and sleeping forever if there is no more
+ * data on the socket.
+ *
+ * sk_has_sleeper is always called right after a call to read_lock, so we
+ * can use the smp_mb__after_lock barrier.
+ */
+static inline int sk_has_sleeper(struct sock *sk)
+{
+ /*
+ * We need to be sure we are in sync with the
+ * add_wait_queue modifications to the wait queue.
+ *
+ * This memory barrier pairs with the one in sock_poll_wait.
+ */
+ smp_mb__after_lock();
+ return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
+}
+
+/**
+ * sock_poll_wait - place a memory barrier behind the poll_wait call.
+ * @filp: file
+ * @wait_address: socket wait queue
+ * @p: poll_table
+ *
+ * See the comments in the sk_has_sleeper function.
+ */
+static inline void sock_poll_wait(struct file *filp,
+ wait_queue_head_t *wait_address, poll_table *p)
+{
+ if (p && wait_address) {
+ poll_wait(filp, wait_address, p);
+ /*
+ * We need to be sure we are in sync with the
+ * socket flags modification.
+ *
+ * This memory barrier pairs with the one in sk_has_sleeper.
+ */
+ smp_mb();
+ }
+}
+
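
A minimal sketch of how the two helpers above are meant to pair up, following the scenario described in the sk_has_sleeper comment: the wakeup path takes sk_callback_lock and relies on the smp_mb__after_lock() inside sk_has_sleeper(), while the poll path relies on the smp_mb() inside sock_poll_wait(), so neither side can miss the other's update. The function names below are illustrative only; the actual conversions of the receive and poll callbacks happen outside include/ and may differ in detail:

	/* Wakeup side, called when data arrives (illustrative name). */
	static void example_data_ready(struct sock *sk, int len)
	{
		read_lock(&sk->sk_callback_lock);
		if (sk_has_sleeper(sk))		/* smp_mb__after_lock() inside */
			wake_up_interruptible(sk->sk_sleep);
		read_unlock(&sk->sk_callback_lock);
	}

	/* Poll side, called from a socket's ->poll() operation (illustrative name). */
	static unsigned int example_poll(struct file *file, struct socket *sock,
					 poll_table *wait)
	{
		struct sock *sk = sock->sk;
		unsigned int mask = 0;

		sock_poll_wait(file, sk->sk_sleep, wait);	/* smp_mb() inside */
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= POLLIN | POLLRDNORM;
		return mask;
	}
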
/*
* Queue a received datagram if it will fit. Stream and sequenced
* protocols can't normally use this as they need to fit buffers in