path: root/net/rds/iw_recv.c
author		Andy Grover <andy.grover@oracle.com>	2009-04-01 08:20:20 +0000
committer	David S. Miller <davem@davemloft.net>	2009-04-02 00:52:22 -0700
commit		8cbd9606a6367c221a7bbcc47f3ab1a8c31b6437 (patch)
tree		9ff2adf88be32383fd59dc133f1cd6670b9ee815 /net/rds/iw_recv.c
parent		745cbccac3fe8cead529a1b3358e1e86a1505bfa (diff)
download	linux-8cbd9606a6367c221a7bbcc47f3ab1a8c31b6437.tar.bz2
RDS: Use spinlock to protect 64b value update on 32b archs
We have a 64bit value that needs to be set atomically. This is easy and quick on all 64bit archs, and can also be done on x86/32 with set_64bit() (uses cmpxchg8b). However other 32b archs don't have this.

I actually changed this to the current state in preparation for mainline because the old way (using a spinlock on 32b) resulted in unsightly #ifdefs in the code. But obviously, being correct takes precedence.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
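For illustration only (not part of the patch): a minimal user-space sketch of the same idea, assuming POSIX threads. The names ack_state, ack_init, ack_set and ack_get are hypothetical stand-ins for ic->i_ack_lock, ic->i_ack_next and rds_iw_set_ack()/rds_iw_get_ack(); the point is that on a 32-bit target a plain 64-bit store or load may be split into two 32-bit accesses, so writer and reader must take the same lock.

/* Illustrative sketch, not kernel code: protect a 64-bit value with a
 * spinlock so a 32-bit machine cannot observe a torn read. */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct ack_state {
	pthread_spinlock_t lock;	/* plays the role of ic->i_ack_lock */
	uint64_t next;			/* plays the role of ic->i_ack_next */
	bool required;			/* plays the role of IB_ACK_REQUESTED */
};

static void ack_init(struct ack_state *a)
{
	pthread_spin_init(&a->lock, PTHREAD_PROCESS_PRIVATE);
	a->next = 0;
	a->required = false;
}

static void ack_set(struct ack_state *a, uint64_t seq, bool required)
{
	pthread_spin_lock(&a->lock);
	a->next = seq;			/* 64-bit store, safe under the lock */
	if (required)
		a->required = true;
	pthread_spin_unlock(&a->lock);
}

static uint64_t ack_get(struct ack_state *a)
{
	uint64_t seq;

	pthread_spin_lock(&a->lock);
	a->required = false;
	seq = a->next;			/* 64-bit load, safe under the lock */
	pthread_spin_unlock(&a->lock);
	return seq;
}

The kernel version clears the IB_ACK_REQUESTED bit with clear_bit() outside the lock; the sketch folds it under the lock purely to keep the example short.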
Diffstat (limited to 'net/rds/iw_recv.c')
-rw-r--r--	net/rds/iw_recv.c	37
1 file changed, 35 insertions, 2 deletions
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index a1931f0027a2..fde470fa50d5 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -395,10 +395,37 @@ void rds_iw_recv_init_ack(struct rds_iw_connection *ic)
  * room for it beyond the ring size. Send completion notices its special
  * wr_id and avoids working with the ring in that case.
  */
+#ifndef KERNEL_HAS_ATOMIC64
 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
 				int ack_required)
 {
-	rds_iw_set_64bit(&ic->i_ack_next, seq);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ic->i_ack_lock, flags);
+	ic->i_ack_next = seq;
+	if (ack_required)
+		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
+}
+
+static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
+{
+	unsigned long flags;
+	u64 seq;
+
+	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+
+	spin_lock_irqsave(&ic->i_ack_lock, flags);
+	seq = ic->i_ack_next;
+	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
+
+	return seq;
+}
+#else
+static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
+				int ack_required)
+{
+	atomic64_set(&ic->i_ack_next, seq);
 	if (ack_required) {
 		smp_mb__before_clear_bit();
 		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
@@ -410,8 +437,10 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
 	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 	smp_mb__after_clear_bit();
-	return ic->i_ack_next;
+	return atomic64_read(&ic->i_ack_next);
 }
+#endif
+
static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credits)
{
@@ -464,6 +493,10 @@ static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credi
* - i_ack_next, which is the last sequence number we received
*
* Potentially, send queue and receive queue handlers can run concurrently.
+ * It would be nice to not have to use a spinlock to synchronize things,
+ * but the one problem that rules this out is that 64bit updates are
+ * not atomic on all platforms. Things would be a lot simpler if
+ * we had atomic64 or maybe cmpxchg64 everywhere.
*
* Reconnecting complicates this picture just slightly. When we
* reconnect, we may be seeing duplicate packets. The peer
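As a footnote to the comment added in the last hunk ("Things would be a lot simpler if we had atomic64 or maybe cmpxchg64 everywhere"): in user-space C11 that simpler shape looks roughly like the sketch below. The names are hypothetical; on x86/32 the compiler can implement the 64-bit atomics with cmpxchg8b, while other 32-bit targets may fall back to libatomic, which is exactly the portability gap this patch works around in the kernel.

/* Illustrative C11 sketch of the lock-free shape: an _Atomic 64-bit
 * sequence number plus an atomic flag, mirroring the atomic64_t branch
 * of the patch. Not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t ack_next;
static atomic_bool ack_required;

static void ack_set(uint64_t seq, bool required)
{
	atomic_store(&ack_next, seq);		/* single atomic 64-bit store */
	if (required)
		atomic_store(&ack_required, true);
}

static uint64_t ack_get(void)
{
	atomic_store(&ack_required, false);
	return atomic_load(&ack_next);		/* single atomic 64-bit load */
}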