author | Bryan O'Sullivan <bos@pathscale.com> | 2006-07-01 04:35:51 -0700
---|---|---
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-07-01 09:55:58 -0700
commit | 7bbb15ea8543e2e49476a27b507be3b02828a124 |
tree | 8f5b92db6e28905798edd17d390af5e120a9c9ec |
parent | ddd4bb22108417fdc5c35324bd13a3265581ae76 |
download | linux-7bbb15ea8543e2e49476a27b507be3b02828a124.tar.bz2 |
[PATCH] IB/ipath: fix an indenting problem
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_rc.c | 52 |
1 file changed, 26 insertions, 26 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 0ca89af3e10d..720cb3ae1fc3 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1053,32 +1053,32 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 			goto ack_done;
 		}
 	rdma_read:
-	if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
-		goto ack_done;
-	if (unlikely(tlen != (hdrsize + pmtu + 4)))
-		goto ack_done;
-	if (unlikely(pmtu >= qp->s_len))
-		goto ack_done;
-	/* We got a response so update the timeout. */
-	if (unlikely(qp->s_last == qp->s_tail ||
-		     get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
-		     IB_WR_RDMA_READ))
-		goto ack_done;
-	spin_lock(&dev->pending_lock);
-	if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
-		list_move_tail(&qp->timerwait,
-			       &dev->pending[dev->pending_index]);
-	spin_unlock(&dev->pending_lock);
-	/*
-	 * Update the RDMA receive state but do the copy w/o holding the
-	 * locks and blocking interrupts. XXX Yet another place that
-	 * affects relaxed RDMA order since we don't want s_sge modified.
-	 */
-	qp->s_len -= pmtu;
-	qp->s_last_psn = psn;
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	ipath_copy_sge(&qp->s_sge, data, pmtu);
-	goto bail;
+		if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
+			goto ack_done;
+		if (unlikely(tlen != (hdrsize + pmtu + 4)))
+			goto ack_done;
+		if (unlikely(pmtu >= qp->s_len))
+			goto ack_done;
+		/* We got a response so update the timeout. */
+		if (unlikely(qp->s_last == qp->s_tail ||
+			     get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
+			     IB_WR_RDMA_READ))
+			goto ack_done;
+		spin_lock(&dev->pending_lock);
+		if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
+			list_move_tail(&qp->timerwait,
+				       &dev->pending[dev->pending_index]);
+		spin_unlock(&dev->pending_lock);
+		/*
+		 * Update the RDMA receive state but do the copy w/o holding the
+		 * locks and blocking interrupts. XXX Yet another place that
+		 * affects relaxed RDMA order since we don't want s_sge modified.
+		 */
+		qp->s_len -= pmtu;
+		qp->s_last_psn = psn;
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+		ipath_copy_sge(&qp->s_sge, data, pmtu);
+		goto bail;
 	case OP(RDMA_READ_RESPONSE_LAST):
 		/* ACKs READ req. */
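The comment inside the hunk explains why qp->s_lock is released with spin_unlock_irqrestore() before ipath_copy_sge() is called: the bookkeeping updates (qp->s_len, qp->s_last_psn) are cheap and stay inside the critical section, while the per-packet data copy can be long and is done without the lock held. The following is a minimal user-space sketch of that pattern only, not kernel code; pthreads stands in for the spinlock, and all names here (struct rx_state, rx_consume) are hypothetical.

```c
/*
 * Illustrative analogue of the locking pattern in the hunk above:
 * update the receive bookkeeping under the lock, then drop the lock
 * before the potentially large copy so other paths are not blocked.
 */
#include <pthread.h>
#include <stddef.h>
#include <string.h>

struct rx_state {
	pthread_mutex_t lock;
	size_t remaining;	/* plays the role of qp->s_len      */
	unsigned last_psn;	/* plays the role of qp->s_last_psn */
	char *dst;		/* where the next payload lands     */
};

static void rx_consume(struct rx_state *s, const char *data,
		       size_t len, unsigned psn)
{
	char *dst;

	pthread_mutex_lock(&s->lock);
	/* Cheap state update: keep it inside the critical section. */
	s->remaining -= len;
	s->last_psn = psn;
	dst = s->dst;
	s->dst += len;
	pthread_mutex_unlock(&s->lock);

	/* The copy may be long: do it without holding the lock. */
	memcpy(dst, data, len);
}
```

The kernel hunk follows the same shape: state updates under qp->s_lock, then the unlock, then the ipath_copy_sge() data copy.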