author | Johannes Berg <johannes.berg@intel.com> | 2012-03-05 11:24:24 -0800
committer | John W. Linville <linville@tuxdriver.com> | 2012-03-06 15:16:08 -0500
commit | 015c15e1067c988fc87fb550b222f075c8d3f47c (patch)
tree | 20ebc3c951e8d9e531410d3f0a116ea39405f752 /drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
parent | 9451ca1a31be44f8235c3f8f998ff27fc7a8395e (diff)
download | linux-015c15e1067c988fc87fb550b222f075c8d3f47c.tar.bz2
iwlwifi: introduce per-queue locks
Instead of (ab)using the sta_lock, make the
transport layer lock its own TX queue data
structures with a lock per queue. This also
unifies with the cmd queue lock.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
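The pattern behind the patch is simple: every TX queue carries its own lock, initialized when the queue is set up and taken around any manipulation of that queue's read/write pointers, so work on one queue no longer serializes against every other queue (or against the station table). As a rough illustration only, here is a minimal userspace C sketch of such a per-queue locking scheme; the `tx_queue` structure and the `txq_init`/`txq_drain` names are hypothetical stand-ins, and pthread mutexes stand in for the kernel spinlocks (`spin_lock_bh`/`spin_lock`) used in the driver.

```c
#include <pthread.h>

/* Hypothetical, simplified stand-in for the driver's iwl_tx_queue:
 * the queue embeds its own lock instead of relying on a shared one. */
struct tx_queue {
	pthread_mutex_t lock;   /* analogue of the new txq->lock */
	unsigned int read_ptr;
	unsigned int write_ptr;
	unsigned int n_bd;      /* ring size (power of two) */
};

/* Mirrors the spin_lock_init() added to queue setup in the patch. */
static void txq_init(struct tx_queue *q, unsigned int n_bd)
{
	pthread_mutex_init(&q->lock, NULL);
	q->read_ptr = 0;
	q->write_ptr = 0;
	q->n_bd = n_bd;
}

/* Drain one queue under its own lock; other queues can be filled or
 * reclaimed concurrently, which the old shared-lock scheme prevented. */
static void txq_drain(struct tx_queue *q)
{
	pthread_mutex_lock(&q->lock);
	while (q->read_ptr != q->write_ptr) {
		/* free the descriptor at read_ptr here (omitted) */
		q->read_ptr = (q->read_ptr + 1) & (q->n_bd - 1);
	}
	pthread_mutex_unlock(&q->lock);
}
```

Because the TX path now holds the queue lock while building a frame, its error paths must drop the lock before returning; that is why the patch below replaces the bare `return -1;` statements with `goto out_err;`, where a single exit label unlocks the queue.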
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-trans-pcie.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl-trans-pcie.c | 31 |
1 file changed, 19 insertions, 12 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 9f8b23909404..f47426a5ef4d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -390,6 +390,8 @@ static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
         if (ret)
                 return ret;
 
+        spin_lock_init(&txq->lock);
+
         /*
          * Tell nic where to find circular buffer of Tx Frame Descriptors for
          * given Tx queue, and enable the DMA channel used for that queue.
@@ -409,8 +411,6 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
         struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
         struct iwl_queue *q = &txq->q;
         enum dma_data_direction dma_dir;
-        unsigned long flags;
-        spinlock_t *lock;
 
         if (!q->n_bd)
                 return;
@@ -418,22 +418,19 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
         /* In the command queue, all the TBs are mapped as BIDI
          * so unmap them as such.
          */
-        if (txq_id == trans->shrd->cmd_queue) {
+        if (txq_id == trans->shrd->cmd_queue)
                 dma_dir = DMA_BIDIRECTIONAL;
-                lock = &trans->hcmd_lock;
-        } else {
+        else
                 dma_dir = DMA_TO_DEVICE;
-                lock = &trans->shrd->sta_lock;
-        }
 
-        spin_lock_irqsave(lock, flags);
+        spin_lock_bh(&txq->lock);
         while (q->write_ptr != q->read_ptr) {
                 /* The read_ptr needs to bound by q->n_window */
                 iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
                                     dma_dir);
                 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
         }
-        spin_unlock_irqrestore(lock, flags);
+        spin_unlock_bh(&txq->lock);
 }
 
 /**
@@ -1358,6 +1355,8 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
         txq = &trans_pcie->txq[txq_id];
         q = &txq->q;
 
+        spin_lock(&txq->lock);
+
         /* In AGG mode, the index in the ring must correspond to the WiFi
          * sequence number. This is a HW requirements to help the SCD to parse
          * the BA.
@@ -1404,7 +1403,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                                     &dev_cmd->hdr, firstlen,
                                     DMA_BIDIRECTIONAL);
         if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
-                return -1;
+                goto out_err;
         dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
         dma_unmap_len_set(out_meta, len, firstlen);
 
@@ -1426,7 +1425,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                                          dma_unmap_addr(out_meta, mapping),
                                          dma_unmap_len(out_meta, len),
                                          DMA_BIDIRECTIONAL);
-                        return -1;
+                        goto out_err;
                 }
         }
 
@@ -1481,7 +1480,11 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                         iwl_stop_queue(trans, txq, "Queue is full");
                 }
         }
+        spin_unlock(&txq->lock);
         return 0;
+ out_err:
+        spin_unlock(&txq->lock);
+        return -1;
 }
 
 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
@@ -1560,6 +1563,8 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
         int tfd_num = ssn & (txq->q.n_bd - 1);
         int freed = 0;
 
+        spin_lock(&txq->lock);
+
         txq->time_stamp = jiffies;
 
         if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
@@ -1574,6 +1579,7 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
                 IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
                         "agg_txq[sta_id[tid] %d", txq_id,
                         trans_pcie->agg_txq[sta_id][tid]);
+                spin_unlock(&txq->lock);
                 return 1;
         }
 
@@ -1587,6 +1593,8 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
                    status != TX_STATUS_FAIL_PASSIVE_NO_RX))
                         iwl_wake_queue(trans, txq, "Packets reclaimed");
         }
+
+        spin_unlock(&txq->lock);
         return 0;
 }
 
@@ -2267,7 +2275,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
         trans->ops = &trans_ops_pcie;
         trans->shrd = shrd;
         trans_pcie->trans = trans;
-        spin_lock_init(&trans->hcmd_lock);
         spin_lock_init(&trans_pcie->irq_lock);
 
         /* W/A - seems to solve weird behavior. We need to remove this if we