path: root/drivers/mmc
author		Guennadi Liakhovetski <g.liakhovetski@gmx.de>	2011-03-04 09:56:21 +0100
committer	Chris Ball <cjb@laptop.org>	2011-03-25 10:38:56 -0400
commit		51fc7b2ce74d31b7d9a6bd3c57bf6c1cc77e837d (patch)
tree		65871eed7be0a2df91ac52d84d838367eec0c435 /drivers/mmc
parent		4f665cb614b8a258b507cc47753dd3f7dd45aac6 (diff)
download	linux-51fc7b2ce74d31b7d9a6bd3c57bf6c1cc77e837d.tar.bz2
mmc: tmio-mmc: Improve DMA stability on sh-mobile
On some SDHI tmio implementations the order of DMA and command completion
interrupts swaps, which leads to malfunction. This patch postpones DMA
activation until the MMC command completion IRQ time.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
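In outline, the fix moves the dma_async_issue_pending() call out of the
request-setup path and into a tasklet scheduled from the command-completion
IRQ handler; with no completion callback left on the descriptor, the
DMA_PREP_INTERRUPT flag is dropped as well. A minimal standalone sketch of
that pattern follows (struct my_host and the my_* functions are hypothetical
stand-ins, not the driver's real API, and it uses the modern
dmaengine_prep_slave_sg() helper rather than the 2011-era
chan->device->device_prep_slave_sg() call visible in the diff):

#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/scatterlist.h>

struct my_host {
	struct dma_chan *chan;
	struct tasklet_struct dma_issue;	/* kicks the DMA engine later */
};

/* Data setup path: prepare and queue the descriptor, but do NOT issue it.
 * Before this patch the driver effectively issued the transfer right here,
 * so on some SDHI implementations the DMA-complete interrupt could arrive
 * before the command-complete interrupt.
 */
static void my_start_dma_rx(struct my_host *host, struct scatterlist *sg,
			    unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_sg(host->chan, sg, sg_len,
				       DMA_DEV_TO_MEM, DMA_CTRL_ACK);
	if (desc)
		dmaengine_submit(desc);	/* queued, not yet running */
}

/* Command-completion IRQ path: only now schedule the tasklet that
 * actually starts the queued DMA transfer.
 */
static void my_cmd_completion_irq(struct my_host *host)
{
	tasklet_schedule(&host->dma_issue);
}

static void my_dma_issue_fn(unsigned long arg)
{
	struct my_host *host = (struct my_host *)arg;

	dma_async_issue_pending(host->chan);	/* DMA starts here */
}

Submitting early is harmless because dmaengine_submit() only queues the
descriptor; nothing moves on the bus until dma_async_issue_pending() runs,
which is what makes the deferral safe.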
Diffstat (limited to 'drivers/mmc')
-rw-r--r--	drivers/mmc/host/tmio_mmc.c	63
1 file changed, 34 insertions(+), 29 deletions(-)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index ab1adeabdd22..e88627b33377 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -485,7 +485,10 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 	unsigned int count;
 	unsigned long flags;
 
-	if (!data) {
+	if (host->chan_tx || host->chan_rx) {
+		pr_err("PIO IRQ in DMA mode!\n");
+		return;
+	} else if (!data) {
 		pr_debug("Spurious PIO IRQ\n");
 		return;
 	}
@@ -648,6 +651,8 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 		if (host->data->flags & MMC_DATA_READ) {
 			if (!host->chan_rx)
 				enable_mmc_irqs(host, TMIO_MASK_READOP);
+			else
+				tasklet_schedule(&host->dma_issue);
 		} else {
 			if (!host->chan_tx)
 				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
@@ -779,18 +784,6 @@ static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
 #endif
 }
 
-static void tmio_dma_complete(void *arg)
-{
-	struct tmio_mmc_host *host = arg;
-
-	dev_dbg(&host->pdev->dev, "Command completed\n");
-
-	if (!host->data)
-		dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
-	else
-		enable_mmc_irqs(host, TMIO_STAT_DATAEND);
-}
-
 static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 {
 	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
@@ -817,6 +810,8 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 		goto pio;
 	}
 
+	disable_mmc_irqs(host, TMIO_STAT_RXRDY);
+
 	/* The only sg element can be unaligned, use our bounce buffer then */
 	if (!aligned) {
 		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
@@ -827,14 +822,11 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
 	if (ret > 0)
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+			DMA_FROM_DEVICE, DMA_CTRL_ACK);
 
-	if (desc) {
-		desc->callback = tmio_dma_complete;
-		desc->callback_param = host;
+	if (desc)
 		cookie = dmaengine_submit(desc);
-		dma_async_issue_pending(chan);
-	}
+
 
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
 		__func__, host->sg_len, ret, cookie, host->mrq);
@@ -886,6 +878,8 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 		goto pio;
 	}
 
+	disable_mmc_irqs(host, TMIO_STAT_TXRQ);
+
 	/* The only sg element can be unaligned, use our bounce buffer then */
 	if (!aligned) {
 		unsigned long flags;
@@ -900,13 +894,11 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
 	if (ret > 0)
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+			DMA_TO_DEVICE, DMA_CTRL_ACK);
 
-	if (desc) {
-		desc->callback = tmio_dma_complete;
-		desc->callback_param = host;
+	if (desc)
 		cookie = dmaengine_submit(desc);
-	}
+
 
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
 		__func__, host->sg_len, ret, cookie, host->mrq);
@@ -947,17 +939,30 @@ static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
 static void tmio_issue_tasklet_fn(unsigned long priv)
 {
 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
-	struct dma_chan *chan = host->chan_tx;
+	struct dma_chan *chan = NULL;
+
+	spin_lock_irq(&host->lock);
+
+	if (host && host->data) {
+		if (host->data->flags & MMC_DATA_READ)
+			chan = host->chan_rx;
+		else
+			chan = host->chan_tx;
+	}
+
+	spin_unlock_irq(&host->lock);
 
-	dma_async_issue_pending(chan);
+	enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+
+	if (chan)
+		dma_async_issue_pending(chan);
 }
 
 static void tmio_tasklet_fn(unsigned long arg)
 {
 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
-	unsigned long flags;
 
-	spin_lock_irqsave(&host->lock, flags);
+	spin_lock_irq(&host->lock);
 
 	if (!host->data)
 		goto out;
@@ -973,7 +978,7 @@ static void tmio_tasklet_fn(unsigned long arg)
 
 	tmio_mmc_do_data_irq(host);
 out:
-	spin_unlock_irqrestore(&host->lock, flags);
+	spin_unlock_irq(&host->lock);
 }
 
 /* It might be necessary to make filter MFD specific */
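A side note on the locking change in the last two hunks: the patch switches
from spin_lock_irqsave()/spin_unlock_irqrestore() to
spin_lock_irq()/spin_unlock_irq(). The *_irqsave variant saves and restores
the caller's interrupt state and is safe in any context; the plain *_irq
variant disables and re-enables interrupts unconditionally, which is only
correct when the caller is known to run with interrupts enabled, as a
tasklet (softirq context) does. A minimal sketch of the distinction
(my_lock and both functions are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);

/* Safe anywhere, even where IRQs may already be disabled:
 * the previous interrupt state is saved and restored.
 */
static void from_unknown_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&my_lock, flags);
}

/* Safe only where IRQs are known to be enabled, e.g. in a tasklet:
 * spin_unlock_irq() re-enables interrupts unconditionally.
 */
static void from_tasklet_context(void)
{
	spin_lock_irq(&my_lock);
	/* ... critical section ... */
	spin_unlock_irq(&my_lock);
}

In this driver the switch is valid because tmio_issue_tasklet_fn() and
tmio_tasklet_fn() both run as tasklets, so the cheaper unconditional
variant suffices.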