author     Linus Torvalds <torvalds@linux-foundation.org>   2017-11-03 09:19:20 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-11-03 09:19:20 -0700
commit     3f46540ee7015ad2e3665e68b0b22c48ce9f99e5 (patch)
tree       f96426189e207224aaeecf535b86f62d1e5826e9 /drivers/mmc
parent     e65a139d5b10cd92ce44d4200c14bc16d416594e (diff)
parent     9d9491a7da2a4ce9fed32bd8611992ea3471523a (diff)
download   linux-3f46540ee7015ad2e3665e68b0b22c48ce9f99e5.tar.bz2
Merge tag 'mmc-v4.14-rc4-3' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc
Pull MMC fixes from Ulf Hansson:
"Fix dw_mmc request timeout issues"
* tag 'mmc-v4.14-rc4-3' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc:
mmc: dw_mmc: Fix the DTO timeout calculation
mmc: dw_mmc: Add locking to the CTO timer
mmc: dw_mmc: Fix the CTO timeout calculation
mmc: dw_mmc: cancel the CTO timer after a voltage switch
Diffstat (limited to 'drivers/mmc')
-rw-r--r--   drivers/mmc/host/dw_mmc.c | 107
1 file changed, 94 insertions, 13 deletions
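The two timeout-calculation fixes below change how the hardware count in the TMOUT register is converted to milliseconds. The count ticks at the card clock, i.e. bus_hz divided by 2 * CLKDIV (or by 1 when CLKDIV is 0), not at the controller clock, so the old DIV_ROUND_UP(clks, bus_hz / 1000) armed the software timer too early whenever a clock divider was in use. A rough standalone illustration of the arithmetic follows; it is plain userspace C, not driver code, and the helper names (div_round_up, timeout_clks_to_ms) and the numbers in main() are invented for the example:

/*
 * Standalone sketch of the corrected ms conversion.  Not the dw_mmc driver;
 * helper names and sample values are made up for illustration.
 */
#include <stdio.h>

#define MSEC_PER_SEC    1000ULL

/* round-up division, like the kernel's DIV_ROUND_UP() */
static unsigned long long div_round_up(unsigned long long n, unsigned long long d)
{
        return (n + d - 1) / d;
}

/* new math: the TMOUT count ticks at the card clock, bus_hz / (2 * CLKDIV) */
static unsigned long long timeout_clks_to_ms(unsigned long long timeout_clks,
                                             unsigned int clkdiv,
                                             unsigned int bus_hz)
{
        /* CLKDIV of 0 means "divide by 1", otherwise divide by 2 * CLKDIV */
        unsigned long long div = clkdiv ? 2ULL * clkdiv : 1;

        return div_round_up(MSEC_PER_SEC * timeout_clks * div, bus_hz);
}

int main(void)
{
        /* e.g. a full 24-bit data timeout count, 50 MHz controller clock, CLKDIV = 1 */
        unsigned long long clks = 0xffffff;
        unsigned int bus_hz = 50000000;

        /* old math ignored the divider: DIV_ROUND_UP(clks, bus_hz / 1000) */
        printf("old: %llu ms\n", div_round_up(clks, bus_hz / 1000));    /* ~336 ms */
        printf("new: %llu ms\n", timeout_clks_to_ms(clks, 1, bus_hz));  /* ~672 ms */
        return 0;
}

With a divider of only 2 the old formula already halves the real timeout; with larger dividers (for example during card initialization at a few hundred kHz) the software timer fires long before the controller's own timeout, which is why requests appeared to time out prematurely.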
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 860313bd952a..4f2806720c5c 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -401,16 +401,37 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
 static inline void dw_mci_set_cto(struct dw_mci *host)
 {
         unsigned int cto_clks;
+        unsigned int cto_div;
         unsigned int cto_ms;
+        unsigned long irqflags;
 
         cto_clks = mci_readl(host, TMOUT) & 0xff;
-        cto_ms = DIV_ROUND_UP(cto_clks, host->bus_hz / 1000);
+        cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
+        if (cto_div == 0)
+                cto_div = 1;
+        cto_ms = DIV_ROUND_UP(MSEC_PER_SEC * cto_clks * cto_div, host->bus_hz);
 
         /* add a bit spare time */
         cto_ms += 10;
 
-        mod_timer(&host->cto_timer,
-                  jiffies + msecs_to_jiffies(cto_ms) + 1);
+        /*
+         * The durations we're working with are fairly short so we have to be
+         * extra careful about synchronization here.  Specifically in hardware a
+         * command timeout is _at most_ 5.1 ms, so that means we expect an
+         * interrupt (either command done or timeout) to come rather quickly
+         * after the mci_writel.  ...but just in case we have a long interrupt
+         * latency let's add a bit of paranoia.
+         *
+         * In general we'll assume that at least an interrupt will be asserted
+         * in hardware by the time the cto_timer runs.  ...and if it hasn't
+         * been asserted in hardware by that time then we'll assume it'll never
+         * come.
+         */
+        spin_lock_irqsave(&host->irq_lock, irqflags);
+        if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
+                mod_timer(&host->cto_timer,
+                        jiffies + msecs_to_jiffies(cto_ms) + 1);
+        spin_unlock_irqrestore(&host->irq_lock, irqflags);
 }
 
 static void dw_mci_start_command(struct dw_mci *host,
@@ -425,11 +446,11 @@ static void dw_mci_start_command(struct dw_mci *host,
         wmb(); /* drain writebuffer */
         dw_mci_wait_while_busy(host, cmd_flags);
 
+        mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
+
         /* response expected command only */
         if (cmd_flags & SDMMC_CMD_RESP_EXP)
                 dw_mci_set_cto(host);
-
-        mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
 }
 
 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
@@ -1915,10 +1936,15 @@ static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
 static void dw_mci_set_drto(struct dw_mci *host)
 {
         unsigned int drto_clks;
+        unsigned int drto_div;
         unsigned int drto_ms;
 
         drto_clks = mci_readl(host, TMOUT) >> 8;
-        drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);
+        drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
+        if (drto_div == 0)
+                drto_div = 1;
+        drto_ms = DIV_ROUND_UP(MSEC_PER_SEC * drto_clks * drto_div,
+                               host->bus_hz);
 
         /* add a bit spare time */
         drto_ms += 10;
@@ -1926,6 +1952,24 @@ static void dw_mci_set_drto(struct dw_mci *host)
         mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
 }
 
+static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
+{
+        if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
+                return false;
+
+        /*
+         * Really be certain that the timer has stopped.  This is a bit of
+         * paranoia and could only really happen if we had really bad
+         * interrupt latency and the interrupt routine and timeout were
+         * running concurrently so that the del_timer() in the interrupt
+         * handler couldn't run.
+         */
+        WARN_ON(del_timer_sync(&host->cto_timer));
+        clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+
+        return true;
+}
+
 static void dw_mci_tasklet_func(unsigned long priv)
 {
         struct dw_mci *host = (struct dw_mci *)priv;
@@ -1952,8 +1996,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
 
                 case STATE_SENDING_CMD11:
                 case STATE_SENDING_CMD:
-                        if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
-                                                &host->pending_events))
+                        if (!dw_mci_clear_pending_cmd_complete(host))
                                 break;
 
                         cmd = host->cmd;
@@ -2122,8 +2165,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
                         /* fall through */
 
                 case STATE_SENDING_STOP:
-                        if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
-                                                &host->pending_events))
+                        if (!dw_mci_clear_pending_cmd_complete(host))
                                 break;
 
                         /* CMD error in data command */
@@ -2570,6 +2612,8 @@ done:
 
 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
 {
+        del_timer(&host->cto_timer);
+
         if (!host->cmd_status)
                 host->cmd_status = status;
 
@@ -2594,6 +2638,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
         struct dw_mci *host = dev_id;
         u32 pending;
         struct dw_mci_slot *slot = host->slot;
+        unsigned long irqflags;
 
         pending = mci_readl(host, MINTSTS); /* read-only mask reg */
 
@@ -2601,8 +2646,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
                 /* Check volt switch first, since it can look like an error */
                 if ((host->state == STATE_SENDING_CMD11) &&
                     (pending & SDMMC_INT_VOLT_SWITCH)) {
-                        unsigned long irqflags;
-
                         mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
                         pending &= ~SDMMC_INT_VOLT_SWITCH;
 
@@ -2618,11 +2661,15 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
                 }
 
                 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
+                        spin_lock_irqsave(&host->irq_lock, irqflags);
+
                         del_timer(&host->cto_timer);
                         mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
                         host->cmd_status = pending;
                         smp_wmb(); /* drain writebuffer */
                         set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+
+                        spin_unlock_irqrestore(&host->irq_lock, irqflags);
                 }
 
                 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
@@ -2662,9 +2709,12 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
                 }
 
                 if (pending & SDMMC_INT_CMD_DONE) {
-                        del_timer(&host->cto_timer);
+                        spin_lock_irqsave(&host->irq_lock, irqflags);
+
                         mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
                         dw_mci_cmd_interrupt(host, pending);
+
+                        spin_unlock_irqrestore(&host->irq_lock, irqflags);
                 }
 
                 if (pending & SDMMC_INT_CD) {
@@ -2938,7 +2988,35 @@ static void dw_mci_cmd11_timer(unsigned long arg)
 static void dw_mci_cto_timer(unsigned long arg)
 {
         struct dw_mci *host = (struct dw_mci *)arg;
+        unsigned long irqflags;
+        u32 pending;
 
+        spin_lock_irqsave(&host->irq_lock, irqflags);
+
+        /*
+         * If somehow we have very bad interrupt latency it's remotely possible
+         * that the timer could fire while the interrupt is still pending or
+         * while the interrupt is midway through running.  Let's be paranoid
+         * and detect those two cases.  Note that this paranoia is somewhat
+         * justified because in this function we don't actually cancel the
+         * pending command in the controller--we just assume it will never come.
+         */
+        pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+        if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
+                /* The interrupt should fire; no need to act but we can warn */
+                dev_warn(host->dev, "Unexpected interrupt latency\n");
+                goto exit;
+        }
+        if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
+                /* Presumably interrupt handler couldn't delete the timer */
+                dev_warn(host->dev, "CTO timeout when already completed\n");
+                goto exit;
+        }
+
+        /*
+         * Continued paranoia to make sure we're in the state we expect.
+         * This paranoia isn't really justified but it seems good to be safe.
+         */
         switch (host->state) {
         case STATE_SENDING_CMD11:
         case STATE_SENDING_CMD:
@@ -2957,6 +3035,9 @@ static void dw_mci_cto_timer(unsigned long arg)
                          host->state);
                 break;
         }
+
+exit:
+        spin_unlock_irqrestore(&host->irq_lock, irqflags);
 }
 
 static void dw_mci_dto_timer(unsigned long arg)
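The remaining hunks close a race between the short CTO timer and the command-done/error interrupt: dw_mci_set_cto() only arms the timer under host->irq_lock if EVENT_CMD_COMPLETE is not already pending, the interrupt handler deletes the timer under the same lock, the tasklet uses del_timer_sync() via dw_mci_clear_pending_cmd_complete(), and the timer callback re-checks MINTSTS and the pending event before treating the expiry as a real timeout. The sketch below is a userspace pthread analogy of that scheme, not kernel code; the names are modeled on the driver's (irq_lock, cmd_complete) but the whole program is invented for illustration:

/*
 * Userspace analogy (POSIX threads) of the CTO timer locking scheme.
 * Not driver code: the flag that says "the command completed" and the
 * decision to arm (or not arm) the timeout are made under one lock, so a
 * completion racing with arming can never leave a stale timer behind.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static bool cmd_complete;               /* models EVENT_CMD_COMPLETE */
static bool timer_armed;                /* models the pending cto_timer */

/* models dw_mci_set_cto(): only arm the timeout if completion hasn't landed */
static void arm_cmd_timeout(void)
{
        pthread_mutex_lock(&irq_lock);
        if (!cmd_complete)
                timer_armed = true;
        pthread_mutex_unlock(&irq_lock);
}

/* models the SDMMC_INT_CMD_DONE path: cancel the timer and mark completion
 * under the same lock the arming code takes */
static void *irq_thread(void *unused)
{
        (void)unused;
        pthread_mutex_lock(&irq_lock);
        timer_armed = false;
        cmd_complete = true;
        pthread_mutex_unlock(&irq_lock);
        return NULL;
}

/* models dw_mci_cto_timer(): double-check state before declaring a timeout */
static void cmd_timeout_fires(void)
{
        pthread_mutex_lock(&irq_lock);
        if (cmd_complete)
                printf("timeout fired after completion: ignore it\n");
        else if (timer_armed)
                printf("genuine command timeout\n");
        pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
        pthread_t irq;

        pthread_create(&irq, NULL, irq_thread, NULL);
        arm_cmd_timeout();              /* may run before or after the "IRQ" */
        pthread_join(irq, NULL);

        cmd_timeout_fires();
        return 0;
}

The same reasoning explains why dw_mci_start_command() now writes the CMD register before calling dw_mci_set_cto(): with arming done after the command has actually started, the "has completion already arrived?" check taken under the lock is meaningful, as the new comment in dw_mci_set_cto() describes.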