author     Mark Brown <broonie@kernel.org>   2019-01-23 17:29:53 +0000
committer  Mark Brown <broonie@kernel.org>   2019-01-23 17:29:53 +0000
commit     f0125f1a559be1033055f44e511174aaa75b60cc (patch)
tree       83d67533a010578ce2bc018302fc5ec2e291c9a4 /drivers/spi/spi.c
parent     51eea52d26d4939b788b7244c28cf47e902b4c4c (diff)
download   linux-f0125f1a559be1033055f44e511174aaa75b60cc.tar.bz2
spi: Go back to immediate teardown
Commit 412e6037324 ("spi: core: avoid waking pump thread from spi_sync instead run teardown delayed") introduced regressions on some boards, apparently connected to spi_mem not triggering shutdown properly any more. Since we've thus far been unable to figure out exactly where the breakage is, revert the optimisation for now.

Reported-by: Jon Hunter <jonathanh@nvidia.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
Cc: kernel@martin.sperl.org
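For context: the optimisation being reverted avoided tearing the controller down the moment its queue drained, instead arming a delayed work item on the message-pump kworker. A minimal sketch of that pattern, reconstructed from the removed code in the diff below (not a verbatim excerpt):

        /*
         * Arm (or re-arm) idle teardown to run on the pump kworker after
         * HZ jiffies (one second): each idle transition pushes the deadline
         * back, so the hardware stays prepared across back-to-back messages.
         */
        kthread_init_delayed_work(&ctlr->pump_idle_teardown,
                                  spi_pump_idle_teardown);  /* once, at queue init */

        kthread_mod_delayed_work(&ctlr->kworker,
                                 &ctlr->pump_idle_teardown, HZ);  /* on each idle transition */

Reverting restores the behaviour where the pump thread unprepares the hardware and drops its runtime PM reference as soon as it finds the queue empty.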
Diffstat (limited to 'drivers/spi/spi.c')
-rw-r--r--  drivers/spi/spi.c  122
1 file changed, 33 insertions, 89 deletions
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 06b9139664a3..13f447a67d67 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1225,7 +1225,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
                 return;
         }
 
-        /* If another context is idling the device then defer to kthread */
+        /* If another context is idling the device then defer */
         if (ctlr->idling) {
                 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
@@ -1239,10 +1239,34 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
                         return;
                 }
 
-                /* schedule idle teardown with a delay of 1 second */
-                kthread_mod_delayed_work(&ctlr->kworker,
-                                         &ctlr->pump_idle_teardown,
-                                         HZ);
+                /* Only do teardown in the thread */
+                if (!in_kthread) {
+                        kthread_queue_work(&ctlr->kworker,
+                                           &ctlr->pump_messages);
+                        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+                        return;
+                }
+
+                ctlr->busy = false;
+                ctlr->idling = true;
+                spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+
+                kfree(ctlr->dummy_rx);
+                ctlr->dummy_rx = NULL;
+                kfree(ctlr->dummy_tx);
+                ctlr->dummy_tx = NULL;
+                if (ctlr->unprepare_transfer_hardware &&
+                    ctlr->unprepare_transfer_hardware(ctlr))
+                        dev_err(&ctlr->dev,
+                                "failed to unprepare transfer hardware\n");
+                if (ctlr->auto_runtime_pm) {
+                        pm_runtime_mark_last_busy(ctlr->dev.parent);
+                        pm_runtime_put_autosuspend(ctlr->dev.parent);
+                }
+                trace_spi_controller_idle(ctlr);
+
+                spin_lock_irqsave(&ctlr->queue_lock, flags);
+                ctlr->idling = false;
                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
                 return;
         }
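The in_kthread test above is the crux of the restored code: __spi_pump_messages() is called both from the pump kworker and inline from the spi_sync() fast path, and teardown can sleep (unprepare_transfer_hardware(), runtime PM autosuspend), so an inline caller must bounce teardown back to the thread rather than doing it itself. Paraphrasing the two call sites in spi.c of this era (surrounding context, not part of this diff):

        /* kworker callback: runs in thread context, may tear down */
        static void spi_pump_messages(struct kthread_work *work)
        {
                struct spi_controller *ctlr =
                        container_of(work, struct spi_controller, pump_messages);

                __spi_pump_messages(ctlr, true);
        }

        /* spi_sync() fast path: pumps inline, must not tear down */
        __spi_pump_messages(ctlr, false);

Note also the idling handshake: busy and idling are flipped under queue_lock, the lock is dropped around the sleeping teardown work, and idling is cleared under the lock again; any other context that observes ctlr->idling in the meantime defers to the kworker, as the first hunk shows.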
@@ -1335,77 +1359,6 @@ static void spi_pump_messages(struct kthread_work *work)
         __spi_pump_messages(ctlr, true);
 }
 
-/**
- * spi_pump_idle_teardown - kthread delayed work function which tears down
- * the controller settings after some delay
- * @work: pointer to kthread work struct contained in the controller struct
- */
-static void spi_pump_idle_teardown(struct kthread_work *work)
-{
-        struct spi_controller *ctlr =
-                container_of(work, struct spi_controller,
-                             pump_idle_teardown.work);
-        unsigned long flags;
-
-        /* Lock queue */
-        spin_lock_irqsave(&ctlr->queue_lock, flags);
-
-        /* Make sure we are not already running a message */
-        if (ctlr->cur_msg)
-                goto out;
-
-        /* if there is anything in the list then exit */
-        if (!list_empty(&ctlr->queue))
-                goto out;
-
-        /* if the controller is running then exit */
-        if (ctlr->running)
-                goto out;
-
-        /* if the controller is busy then exit */
-        if (ctlr->busy)
-                goto out;
-
-        /* if the controller is idling then exit
-         * this is actually a bit strange and would indicate that
-         * this function is scheduled twice, which should not happen
-         */
-        if (ctlr->idling)
-                goto out;
-
-        /* set up the initial states */
-        ctlr->busy = false;
-        ctlr->idling = true;
-        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-
-        /* free dummy receive buffers */
-        kfree(ctlr->dummy_rx);
-        ctlr->dummy_rx = NULL;
-        kfree(ctlr->dummy_tx);
-        ctlr->dummy_tx = NULL;
-
-        /* unprepare hardware */
-        if (ctlr->unprepare_transfer_hardware &&
-            ctlr->unprepare_transfer_hardware(ctlr))
-                dev_err(&ctlr->dev,
-                        "failed to unprepare transfer hardware\n");
-        /* handle pm */
-        if (ctlr->auto_runtime_pm) {
-                pm_runtime_mark_last_busy(ctlr->dev.parent);
-                pm_runtime_put_autosuspend(ctlr->dev.parent);
-        }
-
-        /* mark controller as idle */
-        trace_spi_controller_idle(ctlr);
-
-        /* finally put us from idling into stopped */
-        spin_lock_irqsave(&ctlr->queue_lock, flags);
-        ctlr->idling = false;
-
-out:
-        spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-}
-
 static int spi_init_queue(struct spi_controller *ctlr)
 {
         struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
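The deleted spi_pump_idle_teardown() was shaped by the race it had to tolerate: the one-second timer could fire after new work had already arrived, so every disqualifying condition was re-checked under queue_lock before anything was torn down. Condensed to its locking pattern (a paraphrase of the removed lines above, not new code):

        spin_lock_irqsave(&ctlr->queue_lock, flags);
        if (ctlr->cur_msg || !list_empty(&ctlr->queue) ||
            ctlr->running || ctlr->busy || ctlr->idling)
                goto out;               /* in use again: skip teardown */
        /* ... same teardown sequence as in the hunk above ... */
out:
        spin_unlock_irqrestore(&ctlr->queue_lock, flags);

With immediate teardown the pump has just observed an empty queue under that same lock, so this separate re-validation pass disappears along with the function.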
@@ -1421,8 +1374,7 @@ static int spi_init_queue(struct spi_controller *ctlr)
                 return PTR_ERR(ctlr->kworker_task);
         }
         kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
-        kthread_init_delayed_work(&ctlr->pump_idle_teardown,
-                                  spi_pump_idle_teardown);
+
         /*
          * Controller config will indicate if this controller should run the
          * message pump with high (realtime) priority to reduce the transfer
@@ -1494,16 +1446,7 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
         spin_lock_irqsave(&ctlr->queue_lock, flags);
         ctlr->cur_msg = NULL;
         ctlr->cur_msg_prepared = false;
-
-        /* if there is something queued, then wake the queue */
-        if (!list_empty(&ctlr->queue))
-                kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
-        else
-                /* otherwise schedule delayed teardown */
-                kthread_mod_delayed_work(&ctlr->kworker,
-                                         &ctlr->pump_idle_teardown,
-                                         HZ);
-
+        kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
         trace_spi_message_done(mesg);
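With the delayed work gone, spi_finalize_current_message() unconditionally kicks the pump once per completed message: if more messages are queued the pump starts the next one, and if the queue is empty it performs the immediate teardown restored in the first hunk. The removed list_empty() branching becomes unnecessary because both outcomes are now handled inside __spi_pump_messages() itself.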
@@ -1608,7 +1551,7 @@ static int __spi_queued_transfer(struct spi_device *spi,
         msg->status = -EINPROGRESS;
 
         list_add_tail(&msg->queue, &ctlr->queue);
-        if (need_pump)
+        if (!ctlr->busy && need_pump)
                 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
 
         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
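The restored !ctlr->busy test avoids a superfluous wake-up: while a message is in flight, queueing the pump is pointless because spi_finalize_current_message() (previous hunk) will queue it anyway once the current message completes. The reverted commit had dropped this check as part of the delayed-teardown rework; restoring it simply returns to the pre-412e6037324 behaviour.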
@@ -3783,3 +3726,4 @@ err0:
  * include needing to have boardinfo data structures be much more public.
  */
 postcore_initcall(spi_init);
+