author     Oleksij Rempel <o.rempel@pengutronix.de>            2022-01-11 14:04:02 +0100
committer  Jonathan Cameron <Jonathan.Cameron@huawei.com>      2022-01-23 18:03:36 +0000
commit     84e5d88953d765e5da14f96dd30b0b1cc215c157 (patch)
tree       e35a2a986e627f4e91f2d7a196886c1eb9a65741 /drivers/iio/adc
parent     0f66edfb0722bc424c5d8300e5a938b1b7c5c78f (diff)
iio: adc: tsc2046: rework the trigger state machine
Initially this was designed to:

| Fix sleeping in atomic context warning and a deadlock after
| iio_trigger_poll() call
|
| If iio_trigger_poll() is called after the IRQ was disabled, we will call
| reenable_trigger() directly from hard IRQ or hrtimer context instead of
| the IRQ thread. In this case we will run into multiple issues such as
| sleeping in atomic context and a deadlock.
|
| To avoid this issue, rework the trigger to use a state machine. All state
| changes are done from the hrtimer, which allows us to drop fsleep() and
| avoid the deadlock.

This issue has since been fixed by commit 9020ef659885 ("iio: trigger: Fix
a scheduling whilst atomic issue seen on tsc2046"), so this patch is now a
cleanup to make the state machine easier to follow.

Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
Link: https://lore.kernel.org/r/20220111130402.3404769-1-o.rempel@pengutronix.de
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
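For reference, the poll-count/state transitions that the patch introduces can be
modelled with a minimal userspace sketch (not part of the patch). The sim_*
helpers below are hypothetical stand-ins for enable_irq(), disable_irq_nosync()
and iio_trigger_poll(); the constants and enum mirror the ones added by the diff.

/*
 * Illustrative sketch only: models the hrtimer state machine from
 * tsc2046_adc_timer(). Build with any C compiler, e.g. `cc sketch.c`.
 */
#include <stdio.h>

#define TI_TSC2046_MIN_POLL_CNT 3
#define TI_TSC2046_EXT_POLL_CNT 3
#define TI_TSC2046_POLL_CNT \
	(TI_TSC2046_MIN_POLL_CNT + TI_TSC2046_EXT_POLL_CNT)

enum tsc2046_state {
	TSC2046_STATE_SHUTDOWN,
	TSC2046_STATE_STANDBY,
	TSC2046_STATE_POLL,
	TSC2046_STATE_POLL_IRQ_DISABLE,
	TSC2046_STATE_ENABLE_IRQ,
};

struct sim {
	enum tsc2046_state state;
	int poll_cnt;
};

static void sim_enable_irq(void)    { printf("  enable_irq\n"); }
static void sim_disable_irq(void)   { printf("  disable_irq\n"); }
static void sim_trigger_poll(void)  { printf("  iio_trigger_poll -> scan, hrtimer re-armed\n"); }

/* Touch interrupt: reset the counter and start a polling burst. */
static void sim_irq(struct sim *s)
{
	if (s->state == TSC2046_STATE_SHUTDOWN)
		return;
	s->state = TSC2046_STATE_ENABLE_IRQ;
	s->poll_cnt = 0;
	sim_disable_irq();
	sim_trigger_poll();
}

/* hrtimer expiry: same switch as tsc2046_adc_timer() in the patch. */
static void sim_timer(struct sim *s)
{
	switch (s->state) {
	case TSC2046_STATE_ENABLE_IRQ:
		if (s->poll_cnt < TI_TSC2046_POLL_CNT) {
			s->poll_cnt++;
			/* the real driver re-arms the hrtimer here */
			if (s->poll_cnt >= TI_TSC2046_MIN_POLL_CNT) {
				s->state = TSC2046_STATE_POLL_IRQ_DISABLE;
				sim_enable_irq();
			} else {
				s->state = TSC2046_STATE_POLL;
			}
		} else {
			s->state = TSC2046_STATE_STANDBY;
			sim_enable_irq();
		}
		break;
	case TSC2046_STATE_POLL_IRQ_DISABLE:
		sim_disable_irq();
		/* fall through */
	case TSC2046_STATE_POLL:
		s->state = TSC2046_STATE_ENABLE_IRQ;
		sim_trigger_poll();
		break;
	default:
		/* SHUTDOWN and STANDBY: nothing to do in this sketch */
		break;
	}
}

int main(void)
{
	struct sim s = { .state = TSC2046_STATE_STANDBY, .poll_cnt = 0 };
	int i;

	sim_irq(&s);
	/* Each completed scan re-arms the hrtimer; tick until standby. */
	for (i = 0; s.state != TSC2046_STATE_STANDBY && i < 20; i++) {
		printf("tick %d (poll_cnt=%d)\n", i, s.poll_cnt);
		sim_timer(&s);
	}
	printf("back to STANDBY after %d ticks\n", i);
	return 0;
}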
Diffstat (limited to 'drivers/iio/adc')
-rw-r--r--   drivers/iio/adc/ti-tsc2046.c   145
1 file changed, 99 insertions(+), 46 deletions(-)
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index d84ae6b008c1..098dbca49176 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -82,6 +82,10 @@
#define TI_TSC2046_DATA_12BIT GENMASK(14, 3)
#define TI_TSC2046_MAX_CHAN 8
+#define TI_TSC2046_MIN_POLL_CNT 3
+#define TI_TSC2046_EXT_POLL_CNT 3
+#define TI_TSC2046_POLL_CNT \
+ (TI_TSC2046_MIN_POLL_CNT + TI_TSC2046_EXT_POLL_CNT)
/* Represents a HW sample */
struct tsc2046_adc_atom {
@@ -123,14 +127,23 @@ struct tsc2046_adc_ch_cfg {
unsigned int oversampling_ratio;
};
+enum tsc2046_state {
+ TSC2046_STATE_SHUTDOWN,
+ TSC2046_STATE_STANDBY,
+ TSC2046_STATE_POLL,
+ TSC2046_STATE_POLL_IRQ_DISABLE,
+ TSC2046_STATE_ENABLE_IRQ,
+};
+
struct tsc2046_adc_priv {
struct spi_device *spi;
const struct tsc2046_adc_dcfg *dcfg;
struct iio_trigger *trig;
struct hrtimer trig_timer;
- spinlock_t trig_lock;
- unsigned int trig_more_count;
+ enum tsc2046_state state;
+ int poll_cnt;
+ spinlock_t state_lock;
struct spi_transfer xfer;
struct spi_message msg;
@@ -411,21 +424,63 @@ static const struct iio_info tsc2046_adc_info = {
.update_scan_mode = tsc2046_adc_update_scan_mode,
};
-static enum hrtimer_restart tsc2046_adc_trig_more(struct hrtimer *hrtimer)
+static enum hrtimer_restart tsc2046_adc_timer(struct hrtimer *hrtimer)
{
struct tsc2046_adc_priv *priv = container_of(hrtimer,
struct tsc2046_adc_priv,
trig_timer);
unsigned long flags;
- spin_lock_irqsave(&priv->trig_lock, flags);
-
- disable_irq_nosync(priv->spi->irq);
-
- priv->trig_more_count++;
- iio_trigger_poll(priv->trig);
+ /*
+ * This state machine should address the following challenges:
+ * - the interrupt source is based on a level shifter attached to the X
+ * channel of the ADC. It will change state every time we switch
+ * between channels, so we need to disable the IRQ whenever we do
+ * iio_trigger_poll().
+ * - we should do iio_trigger_poll() at some reduced sample rate
+ * - we should still trigger for some amount of time after the last
+ * interrupt with enabled IRQ was processed.
+ */
- spin_unlock_irqrestore(&priv->trig_lock, flags);
+ spin_lock_irqsave(&priv->state_lock, flags);
+ switch (priv->state) {
+ case TSC2046_STATE_ENABLE_IRQ:
+ if (priv->poll_cnt < TI_TSC2046_POLL_CNT) {
+ priv->poll_cnt++;
+ hrtimer_start(&priv->trig_timer,
+ ns_to_ktime(priv->scan_interval_us *
+ NSEC_PER_USEC),
+ HRTIMER_MODE_REL_SOFT);
+
+ if (priv->poll_cnt >= TI_TSC2046_MIN_POLL_CNT) {
+ priv->state = TSC2046_STATE_POLL_IRQ_DISABLE;
+ enable_irq(priv->spi->irq);
+ } else {
+ priv->state = TSC2046_STATE_POLL;
+ }
+ } else {
+ priv->state = TSC2046_STATE_STANDBY;
+ enable_irq(priv->spi->irq);
+ }
+ break;
+ case TSC2046_STATE_POLL_IRQ_DISABLE:
+ disable_irq_nosync(priv->spi->irq);
+ fallthrough;
+ case TSC2046_STATE_POLL:
+ priv->state = TSC2046_STATE_ENABLE_IRQ;
+ /* iio_trigger_poll() starts hrtimer */
+ iio_trigger_poll(priv->trig);
+ break;
+ case TSC2046_STATE_SHUTDOWN:
+ break;
+ case TSC2046_STATE_STANDBY:
+ fallthrough;
+ default:
+ dev_warn(&priv->spi->dev, "Got unexpected state: %i\n",
+ priv->state);
+ break;
+ }
+ spin_unlock_irqrestore(&priv->state_lock, flags);
return HRTIMER_NORESTART;
}
@@ -434,16 +489,20 @@ static irqreturn_t tsc2046_adc_irq(int irq, void *dev_id)
{
struct iio_dev *indio_dev = dev_id;
struct tsc2046_adc_priv *priv = iio_priv(indio_dev);
-
- spin_lock(&priv->trig_lock);
+ unsigned long flags;
hrtimer_try_to_cancel(&priv->trig_timer);
- priv->trig_more_count = 0;
- disable_irq_nosync(priv->spi->irq);
- iio_trigger_poll(priv->trig);
+ spin_lock_irqsave(&priv->state_lock, flags);
+ if (priv->state != TSC2046_STATE_SHUTDOWN) {
+ priv->state = TSC2046_STATE_ENABLE_IRQ;
+ priv->poll_cnt = 0;
- spin_unlock(&priv->trig_lock);
+ /* iio_trigger_poll() starts hrtimer */
+ disable_irq_nosync(priv->spi->irq);
+ iio_trigger_poll(priv->trig);
+ }
+ spin_unlock_irqrestore(&priv->state_lock, flags);
return IRQ_HANDLED;
}
@@ -452,49 +511,42 @@ static void tsc2046_adc_reenable_trigger(struct iio_trigger *trig)
{
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
struct tsc2046_adc_priv *priv = iio_priv(indio_dev);
- unsigned long flags;
- int delta;
+ ktime_t tim;
/*
* We can sample it as fast as we can, but usually we do not need so
* many samples. Reduce the sample rate for default (touchscreen) use
* case.
- * Currently we do not need a highly precise sample rate. It is enough
- * to have calculated numbers.
*/
- delta = priv->scan_interval_us - priv->time_per_scan_us;
- if (delta > 0)
- fsleep(delta);
-
- spin_lock_irqsave(&priv->trig_lock, flags);
-
- /*
- * We need to trigger at least one extra sample to detect state
- * difference on ADC side.
- */
- if (!priv->trig_more_count) {
- int timeout_ms = DIV_ROUND_UP(priv->scan_interval_us,
- USEC_PER_MSEC);
-
- hrtimer_start(&priv->trig_timer, ms_to_ktime(timeout_ms),
- HRTIMER_MODE_REL_SOFT);
- }
-
- enable_irq(priv->spi->irq);
-
- spin_unlock_irqrestore(&priv->trig_lock, flags);
+ tim = ns_to_ktime((priv->scan_interval_us - priv->time_per_scan_us) *
+ NSEC_PER_USEC);
+ hrtimer_start(&priv->trig_timer, tim, HRTIMER_MODE_REL_SOFT);
}
static int tsc2046_adc_set_trigger_state(struct iio_trigger *trig, bool enable)
{
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
struct tsc2046_adc_priv *priv = iio_priv(indio_dev);
+ unsigned long flags;
if (enable) {
- enable_irq(priv->spi->irq);
+ spin_lock_irqsave(&priv->state_lock, flags);
+ if (priv->state == TSC2046_STATE_SHUTDOWN) {
+ priv->state = TSC2046_STATE_STANDBY;
+ enable_irq(priv->spi->irq);
+ }
+ spin_unlock_irqrestore(&priv->state_lock, flags);
} else {
- disable_irq(priv->spi->irq);
- hrtimer_try_to_cancel(&priv->trig_timer);
+ spin_lock_irqsave(&priv->state_lock, flags);
+
+ if (priv->state == TSC2046_STATE_STANDBY ||
+ priv->state == TSC2046_STATE_POLL_IRQ_DISABLE)
+ disable_irq_nosync(priv->spi->irq);
+
+ priv->state = TSC2046_STATE_SHUTDOWN;
+ spin_unlock_irqrestore(&priv->state_lock, flags);
+
+ hrtimer_cancel(&priv->trig_timer);
}
return 0;
@@ -668,10 +720,11 @@ static int tsc2046_adc_probe(struct spi_device *spi)
iio_trigger_set_drvdata(trig, indio_dev);
trig->ops = &tsc2046_adc_trigger_ops;
- spin_lock_init(&priv->trig_lock);
+ spin_lock_init(&priv->state_lock);
+ priv->state = TSC2046_STATE_SHUTDOWN;
hrtimer_init(&priv->trig_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_SOFT);
- priv->trig_timer.function = tsc2046_adc_trig_more;
+ priv->trig_timer.function = tsc2046_adc_timer;
ret = devm_iio_trigger_register(dev, trig);
if (ret) {