author     Linus Torvalds <torvalds@linux-foundation.org>   2017-07-04 12:01:07 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-07-04 12:01:07 -0700
commit     b39de277b02ffd8e3dccb01e9159bd45cb07b95d (patch)
tree       0cfa6f87f308b0a17b3026dc09813a3eff888ae1 /drivers
parent     d62eb5edf6643ede7e48b4d03ba972c0e8949acc (diff)
parent     082f6968bb204d1a3d8b2da3c53d6b7a59bbd985 (diff)
download   linux-b39de277b02ffd8e3dccb01e9159bd45cb07b95d.tar.bz2
Merge tag 'spi-v4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi
Pull spi updates from Mark Brown:
 "There's only one big change in this release but it's a very big
  change: Geert Uytterhoeven has implemented support for SPI slave
  mode. This feature has been on the cards since the subsystem was
  originally merged back in the mists of time so it's great that Geert
  stepped up and finally implemented it.

   - SPI slave support, together with wholesale renaming of SPI
     controllers from master to controller which went surprisingly
     smoothly. This is already used with Renesas SoCs and support is in
     the works for i.MX too.

   - New drivers for Meson SPICC and ST STM32"

* tag 'spi-v4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi: (57 commits)
  spi: loopback-test: Fix kfree() NULL pointer error.
  spi: loopback-test: fix spelling mistake: "reruning" -> "rerunning"
  spi: sirf: fix spelling mistake: "registerred" -> "registered"
  spi: stm32: fix potential dereference null return value
  spi: stm32: enhance DMA error management
  spi: stm32: add runtime PM support
  spi: stm32: use normal conditional statements instead of ternary operator
  spi: stm32: replace st,spi-midi with st,spi-midi-ns to fit bindings
  spi: stm32: fix example with st,spi-midi-ns property
  spi: stm32: fix compatible to fit with new bindings
  spi: stm32: use SoC specific compatible
  spi: rockchip: Disable Runtime PM when chip select is asserted
  spi: rockchip: Set GPIO_SS flag to enable Slave Select with GPIO CS
  spi: atmel: fix corrupted data issue on SAM9 family SoCs
  spi: stm32: fix error check on mbr being -ve
  spi: add driver for STM32 SPI controller
  spi: Document the STM32 SPI bindings
  spi/bcm63xx: Fix checkpatch warnings
  spi: imx: Check for allocation failure earlier
  spi: mediatek: add spi support for mt2712 IC
  ...
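Editor's note: for readers unfamiliar with the slave-mode API mentioned above, the sketch below (a hypothetical "foo" controller driver, not part of this pull) shows the controller-side pattern the series introduces: spi_alloc_slave() as the counterpart of spi_alloc_master(), the new slave_abort hook, and spi_controller_is_slave() to skip the clock and chip-select programming that only a master performs. The sh-msiof conversion further down in this diff follows exactly this shape; every foo_* name and the "example,foo-spi" binding are placeholders.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct foo_spi {
	void __iomem *base;	/* placeholder private state */
};

static int foo_spi_slave_abort(struct spi_master *master)
{
	/* wake up any transfer still waiting for the remote SPI master */
	return 0;
}

static int foo_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	/*
	 * In slave mode the remote master owns SCK and CS, so skip the
	 * clock-divider and chip-select setup a master would do here.
	 */
	if (!spi_controller_is_slave(master))
		dev_dbg(&spi->dev, "master mode: set %u Hz\n", xfer->speed_hz);

	/* start the hardware; completion would be signalled from an IRQ */
	return 1;
}

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;

	/* spi_alloc_slave() is the new counterpart of spi_alloc_master() */
	if (of_property_read_bool(pdev->dev.of_node, "spi-slave"))
		master = spi_alloc_slave(&pdev->dev, sizeof(struct foo_spi));
	else
		master = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
	if (!master)
		return -ENOMEM;

	master->transfer_one = foo_spi_transfer_one;
	master->slave_abort = foo_spi_slave_abort;	/* used in slave mode */
	master->dev.of_node = pdev->dev.of_node;

	return devm_spi_register_master(&pdev->dev, master);
}

static const struct of_device_id foo_spi_of_match[] = {
	{ .compatible = "example,foo-spi" },	/* hypothetical binding */
	{ }
};
MODULE_DEVICE_TABLE(of, foo_spi_of_match);

static struct platform_driver foo_spi_driver = {
	.probe	= foo_spi_probe,
	.driver	= {
		.name		= "foo-spi",
		.of_match_table	= foo_spi_of_match,
	},
};
module_platform_driver(foo_spi_driver);
MODULE_LICENSE("GPL");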
Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/spi/Kconfig                        44
 -rw-r--r--  drivers/spi/Makefile                        6
 -rw-r--r--  drivers/spi/spi-atmel.c                    30
 -rw-r--r--  drivers/spi/spi-bcm63xx-hsspi.c             1
 -rw-r--r--  drivers/spi/spi-bcm63xx.c                   4
 -rw-r--r--  drivers/spi/spi-davinci.c                   9
 -rw-r--r--  drivers/spi/spi-fsl-dspi.c                  3
 -rw-r--r--  drivers/spi/spi-imx.c                      92
 -rw-r--r--  drivers/spi/spi-loopback-test.c            14
 -rw-r--r--  drivers/spi/spi-meson-spicc.c             619
 -rw-r--r--  drivers/spi/spi-mt65xx.c                   61
 -rw-r--r--  drivers/spi/spi-omap2-mcspi.c               3
 -rw-r--r--  drivers/spi/spi-orion.c                    10
 -rw-r--r--  drivers/spi/spi-pxa2xx.c                   22
 -rw-r--r--  drivers/spi/spi-rockchip.c                 79
 -rw-r--r--  drivers/spi/spi-sh-msiof.c                111
 -rw-r--r--  drivers/spi/spi-sirf.c                      2
 -rw-r--r--  drivers/spi/spi-slave-system-control.c    154
 -rw-r--r--  drivers/spi/spi-slave-time.c              129
 -rw-r--r--  drivers/spi/spi-st-ssc4.c                  38
 -rw-r--r--  drivers/spi/spi-stm32.c                  1322
 -rw-r--r--  drivers/spi/spi.c                        1220
 -rw-r--r--  drivers/spi/spidev.c                       11
 23 files changed, 3256 insertions, 728 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1761c9004fc1..9b31351fe429 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -393,6 +393,13 @@ config SPI_FSL_ESPI
From MPC8536, 85xx platform uses the controller, and all P10xx,
P20xx, P30xx,P40xx, P50xx uses this controller.
+config SPI_MESON_SPICC
+ tristate "Amlogic Meson SPICC controller"
+ depends on ARCH_MESON || COMPILE_TEST
+ help
+ This enables master mode support for the SPICC (SPI communication
+ controller) available in Amlogic Meson SoCs.
+
config SPI_MESON_SPIFC
tristate "Amlogic Meson SPIFC controller"
depends on ARCH_MESON || COMPILE_TEST
@@ -457,6 +464,7 @@ config SPI_OMAP24XX
config SPI_TI_QSPI
tristate "DRA7xxx QSPI controller support"
+ depends on HAS_DMA
depends on ARCH_OMAP2PLUS || COMPILE_TEST
help
QSPI master controller for DRA7xxx used for flash devices.
@@ -619,6 +627,16 @@ config SPI_SIRF
help
SPI driver for CSR SiRFprimaII SoCs
+config SPI_STM32
+ tristate "STMicroelectronics STM32 SPI controller"
+ depends on ARCH_STM32 || COMPILE_TEST
+ help
+	  SPI driver for STMicroelectronics STM32 SoCs.
+
+ STM32 SPI controller supports DMA and PIO modes. When DMA
+ is not available, the driver automatically falls back to
+ PIO mode.
+
config SPI_ST_SSC4
tristate "STMicroelectronics SPI SSC-based driver"
depends on ARCH_STI || COMPILE_TEST
@@ -784,6 +802,30 @@ config SPI_TLE62X0
endif # SPI_MASTER
-# (slave support would go here)
+#
+# SLAVE side ... listening to other SPI masters
+#
+
+config SPI_SLAVE
+ bool "SPI slave protocol handlers"
+ help
+ If your system has a slave-capable SPI controller, you can enable
+ slave protocol handlers.
+
+if SPI_SLAVE
+
+config SPI_SLAVE_TIME
+ tristate "SPI slave handler reporting boot up time"
+ help
+ SPI slave handler responding with the time of reception of the last
+ SPI message.
+
+config SPI_SLAVE_SYSTEM_CONTROL
+ tristate "SPI slave handler controlling system state"
+ help
+ SPI slave handler to allow remote control of system reboot, power
+ off, halt, and suspend.
+
+endif # SPI_SLAVE
endif # SPI
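
Editor's note: the two protocol handlers enabled by the SPI_SLAVE entries above share a common shape; a minimal sketch of such a handler follows (names containing "demo" are hypothetical, the structure mirrors the spi-slave-system-control driver added later in this diff). A handler is an ordinary spi_driver that queues a message and lets the remote SPI master decide when it completes; it is typically instantiated by writing its name to the controller's sysfs "slave" attribute.

#include <linux/completion.h>
#include <linux/module.h>
#include <linux/spi/spi.h>

struct spi_slave_demo_priv {
	struct spi_device *spi;
	struct completion finished;
	struct spi_transfer xfer;
	struct spi_message msg;
	u8 buf[16];
};

static int spi_slave_demo_submit(struct spi_slave_demo_priv *priv);

static void spi_slave_demo_complete(void *arg)
{
	struct spi_slave_demo_priv *priv = arg;

	/* stop on error or abort, otherwise queue the next message */
	if (priv->msg.status || spi_slave_demo_submit(priv))
		complete(&priv->finished);
}

static int spi_slave_demo_submit(struct spi_slave_demo_priv *priv)
{
	/*
	 * Queue a transfer and return; it only completes whenever the
	 * remote SPI master clocks data in or out.
	 */
	spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
	priv->msg.complete = spi_slave_demo_complete;
	priv->msg.context = priv;

	return spi_async(priv->spi, &priv->msg);
}

static int spi_slave_demo_probe(struct spi_device *spi)
{
	struct spi_slave_demo_priv *priv;

	priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->spi = spi;
	init_completion(&priv->finished);
	priv->xfer.rx_buf = priv->buf;
	priv->xfer.len = sizeof(priv->buf);
	spi_set_drvdata(spi, priv);

	return spi_slave_demo_submit(priv);
}

static int spi_slave_demo_remove(struct spi_device *spi)
{
	struct spi_slave_demo_priv *priv = spi_get_drvdata(spi);

	/* spi_slave_abort() unblocks the message queued above */
	spi_slave_abort(spi);
	wait_for_completion(&priv->finished);
	return 0;
}

static struct spi_driver spi_slave_demo_driver = {
	.driver = {
		.name	= "spi-slave-demo",
	},
	.probe	= spi_slave_demo_probe,
	.remove	= spi_slave_demo_remove,
};
module_spi_driver(spi_slave_demo_driver);

MODULE_DESCRIPTION("Hypothetical SPI slave protocol handler sketch");
MODULE_LICENSE("GPL");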
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index b375a7a89216..a3ae2b70cdc3 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o
+obj-$(CONFIG_SPI_MESON_SPICC) += spi-meson-spicc.o
obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o
obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
@@ -89,6 +90,7 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
+obj-$(CONFIG_SPI_STM32) += spi-stm32.o
obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o
@@ -105,3 +107,7 @@ obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
obj-$(CONFIG_SPI_XLP) += spi-xlp.o
obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o
+
+# SPI slave protocol handlers
+obj-$(CONFIG_SPI_SLAVE_TIME) += spi-slave-time.o
+obj-$(CONFIG_SPI_SLAVE_SYSTEM_CONTROL) += spi-slave-system-control.o
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 1eb83c9613d5..f95da364c283 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -269,6 +269,7 @@ struct atmel_spi_caps {
bool is_spi2;
bool has_wdrbt;
bool has_dma_support;
+ bool has_pdc_support;
};
/*
@@ -1422,11 +1423,31 @@ static void atmel_get_caps(struct atmel_spi *as)
unsigned int version;
version = atmel_get_version(as);
- dev_info(&as->pdev->dev, "version: 0x%x\n", version);
as->caps.is_spi2 = version > 0x121;
as->caps.has_wdrbt = version >= 0x210;
+#ifdef CONFIG_SOC_SAM_V4_V5
+ /*
+ * Atmel SoCs based on ARM9 (SAM9x) cores should not use spi_map_buf()
+ * since this latter function tries to map buffers with dma_map_sg()
+ * even if they have not been allocated inside DMA-safe areas.
+ * On SoCs based on Cortex A5 (SAMA5Dx), it works anyway because for
+ * those ARM cores, the data cache follows the PIPT model.
+ * Also the L2 cache controller of SAMA5D2 uses the PIPT model too.
+ * In case of PIPT caches, there cannot be cache aliases.
+ * However on ARM9 cores, the data cache follows the VIVT model, hence
+ * the cache aliases issue can occur when buffers are allocated from
+ * DMA-unsafe areas, by vmalloc() for instance, where cache coherency is
+ * not taken into account or at least not handled completely (cache
+ * lines of aliases are not invalidated).
+ * This is not a theoretical issue: it was reproduced when trying to mount
+ * a UBI file-system on an at91sam9g35ek board.
+ */
+ as->caps.has_dma_support = false;
+#else
as->caps.has_dma_support = version >= 0x212;
+#endif
+ as->caps.has_pdc_support = version < 0x212;
}
/*-------------------------------------------------------------------------*/
@@ -1567,7 +1588,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
} else if (ret == -EPROBE_DEFER) {
return ret;
}
- } else {
+ } else if (as->caps.has_pdc_support) {
as->use_pdc = true;
}
@@ -1609,8 +1630,9 @@ static int atmel_spi_probe(struct platform_device *pdev)
goto out_free_dma;
/* go! */
- dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
- (unsigned long)regs->start, irq);
+ dev_info(&pdev->dev, "Atmel SPI Controller version 0x%x at 0x%08lx (irq %d)\n",
+ atmel_get_version(as), (unsigned long)regs->start,
+ irq);
return 0;
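
Editor's note: the scenario the comment added above guards against is easiest to picture from the client side. A hypothetical sketch, not taken from the kernel: a vmalloc()ed buffer, as UBI/UBIFS uses, reaches the controller through an ordinary sync transfer; with DMA enabled the core's spi_map_buf() would map it via dma_map_sg(), and on a VIVT-cache ARM9 the linear-map alias of those pages can hold stale cache lines. Forcing has_dma_support off makes the driver fall back to PDC/PIO, where no aliasing can bite.

#include <linux/spi/spi.h>
#include <linux/vmalloc.h>

/* Hypothetical client write illustrating the problem case on SAM9 */
static int demo_write_vmalloc_buf(struct spi_device *spi, size_t len)
{
	void *buf = vmalloc(len);	/* not in a DMA-safe, alias-free area */
	struct spi_transfer t = {
		.tx_buf = buf,
		.len = len,
	};
	int ret;

	if (!buf)
		return -ENOMEM;

	/* with DMA this buffer may be mapped via spi_map_buf()/dma_map_sg() */
	ret = spi_sync_transfer(spi, &t, 1);
	vfree(buf);
	return ret;
}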
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index 5514cd02e93a..4da2d4a524ca 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -484,6 +484,7 @@ static const struct of_device_id bcm63xx_hsspi_of_match[] = {
{ .compatible = "brcm,bcm6328-hsspi", },
{ },
};
+MODULE_DEVICE_TABLE(of, bcm63xx_hsspi_of_match);
static struct platform_driver bcm63xx_hsspi_driver = {
.driver = {
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 247f71b02235..84c7356ce5b4 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -147,7 +147,7 @@ struct bcm63xx_spi {
/* Platform data */
const unsigned long *reg_offsets;
- unsigned fifo_size;
+ unsigned int fifo_size;
unsigned int msg_type_shift;
unsigned int msg_ctl_width;
@@ -191,7 +191,7 @@ static inline void bcm_spi_writew(struct bcm63xx_spi *bs,
#endif
}
-static const unsigned bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
+static const unsigned int bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
{ 20000000, SPI_CLK_20MHZ },
{ 12500000, SPI_CLK_12_50MHZ },
{ 6250000, SPI_CLK_6_250MHZ },
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 595acdcfc7d0..6ddb6ef1fda4 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -873,9 +873,8 @@ static int spi_davinci_get_pdata(struct platform_device *pdev,
return 0;
}
#else
-static struct davinci_spi_platform_data
- *spi_davinci_get_pdata(struct platform_device *pdev,
- struct davinci_spi *dspi)
+static int spi_davinci_get_pdata(struct platform_device *pdev,
+ struct davinci_spi *dspi)
{
return -ENODEV;
}
@@ -965,7 +964,9 @@ static int davinci_spi_probe(struct platform_device *pdev)
ret = -ENODEV;
goto free_master;
}
- clk_prepare_enable(dspi->clk);
+ ret = clk_prepare_enable(dspi->clk);
+ if (ret)
+ goto free_master;
master->dev.of_node = pdev->dev.of_node;
master->bus_num = pdev->id;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 15201645bdc4..d89127f4a46d 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1032,7 +1032,8 @@ static int dspi_probe(struct platform_device *pdev)
goto out_master_put;
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
- if (dspi_request_dma(dspi, res->start)) {
+ ret = dspi_request_dma(dspi, res->start);
+ if (ret < 0) {
dev_err(&pdev->dev, "can't get dma channels\n");
goto out_clk_put;
}
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index b402530a7a9a..f9698b7aeb3b 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -56,10 +56,6 @@
/* The maximum bytes that a sdma BD can transfer.*/
#define MAX_SDMA_BD_BYTES (1 << 15)
-struct spi_imx_config {
- unsigned int speed_hz;
- unsigned int bpw;
-};
enum spi_imx_devtype {
IMX1_CSPI,
@@ -74,7 +70,7 @@ struct spi_imx_data;
struct spi_imx_devtype_data {
void (*intctrl)(struct spi_imx_data *, int);
- int (*config)(struct spi_device *, struct spi_imx_config *);
+ int (*config)(struct spi_device *);
void (*trigger)(struct spi_imx_data *);
int (*rx_available)(struct spi_imx_data *);
void (*reset)(struct spi_imx_data *);
@@ -94,7 +90,8 @@ struct spi_imx_data {
unsigned long spi_clk;
unsigned int spi_bus_clk;
- unsigned int bytes_per_word;
+ unsigned int speed_hz;
+ unsigned int bits_per_word;
unsigned int spi_drctl;
unsigned int count;
@@ -203,34 +200,27 @@ out:
return i;
}
-static int spi_imx_bytes_per_word(const int bpw)
+static int spi_imx_bytes_per_word(const int bits_per_word)
{
- return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
+ return DIV_ROUND_UP(bits_per_word, BITS_PER_BYTE);
}
static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
- unsigned int bpw, i;
+ unsigned int bytes_per_word, i;
if (!master->dma_rx)
return false;
- if (!transfer)
- return false;
-
- bpw = transfer->bits_per_word;
- if (!bpw)
- bpw = spi->bits_per_word;
-
- bpw = spi_imx_bytes_per_word(bpw);
+ bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
- if (bpw != 1 && bpw != 2 && bpw != 4)
+ if (bytes_per_word != 1 && bytes_per_word != 2 && bytes_per_word != 4)
return false;
for (i = spi_imx_get_fifosize(spi_imx) / 2; i > 0; i--) {
- if (!(transfer->len % (i * bpw)))
+ if (!(transfer->len % (i * bytes_per_word)))
break;
}
@@ -340,12 +330,11 @@ static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}
-static int mx51_ecspi_config(struct spi_device *spi,
- struct spi_imx_config *config)
+static int mx51_ecspi_config(struct spi_device *spi)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
- u32 clk = config->speed_hz, delay, reg;
+ u32 clk = spi_imx->speed_hz, delay, reg;
u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
/*
@@ -364,13 +353,13 @@ static int mx51_ecspi_config(struct spi_device *spi,
ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);
/* set clock speed */
- ctrl |= mx51_ecspi_clkdiv(spi_imx, config->speed_hz, &clk);
+ ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->speed_hz, &clk);
spi_imx->spi_bus_clk = clk;
/* set chip select to use */
ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
- ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
+ ctrl |= (spi_imx->bits_per_word - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
@@ -501,21 +490,21 @@ static void mx31_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MXC_CSPICTRL);
}
-static int mx31_config(struct spi_device *spi, struct spi_imx_config *config)
+static int mx31_config(struct spi_device *spi)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
unsigned int clk;
- reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz, &clk) <<
+ reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->speed_hz, &clk) <<
MX31_CSPICTRL_DR_SHIFT;
spi_imx->spi_bus_clk = clk;
if (is_imx35_cspi(spi_imx)) {
- reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
+ reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
reg |= MX31_CSPICTRL_SSCTL;
} else {
- reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
+ reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
}
if (spi->mode & SPI_CPHA)
@@ -597,18 +586,18 @@ static void mx21_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MXC_CSPICTRL);
}
-static int mx21_config(struct spi_device *spi, struct spi_imx_config *config)
+static int mx21_config(struct spi_device *spi)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
unsigned int clk;
- reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max, &clk)
+ reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->speed_hz, max, &clk)
<< MX21_CSPICTRL_DR_SHIFT;
spi_imx->spi_bus_clk = clk;
- reg |= config->bpw - 1;
+ reg |= spi_imx->bits_per_word - 1;
if (spi->mode & SPI_CPHA)
reg |= MX21_CSPICTRL_PHA;
@@ -666,17 +655,17 @@ static void mx1_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MXC_CSPICTRL);
}
-static int mx1_config(struct spi_device *spi, struct spi_imx_config *config)
+static int mx1_config(struct spi_device *spi)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
unsigned int clk;
- reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz, &clk) <<
+ reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->speed_hz, &clk) <<
MX1_CSPICTRL_DR_SHIFT;
spi_imx->spi_bus_clk = clk;
- reg |= config->bpw - 1;
+ reg |= spi_imx->bits_per_word - 1;
if (spi->mode & SPI_CPHA)
reg |= MX1_CSPICTRL_PHA;
@@ -841,15 +830,14 @@ static irqreturn_t spi_imx_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int spi_imx_dma_configure(struct spi_master *master,
- int bytes_per_word)
+static int spi_imx_dma_configure(struct spi_master *master)
{
int ret;
enum dma_slave_buswidth buswidth;
struct dma_slave_config rx = {}, tx = {};
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
- switch (bytes_per_word) {
+ switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
case 4:
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
break;
@@ -883,8 +871,6 @@ static int spi_imx_dma_configure(struct spi_master *master,
return ret;
}
- spi_imx->bytes_per_word = bytes_per_word;
-
return 0;
}
@@ -892,22 +878,19 @@ static int spi_imx_setupxfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
- struct spi_imx_config config;
int ret;
- config.bpw = t ? t->bits_per_word : spi->bits_per_word;
- config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
+ if (!t)
+ return 0;
- if (!config.speed_hz)
- config.speed_hz = spi->max_speed_hz;
- if (!config.bpw)
- config.bpw = spi->bits_per_word;
+ spi_imx->bits_per_word = t->bits_per_word;
+ spi_imx->speed_hz = t->speed_hz;
/* Initialize the functions for transfer */
- if (config.bpw <= 8) {
+ if (spi_imx->bits_per_word <= 8) {
spi_imx->rx = spi_imx_buf_rx_u8;
spi_imx->tx = spi_imx_buf_tx_u8;
- } else if (config.bpw <= 16) {
+ } else if (spi_imx->bits_per_word <= 16) {
spi_imx->rx = spi_imx_buf_rx_u16;
spi_imx->tx = spi_imx_buf_tx_u16;
} else {
@@ -921,13 +904,12 @@ static int spi_imx_setupxfer(struct spi_device *spi,
spi_imx->usedma = 0;
if (spi_imx->usedma) {
- ret = spi_imx_dma_configure(spi->master,
- spi_imx_bytes_per_word(config.bpw));
+ ret = spi_imx_dma_configure(spi->master);
if (ret)
return ret;
}
- spi_imx->devtype_data->config(spi, &config);
+ spi_imx->devtype_data->config(spi);
return 0;
}
@@ -976,8 +958,6 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
goto err;
}
- spi_imx_dma_configure(master, 1);
-
init_completion(&spi_imx->dma_rx_completion);
init_completion(&spi_imx->dma_tx_completion);
master->can_dma = spi_imx_can_dma;
@@ -1189,15 +1169,15 @@ static int spi_imx_probe(struct platform_device *pdev)
}
master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
+ if (!master)
+ return -ENOMEM;
+
ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
if ((ret < 0) || (spi_drctl >= 0x3)) {
/* '11' is reserved */
spi_drctl = 0;
}
- if (!master)
- return -ENOMEM;
-
platform_set_drvdata(pdev, master);
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index f4875f177df0..3459965004f8 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -894,7 +894,7 @@ int spi_test_execute_msg(struct spi_device *spi, struct spi_test *test,
test->elapsed_time = ktime_to_ns(ktime_sub(ktime_get(), start));
if (ret == -ETIMEDOUT) {
dev_info(&spi->dev,
- "spi-message timed out - reruning...\n");
+ "spi-message timed out - rerunning...\n");
/* rerun after a few explicit schedules */
for (i = 0; i < 16; i++)
schedule();
@@ -1021,10 +1021,9 @@ int spi_test_run_tests(struct spi_device *spi,
rx = vmalloc(SPI_TEST_MAX_SIZE_PLUS);
else
rx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL);
- if (!rx) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!rx)
+ return -ENOMEM;
+
if (use_vmalloc)
tx = vmalloc(SPI_TEST_MAX_SIZE_PLUS);
@@ -1032,7 +1031,7 @@ int spi_test_run_tests(struct spi_device *spi,
tx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL);
if (!tx) {
ret = -ENOMEM;
- goto out;
+ goto err_tx;
}
/* now run the individual tests in the table */
@@ -1057,8 +1056,9 @@ int spi_test_run_tests(struct spi_device *spi,
}
out:
- kvfree(rx);
kvfree(tx);
+err_tx:
+ kvfree(rx);
return ret;
}
EXPORT_SYMBOL_GPL(spi_test_run_tests);
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
new file mode 100644
index 000000000000..7f8429635502
--- /dev/null
+++ b/drivers/spi/spi-meson-spicc.c
@@ -0,0 +1,619 @@
+/*
+ * Driver for Amlogic Meson SPI communication controller (SPICC)
+ *
+ * Copyright (C) BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/gpio.h>
+
+/*
+ * The Meson SPICC controller could support DMA-based transfers, but DMA is
+ * not implemented by the vendor code, and even with the register
+ * documentation available it has never worked on the GXL hardware.
+ * PIO is therefore the only mode implemented, and because of hardware
+ * limitations:
+ * - all transfers are cut into bursts of at most 16 words, because the FIFO
+ *   hangs on TX underflow and there is no TX "Half-Empty" interrupt, so we
+ *   can only go one FIFO-sized chunk at a time
+ * - CS management is dumb and de-asserts between every burst, so it is really
+ *   a "Data Valid" signal rather than a Chip Select; a GPIO should be used
+ *   instead to keep CS asserted over the full transfer
+ */
+
+#define SPICC_MAX_FREQ 30000000
+#define SPICC_MAX_BURST 128
+
+/* Register Map */
+#define SPICC_RXDATA 0x00
+
+#define SPICC_TXDATA 0x04
+
+#define SPICC_CONREG 0x08
+#define SPICC_ENABLE BIT(0)
+#define SPICC_MODE_MASTER BIT(1)
+#define SPICC_XCH BIT(2)
+#define SPICC_SMC BIT(3)
+#define SPICC_POL BIT(4)
+#define SPICC_PHA BIT(5)
+#define SPICC_SSCTL BIT(6)
+#define SPICC_SSPOL BIT(7)
+#define SPICC_DRCTL_MASK GENMASK(9, 8)
+#define SPICC_DRCTL_IGNORE 0
+#define SPICC_DRCTL_FALLING 1
+#define SPICC_DRCTL_LOWLEVEL 2
+#define SPICC_CS_MASK GENMASK(13, 12)
+#define SPICC_DATARATE_MASK GENMASK(18, 16)
+#define SPICC_DATARATE_DIV4 0
+#define SPICC_DATARATE_DIV8 1
+#define SPICC_DATARATE_DIV16 2
+#define SPICC_DATARATE_DIV32 3
+#define SPICC_BITLENGTH_MASK GENMASK(24, 19)
+#define SPICC_BURSTLENGTH_MASK GENMASK(31, 25)
+
+#define SPICC_INTREG 0x0c
+#define SPICC_TE_EN BIT(0) /* TX FIFO Empty Interrupt */
+#define SPICC_TH_EN BIT(1) /* TX FIFO Half-Full Interrupt */
+#define SPICC_TF_EN BIT(2) /* TX FIFO Full Interrupt */
+#define SPICC_RR_EN BIT(3) /* RX FIFO Ready Interrupt */
+#define SPICC_RH_EN BIT(4) /* RX FIFO Half-Full Interrupt */
+#define SPICC_RF_EN BIT(5) /* RX FIFO Full Interrupt */
+#define SPICC_RO_EN BIT(6) /* RX FIFO Overflow Interrupt */
+#define SPICC_TC_EN BIT(7) /* Transfer Complete Interrupt */
+
+#define SPICC_DMAREG 0x10
+#define SPICC_DMA_ENABLE BIT(0)
+#define SPICC_TXFIFO_THRESHOLD_MASK GENMASK(5, 1)
+#define SPICC_RXFIFO_THRESHOLD_MASK GENMASK(10, 6)
+#define SPICC_READ_BURST_MASK GENMASK(14, 11)
+#define SPICC_WRITE_BURST_MASK GENMASK(18, 15)
+#define SPICC_DMA_URGENT BIT(19)
+#define SPICC_DMA_THREADID_MASK GENMASK(25, 20)
+#define SPICC_DMA_BURSTNUM_MASK GENMASK(31, 26)
+
+#define SPICC_STATREG 0x14
+#define SPICC_TE BIT(0) /* TX FIFO Empty Interrupt */
+#define SPICC_TH BIT(1) /* TX FIFO Half-Full Interrupt */
+#define SPICC_TF BIT(2) /* TX FIFO Full Interrupt */
+#define SPICC_RR BIT(3) /* RX FIFO Ready Interrupt */
+#define SPICC_RH BIT(4) /* RX FIFO Half-Full Interrupt */
+#define SPICC_RF BIT(5) /* RX FIFO Full Interrupt */
+#define SPICC_RO BIT(6) /* RX FIFO Overflow Interrupt */
+#define SPICC_TC BIT(7) /* Transfer Complete Interrupt */
+
+#define SPICC_PERIODREG 0x18
+#define SPICC_PERIOD GENMASK(14, 0) /* Wait cycles */
+
+#define SPICC_TESTREG 0x1c
+#define SPICC_TXCNT_MASK GENMASK(4, 0) /* TX FIFO Counter */
+#define SPICC_RXCNT_MASK GENMASK(9, 5) /* RX FIFO Counter */
+#define SPICC_SMSTATUS_MASK GENMASK(12, 10) /* State Machine Status */
+#define SPICC_LBC_RO BIT(13) /* Loop Back Control Read-Only */
+#define SPICC_LBC_W1 BIT(14) /* Loop Back Control Write-Only */
+#define SPICC_SWAP_RO BIT(14) /* RX FIFO Data Swap Read-Only */
+#define SPICC_SWAP_W1 BIT(15) /* RX FIFO Data Swap Write-Only */
+#define SPICC_DLYCTL_RO_MASK GENMASK(20, 15) /* Delay Control Read-Only */
+#define SPICC_DLYCTL_W1_MASK GENMASK(21, 16) /* Delay Control Write-Only */
+#define SPICC_FIFORST_RO_MASK GENMASK(22, 21) /* FIFO Softreset Read-Only */
+#define SPICC_FIFORST_W1_MASK GENMASK(23, 22) /* FIFO Softreset Write-Only */
+
+#define SPICC_DRADDR 0x20 /* Read Address of DMA */
+
+#define SPICC_DWADDR 0x24 /* Write Address of DMA */
+
+#define writel_bits_relaxed(mask, val, addr) \
+ writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr)
+
+#define SPICC_BURST_MAX 16
+#define SPICC_FIFO_HALF 10
+
+struct meson_spicc_device {
+ struct spi_master *master;
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct clk *core;
+ struct spi_message *message;
+ struct spi_transfer *xfer;
+ u8 *tx_buf;
+ u8 *rx_buf;
+ unsigned int bytes_per_word;
+ unsigned long tx_remain;
+ unsigned long txb_remain;
+ unsigned long rx_remain;
+ unsigned long rxb_remain;
+ unsigned long xfer_remain;
+ bool is_burst_end;
+ bool is_last_burst;
+};
+
+static inline bool meson_spicc_txfull(struct meson_spicc_device *spicc)
+{
+ return !!FIELD_GET(SPICC_TF,
+ readl_relaxed(spicc->base + SPICC_STATREG));
+}
+
+static inline bool meson_spicc_rxready(struct meson_spicc_device *spicc)
+{
+ return FIELD_GET(SPICC_RH | SPICC_RR | SPICC_RF_EN,
+ readl_relaxed(spicc->base + SPICC_STATREG));
+}
+
+static inline u32 meson_spicc_pull_data(struct meson_spicc_device *spicc)
+{
+ unsigned int bytes = spicc->bytes_per_word;
+ unsigned int byte_shift = 0;
+ u32 data = 0;
+ u8 byte;
+
+ while (bytes--) {
+ byte = *spicc->tx_buf++;
+ data |= (byte & 0xff) << byte_shift;
+ byte_shift += 8;
+ }
+
+ spicc->tx_remain--;
+ return data;
+}
+
+static inline void meson_spicc_push_data(struct meson_spicc_device *spicc,
+ u32 data)
+{
+ unsigned int bytes = spicc->bytes_per_word;
+ unsigned int byte_shift = 0;
+ u8 byte;
+
+ while (bytes--) {
+ byte = (data >> byte_shift) & 0xff;
+ *spicc->rx_buf++ = byte;
+ byte_shift += 8;
+ }
+
+ spicc->rx_remain--;
+}
+
+static inline void meson_spicc_rx(struct meson_spicc_device *spicc)
+{
+ /* Empty RX FIFO */
+ while (spicc->rx_remain &&
+ meson_spicc_rxready(spicc))
+ meson_spicc_push_data(spicc,
+ readl_relaxed(spicc->base + SPICC_RXDATA));
+}
+
+static inline void meson_spicc_tx(struct meson_spicc_device *spicc)
+{
+ /* Fill Up TX FIFO */
+ while (spicc->tx_remain &&
+ !meson_spicc_txfull(spicc))
+ writel_relaxed(meson_spicc_pull_data(spicc),
+ spicc->base + SPICC_TXDATA);
+}
+
+static inline u32 meson_spicc_setup_rx_irq(struct meson_spicc_device *spicc,
+ u32 irq_ctrl)
+{
+ if (spicc->rx_remain > SPICC_FIFO_HALF)
+ irq_ctrl |= SPICC_RH_EN;
+ else
+ irq_ctrl |= SPICC_RR_EN;
+
+ return irq_ctrl;
+}
+
+static inline void meson_spicc_setup_burst(struct meson_spicc_device *spicc,
+ unsigned int burst_len)
+{
+ /* Setup Xfer variables */
+ spicc->tx_remain = burst_len;
+ spicc->rx_remain = burst_len;
+ spicc->xfer_remain -= burst_len * spicc->bytes_per_word;
+ spicc->is_burst_end = false;
+ if (burst_len < SPICC_BURST_MAX || !spicc->xfer_remain)
+ spicc->is_last_burst = true;
+ else
+ spicc->is_last_burst = false;
+
+ /* Setup burst length */
+ writel_bits_relaxed(SPICC_BURSTLENGTH_MASK,
+ FIELD_PREP(SPICC_BURSTLENGTH_MASK,
+ burst_len),
+ spicc->base + SPICC_CONREG);
+
+ /* Fill TX FIFO */
+ meson_spicc_tx(spicc);
+}
+
+static irqreturn_t meson_spicc_irq(int irq, void *data)
+{
+ struct meson_spicc_device *spicc = (void *) data;
+ u32 ctrl = readl_relaxed(spicc->base + SPICC_INTREG);
+ u32 stat = readl_relaxed(spicc->base + SPICC_STATREG) & ctrl;
+
+ ctrl &= ~(SPICC_RH_EN | SPICC_RR_EN);
+
+ /* Empty RX FIFO */
+ meson_spicc_rx(spicc);
+
+ /* Enable TC interrupt since we transferred everything */
+ if (!spicc->tx_remain && !spicc->rx_remain) {
+ spicc->is_burst_end = true;
+
+ /* Enable TC interrupt */
+ ctrl |= SPICC_TC_EN;
+
+ /* Reload IRQ status */
+ stat = readl_relaxed(spicc->base + SPICC_STATREG) & ctrl;
+ }
+
+ /* Check transfer complete */
+ if ((stat & SPICC_TC) && spicc->is_burst_end) {
+ unsigned int burst_len;
+
+ /* Clear TC bit */
+ writel_relaxed(SPICC_TC, spicc->base + SPICC_STATREG);
+
+ /* Disable TC interrupt */
+ ctrl &= ~SPICC_TC_EN;
+
+ if (spicc->is_last_burst) {
+ /* Disable all IRQs */
+ writel(0, spicc->base + SPICC_INTREG);
+
+ spi_finalize_current_transfer(spicc->master);
+
+ return IRQ_HANDLED;
+ }
+
+ burst_len = min_t(unsigned int,
+ spicc->xfer_remain / spicc->bytes_per_word,
+ SPICC_BURST_MAX);
+
+ /* Setup burst */
+ meson_spicc_setup_burst(spicc, burst_len);
+
+ /* Restart burst */
+ writel_bits_relaxed(SPICC_XCH, SPICC_XCH,
+ spicc->base + SPICC_CONREG);
+ }
+
+ /* Setup RX interrupt trigger */
+ ctrl = meson_spicc_setup_rx_irq(spicc, ctrl);
+
+ /* Reconfigure interrupts */
+ writel(ctrl, spicc->base + SPICC_INTREG);
+
+ return IRQ_HANDLED;
+}
+
+static u32 meson_spicc_setup_speed(struct meson_spicc_device *spicc, u32 conf,
+ u32 speed)
+{
+ unsigned long parent, value;
+ unsigned int i, div;
+
+ parent = clk_get_rate(spicc->core);
+
+ /* Find closest inferior/equal possible speed */
+ for (i = 0 ; i < 7 ; ++i) {
+ /* 2^(data_rate+2) */
+ value = parent >> (i + 2);
+
+ if (value <= speed)
+ break;
+ }
+
+	/* If the requested speed is below what the largest divider gives, use that divider */
+ if (i > 7) {
+ div = 7;
+ dev_warn_once(&spicc->pdev->dev, "unable to get close to speed %u\n",
+ speed);
+ } else
+ div = i;
+
+ dev_dbg(&spicc->pdev->dev, "parent %lu, speed %u -> %lu (%u)\n",
+ parent, speed, value, div);
+
+ conf &= ~SPICC_DATARATE_MASK;
+ conf |= FIELD_PREP(SPICC_DATARATE_MASK, div);
+
+ return conf;
+}
+
+static void meson_spicc_setup_xfer(struct meson_spicc_device *spicc,
+ struct spi_transfer *xfer)
+{
+ u32 conf, conf_orig;
+
+ /* Read original configuration */
+ conf = conf_orig = readl_relaxed(spicc->base + SPICC_CONREG);
+
+ /* Select closest divider */
+ conf = meson_spicc_setup_speed(spicc, conf, xfer->speed_hz);
+
+ /* Setup word width */
+ conf &= ~SPICC_BITLENGTH_MASK;
+ conf |= FIELD_PREP(SPICC_BITLENGTH_MASK,
+ (spicc->bytes_per_word << 3) - 1);
+
+ /* Ignore if unchanged */
+ if (conf != conf_orig)
+ writel_relaxed(conf, spicc->base + SPICC_CONREG);
+}
+
+static int meson_spicc_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+ unsigned int burst_len;
+ u32 irq = 0;
+
+ /* Store current transfer */
+ spicc->xfer = xfer;
+
+ /* Setup transfer parameters */
+ spicc->tx_buf = (u8 *)xfer->tx_buf;
+ spicc->rx_buf = (u8 *)xfer->rx_buf;
+ spicc->xfer_remain = xfer->len;
+
+ /* Pre-calculate word size */
+ spicc->bytes_per_word =
+ DIV_ROUND_UP(spicc->xfer->bits_per_word, 8);
+
+ /* Setup transfer parameters */
+ meson_spicc_setup_xfer(spicc, xfer);
+
+ burst_len = min_t(unsigned int,
+ spicc->xfer_remain / spicc->bytes_per_word,
+ SPICC_BURST_MAX);
+
+ meson_spicc_setup_burst(spicc, burst_len);
+
+ irq = meson_spicc_setup_rx_irq(spicc, irq);
+
+ /* Start burst */
+ writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG);
+
+ /* Enable interrupts */
+ writel_relaxed(irq, spicc->base + SPICC_INTREG);
+
+ return 1;
+}
+
+static int meson_spicc_prepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+ struct spi_device *spi = message->spi;
+ u32 conf = 0;
+
+ /* Store current message */
+ spicc->message = message;
+
+ /* Enable Master */
+ conf |= SPICC_ENABLE;
+ conf |= SPICC_MODE_MASTER;
+
+ /* SMC = 0 */
+
+ /* Setup transfer mode */
+ if (spi->mode & SPI_CPOL)
+ conf |= SPICC_POL;
+ else
+ conf &= ~SPICC_POL;
+
+ if (spi->mode & SPI_CPHA)
+ conf |= SPICC_PHA;
+ else
+ conf &= ~SPICC_PHA;
+
+ /* SSCTL = 0 */
+
+ if (spi->mode & SPI_CS_HIGH)
+ conf |= SPICC_SSPOL;
+ else
+ conf &= ~SPICC_SSPOL;
+
+ if (spi->mode & SPI_READY)
+ conf |= FIELD_PREP(SPICC_DRCTL_MASK, SPICC_DRCTL_LOWLEVEL);
+ else
+ conf |= FIELD_PREP(SPICC_DRCTL_MASK, SPICC_DRCTL_IGNORE);
+
+ /* Select CS */
+ conf |= FIELD_PREP(SPICC_CS_MASK, spi->chip_select);
+
+ /* Default Clock rate core/4 */
+
+ /* Default 8bit word */
+ conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, 8 - 1);
+
+ writel_relaxed(conf, spicc->base + SPICC_CONREG);
+
+ /* Setup no wait cycles by default */
+ writel_relaxed(0, spicc->base + SPICC_PERIODREG);
+
+ writel_bits_relaxed(BIT(24), BIT(24), spicc->base + SPICC_TESTREG);
+
+ return 0;
+}
+
+static int meson_spicc_unprepare_transfer(struct spi_master *master)
+{
+ struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+
+ /* Disable all IRQs */
+ writel(0, spicc->base + SPICC_INTREG);
+
+ /* Disable controller */
+ writel_bits_relaxed(SPICC_ENABLE, 0, spicc->base + SPICC_CONREG);
+
+ device_reset_optional(&spicc->pdev->dev);
+
+ return 0;
+}
+
+static int meson_spicc_setup(struct spi_device *spi)
+{
+ int ret = 0;
+
+ if (!spi->controller_state)
+ spi->controller_state = spi_master_get_devdata(spi->master);
+ else if (gpio_is_valid(spi->cs_gpio))
+ goto out_gpio;
+ else if (spi->cs_gpio == -ENOENT)
+ return 0;
+
+ if (gpio_is_valid(spi->cs_gpio)) {
+ ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
+ if (ret) {
+ dev_err(&spi->dev, "failed to request cs gpio\n");
+ return ret;
+ }
+ }
+
+out_gpio:
+ ret = gpio_direction_output(spi->cs_gpio,
+ !(spi->mode & SPI_CS_HIGH));
+
+ return ret;
+}
+
+static void meson_spicc_cleanup(struct spi_device *spi)
+{
+ if (gpio_is_valid(spi->cs_gpio))
+ gpio_free(spi->cs_gpio);
+
+ spi->controller_state = NULL;
+}
+
+static int meson_spicc_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct meson_spicc_device *spicc;
+ struct resource *res;
+ int ret, irq, rate;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*spicc));
+ if (!master) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+ spicc = spi_master_get_devdata(master);
+ spicc->master = master;
+
+ spicc->pdev = pdev;
+ platform_set_drvdata(pdev, spicc);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spicc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spicc->base)) {
+ dev_err(&pdev->dev, "io resource mapping failed\n");
+ ret = PTR_ERR(spicc->base);
+ goto out_master;
+ }
+
+ /* Disable all IRQs */
+ writel_relaxed(0, spicc->base + SPICC_INTREG);
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq, meson_spicc_irq,
+ 0, NULL, spicc);
+ if (ret) {
+ dev_err(&pdev->dev, "irq request failed\n");
+ goto out_master;
+ }
+
+ spicc->core = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(spicc->core)) {
+ dev_err(&pdev->dev, "core clock request failed\n");
+ ret = PTR_ERR(spicc->core);
+ goto out_master;
+ }
+
+ ret = clk_prepare_enable(spicc->core);
+ if (ret) {
+ dev_err(&pdev->dev, "core clock enable failed\n");
+ goto out_master;
+ }
+ rate = clk_get_rate(spicc->core);
+
+ device_reset_optional(&pdev->dev);
+
+ master->num_chipselect = 4;
+ master->dev.of_node = pdev->dev.of_node;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) |
+ SPI_BPW_MASK(24) |
+ SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
+ master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
+ master->min_speed_hz = rate >> 9;
+ master->setup = meson_spicc_setup;
+ master->cleanup = meson_spicc_cleanup;
+ master->prepare_message = meson_spicc_prepare_message;
+ master->unprepare_transfer_hardware = meson_spicc_unprepare_transfer;
+ master->transfer_one = meson_spicc_transfer_one;
+
+ /* Setup max rate according to the Meson GX datasheet */
+ if ((rate >> 2) > SPICC_MAX_FREQ)
+ master->max_speed_hz = SPICC_MAX_FREQ;
+ else
+ master->max_speed_hz = rate >> 2;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (!ret)
+ return 0;
+
+ dev_err(&pdev->dev, "spi master registration failed\n");
+
+out_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int meson_spicc_remove(struct platform_device *pdev)
+{
+ struct meson_spicc_device *spicc = platform_get_drvdata(pdev);
+
+ /* Disable SPI */
+ writel(0, spicc->base + SPICC_CONREG);
+
+ clk_disable_unprepare(spicc->core);
+
+ return 0;
+}
+
+static const struct of_device_id meson_spicc_of_match[] = {
+ { .compatible = "amlogic,meson-gx-spicc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, meson_spicc_of_match);
+
+static struct platform_driver meson_spicc_driver = {
+ .probe = meson_spicc_probe,
+ .remove = meson_spicc_remove,
+ .driver = {
+ .name = "meson-spicc",
+ .of_match_table = of_match_ptr(meson_spicc_of_match),
+ },
+};
+
+module_platform_driver(meson_spicc_driver);
+
+MODULE_DESCRIPTION("Meson SPI Communication Controller driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("GPL");
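
Editor's note: to make the burst handling above concrete, a small illustration (not part of the driver) of how meson_spicc_setup_burst() and the TC interrupt walk a transfer in SPICC_BURST_MAX (16) word chunks. With 8 bits per word, a 100-byte transfer becomes bursts of 16, 16, 16, 16, 16, 16 and 4 words, with the chip select (or rather the GPIO used in its place) kept asserted across all of them.

#include <linux/kernel.h>
#include <linux/printk.h>

/* Illustration only: split a transfer the way the driver above does */
static void demo_spicc_burst_split(unsigned int len_bytes,
				   unsigned int bytes_per_word)
{
	unsigned int remain = len_bytes;

	while (remain) {
		unsigned int burst = min(remain / bytes_per_word, 16U);

		if (!burst)	/* trailing partial word, not handled here */
			break;

		pr_info("burst of %u words\n", burst);
		remain -= burst * bytes_per_word;
	}
}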
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 278867a31950..86bf45667a04 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -35,11 +35,15 @@
#define SPI_CMD_REG 0x0018
#define SPI_STATUS0_REG 0x001c
#define SPI_PAD_SEL_REG 0x0024
+#define SPI_CFG2_REG 0x0028
#define SPI_CFG0_SCK_HIGH_OFFSET 0
#define SPI_CFG0_SCK_LOW_OFFSET 8
#define SPI_CFG0_CS_HOLD_OFFSET 16
#define SPI_CFG0_CS_SETUP_OFFSET 24
+#define SPI_ADJUST_CFG0_SCK_LOW_OFFSET 16
+#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0
+#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16
#define SPI_CFG1_CS_IDLE_OFFSET 0
#define SPI_CFG1_PACKET_LOOP_OFFSET 8
@@ -55,6 +59,8 @@
#define SPI_CMD_RST BIT(2)
#define SPI_CMD_PAUSE_EN BIT(4)
#define SPI_CMD_DEASSERT BIT(5)
+#define SPI_CMD_SAMPLE_SEL BIT(6)
+#define SPI_CMD_CS_POL BIT(7)
#define SPI_CMD_CPHA BIT(8)
#define SPI_CMD_CPOL BIT(9)
#define SPI_CMD_RX_DMA BIT(10)
@@ -80,6 +86,8 @@ struct mtk_spi_compatible {
bool need_pad_sel;
/* Must explicitly send dummy Tx bytes to do Rx only transfer */
bool must_tx;
+	/* some IC designs adjust the config registers to enhance timing accuracy */
+ bool enhance_timing;
};
struct mtk_spi {
@@ -96,6 +104,16 @@ struct mtk_spi {
};
static const struct mtk_spi_compatible mtk_common_compat;
+
+static const struct mtk_spi_compatible mt2712_compat = {
+ .must_tx = true,
+};
+
+static const struct mtk_spi_compatible mt7622_compat = {
+ .must_tx = true,
+ .enhance_timing = true,
+};
+
static const struct mtk_spi_compatible mt8173_compat = {
.need_pad_sel = true,
.must_tx = true,
@@ -108,15 +126,23 @@ static const struct mtk_spi_compatible mt8173_compat = {
static const struct mtk_chip_config mtk_default_chip_info = {
.rx_mlsb = 1,
.tx_mlsb = 1,
+ .cs_pol = 0,
+ .sample_sel = 0,
};
static const struct of_device_id mtk_spi_of_match[] = {
{ .compatible = "mediatek,mt2701-spi",
.data = (void *)&mtk_common_compat,
},
+ { .compatible = "mediatek,mt2712-spi",
+ .data = (void *)&mt2712_compat,
+ },
{ .compatible = "mediatek,mt6589-spi",
.data = (void *)&mtk_common_compat,
},
+ { .compatible = "mediatek,mt7622-spi",
+ .data = (void *)&mt7622_compat,
+ },
{ .compatible = "mediatek,mt8135-spi",
.data = (void *)&mtk_common_compat,
},
@@ -182,6 +208,17 @@ static int mtk_spi_prepare_message(struct spi_master *master,
reg_val |= SPI_CMD_RX_ENDIAN;
#endif
+ if (mdata->dev_comp->enhance_timing) {
+ if (chip_config->cs_pol)
+ reg_val |= SPI_CMD_CS_POL;
+ else
+ reg_val &= ~SPI_CMD_CS_POL;
+ if (chip_config->sample_sel)
+ reg_val |= SPI_CMD_SAMPLE_SEL;
+ else
+ reg_val &= ~SPI_CMD_SAMPLE_SEL;
+ }
+
/* set finish and pause interrupt always enable */
reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;
@@ -233,11 +270,25 @@ static void mtk_spi_prepare_transfer(struct spi_master *master,
sck_time = (div + 1) / 2;
cs_time = sck_time * 2;
- reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET);
- reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
- reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
- reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
- writel(reg_val, mdata->base + SPI_CFG0_REG);
+ if (mdata->dev_comp->enhance_timing) {
+ reg_val |= (((sck_time - 1) & 0xffff)
+ << SPI_CFG0_SCK_HIGH_OFFSET);
+ reg_val |= (((sck_time - 1) & 0xffff)
+ << SPI_ADJUST_CFG0_SCK_LOW_OFFSET);
+ writel(reg_val, mdata->base + SPI_CFG2_REG);
+ reg_val |= (((cs_time - 1) & 0xffff)
+ << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+ reg_val |= (((cs_time - 1) & 0xffff)
+ << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+ writel(reg_val, mdata->base + SPI_CFG0_REG);
+ } else {
+ reg_val |= (((sck_time - 1) & 0xff)
+ << SPI_CFG0_SCK_HIGH_OFFSET);
+ reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
+ reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
+ reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
+ writel(reg_val, mdata->base + SPI_CFG0_REG);
+ }
reg_val = readl(mdata->base + SPI_CFG1_REG);
reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7275223dbcd4..e048268d8ba2 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1412,9 +1412,6 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
}
- if (status < 0)
- goto free_master;
-
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index be2e87ee8b31..28fc9f161b9d 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -22,6 +22,7 @@
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/sizes.h>
+#include <linux/gpio.h>
#include <asm/unaligned.h>
#define DRIVER_NAME "orion_spi"
@@ -320,12 +321,18 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
static void orion_spi_set_cs(struct spi_device *spi, bool enable)
{
struct orion_spi *orion_spi;
+ int cs;
+
+ if (gpio_is_valid(spi->cs_gpio))
+ cs = 0;
+ else
+ cs = spi->chip_select;
orion_spi = spi_master_get_devdata(spi->master);
orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK);
orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG,
- ORION_SPI_CS(spi->chip_select));
+ ORION_SPI_CS(cs));
/* Chip select logic is inverted from spi_set_cs */
if (!enable)
@@ -606,6 +613,7 @@ static int orion_spi_probe(struct platform_device *pdev)
master->setup = orion_spi_setup;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->auto_runtime_pm = true;
+ master->flags = SPI_MASTER_GPIO_SS;
platform_set_drvdata(pdev, master);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 47b65d7c4072..38d053682892 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -151,6 +151,18 @@ static const struct lpss_config lpss_platforms[] = {
.cs_sel_shift = 8,
.cs_sel_mask = 3 << 8,
},
+ { /* LPSS_CNL_SSP */
+ .offset = 0x200,
+ .reg_general = -1,
+ .reg_ssp = 0x20,
+ .reg_cs_ctrl = 0x24,
+ .reg_capabilities = 0xfc,
+ .rx_threshold = 1,
+ .tx_threshold_lo = 32,
+ .tx_threshold_hi = 56,
+ .cs_sel_shift = 8,
+ .cs_sel_mask = 3 << 8,
+ },
};
static inline const struct lpss_config
@@ -167,6 +179,7 @@ static bool is_lpss_ssp(const struct driver_data *drv_data)
case LPSS_BSW_SSP:
case LPSS_SPT_SSP:
case LPSS_BXT_SSP:
+ case LPSS_CNL_SSP:
return true;
default:
return false;
@@ -1275,6 +1288,7 @@ static int setup(struct spi_device *spi)
case LPSS_BSW_SSP:
case LPSS_SPT_SSP:
case LPSS_BXT_SSP:
+ case LPSS_CNL_SSP:
config = lpss_get_config(drv_data);
tx_thres = config->tx_threshold_lo;
tx_hi_thres = config->tx_threshold_hi;
@@ -1470,6 +1484,14 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac6), LPSS_BXT_SSP },
+ /* CNL-LP */
+ { PCI_VDEVICE(INTEL, 0x9daa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x9dab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x9dfb), LPSS_CNL_SSP },
+ /* CNL-H */
+ { PCI_VDEVICE(INTEL, 0xa32a), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa32b), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa37b), LPSS_CNL_SSP },
{ },
};
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index acf31f36b898..0b4a52b3e1dc 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -25,6 +25,11 @@
#define DRIVER_NAME "rockchip-spi"
+#define ROCKCHIP_SPI_CLR_BITS(reg, bits) \
+ writel_relaxed(readl_relaxed(reg) & ~(bits), reg)
+#define ROCKCHIP_SPI_SET_BITS(reg, bits) \
+ writel_relaxed(readl_relaxed(reg) | (bits), reg)
+
/* SPI register offsets */
#define ROCKCHIP_SPI_CTRLR0 0x0000
#define ROCKCHIP_SPI_CTRLR1 0x0004
@@ -149,6 +154,8 @@
*/
#define ROCKCHIP_SPI_MAX_TRANLEN 0xffff
+#define ROCKCHIP_SPI_MAX_CS_NUM 2
+
enum rockchip_ssi_type {
SSI_MOTO_SPI = 0,
SSI_TI_SSP,
@@ -193,6 +200,8 @@ struct rockchip_spi {
/* protect state */
spinlock_t lock;
+ bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
+
u32 use_dma;
struct sg_table tx_sg;
struct sg_table rx_sg;
@@ -264,37 +273,29 @@ static inline u32 rx_max(struct rockchip_spi *rs)
static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
- u32 ser;
struct spi_master *master = spi->master;
struct rockchip_spi *rs = spi_master_get_devdata(master);
+ bool cs_asserted = !enable;
- pm_runtime_get_sync(rs->dev);
+ /* Return immediately for no-op */
+ if (cs_asserted == rs->cs_asserted[spi->chip_select])
+ return;
- ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
+ if (cs_asserted) {
+ /* Keep things powered as long as CS is asserted */
+ pm_runtime_get_sync(rs->dev);
- /*
- * drivers/spi/spi.c:
- * static void spi_set_cs(struct spi_device *spi, bool enable)
- * {
- * if (spi->mode & SPI_CS_HIGH)
- * enable = !enable;
- *
- * if (spi->cs_gpio >= 0)
- * gpio_set_value(spi->cs_gpio, !enable);
- * else if (spi->master->set_cs)
- * spi->master->set_cs(spi, !enable);
- * }
- *
- * Note: enable(rockchip_spi_set_cs) = !enable(spi_set_cs)
- */
- if (!enable)
- ser |= 1 << spi->chip_select;
- else
- ser &= ~(1 << spi->chip_select);
+ ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER,
+ BIT(spi->chip_select));
+ } else {
+ ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER,
+ BIT(spi->chip_select));
- writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
+ /* Drop reference from when we first asserted CS */
+ pm_runtime_put(rs->dev);
+ }
- pm_runtime_put_sync(rs->dev);
+ rs->cs_asserted[spi->chip_select] = cs_asserted;
}
static int rockchip_spi_prepare_message(struct spi_master *master,
@@ -684,33 +685,33 @@ static int rockchip_spi_probe(struct platform_device *pdev)
rs->regs = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(rs->regs)) {
ret = PTR_ERR(rs->regs);
- goto err_ioremap_resource;
+ goto err_put_master;
}
rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
if (IS_ERR(rs->apb_pclk)) {
dev_err(&pdev->dev, "Failed to get apb_pclk\n");
ret = PTR_ERR(rs->apb_pclk);
- goto err_ioremap_resource;
+ goto err_put_master;
}
rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
if (IS_ERR(rs->spiclk)) {
dev_err(&pdev->dev, "Failed to get spi_pclk\n");
ret = PTR_ERR(rs->spiclk);
- goto err_ioremap_resource;
+ goto err_put_master;
}
ret = clk_prepare_enable(rs->apb_pclk);
if (ret) {
dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
- goto err_ioremap_resource;
+ goto err_put_master;
}
ret = clk_prepare_enable(rs->spiclk);
if (ret) {
dev_err(&pdev->dev, "Failed to enable spi_clk\n");
- goto err_spiclk_enable;
+ goto err_disable_apbclk;
}
spi_enable_chip(rs, 0);
@@ -728,7 +729,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
if (!rs->fifo_len) {
dev_err(&pdev->dev, "Failed to get fifo length\n");
ret = -EINVAL;
- goto err_get_fifo_len;
+ goto err_disable_spiclk;
}
spin_lock_init(&rs->lock);
@@ -739,7 +740,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
master->auto_runtime_pm = true;
master->bus_num = pdev->id;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
- master->num_chipselect = 2;
+ master->num_chipselect = ROCKCHIP_SPI_MAX_CS_NUM;
master->dev.of_node = pdev->dev.of_node;
master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
@@ -749,13 +750,14 @@ static int rockchip_spi_probe(struct platform_device *pdev)
master->transfer_one = rockchip_spi_transfer_one;
master->max_transfer_size = rockchip_spi_max_transfer_size;
master->handle_err = rockchip_spi_handle_err;
+ master->flags = SPI_MASTER_GPIO_SS;
rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
if (IS_ERR(rs->dma_tx.ch)) {
/* Check tx to see if we need defer probing driver */
if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
- goto err_get_fifo_len;
+ goto err_disable_pm_runtime;
}
dev_warn(rs->dev, "Failed to request TX DMA channel\n");
rs->dma_tx.ch = NULL;
@@ -786,23 +788,24 @@ static int rockchip_spi_probe(struct platform_device *pdev)
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "Failed to register master\n");
- goto err_register_master;
+ goto err_free_dma_rx;
}
return 0;
-err_register_master:
- pm_runtime_disable(&pdev->dev);
+err_free_dma_rx:
if (rs->dma_rx.ch)
dma_release_channel(rs->dma_rx.ch);
err_free_dma_tx:
if (rs->dma_tx.ch)
dma_release_channel(rs->dma_tx.ch);
-err_get_fifo_len:
+err_disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+err_disable_spiclk:
clk_disable_unprepare(rs->spiclk);
-err_spiclk_enable:
+err_disable_apbclk:
clk_disable_unprepare(rs->apb_pclk);
-err_ioremap_resource:
+err_put_master:
spi_master_put(master);
return ret;
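
Editor's note: for reference, the core helper that the removed comment quoted (simplified, as it stood around v4.13). After the SPI_CS_HIGH inversion the core calls the controller's ->set_cs(spi, !enable), which is why enable == false inside rockchip_spi_set_cs() means the chip select is being asserted, and is now the point where the runtime-PM reference is taken.

/* drivers/spi/spi.c (simplified), as quoted by the removed comment above */
static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpio >= 0)
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}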
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 2ce15ca97782..c304c7167866 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -2,7 +2,8 @@
* SuperH MSIOF SPI Master Interface
*
* Copyright (c) 2009 Magnus Damm
- * Copyright (C) 2014 Glider bvba
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014-2017 Glider bvba
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -33,7 +34,6 @@
#include <asm/unaligned.h>
-
struct sh_msiof_chipdata {
u16 tx_fifo_size;
u16 rx_fifo_size;
@@ -53,6 +53,7 @@ struct sh_msiof_spi_priv {
void *rx_dma_page;
dma_addr_t tx_dma_addr;
dma_addr_t rx_dma_addr;
+ bool slave_aborted;
};
#define TMDR1 0x00 /* Transmit Mode Register 1 */
@@ -337,7 +338,10 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
tmp |= lsb_first << MDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
- sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
+ if (spi_controller_is_slave(p->master))
+ sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
+ else
+ sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
if (p->master->flags & SPI_MASTER_MUST_TX) {
/* These bits are reserved if RX needs TX */
tmp &= ~0x0000ffff;
@@ -564,17 +568,19 @@ static int sh_msiof_prepare_message(struct spi_master *master,
static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
{
- int ret;
+ bool slave = spi_controller_is_slave(p->master);
+ int ret = 0;
/* setup clock and rx/tx signals */
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
+ if (!slave)
+ ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
if (rx_buf && !ret)
ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
if (!ret)
ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
/* start by setting frame bit */
- if (!ret)
+ if (!ret && !slave)
ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
return ret;
@@ -582,20 +588,49 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
{
- int ret;
+ bool slave = spi_controller_is_slave(p->master);
+ int ret = 0;
/* shut down frame, rx/tx and clock signals */
- ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
+ if (!slave)
+ ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
if (!ret)
ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
if (rx_buf && !ret)
ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
- if (!ret)
+ if (!ret && !slave)
ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);
return ret;
}
+static int sh_msiof_slave_abort(struct spi_master *master)
+{
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
+
+ p->slave_aborted = true;
+ complete(&p->done);
+ return 0;
+}
+
+static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p)
+{
+ if (spi_controller_is_slave(p->master)) {
+ if (wait_for_completion_interruptible(&p->done) ||
+ p->slave_aborted) {
+ dev_dbg(&p->pdev->dev, "interrupted\n");
+ return -EINTR;
+ }
+ } else {
+ if (!wait_for_completion_timeout(&p->done, HZ)) {
+ dev_err(&p->pdev->dev, "timeout\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
void (*tx_fifo)(struct sh_msiof_spi_priv *,
const void *, int, int),
@@ -628,6 +663,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
tx_fifo(p, tx_buf, words, fifo_shift);
reinit_completion(&p->done);
+ p->slave_aborted = false;
ret = sh_msiof_spi_start(p, rx_buf);
if (ret) {
@@ -636,11 +672,9 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
}
/* wait for tx fifo to be emptied / rx fifo to be filled */
- if (!wait_for_completion_timeout(&p->done, HZ)) {
- dev_err(&p->pdev->dev, "PIO timeout\n");
- ret = -ETIMEDOUT;
+ ret = sh_msiof_wait_for_completion(p);
+ if (ret)
goto stop_reset;
- }
/* read rx fifo */
if (rx_buf)
@@ -732,6 +766,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
sh_msiof_write(p, IER, ier_bits);
reinit_completion(&p->done);
+ p->slave_aborted = false;
/* Now start DMA */
if (rx)
@@ -746,11 +781,9 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
}
/* wait for tx fifo to be emptied / rx fifo to be filled */
- if (!wait_for_completion_timeout(&p->done, HZ)) {
- dev_err(&p->pdev->dev, "DMA timeout\n");
- ret = -ETIMEDOUT;
+ ret = sh_msiof_wait_for_completion(p);
+ if (ret)
goto stop_reset;
- }
/* clear status bits */
sh_msiof_reset_str(p);
@@ -843,7 +876,8 @@ static int sh_msiof_transfer_one(struct spi_master *master,
int ret;
/* setup clocks (clock already enabled in chipselect()) */
- sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
+ if (!spi_controller_is_slave(p->master))
+ sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
while (master->dma_tx && len > 15) {
/*
@@ -998,8 +1032,12 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
if (!info)
return NULL;
+ info->mode = of_property_read_bool(np, "spi-slave") ? MSIOF_SPI_SLAVE
+ : MSIOF_SPI_MASTER;
+
/* Parse the MSIOF properties */
- of_property_read_u32(np, "num-cs", &num_cs);
+ if (info->mode == MSIOF_SPI_MASTER)
+ of_property_read_u32(np, "num-cs", &num_cs);
of_property_read_u32(np, "renesas,tx-fifo-size",
&info->tx_fifo_override);
of_property_read_u32(np, "renesas,rx-fifo-size",
@@ -1159,34 +1197,40 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
struct spi_master *master;
const struct sh_msiof_chipdata *chipdata;
const struct of_device_id *of_id;
+ struct sh_msiof_spi_info *info;
struct sh_msiof_spi_priv *p;
int i;
int ret;
- master = spi_alloc_master(&pdev->dev, sizeof(struct sh_msiof_spi_priv));
- if (master == NULL)
- return -ENOMEM;
-
- p = spi_master_get_devdata(master);
-
- platform_set_drvdata(pdev, p);
- p->master = master;
-
of_id = of_match_device(sh_msiof_match, &pdev->dev);
if (of_id) {
chipdata = of_id->data;
- p->info = sh_msiof_spi_parse_dt(&pdev->dev);
+ info = sh_msiof_spi_parse_dt(&pdev->dev);
} else {
chipdata = (const void *)pdev->id_entry->driver_data;
- p->info = dev_get_platdata(&pdev->dev);
+ info = dev_get_platdata(&pdev->dev);
}
- if (!p->info) {
+ if (!info) {
dev_err(&pdev->dev, "failed to obtain device info\n");
- ret = -ENXIO;
- goto err1;
+ return -ENXIO;
}
+ if (info->mode == MSIOF_SPI_SLAVE)
+ master = spi_alloc_slave(&pdev->dev,
+ sizeof(struct sh_msiof_spi_priv));
+ else
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(struct sh_msiof_spi_priv));
+ if (master == NULL)
+ return -ENOMEM;
+
+ p = spi_master_get_devdata(master);
+
+ platform_set_drvdata(pdev, p);
+ p->master = master;
+ p->info = info;
+
init_completion(&p->done);
p->clk = devm_clk_get(&pdev->dev, NULL);
@@ -1237,6 +1281,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
master->num_chipselect = p->info->num_chipselect;
master->setup = sh_msiof_spi_setup;
master->prepare_message = sh_msiof_prepare_message;
+ master->slave_abort = sh_msiof_slave_abort;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
master->auto_runtime_pm = true;
master->transfer_one = sh_msiof_transfer_one;
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 7072276ad354..bbb1a275f718 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -1158,7 +1158,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
ret = spi_bitbang_start(&sspi->bitbang);
if (ret)
goto free_clk;
- dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num);
+ dev_info(&pdev->dev, "registered, bus number = %d\n", master->bus_num);
return 0;
free_clk:
diff --git a/drivers/spi/spi-slave-system-control.c b/drivers/spi/spi-slave-system-control.c
new file mode 100644
index 000000000000..c0257e937995
--- /dev/null
+++ b/drivers/spi/spi-slave-system-control.c
@@ -0,0 +1,154 @@
+/*
+ * SPI slave handler controlling system state
+ *
+ * This SPI slave handler allows remote control of system reboot, power off,
+ * halt, and suspend.
+ *
+ * Copyright (C) 2016-2017 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Usage (assuming /dev/spidev2.0 corresponds to the SPI master on the remote
+ * system):
+ *
+ * # reboot='\x7c\x50'
+ * # poweroff='\x71\x3f'
+ * # halt='\x38\x76'
+ * # suspend='\x1b\x1b'
+ * # spidev_test -D /dev/spidev2.0 -p $suspend # or $reboot, $poweroff, $halt
+ */
+
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+#include <linux/spi/spi.h>
+
+/*
+ * The numbers are chosen to display something human-readable on two 7-segment
+ * displays connected to two 74HC595 shift registers
+ */
+#define CMD_REBOOT 0x7c50 /* rb */
+#define CMD_POWEROFF 0x713f /* OF */
+#define CMD_HALT 0x3876 /* HL */
+#define CMD_SUSPEND 0x1b1b /* ZZ */
+
+struct spi_slave_system_control_priv {
+ struct spi_device *spi;
+ struct completion finished;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ __be16 cmd;
+};
+
+static
+int spi_slave_system_control_submit(struct spi_slave_system_control_priv *priv);
+
+static void spi_slave_system_control_complete(void *arg)
+{
+ struct spi_slave_system_control_priv *priv = arg;
+ u16 cmd;
+ int ret;
+
+ if (priv->msg.status)
+ goto terminate;
+
+ cmd = be16_to_cpu(priv->cmd);
+ switch (cmd) {
+ case CMD_REBOOT:
+ dev_info(&priv->spi->dev, "Rebooting system...\n");
+ kernel_restart(NULL);
+
+ case CMD_POWEROFF:
+ dev_info(&priv->spi->dev, "Powering off system...\n");
+ kernel_power_off();
+ break;
+
+ case CMD_HALT:
+ dev_info(&priv->spi->dev, "Halting system...\n");
+ kernel_halt();
+ break;
+
+ case CMD_SUSPEND:
+ dev_info(&priv->spi->dev, "Suspending system...\n");
+ pm_suspend(PM_SUSPEND_MEM);
+ break;
+
+ default:
+ dev_warn(&priv->spi->dev, "Unknown command 0x%x\n", cmd);
+ break;
+ }
+
+ ret = spi_slave_system_control_submit(priv);
+ if (ret)
+ goto terminate;
+
+ return;
+
+terminate:
+ dev_info(&priv->spi->dev, "Terminating\n");
+ complete(&priv->finished);
+}
+
+static
+int spi_slave_system_control_submit(struct spi_slave_system_control_priv *priv)
+{
+ int ret;
+
+ spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
+
+ priv->msg.complete = spi_slave_system_control_complete;
+ priv->msg.context = priv;
+
+ ret = spi_async(priv->spi, &priv->msg);
+ if (ret)
+ dev_err(&priv->spi->dev, "spi_async() failed %d\n", ret);
+
+ return ret;
+}
+
+static int spi_slave_system_control_probe(struct spi_device *spi)
+{
+ struct spi_slave_system_control_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->spi = spi;
+ init_completion(&priv->finished);
+ priv->xfer.rx_buf = &priv->cmd;
+ priv->xfer.len = sizeof(priv->cmd);
+
+ ret = spi_slave_system_control_submit(priv);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, priv);
+ return 0;
+}
+
+static int spi_slave_system_control_remove(struct spi_device *spi)
+{
+ struct spi_slave_system_control_priv *priv = spi_get_drvdata(spi);
+
+ spi_slave_abort(spi);
+ wait_for_completion(&priv->finished);
+ return 0;
+}
+
+static struct spi_driver spi_slave_system_control_driver = {
+ .driver = {
+ .name = "spi-slave-system-control",
+ },
+ .probe = spi_slave_system_control_probe,
+ .remove = spi_slave_system_control_remove,
+};
+module_spi_driver(spi_slave_system_control_driver);
+
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_DESCRIPTION("SPI slave handler controlling system state");
+MODULE_LICENSE("GPL v2");
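For reference, the spidev_test invocation from the header comment maps onto a few lines of spidev ioctl code on the remote master. A minimal sketch (hypothetical example, assuming the same /dev/spidev2.0 node and the 0x1b1b suspend command; other transfer parameters are left at the spidev defaults):

	#include <fcntl.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/spi/spidev.h>

	int main(void)
	{
		/* CMD_SUSPEND, most significant byte first, to match the handler's __be16 */
		uint8_t tx[2] = { 0x1b, 0x1b };
		struct spi_ioc_transfer xfer = {
			.tx_buf = (unsigned long)tx,
			.len = sizeof(tx),
		};
		int fd = open("/dev/spidev2.0", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, SPI_IOC_MESSAGE(1), &xfer) < 1) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}

The handler converts the received half-word with be16_to_cpu(), so the command bytes travel most significant byte first regardless of host endianness on either side.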
diff --git a/drivers/spi/spi-slave-time.c b/drivers/spi/spi-slave-time.c
new file mode 100644
index 000000000000..f2e07a392d68
--- /dev/null
+++ b/drivers/spi/spi-slave-time.c
@@ -0,0 +1,129 @@
+/*
+ * SPI slave handler reporting uptime at reception of previous SPI message
+ *
+ * This SPI slave handler sends the time of reception of the last SPI message
+ * as two 32-bit unsigned integers in binary format and in network byte order,
+ * representing the number of seconds and fractional seconds (in microseconds)
+ * since boot up.
+ *
+ * Copyright (C) 2016-2017 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Usage (assuming /dev/spidev2.0 corresponds to the SPI master on the remote
+ * system):
+ *
+ * # spidev_test -D /dev/spidev2.0 -p dummy-8B
+ * spi mode: 0x0
+ * bits per word: 8
+ * max speed: 500000 Hz (500 KHz)
+ * RX | 00 00 04 6D 00 09 5B BB ...
+ * ^^^^^ ^^^^^^^^
+ * seconds microseconds
+ */
+
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/sched/clock.h>
+#include <linux/spi/spi.h>
+
+
+struct spi_slave_time_priv {
+ struct spi_device *spi;
+ struct completion finished;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ __be32 buf[2];
+};
+
+static int spi_slave_time_submit(struct spi_slave_time_priv *priv);
+
+static void spi_slave_time_complete(void *arg)
+{
+ struct spi_slave_time_priv *priv = arg;
+ int ret;
+
+ ret = priv->msg.status;
+ if (ret)
+ goto terminate;
+
+ ret = spi_slave_time_submit(priv);
+ if (ret)
+ goto terminate;
+
+ return;
+
+terminate:
+ dev_info(&priv->spi->dev, "Terminating\n");
+ complete(&priv->finished);
+}
+
+static int spi_slave_time_submit(struct spi_slave_time_priv *priv)
+{
+ u32 rem_us;
+ int ret;
+ u64 ts;
+
+ ts = local_clock();
+ rem_us = do_div(ts, 1000000000) / 1000;
+
+ priv->buf[0] = cpu_to_be32(ts);
+ priv->buf[1] = cpu_to_be32(rem_us);
+
+ spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
+
+ priv->msg.complete = spi_slave_time_complete;
+ priv->msg.context = priv;
+
+ ret = spi_async(priv->spi, &priv->msg);
+ if (ret)
+ dev_err(&priv->spi->dev, "spi_async() failed %d\n", ret);
+
+ return ret;
+}
+
+static int spi_slave_time_probe(struct spi_device *spi)
+{
+ struct spi_slave_time_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->spi = spi;
+ init_completion(&priv->finished);
+ priv->xfer.tx_buf = priv->buf;
+ priv->xfer.len = sizeof(priv->buf);
+
+ ret = spi_slave_time_submit(priv);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, priv);
+ return 0;
+}
+
+static int spi_slave_time_remove(struct spi_device *spi)
+{
+ struct spi_slave_time_priv *priv = spi_get_drvdata(spi);
+
+ spi_slave_abort(spi);
+ wait_for_completion(&priv->finished);
+ return 0;
+}
+
+static struct spi_driver spi_slave_time_driver = {
+ .driver = {
+ .name = "spi-slave-time",
+ },
+ .probe = spi_slave_time_probe,
+ .remove = spi_slave_time_remove,
+};
+module_spi_driver(spi_slave_time_driver);
+
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_DESCRIPTION("SPI slave reporting uptime at previous SPI message");
+MODULE_LICENSE("GPL v2");
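Decoding the sample RX bytes from the header comment: 0x0000046D is 1133 seconds and 0x00095BBB is 613307 microseconds, i.e. the previous message was received roughly 1133.61 s (just under 19 minutes) after boot. On the remote master the two fields can be unpacked with standard network-byte-order helpers; a small sketch (hypothetical helper, assuming rx points at the 8 bytes read back over SPI):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static void print_peer_uptime(const uint8_t *rx)
	{
		uint32_t sec, usec;

		/* both fields are big-endian (network byte order) on the wire */
		memcpy(&sec, rx, sizeof(sec));
		memcpy(&usec, rx + sizeof(sec), sizeof(usec));
		printf("peer uptime: %u.%06u s\n", ntohl(sec), ntohl(usec));
	}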
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index e54b59638458..a4e43fc19ece 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -229,42 +229,42 @@ static int spi_st_setup(struct spi_device *spi)
"setting baudrate:target= %u hz, actual= %u hz, sscbrg= %u\n",
hz, spi_st->baud, sscbrg);
- /* Set SSC_CTL and enable SSC */
- var = readl_relaxed(spi_st->base + SSC_CTL);
- var |= SSC_CTL_MS;
+ /* Set SSC_CTL and enable SSC */
+ var = readl_relaxed(spi_st->base + SSC_CTL);
+ var |= SSC_CTL_MS;
- if (spi->mode & SPI_CPOL)
+ if (spi->mode & SPI_CPOL)
var |= SSC_CTL_PO;
- else
+ else
var &= ~SSC_CTL_PO;
- if (spi->mode & SPI_CPHA)
+ if (spi->mode & SPI_CPHA)
var |= SSC_CTL_PH;
- else
+ else
var &= ~SSC_CTL_PH;
- if ((spi->mode & SPI_LSB_FIRST) == 0)
+ if ((spi->mode & SPI_LSB_FIRST) == 0)
var |= SSC_CTL_HB;
- else
+ else
var &= ~SSC_CTL_HB;
- if (spi->mode & SPI_LOOP)
+ if (spi->mode & SPI_LOOP)
var |= SSC_CTL_LPB;
- else
+ else
var &= ~SSC_CTL_LPB;
- var &= ~SSC_CTL_DATA_WIDTH_MSK;
- var |= (spi->bits_per_word - 1);
+ var &= ~SSC_CTL_DATA_WIDTH_MSK;
+ var |= (spi->bits_per_word - 1);
- var |= SSC_CTL_EN_TX_FIFO | SSC_CTL_EN_RX_FIFO;
- var |= SSC_CTL_EN;
+ var |= SSC_CTL_EN_TX_FIFO | SSC_CTL_EN_RX_FIFO;
+ var |= SSC_CTL_EN;
- writel_relaxed(var, spi_st->base + SSC_CTL);
+ writel_relaxed(var, spi_st->base + SSC_CTL);
- /* Clear the status register */
- readl_relaxed(spi_st->base + SSC_RBUF);
+ /* Clear the status register */
+ readl_relaxed(spi_st->base + SSC_RBUF);
- return 0;
+ return 0;
out_free_gpio:
gpio_free(cs);
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
new file mode 100644
index 000000000000..75644bcd938b
--- /dev/null
+++ b/drivers/spi/spi-stm32.c
@@ -0,0 +1,1322 @@
+/*
+ * STMicroelectronics STM32 SPI Controller driver (master mode only)
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics.
+ *
+ * License terms: GPL V2.0.
+ *
+ * spi_stm32 driver is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * spi_stm32 driver is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * spi_stm32 driver. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/debugfs.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "spi_stm32"
+
+/* STM32 SPI registers */
+#define STM32_SPI_CR1 0x00
+#define STM32_SPI_CR2 0x04
+#define STM32_SPI_CFG1 0x08
+#define STM32_SPI_CFG2 0x0C
+#define STM32_SPI_IER 0x10
+#define STM32_SPI_SR 0x14
+#define STM32_SPI_IFCR 0x18
+#define STM32_SPI_TXDR 0x20
+#define STM32_SPI_RXDR 0x30
+#define STM32_SPI_I2SCFGR 0x50
+
+/* STM32_SPI_CR1 bit fields */
+#define SPI_CR1_SPE BIT(0)
+#define SPI_CR1_MASRX BIT(8)
+#define SPI_CR1_CSTART BIT(9)
+#define SPI_CR1_CSUSP BIT(10)
+#define SPI_CR1_HDDIR BIT(11)
+#define SPI_CR1_SSI BIT(12)
+
+/* STM32_SPI_CR2 bit fields */
+#define SPI_CR2_TSIZE_SHIFT 0
+#define SPI_CR2_TSIZE GENMASK(15, 0)
+
+/* STM32_SPI_CFG1 bit fields */
+#define SPI_CFG1_DSIZE_SHIFT 0
+#define SPI_CFG1_DSIZE GENMASK(4, 0)
+#define SPI_CFG1_FTHLV_SHIFT 5
+#define SPI_CFG1_FTHLV GENMASK(8, 5)
+#define SPI_CFG1_RXDMAEN BIT(14)
+#define SPI_CFG1_TXDMAEN BIT(15)
+#define SPI_CFG1_MBR_SHIFT 28
+#define SPI_CFG1_MBR GENMASK(30, 28)
+#define SPI_CFG1_MBR_MIN 0
+#define SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28)
+
+/* STM32_SPI_CFG2 bit fields */
+#define SPI_CFG2_MIDI_SHIFT 4
+#define SPI_CFG2_MIDI GENMASK(7, 4)
+#define SPI_CFG2_COMM_SHIFT 17
+#define SPI_CFG2_COMM GENMASK(18, 17)
+#define SPI_CFG2_SP_SHIFT 19
+#define SPI_CFG2_SP GENMASK(21, 19)
+#define SPI_CFG2_MASTER BIT(22)
+#define SPI_CFG2_LSBFRST BIT(23)
+#define SPI_CFG2_CPHA BIT(24)
+#define SPI_CFG2_CPOL BIT(25)
+#define SPI_CFG2_SSM BIT(26)
+#define SPI_CFG2_AFCNTR BIT(31)
+
+/* STM32_SPI_IER bit fields */
+#define SPI_IER_RXPIE BIT(0)
+#define SPI_IER_TXPIE BIT(1)
+#define SPI_IER_DXPIE BIT(2)
+#define SPI_IER_EOTIE BIT(3)
+#define SPI_IER_TXTFIE BIT(4)
+#define SPI_IER_OVRIE BIT(6)
+#define SPI_IER_MODFIE BIT(9)
+#define SPI_IER_ALL GENMASK(10, 0)
+
+/* STM32_SPI_SR bit fields */
+#define SPI_SR_RXP BIT(0)
+#define SPI_SR_TXP BIT(1)
+#define SPI_SR_EOT BIT(3)
+#define SPI_SR_OVR BIT(6)
+#define SPI_SR_MODF BIT(9)
+#define SPI_SR_SUSP BIT(11)
+#define SPI_SR_RXPLVL_SHIFT 13
+#define SPI_SR_RXPLVL GENMASK(14, 13)
+#define SPI_SR_RXWNE BIT(15)
+
+/* STM32_SPI_IFCR bit fields */
+#define SPI_IFCR_ALL GENMASK(11, 3)
+
+/* STM32_SPI_I2SCFGR bit fields */
+#define SPI_I2SCFGR_I2SMOD BIT(0)
+
+/* SPI Master Baud Rate min/max divisor */
+#define SPI_MBR_DIV_MIN (2 << SPI_CFG1_MBR_MIN)
+#define SPI_MBR_DIV_MAX (2 << SPI_CFG1_MBR_MAX)
+
+/* SPI Communication mode */
+#define SPI_FULL_DUPLEX 0
+#define SPI_SIMPLEX_TX 1
+#define SPI_SIMPLEX_RX 2
+#define SPI_HALF_DUPLEX 3
+
+#define SPI_1HZ_NS 1000000000
+
+/**
+ * struct stm32_spi - private data of the SPI controller
+ * @dev: driver model representation of the controller
+ * @master: controller master interface
+ * @base: virtual memory area
+ * @clk: hw kernel clock feeding the SPI clock generator
+ * @clk_rate: rate of the hw kernel clock feeding the SPI clock generator
+ * @rst: SPI controller reset line
+ * @lock: prevent I/O concurrent access
+ * @irq: SPI controller interrupt line
+ * @fifo_size: size of the embedded fifo in bytes
+ * @cur_midi: master inter-data idleness in ns
+ * @cur_speed: speed configured in Hz
+ * @cur_bpw: number of bits in a single SPI data frame
+ * @cur_fthlv: fifo threshold level (data frames in a single data packet)
+ * @cur_comm: SPI communication mode
+ * @cur_xferlen: current transfer length in bytes
+ * @cur_usedma: boolean to know if dma is used in current transfer
+ * @tx_buf: data to be written, or NULL
+ * @rx_buf: data to be read, or NULL
+ * @tx_len: number of data to be written in bytes
+ * @rx_len: number of data to be read in bytes
+ * @dma_tx: dma channel for TX transfer
+ * @dma_rx: dma channel for RX transfer
+ * @phys_addr: SPI registers physical base address
+ */
+struct stm32_spi {
+ struct device *dev;
+ struct spi_master *master;
+ void __iomem *base;
+ struct clk *clk;
+ u32 clk_rate;
+ struct reset_control *rst;
+ spinlock_t lock; /* prevent I/O concurrent access */
+ int irq;
+ unsigned int fifo_size;
+
+ unsigned int cur_midi;
+ unsigned int cur_speed;
+ unsigned int cur_bpw;
+ unsigned int cur_fthlv;
+ unsigned int cur_comm;
+ unsigned int cur_xferlen;
+ bool cur_usedma;
+
+ const void *tx_buf;
+ void *rx_buf;
+ int tx_len;
+ int rx_len;
+ struct dma_chan *dma_tx;
+ struct dma_chan *dma_rx;
+ dma_addr_t phys_addr;
+};
+
+static inline void stm32_spi_set_bits(struct stm32_spi *spi,
+ u32 offset, u32 bits)
+{
+ writel_relaxed(readl_relaxed(spi->base + offset) | bits,
+ spi->base + offset);
+}
+
+static inline void stm32_spi_clr_bits(struct stm32_spi *spi,
+ u32 offset, u32 bits)
+{
+ writel_relaxed(readl_relaxed(spi->base + offset) & ~bits,
+ spi->base + offset);
+}
+
+/**
+ * stm32_spi_get_fifo_size - Return fifo size
+ * @spi: pointer to the spi controller data structure
+ */
+static int stm32_spi_get_fifo_size(struct stm32_spi *spi)
+{
+ unsigned long flags;
+ u32 count = 0;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
+
+ while (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP)
+ writeb_relaxed(++count, spi->base + STM32_SPI_TXDR);
+
+ stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ dev_dbg(spi->dev, "%d x 8-bit fifo size\n", count);
+
+ return count;
+}
+
+/**
+ * stm32_spi_get_bpw_mask - Return bits per word mask
+ * @spi: pointer to the spi controller data structure
+ */
+static int stm32_spi_get_bpw_mask(struct stm32_spi *spi)
+{
+ unsigned long flags;
+ u32 cfg1, max_bpw;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ /*
+	 * The most significant bit of the DSIZE bit field is reserved when the
+	 * maximum data size of peripheral instances is limited to 16 bits
+ */
+ stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_DSIZE);
+
+ cfg1 = readl_relaxed(spi->base + STM32_SPI_CFG1);
+ max_bpw = (cfg1 & SPI_CFG1_DSIZE) >> SPI_CFG1_DSIZE_SHIFT;
+ max_bpw += 1;
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ dev_dbg(spi->dev, "%d-bit maximum data frame\n", max_bpw);
+
+ return SPI_BPW_RANGE_MASK(4, max_bpw);
+}
+
+/**
+ * stm32_spi_prepare_mbr - Determine SPI_CFG1.MBR value
+ * @spi: pointer to the spi controller data structure
+ * @speed_hz: requested speed
+ *
+ * Return SPI_CFG1.MBR value in case of success or -EINVAL
+ */
+static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
+{
+ u32 div, mbrdiv;
+
+ div = DIV_ROUND_UP(spi->clk_rate, speed_hz);
+
+ /*
+	 * The SPI framework sets xfer->speed_hz to master->max_speed_hz when
+	 * xfer->speed_hz is greater than master->max_speed_hz, and it returns
+	 * an error when xfer->speed_hz is lower than master->min_speed_hz, so
+	 * there is no need to check those bounds here. However, the resulting
+	 * divisor must still fit within the limits checked below.
+ */
+	if ((div < SPI_MBR_DIV_MIN) ||
+	    (div > SPI_MBR_DIV_MAX))
+ return -EINVAL;
+
+ /* Determine the first power of 2 greater than or equal to div */
+ if (div & (div - 1))
+ mbrdiv = fls(div);
+ else
+ mbrdiv = fls(div) - 1;
+
+ spi->cur_speed = spi->clk_rate / (1 << mbrdiv);
+
+ return mbrdiv - 1;
+}
+
+/**
+ * stm32_spi_prepare_fthlv - Determine FIFO threshold level
+ * @spi: pointer to the spi controller data structure
+ */
+static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi)
+{
+ u32 fthlv, half_fifo;
+
+ /* data packet should not exceed 1/2 of fifo space */
+ half_fifo = (spi->fifo_size / 2);
+
+ if (spi->cur_bpw <= 8)
+ fthlv = half_fifo;
+ else if (spi->cur_bpw <= 16)
+ fthlv = half_fifo / 2;
+ else
+ fthlv = half_fifo / 4;
+
+ /* align packet size with data registers access */
+ if (spi->cur_bpw > 8)
+ fthlv -= (fthlv % 2); /* multiple of 2 */
+ else
+ fthlv -= (fthlv % 4); /* multiple of 4 */
+
+ return fthlv;
+}
+
+/**
+ * stm32_spi_write_txfifo - Write bytes in Transmit Data Register
+ * @spi: pointer to the spi controller data structure
+ *
+ * The amount read from tx_buf depends on the remaining byte count, to
+ * avoid reading beyond the end of tx_buf.
+ */
+static void stm32_spi_write_txfifo(struct stm32_spi *spi)
+{
+ while ((spi->tx_len > 0) &&
+ (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP)) {
+ u32 offs = spi->cur_xferlen - spi->tx_len;
+
+ if (spi->tx_len >= sizeof(u32)) {
+ const u32 *tx_buf32 = (const u32 *)(spi->tx_buf + offs);
+
+ writel_relaxed(*tx_buf32, spi->base + STM32_SPI_TXDR);
+ spi->tx_len -= sizeof(u32);
+ } else if (spi->tx_len >= sizeof(u16)) {
+ const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
+
+ writew_relaxed(*tx_buf16, spi->base + STM32_SPI_TXDR);
+ spi->tx_len -= sizeof(u16);
+ } else {
+ const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
+
+ writeb_relaxed(*tx_buf8, spi->base + STM32_SPI_TXDR);
+ spi->tx_len -= sizeof(u8);
+ }
+ }
+
+ dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len);
+}
+
+/**
+ * stm32_spi_read_rxfifo - Read bytes in Receive Data Register
+ * @spi: pointer to the spi controller data structure
+ *
+ * The amount written to rx_buf depends on the remaining byte count, to
+ * avoid writing beyond the end of rx_buf.
+ */
+static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
+{
+ u32 sr = readl_relaxed(spi->base + STM32_SPI_SR);
+ u32 rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT;
+
+ while ((spi->rx_len > 0) &&
+ ((sr & SPI_SR_RXP) ||
+ (flush && ((sr & SPI_SR_RXWNE) || (rxplvl > 0))))) {
+ u32 offs = spi->cur_xferlen - spi->rx_len;
+
+ if ((spi->rx_len >= sizeof(u32)) ||
+ (flush && (sr & SPI_SR_RXWNE))) {
+ u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs);
+
+ *rx_buf32 = readl_relaxed(spi->base + STM32_SPI_RXDR);
+ spi->rx_len -= sizeof(u32);
+ } else if ((spi->rx_len >= sizeof(u16)) ||
+ (flush && (rxplvl >= 2 || spi->cur_bpw > 8))) {
+ u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
+
+ *rx_buf16 = readw_relaxed(spi->base + STM32_SPI_RXDR);
+ spi->rx_len -= sizeof(u16);
+ } else {
+ u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
+
+ *rx_buf8 = readb_relaxed(spi->base + STM32_SPI_RXDR);
+ spi->rx_len -= sizeof(u8);
+ }
+
+ sr = readl_relaxed(spi->base + STM32_SPI_SR);
+ rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT;
+ }
+
+ dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__,
+ flush ? "(flush)" : "", spi->rx_len);
+}
+
+/**
+ * stm32_spi_enable - Enable SPI controller
+ * @spi: pointer to the spi controller data structure
+ *
+ * SPI data transfer is enabled but spi_ker_ck is idle.
+ * SPI_CFG1 and SPI_CFG2 are now write protected.
+ */
+static void stm32_spi_enable(struct stm32_spi *spi)
+{
+ dev_dbg(spi->dev, "enable controller\n");
+
+ stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
+}
+
+/**
+ * stm32_spi_disable - Disable SPI controller
+ * @spi: pointer to the spi controller data structure
+ *
+ * RX-Fifo is flushed when SPI controller is disabled. To prevent any data
+ * loss, use stm32_spi_read_rxfifo(flush) to read the remaining bytes in
+ * RX-Fifo.
+ */
+static void stm32_spi_disable(struct stm32_spi *spi)
+{
+ unsigned long flags;
+ u32 cr1, sr;
+
+ dev_dbg(spi->dev, "disable controller\n");
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ cr1 = readl_relaxed(spi->base + STM32_SPI_CR1);
+
+ if (!(cr1 & SPI_CR1_SPE)) {
+ spin_unlock_irqrestore(&spi->lock, flags);
+ return;
+ }
+
+ /* Wait on EOT or suspend the flow */
+ if (readl_relaxed_poll_timeout_atomic(spi->base + STM32_SPI_SR,
+ sr, !(sr & SPI_SR_EOT),
+ 10, 100000) < 0) {
+ if (cr1 & SPI_CR1_CSTART) {
+ writel_relaxed(cr1 | SPI_CR1_CSUSP,
+ spi->base + STM32_SPI_CR1);
+ if (readl_relaxed_poll_timeout_atomic(
+ spi->base + STM32_SPI_SR,
+ sr, !(sr & SPI_SR_SUSP),
+ 10, 100000) < 0)
+ dev_warn(spi->dev,
+ "Suspend request timeout\n");
+ }
+ }
+
+ if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0))
+ stm32_spi_read_rxfifo(spi, true);
+
+ if (spi->cur_usedma && spi->tx_buf)
+ dmaengine_terminate_all(spi->dma_tx);
+ if (spi->cur_usedma && spi->rx_buf)
+ dmaengine_terminate_all(spi->dma_rx);
+
+ stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
+
+ stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN |
+ SPI_CFG1_RXDMAEN);
+
+ /* Disable interrupts and clear status flags */
+ writel_relaxed(0, spi->base + STM32_SPI_IER);
+ writel_relaxed(SPI_IFCR_ALL, spi->base + STM32_SPI_IFCR);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+}
+
+/**
+ * stm32_spi_can_dma - Determine if the transfer is eligible for DMA use
+ *
+ * If the current transfer size is greater than fifo size, use DMA.
+ */
+static bool stm32_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi_dev,
+ struct spi_transfer *transfer)
+{
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+
+ dev_dbg(spi->dev, "%s: %s\n", __func__,
+ (transfer->len > spi->fifo_size) ? "true" : "false");
+
+ return (transfer->len > spi->fifo_size);
+}
+
+/**
+ * stm32_spi_irq - Interrupt handler for SPI controller events
+ * @irq: interrupt line
+ * @dev_id: SPI controller master interface
+ */
+static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+ u32 sr, ier, mask;
+ unsigned long flags;
+ bool end = false;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ sr = readl_relaxed(spi->base + STM32_SPI_SR);
+ ier = readl_relaxed(spi->base + STM32_SPI_IER);
+
+ mask = ier;
+ /* EOTIE is triggered on EOT, SUSP and TXC events. */
+ mask |= SPI_SR_SUSP;
+ /*
+	 * When TXTF is set, DXPIE and TXPIE are cleared, so in Full-Duplex
+	 * mode the RXP event must be polled to check for remaining data
+	 * before disabling the SPI.
+ */
+ if (spi->rx_buf && !spi->cur_usedma)
+ mask |= SPI_SR_RXP;
+
+ if (!(sr & mask)) {
+ dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
+ sr, ier);
+ spin_unlock_irqrestore(&spi->lock, flags);
+ return IRQ_NONE;
+ }
+
+ if (sr & SPI_SR_SUSP) {
+ dev_warn(spi->dev, "Communication suspended\n");
+ if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
+ stm32_spi_read_rxfifo(spi, false);
+ /*
+ * If communication is suspended while using DMA, it means
+ * that something went wrong, so stop the current transfer
+ */
+ if (spi->cur_usedma)
+ end = true;
+ }
+
+ if (sr & SPI_SR_MODF) {
+ dev_warn(spi->dev, "Mode fault: transfer aborted\n");
+ end = true;
+ }
+
+ if (sr & SPI_SR_OVR) {
+ dev_warn(spi->dev, "Overrun: received value discarded\n");
+ if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
+ stm32_spi_read_rxfifo(spi, false);
+ /*
+ * If overrun is detected while using DMA, it means that
+ * something went wrong, so stop the current transfer
+ */
+ if (spi->cur_usedma)
+ end = true;
+ }
+
+ if (sr & SPI_SR_EOT) {
+ if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
+ stm32_spi_read_rxfifo(spi, true);
+ end = true;
+ }
+
+ if (sr & SPI_SR_TXP)
+ if (!spi->cur_usedma && (spi->tx_buf && (spi->tx_len > 0)))
+ stm32_spi_write_txfifo(spi);
+
+ if (sr & SPI_SR_RXP)
+ if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
+ stm32_spi_read_rxfifo(spi, false);
+
+ writel_relaxed(mask, spi->base + STM32_SPI_IFCR);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ if (end) {
+ spi_finalize_current_transfer(master);
+ stm32_spi_disable(spi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * stm32_spi_setup - setup device chip select
+ */
+static int stm32_spi_setup(struct spi_device *spi_dev)
+{
+ int ret = 0;
+
+ if (!gpio_is_valid(spi_dev->cs_gpio)) {
+ dev_err(&spi_dev->dev, "%d is not a valid gpio\n",
+ spi_dev->cs_gpio);
+ return -EINVAL;
+ }
+
+ dev_dbg(&spi_dev->dev, "%s: set gpio%d output %s\n", __func__,
+ spi_dev->cs_gpio,
+ (spi_dev->mode & SPI_CS_HIGH) ? "low" : "high");
+
+ ret = gpio_direction_output(spi_dev->cs_gpio,
+ !(spi_dev->mode & SPI_CS_HIGH));
+
+ return ret;
+}
+
+/**
+ * stm32_spi_prepare_msg - set up the controller to transfer a single message
+ */
+static int stm32_spi_prepare_msg(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+ struct spi_device *spi_dev = msg->spi;
+ struct device_node *np = spi_dev->dev.of_node;
+ unsigned long flags;
+ u32 cfg2_clrb = 0, cfg2_setb = 0;
+
+ /* SPI slave device may need time between data frames */
+ spi->cur_midi = 0;
+ if (np && !of_property_read_u32(np, "st,spi-midi-ns", &spi->cur_midi))
+ dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi);
+
+ if (spi_dev->mode & SPI_CPOL)
+ cfg2_setb |= SPI_CFG2_CPOL;
+ else
+ cfg2_clrb |= SPI_CFG2_CPOL;
+
+ if (spi_dev->mode & SPI_CPHA)
+ cfg2_setb |= SPI_CFG2_CPHA;
+ else
+ cfg2_clrb |= SPI_CFG2_CPHA;
+
+ if (spi_dev->mode & SPI_LSB_FIRST)
+ cfg2_setb |= SPI_CFG2_LSBFRST;
+ else
+ cfg2_clrb |= SPI_CFG2_LSBFRST;
+
+ dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n",
+ spi_dev->mode & SPI_CPOL,
+ spi_dev->mode & SPI_CPHA,
+ spi_dev->mode & SPI_LSB_FIRST,
+ spi_dev->mode & SPI_CS_HIGH);
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ if (cfg2_clrb || cfg2_setb)
+ writel_relaxed(
+ (readl_relaxed(spi->base + STM32_SPI_CFG2) &
+ ~cfg2_clrb) | cfg2_setb,
+ spi->base + STM32_SPI_CFG2);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return 0;
+}
+
+/**
+ * stm32_spi_dma_cb - dma callback
+ *
+ * DMA callback is called when the transfer is complete or when an error
+ * occurs. If the transfer is complete, EOT flag is raised.
+ */
+static void stm32_spi_dma_cb(void *data)
+{
+ struct stm32_spi *spi = data;
+ unsigned long flags;
+ u32 sr;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ sr = readl_relaxed(spi->base + STM32_SPI_SR);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ if (!(sr & SPI_SR_EOT))
+ dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr);
+
+ /* Now wait for EOT, or SUSP or OVR in case of error */
+}
+
+/**
+ * stm32_spi_dma_config - configure dma slave channel depending on current
+ * transfer bits_per_word.
+ */
+static void stm32_spi_dma_config(struct stm32_spi *spi,
+ struct dma_slave_config *dma_conf,
+ enum dma_transfer_direction dir)
+{
+ enum dma_slave_buswidth buswidth;
+ u32 maxburst;
+
+ if (spi->cur_bpw <= 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (spi->cur_bpw <= 16)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ /* Valid for DMA Half or Full Fifo threshold */
+ if (spi->cur_fthlv == 2)
+ maxburst = 1;
+ else
+ maxburst = spi->cur_fthlv;
+
+ memset(dma_conf, 0, sizeof(struct dma_slave_config));
+ dma_conf->direction = dir;
+ if (dma_conf->direction == DMA_DEV_TO_MEM) { /* RX */
+ dma_conf->src_addr = spi->phys_addr + STM32_SPI_RXDR;
+ dma_conf->src_addr_width = buswidth;
+ dma_conf->src_maxburst = maxburst;
+
+ dev_dbg(spi->dev, "Rx DMA config buswidth=%d, maxburst=%d\n",
+ buswidth, maxburst);
+ } else if (dma_conf->direction == DMA_MEM_TO_DEV) { /* TX */
+ dma_conf->dst_addr = spi->phys_addr + STM32_SPI_TXDR;
+ dma_conf->dst_addr_width = buswidth;
+ dma_conf->dst_maxburst = maxburst;
+
+ dev_dbg(spi->dev, "Tx DMA config buswidth=%d, maxburst=%d\n",
+ buswidth, maxburst);
+ }
+}
+
+/**
+ * stm32_spi_transfer_one_irq - transfer a single spi_transfer using
+ * interrupts
+ *
+ * It must return 0 if the transfer is finished or 1 if the transfer is still
+ * in progress.
+ */
+static int stm32_spi_transfer_one_irq(struct stm32_spi *spi)
+{
+ unsigned long flags;
+ u32 ier = 0;
+
+ /* Enable the interrupts relative to the current communication mode */
+ if (spi->tx_buf && spi->rx_buf) /* Full Duplex */
+ ier |= SPI_IER_DXPIE;
+ else if (spi->tx_buf) /* Half-Duplex TX dir or Simplex TX */
+ ier |= SPI_IER_TXPIE;
+ else if (spi->rx_buf) /* Half-Duplex RX dir or Simplex RX */
+ ier |= SPI_IER_RXPIE;
+
+ /* Enable the interrupts relative to the end of transfer */
+ ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ stm32_spi_enable(spi);
+
+ /* Be sure to have data in fifo before starting data transfer */
+ if (spi->tx_buf)
+ stm32_spi_write_txfifo(spi);
+
+ stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART);
+
+ writel_relaxed(ier, spi->base + STM32_SPI_IER);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return 1;
+}
+
+/**
+ * stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA
+ *
+ * It must return 0 if the transfer is finished or 1 if the transfer is still
+ * in progress.
+ */
+static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
+ struct spi_transfer *xfer)
+{
+ struct dma_slave_config tx_dma_conf, rx_dma_conf;
+ struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc;
+ unsigned long flags;
+ u32 ier = 0;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ rx_dma_desc = NULL;
+ if (spi->rx_buf) {
+ stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM);
+ dmaengine_slave_config(spi->dma_rx, &rx_dma_conf);
+
+ /* Enable Rx DMA request */
+ stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN);
+
+ rx_dma_desc = dmaengine_prep_slave_sg(
+ spi->dma_rx, xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ rx_dma_conf.direction,
+ DMA_PREP_INTERRUPT);
+ }
+
+ tx_dma_desc = NULL;
+ if (spi->tx_buf) {
+ stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV);
+ dmaengine_slave_config(spi->dma_tx, &tx_dma_conf);
+
+ tx_dma_desc = dmaengine_prep_slave_sg(
+ spi->dma_tx, xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ tx_dma_conf.direction,
+ DMA_PREP_INTERRUPT);
+ }
+
+ if ((spi->tx_buf && !tx_dma_desc) ||
+ (spi->rx_buf && !rx_dma_desc))
+ goto dma_desc_error;
+
+ if (rx_dma_desc) {
+ rx_dma_desc->callback = stm32_spi_dma_cb;
+ rx_dma_desc->callback_param = spi;
+
+ if (dma_submit_error(dmaengine_submit(rx_dma_desc))) {
+ dev_err(spi->dev, "Rx DMA submit failed\n");
+ goto dma_desc_error;
+ }
+ /* Enable Rx DMA channel */
+ dma_async_issue_pending(spi->dma_rx);
+ }
+
+ if (tx_dma_desc) {
+ if (spi->cur_comm == SPI_SIMPLEX_TX) {
+ tx_dma_desc->callback = stm32_spi_dma_cb;
+ tx_dma_desc->callback_param = spi;
+ }
+
+ if (dma_submit_error(dmaengine_submit(tx_dma_desc))) {
+ dev_err(spi->dev, "Tx DMA submit failed\n");
+ goto dma_submit_error;
+ }
+ /* Enable Tx DMA channel */
+ dma_async_issue_pending(spi->dma_tx);
+
+ /* Enable Tx DMA request */
+ stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN);
+ }
+
+ /* Enable the interrupts relative to the end of transfer */
+ ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE;
+ writel_relaxed(ier, spi->base + STM32_SPI_IER);
+
+ stm32_spi_enable(spi);
+
+ stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return 1;
+
+dma_submit_error:
+ if (spi->rx_buf)
+ dmaengine_terminate_all(spi->dma_rx);
+
+dma_desc_error:
+ stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ dev_info(spi->dev, "DMA issue: fall back to irq transfer\n");
+
+ return stm32_spi_transfer_one_irq(spi);
+}
+
+/**
+ * stm32_spi_transfer_one_setup - common setup to transfer a single
+ * spi_transfer either using DMA or
+ * interrupts.
+ */
+static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
+ struct spi_device *spi_dev,
+ struct spi_transfer *transfer)
+{
+ unsigned long flags;
+ u32 cfg1_clrb = 0, cfg1_setb = 0, cfg2_clrb = 0, cfg2_setb = 0;
+ u32 mode, nb_words;
+ int ret = 0;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ if (spi->cur_bpw != transfer->bits_per_word) {
+ u32 bpw, fthlv;
+
+ spi->cur_bpw = transfer->bits_per_word;
+ bpw = spi->cur_bpw - 1;
+
+ cfg1_clrb |= SPI_CFG1_DSIZE;
+ cfg1_setb |= (bpw << SPI_CFG1_DSIZE_SHIFT) & SPI_CFG1_DSIZE;
+
+ spi->cur_fthlv = stm32_spi_prepare_fthlv(spi);
+ fthlv = spi->cur_fthlv - 1;
+
+ cfg1_clrb |= SPI_CFG1_FTHLV;
+ cfg1_setb |= (fthlv << SPI_CFG1_FTHLV_SHIFT) & SPI_CFG1_FTHLV;
+ }
+
+ if (spi->cur_speed != transfer->speed_hz) {
+ int mbr;
+
+ /* Update spi->cur_speed with real clock speed */
+ mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz);
+ if (mbr < 0) {
+ ret = mbr;
+ goto out;
+ }
+
+ transfer->speed_hz = spi->cur_speed;
+
+ cfg1_clrb |= SPI_CFG1_MBR;
+ cfg1_setb |= ((u32)mbr << SPI_CFG1_MBR_SHIFT) & SPI_CFG1_MBR;
+ }
+
+ if (cfg1_clrb || cfg1_setb)
+ writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG1) &
+ ~cfg1_clrb) | cfg1_setb,
+ spi->base + STM32_SPI_CFG1);
+
+ mode = SPI_FULL_DUPLEX;
+ if (spi_dev->mode & SPI_3WIRE) { /* MISO/MOSI signals shared */
+ /*
+		 * SPI_3WIRE with both xfer->tx_buf and xfer->rx_buf non-NULL
+		 * is forbidden and not validated by the SPI subsystem, so the
+		 * direction of the transfer is determined by which buffer is
+		 * valid.
+ */
+ mode = SPI_HALF_DUPLEX;
+ if (!transfer->tx_buf)
+ stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR);
+ else if (!transfer->rx_buf)
+ stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR);
+ } else {
+ if (!transfer->tx_buf)
+ mode = SPI_SIMPLEX_RX;
+ else if (!transfer->rx_buf)
+ mode = SPI_SIMPLEX_TX;
+ }
+ if (spi->cur_comm != mode) {
+ spi->cur_comm = mode;
+
+ cfg2_clrb |= SPI_CFG2_COMM;
+ cfg2_setb |= (mode << SPI_CFG2_COMM_SHIFT) & SPI_CFG2_COMM;
+ }
+
+ cfg2_clrb |= SPI_CFG2_MIDI;
+ if ((transfer->len > 1) && (spi->cur_midi > 0)) {
+ u32 sck_period_ns = DIV_ROUND_UP(SPI_1HZ_NS, spi->cur_speed);
+ u32 midi = min((u32)DIV_ROUND_UP(spi->cur_midi, sck_period_ns),
+ (u32)SPI_CFG2_MIDI >> SPI_CFG2_MIDI_SHIFT);
+
+ dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n",
+ sck_period_ns, midi, midi * sck_period_ns);
+
+ cfg2_setb |= (midi << SPI_CFG2_MIDI_SHIFT) & SPI_CFG2_MIDI;
+ }
+
+ if (cfg2_clrb || cfg2_setb)
+ writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG2) &
+ ~cfg2_clrb) | cfg2_setb,
+ spi->base + STM32_SPI_CFG2);
+
+ if (spi->cur_bpw <= 8)
+ nb_words = transfer->len;
+ else if (spi->cur_bpw <= 16)
+ nb_words = DIV_ROUND_UP(transfer->len * 8, 16);
+ else
+ nb_words = DIV_ROUND_UP(transfer->len * 8, 32);
+ nb_words <<= SPI_CR2_TSIZE_SHIFT;
+
+ if (nb_words <= SPI_CR2_TSIZE) {
+ writel_relaxed(nb_words, spi->base + STM32_SPI_CR2);
+ } else {
+ ret = -EMSGSIZE;
+ goto out;
+ }
+
+ spi->cur_xferlen = transfer->len;
+
+ dev_dbg(spi->dev, "transfer communication mode set to %d\n",
+ spi->cur_comm);
+ dev_dbg(spi->dev,
+ "data frame of %d-bit, data packet of %d data frames\n",
+ spi->cur_bpw, spi->cur_fthlv);
+ dev_dbg(spi->dev, "speed set to %dHz\n", spi->cur_speed);
+ dev_dbg(spi->dev, "transfer of %d bytes (%d data frames)\n",
+ spi->cur_xferlen, nb_words);
+ dev_dbg(spi->dev, "dma %s\n",
+ (spi->cur_usedma) ? "enabled" : "disabled");
+
+out:
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return ret;
+}
+
+/**
+ * stm32_spi_transfer_one - transfer a single spi_transfer
+ *
+ * It must return 0 if the transfer is finished or 1 if the transfer is still
+ * in progress.
+ */
+static int stm32_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi_dev,
+ struct spi_transfer *transfer)
+{
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+ int ret;
+
+ spi->tx_buf = transfer->tx_buf;
+ spi->rx_buf = transfer->rx_buf;
+ spi->tx_len = spi->tx_buf ? transfer->len : 0;
+ spi->rx_len = spi->rx_buf ? transfer->len : 0;
+
+ spi->cur_usedma = (master->can_dma &&
+ stm32_spi_can_dma(master, spi_dev, transfer));
+
+ ret = stm32_spi_transfer_one_setup(spi, spi_dev, transfer);
+ if (ret) {
+ dev_err(spi->dev, "SPI transfer setup failed\n");
+ return ret;
+ }
+
+ if (spi->cur_usedma)
+ return stm32_spi_transfer_one_dma(spi, transfer);
+ else
+ return stm32_spi_transfer_one_irq(spi);
+}
+
+/**
+ * stm32_spi_unprepare_msg - relax the hardware
+ *
+ * Normally, if TSIZE has been configured, we should relax the hardware at the
+ * reception of the EOT interrupt. But in case of error, EOT will not be
+ * raised. So the subsystem unprepare_message call allows us to properly
+ * complete the transfer from a hardware point of view.
+ */
+static int stm32_spi_unprepare_msg(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+
+ stm32_spi_disable(spi);
+
+ return 0;
+}
+
+/**
+ * stm32_spi_config - Configure SPI controller as SPI master
+ */
+static int stm32_spi_config(struct stm32_spi *spi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ /* Ensure I2SMOD bit is kept cleared */
+ stm32_spi_clr_bits(spi, STM32_SPI_I2SCFGR, SPI_I2SCFGR_I2SMOD);
+
+ /*
+ * - SS input value high
+ * - transmitter half duplex direction
+ * - automatic communication suspend when RX-Fifo is full
+ */
+ stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SSI |
+ SPI_CR1_HDDIR |
+ SPI_CR1_MASRX);
+
+ /*
+ * - Set the master mode (default Motorola mode)
+ * - Consider 1 master/n slaves configuration and
+ * SS input value is determined by the SSI bit
+ * - keep control of all associated GPIOs
+ */
+ stm32_spi_set_bits(spi, STM32_SPI_CFG2, SPI_CFG2_MASTER |
+ SPI_CFG2_SSM |
+ SPI_CFG2_AFCNTR);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return 0;
+}
+
+static const struct of_device_id stm32_spi_of_match[] = {
+ { .compatible = "st,stm32h7-spi", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_spi_of_match);
+
+static int stm32_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct stm32_spi *spi;
+ struct resource *res;
+ int i, ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
+ if (!master) {
+ dev_err(&pdev->dev, "spi master allocation failed\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, master);
+
+ spi = spi_master_get_devdata(master);
+ spi->dev = &pdev->dev;
+ spi->master = master;
+ spin_lock_init(&spi->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spi->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spi->base)) {
+ ret = PTR_ERR(spi->base);
+ goto err_master_put;
+ }
+ spi->phys_addr = (dma_addr_t)res->start;
+
+ spi->irq = platform_get_irq(pdev, 0);
+ if (spi->irq <= 0) {
+ dev_err(&pdev->dev, "no irq: %d\n", spi->irq);
+ ret = -ENOENT;
+ goto err_master_put;
+ }
+ ret = devm_request_threaded_irq(&pdev->dev, spi->irq, NULL,
+ stm32_spi_irq, IRQF_ONESHOT,
+ pdev->name, master);
+ if (ret) {
+ dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
+ ret);
+ goto err_master_put;
+ }
+
+ spi->clk = devm_clk_get(&pdev->dev, 0);
+ if (IS_ERR(spi->clk)) {
+ ret = PTR_ERR(spi->clk);
+ dev_err(&pdev->dev, "clk get failed: %d\n", ret);
+ goto err_master_put;
+ }
+
+ ret = clk_prepare_enable(spi->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clk enable failed: %d\n", ret);
+ goto err_master_put;
+ }
+ spi->clk_rate = clk_get_rate(spi->clk);
+ if (!spi->clk_rate) {
+ dev_err(&pdev->dev, "clk rate = 0\n");
+ ret = -EINVAL;
+ goto err_master_put;
+ }
+
+ spi->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (!IS_ERR(spi->rst)) {
+ reset_control_assert(spi->rst);
+ udelay(2);
+ reset_control_deassert(spi->rst);
+ }
+
+ spi->fifo_size = stm32_spi_get_fifo_size(spi);
+
+ ret = stm32_spi_config(spi);
+ if (ret) {
+ dev_err(&pdev->dev, "controller configuration failed: %d\n",
+ ret);
+ goto err_clk_disable;
+ }
+
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_MODE_3 | SPI_CS_HIGH | SPI_LSB_FIRST |
+ SPI_3WIRE | SPI_LOOP;
+ master->bits_per_word_mask = stm32_spi_get_bpw_mask(spi);
+ master->max_speed_hz = spi->clk_rate / SPI_MBR_DIV_MIN;
+ master->min_speed_hz = spi->clk_rate / SPI_MBR_DIV_MAX;
+ master->setup = stm32_spi_setup;
+ master->prepare_message = stm32_spi_prepare_msg;
+ master->transfer_one = stm32_spi_transfer_one;
+ master->unprepare_message = stm32_spi_unprepare_msg;
+
+ spi->dma_tx = dma_request_slave_channel(spi->dev, "tx");
+ if (!spi->dma_tx)
+ dev_warn(&pdev->dev, "failed to request tx dma channel\n");
+ else
+ master->dma_tx = spi->dma_tx;
+
+ spi->dma_rx = dma_request_slave_channel(spi->dev, "rx");
+ if (!spi->dma_rx)
+ dev_warn(&pdev->dev, "failed to request rx dma channel\n");
+ else
+ master->dma_rx = spi->dma_rx;
+
+ if (spi->dma_tx || spi->dma_rx)
+ master->can_dma = stm32_spi_can_dma;
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "spi master registration failed: %d\n",
+ ret);
+ goto err_dma_release;
+ }
+
+ if (!master->cs_gpios) {
+ dev_err(&pdev->dev, "no CS gpios available\n");
+ ret = -EINVAL;
+ goto err_dma_release;
+ }
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ if (!gpio_is_valid(master->cs_gpios[i])) {
+ dev_err(&pdev->dev, "%i is not a valid gpio\n",
+ master->cs_gpios[i]);
+ ret = -EINVAL;
+ goto err_dma_release;
+ }
+
+ ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
+ DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get CS gpio %i\n",
+ master->cs_gpios[i]);
+ goto err_dma_release;
+ }
+ }
+
+ dev_info(&pdev->dev, "driver initialized\n");
+
+ return 0;
+
+err_dma_release:
+ if (spi->dma_tx)
+ dma_release_channel(spi->dma_tx);
+ if (spi->dma_rx)
+ dma_release_channel(spi->dma_rx);
+
+ pm_runtime_disable(&pdev->dev);
+err_clk_disable:
+ clk_disable_unprepare(spi->clk);
+err_master_put:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int stm32_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+
+ stm32_spi_disable(spi);
+
+ if (master->dma_tx)
+ dma_release_channel(master->dma_tx);
+ if (master->dma_rx)
+ dma_release_channel(master->dma_rx);
+
+ clk_disable_unprepare(spi->clk);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int stm32_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(spi->clk);
+
+ return 0;
+}
+
+static int stm32_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+
+ return clk_prepare_enable(spi->clk);
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int stm32_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int stm32_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ ret = spi_master_resume(master);
+ if (ret)
+ clk_disable_unprepare(spi->clk);
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops stm32_spi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_spi_suspend, stm32_spi_resume)
+ SET_RUNTIME_PM_OPS(stm32_spi_runtime_suspend,
+ stm32_spi_runtime_resume, NULL)
+};
+
+static struct platform_driver stm32_spi_driver = {
+ .probe = stm32_spi_probe,
+ .remove = stm32_spi_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &stm32_spi_pm_ops,
+ .of_match_table = stm32_spi_of_match,
+ },
+};
+
+module_platform_driver(stm32_spi_driver);
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_DESCRIPTION("STMicroelectronics STM32 SPI Controller driver");
+MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@st.com>");
+MODULE_LICENSE("GPL v2");
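As a worked example of stm32_spi_prepare_mbr() above (illustrative numbers only, assuming a 100 MHz spi->clk_rate and a requested transfer speed of 12 MHz):

	div    = DIV_ROUND_UP(100 MHz, 12 MHz)  = 9
	mbrdiv = fls(9)                         = 4	(9 is not a power of two)
	SCK    = 100 MHz / (1 << 4)             = 6.25 MHz
	MBR    = mbrdiv - 1                     = 3

The function therefore rounds the requested speed down to the nearest achievable power-of-two division of the kernel clock, stores the actual rate in spi->cur_speed, and returns the value to be programmed into SPI_CFG1.MBR.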
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 89254a55eb2e..4fcbb0aa71d3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -48,11 +48,11 @@ static void spidev_release(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
- /* spi masters may cleanup for released devices */
- if (spi->master->cleanup)
- spi->master->cleanup(spi);
+ /* spi controllers may cleanup for released devices */
+ if (spi->controller->cleanup)
+ spi->controller->cleanup(spi);
- spi_master_put(spi->master);
+ spi_controller_put(spi->controller);
kfree(spi);
}
@@ -71,17 +71,17 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
static DEVICE_ATTR_RO(modalias);
#define SPI_STATISTICS_ATTRS(field, file) \
-static ssize_t spi_master_##field##_show(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
+static ssize_t spi_controller_##field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
{ \
- struct spi_master *master = container_of(dev, \
- struct spi_master, dev); \
- return spi_statistics_##field##_show(&master->statistics, buf); \
+ struct spi_controller *ctlr = container_of(dev, \
+ struct spi_controller, dev); \
+ return spi_statistics_##field##_show(&ctlr->statistics, buf); \
} \
-static struct device_attribute dev_attr_spi_master_##field = { \
- .attr = { .name = file, .mode = S_IRUGO }, \
- .show = spi_master_##field##_show, \
+static struct device_attribute dev_attr_spi_controller_##field = { \
+ .attr = { .name = file, .mode = 0444 }, \
+ .show = spi_controller_##field##_show, \
}; \
static ssize_t spi_device_##field##_show(struct device *dev, \
struct device_attribute *attr, \
@@ -91,7 +91,7 @@ static ssize_t spi_device_##field##_show(struct device *dev, \
return spi_statistics_##field##_show(&spi->statistics, buf); \
} \
static struct device_attribute dev_attr_spi_device_##field = { \
- .attr = { .name = file, .mode = S_IRUGO }, \
+ .attr = { .name = file, .mode = 0444 }, \
.show = spi_device_##field##_show, \
}
@@ -201,51 +201,51 @@ static const struct attribute_group *spi_dev_groups[] = {
NULL,
};
-static struct attribute *spi_master_statistics_attrs[] = {
- &dev_attr_spi_master_messages.attr,
- &dev_attr_spi_master_transfers.attr,
- &dev_attr_spi_master_errors.attr,
- &dev_attr_spi_master_timedout.attr,
- &dev_attr_spi_master_spi_sync.attr,
- &dev_attr_spi_master_spi_sync_immediate.attr,
- &dev_attr_spi_master_spi_async.attr,
- &dev_attr_spi_master_bytes.attr,
- &dev_attr_spi_master_bytes_rx.attr,
- &dev_attr_spi_master_bytes_tx.attr,
- &dev_attr_spi_master_transfer_bytes_histo0.attr,
- &dev_attr_spi_master_transfer_bytes_histo1.attr,
- &dev_attr_spi_master_transfer_bytes_histo2.attr,
- &dev_attr_spi_master_transfer_bytes_histo3.attr,
- &dev_attr_spi_master_transfer_bytes_histo4.attr,
- &dev_attr_spi_master_transfer_bytes_histo5.attr,
- &dev_attr_spi_master_transfer_bytes_histo6.attr,
- &dev_attr_spi_master_transfer_bytes_histo7.attr,
- &dev_attr_spi_master_transfer_bytes_histo8.attr,
- &dev_attr_spi_master_transfer_bytes_histo9.attr,
- &dev_attr_spi_master_transfer_bytes_histo10.attr,
- &dev_attr_spi_master_transfer_bytes_histo11.attr,
- &dev_attr_spi_master_transfer_bytes_histo12.attr,
- &dev_attr_spi_master_transfer_bytes_histo13.attr,
- &dev_attr_spi_master_transfer_bytes_histo14.attr,
- &dev_attr_spi_master_transfer_bytes_histo15.attr,
- &dev_attr_spi_master_transfer_bytes_histo16.attr,
- &dev_attr_spi_master_transfers_split_maxsize.attr,
+static struct attribute *spi_controller_statistics_attrs[] = {
+ &dev_attr_spi_controller_messages.attr,
+ &dev_attr_spi_controller_transfers.attr,
+ &dev_attr_spi_controller_errors.attr,
+ &dev_attr_spi_controller_timedout.attr,
+ &dev_attr_spi_controller_spi_sync.attr,
+ &dev_attr_spi_controller_spi_sync_immediate.attr,
+ &dev_attr_spi_controller_spi_async.attr,
+ &dev_attr_spi_controller_bytes.attr,
+ &dev_attr_spi_controller_bytes_rx.attr,
+ &dev_attr_spi_controller_bytes_tx.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo0.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo1.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo2.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo3.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo4.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo5.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo6.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo7.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo8.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo9.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo10.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo11.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo12.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo13.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo14.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo15.attr,
+ &dev_attr_spi_controller_transfer_bytes_histo16.attr,
+ &dev_attr_spi_controller_transfers_split_maxsize.attr,
NULL,
};
-static const struct attribute_group spi_master_statistics_group = {
+static const struct attribute_group spi_controller_statistics_group = {
.name = "statistics",
- .attrs = spi_master_statistics_attrs,
+ .attrs = spi_controller_statistics_attrs,
};
static const struct attribute_group *spi_master_groups[] = {
- &spi_master_statistics_group,
+ &spi_controller_statistics_group,
NULL,
};
void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
struct spi_transfer *xfer,
- struct spi_master *master)
+ struct spi_controller *ctlr)
{
unsigned long flags;
int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
@@ -260,10 +260,10 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
stats->bytes += xfer->len;
if ((xfer->tx_buf) &&
- (xfer->tx_buf != master->dummy_tx))
+ (xfer->tx_buf != ctlr->dummy_tx))
stats->bytes_tx += xfer->len;
if ((xfer->rx_buf) &&
- (xfer->rx_buf != master->dummy_rx))
+ (xfer->rx_buf != ctlr->dummy_rx))
stats->bytes_rx += xfer->len;
spin_unlock_irqrestore(&stats->lock, flags);
@@ -405,7 +405,7 @@ EXPORT_SYMBOL_GPL(__spi_register_driver);
/*-------------------------------------------------------------------------*/
/* SPI devices should normally not be created by SPI device drivers; that
- * would make them board-specific. Similarly with SPI master drivers.
+ * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into files like arch/.../mach.../board-YYY.c
* with other readonly (flashable) information about mainboard devices.
*/
@@ -416,17 +416,17 @@ struct boardinfo {
};
static LIST_HEAD(board_list);
-static LIST_HEAD(spi_master_list);
+static LIST_HEAD(spi_controller_list);
/*
 * Used to protect add/del operations for board_info list and
- * spi_master list, and their matching process
+ * spi_controller list, and their matching process
*/
static DEFINE_MUTEX(board_lock);
/**
* spi_alloc_device - Allocate a new SPI device
- * @master: Controller to which device is connected
+ * @ctlr: Controller to which device is connected
* Context: can sleep
*
* Allows a driver to allocate and initialize a spi_device without
@@ -435,27 +435,27 @@ static DEFINE_MUTEX(board_lock);
* spi_add_device() on it.
*
* Caller is responsible to call spi_add_device() on the returned
- * spi_device structure to add it to the SPI master. If the caller
+ * spi_device structure to add it to the SPI controller. If the caller
* needs to discard the spi_device without adding it, then it should
* call spi_dev_put() on it.
*
* Return: a pointer to the new device, or NULL.
*/
-struct spi_device *spi_alloc_device(struct spi_master *master)
+struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
struct spi_device *spi;
- if (!spi_master_get(master))
+ if (!spi_controller_get(ctlr))
return NULL;
spi = kzalloc(sizeof(*spi), GFP_KERNEL);
if (!spi) {
- spi_master_put(master);
+ spi_controller_put(ctlr);
return NULL;
}
- spi->master = master;
- spi->dev.parent = &master->dev;
+ spi->master = spi->controller = ctlr;
+ spi->dev.parent = &ctlr->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
spi->cs_gpio = -ENOENT;
@@ -476,7 +476,7 @@ static void spi_dev_set_name(struct spi_device *spi)
return;
}
- dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
+ dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
spi->chip_select);
}
@@ -485,7 +485,7 @@ static int spi_dev_check(struct device *dev, void *data)
struct spi_device *spi = to_spi_device(dev);
struct spi_device *new_spi = data;
- if (spi->master == new_spi->master &&
+ if (spi->controller == new_spi->controller &&
spi->chip_select == new_spi->chip_select)
return -EBUSY;
return 0;
@@ -503,15 +503,14 @@ static int spi_dev_check(struct device *dev, void *data)
int spi_add_device(struct spi_device *spi)
{
static DEFINE_MUTEX(spi_add_lock);
- struct spi_master *master = spi->master;
- struct device *dev = master->dev.parent;
+ struct spi_controller *ctlr = spi->controller;
+ struct device *dev = ctlr->dev.parent;
int status;
/* Chipselects are numbered 0..max; validate. */
- if (spi->chip_select >= master->num_chipselect) {
- dev_err(dev, "cs%d >= max %d\n",
- spi->chip_select,
- master->num_chipselect);
+ if (spi->chip_select >= ctlr->num_chipselect) {
+ dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
+ ctlr->num_chipselect);
return -EINVAL;
}
@@ -531,8 +530,8 @@ int spi_add_device(struct spi_device *spi)
goto done;
}
- if (master->cs_gpios)
- spi->cs_gpio = master->cs_gpios[spi->chip_select];
+ if (ctlr->cs_gpios)
+ spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
/* Drivers may modify this initial i/o setup, but will
* normally rely on the device being setup. Devices
@@ -561,7 +560,7 @@ EXPORT_SYMBOL_GPL(spi_add_device);
/**
* spi_new_device - instantiate one new SPI device
- * @master: Controller to which device is connected
+ * @ctlr: Controller to which device is connected
* @chip: Describes the SPI device
* Context: can sleep
*
@@ -573,7 +572,7 @@ EXPORT_SYMBOL_GPL(spi_add_device);
*
* Return: the new device, or NULL.
*/
-struct spi_device *spi_new_device(struct spi_master *master,
+struct spi_device *spi_new_device(struct spi_controller *ctlr,
struct spi_board_info *chip)
{
struct spi_device *proxy;
@@ -586,7 +585,7 @@ struct spi_device *spi_new_device(struct spi_master *master,
* suggests syslogged diagnostics are best here (ugh).
*/
- proxy = spi_alloc_device(master);
+ proxy = spi_alloc_device(ctlr);
if (!proxy)
return NULL;
@@ -604,7 +603,7 @@ struct spi_device *spi_new_device(struct spi_master *master,
if (chip->properties) {
status = device_add_properties(&proxy->dev, chip->properties);
if (status) {
- dev_err(&master->dev,
+ dev_err(&ctlr->dev,
"failed to add properties to '%s': %d\n",
chip->modalias, status);
goto err_dev_put;
@@ -631,7 +630,7 @@ EXPORT_SYMBOL_GPL(spi_new_device);
* @spi: spi_device to unregister
*
* Start making the passed SPI device vanish. Normally this would be handled
- * by spi_unregister_master().
+ * by spi_unregister_controller().
*/
void spi_unregister_device(struct spi_device *spi)
{
@@ -648,17 +647,17 @@ void spi_unregister_device(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
-static void spi_match_master_to_boardinfo(struct spi_master *master,
- struct spi_board_info *bi)
+static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
+ struct spi_board_info *bi)
{
struct spi_device *dev;
- if (master->bus_num != bi->bus_num)
+ if (ctlr->bus_num != bi->bus_num)
return;
- dev = spi_new_device(master, bi);
+ dev = spi_new_device(ctlr, bi);
if (!dev)
- dev_err(master->dev.parent, "can't create new device for %s\n",
+ dev_err(ctlr->dev.parent, "can't create new device for %s\n",
bi->modalias);
}
@@ -697,7 +696,7 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
return -ENOMEM;
for (i = 0; i < n; i++, bi++, info++) {
- struct spi_master *master;
+ struct spi_controller *ctlr;
memcpy(&bi->board_info, info, sizeof(*info));
if (info->properties) {
@@ -709,8 +708,9 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
mutex_lock(&board_lock);
list_add_tail(&bi->list, &board_list);
- list_for_each_entry(master, &spi_master_list, list)
- spi_match_master_to_boardinfo(master, &bi->board_info);
+ list_for_each_entry(ctlr, &spi_controller_list, list)
+ spi_match_controller_to_boardinfo(ctlr,
+ &bi->board_info);
mutex_unlock(&board_lock);
}
@@ -727,16 +727,16 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
if (gpio_is_valid(spi->cs_gpio)) {
gpio_set_value(spi->cs_gpio, !enable);
/* Some SPI masters need both GPIO CS & slave_select */
- if ((spi->master->flags & SPI_MASTER_GPIO_SS) &&
- spi->master->set_cs)
- spi->master->set_cs(spi, !enable);
- } else if (spi->master->set_cs) {
- spi->master->set_cs(spi, !enable);
+ if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
+ spi->controller->set_cs)
+ spi->controller->set_cs(spi, !enable);
+ } else if (spi->controller->set_cs) {
+ spi->controller->set_cs(spi, !enable);
}
}
#ifdef CONFIG_HAS_DMA
-static int spi_map_buf(struct spi_master *master, struct device *dev,
+static int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
struct sg_table *sgt, void *buf, size_t len,
enum dma_data_direction dir)
{
@@ -761,7 +761,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
desc_len = min_t(int, max_seg_size, PAGE_SIZE);
sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
} else if (virt_addr_valid(buf)) {
- desc_len = min_t(int, max_seg_size, master->max_dma_len);
+ desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
sgs = DIV_ROUND_UP(len, desc_len);
} else {
return -EINVAL;
@@ -811,7 +811,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
return 0;
}
-static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+static void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
struct sg_table *sgt, enum dma_data_direction dir)
{
if (sgt->orig_nents) {
@@ -820,31 +820,31 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev,
}
}
-static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
+static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
struct device *tx_dev, *rx_dev;
struct spi_transfer *xfer;
int ret;
- if (!master->can_dma)
+ if (!ctlr->can_dma)
return 0;
- if (master->dma_tx)
- tx_dev = master->dma_tx->device->dev;
+ if (ctlr->dma_tx)
+ tx_dev = ctlr->dma_tx->device->dev;
else
- tx_dev = master->dev.parent;
+ tx_dev = ctlr->dev.parent;
- if (master->dma_rx)
- rx_dev = master->dma_rx->device->dev;
+ if (ctlr->dma_rx)
+ rx_dev = ctlr->dma_rx->device->dev;
else
- rx_dev = master->dev.parent;
+ rx_dev = ctlr->dev.parent;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (!master->can_dma(master, msg->spi, xfer))
+ if (!ctlr->can_dma(ctlr, msg->spi, xfer))
continue;
if (xfer->tx_buf != NULL) {
- ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+ ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
(void *)xfer->tx_buf, xfer->len,
DMA_TO_DEVICE);
if (ret != 0)
@@ -852,79 +852,78 @@ static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
}
if (xfer->rx_buf != NULL) {
- ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+ ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
xfer->rx_buf, xfer->len,
DMA_FROM_DEVICE);
if (ret != 0) {
- spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
+ spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
DMA_TO_DEVICE);
return ret;
}
}
}
- master->cur_msg_mapped = true;
+ ctlr->cur_msg_mapped = true;
return 0;
}
-static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
struct spi_transfer *xfer;
struct device *tx_dev, *rx_dev;
- if (!master->cur_msg_mapped || !master->can_dma)
+ if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
return 0;
- if (master->dma_tx)
- tx_dev = master->dma_tx->device->dev;
+ if (ctlr->dma_tx)
+ tx_dev = ctlr->dma_tx->device->dev;
else
- tx_dev = master->dev.parent;
+ tx_dev = ctlr->dev.parent;
- if (master->dma_rx)
- rx_dev = master->dma_rx->device->dev;
+ if (ctlr->dma_rx)
+ rx_dev = ctlr->dma_rx->device->dev;
else
- rx_dev = master->dev.parent;
+ rx_dev = ctlr->dev.parent;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (!master->can_dma(master, msg->spi, xfer))
+ if (!ctlr->can_dma(ctlr, msg->spi, xfer))
continue;
- spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
- spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+ spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
return 0;
}
#else /* !CONFIG_HAS_DMA */
-static inline int spi_map_buf(struct spi_master *master,
- struct device *dev, struct sg_table *sgt,
- void *buf, size_t len,
+static inline int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
enum dma_data_direction dir)
{
return -EINVAL;
}
-static inline void spi_unmap_buf(struct spi_master *master,
+static inline void spi_unmap_buf(struct spi_controller *ctlr,
struct device *dev, struct sg_table *sgt,
enum dma_data_direction dir)
{
}
-static inline int __spi_map_msg(struct spi_master *master,
+static inline int __spi_map_msg(struct spi_controller *ctlr,
struct spi_message *msg)
{
return 0;
}
-static inline int __spi_unmap_msg(struct spi_master *master,
+static inline int __spi_unmap_msg(struct spi_controller *ctlr,
struct spi_message *msg)
{
return 0;
}
#endif /* !CONFIG_HAS_DMA */
-static inline int spi_unmap_msg(struct spi_master *master,
+static inline int spi_unmap_msg(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct spi_transfer *xfer;
@@ -934,63 +933,63 @@ static inline int spi_unmap_msg(struct spi_master *master,
* Restore the original value of tx_buf or rx_buf if they are
* NULL.
*/
- if (xfer->tx_buf == master->dummy_tx)
+ if (xfer->tx_buf == ctlr->dummy_tx)
xfer->tx_buf = NULL;
- if (xfer->rx_buf == master->dummy_rx)
+ if (xfer->rx_buf == ctlr->dummy_rx)
xfer->rx_buf = NULL;
}
- return __spi_unmap_msg(master, msg);
+ return __spi_unmap_msg(ctlr, msg);
}
-static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
struct spi_transfer *xfer;
void *tmp;
unsigned int max_tx, max_rx;
- if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+ if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
max_tx = 0;
max_rx = 0;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if ((master->flags & SPI_MASTER_MUST_TX) &&
+ if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
!xfer->tx_buf)
max_tx = max(xfer->len, max_tx);
- if ((master->flags & SPI_MASTER_MUST_RX) &&
+ if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
!xfer->rx_buf)
max_rx = max(xfer->len, max_rx);
}
if (max_tx) {
- tmp = krealloc(master->dummy_tx, max_tx,
+ tmp = krealloc(ctlr->dummy_tx, max_tx,
GFP_KERNEL | GFP_DMA);
if (!tmp)
return -ENOMEM;
- master->dummy_tx = tmp;
+ ctlr->dummy_tx = tmp;
memset(tmp, 0, max_tx);
}
if (max_rx) {
- tmp = krealloc(master->dummy_rx, max_rx,
+ tmp = krealloc(ctlr->dummy_rx, max_rx,
GFP_KERNEL | GFP_DMA);
if (!tmp)
return -ENOMEM;
- master->dummy_rx = tmp;
+ ctlr->dummy_rx = tmp;
}
if (max_tx || max_rx) {
list_for_each_entry(xfer, &msg->transfers,
transfer_list) {
if (!xfer->tx_buf)
- xfer->tx_buf = master->dummy_tx;
+ xfer->tx_buf = ctlr->dummy_tx;
if (!xfer->rx_buf)
- xfer->rx_buf = master->dummy_rx;
+ xfer->rx_buf = ctlr->dummy_rx;
}
}
}
- return __spi_map_msg(master, msg);
+ return __spi_map_msg(ctlr, msg);
}
/*
@@ -1000,14 +999,14 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
* drivers which implement a transfer_one() operation. It provides
* standard handling of delays and chip select management.
*/
-static int spi_transfer_one_message(struct spi_master *master,
+static int spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct spi_transfer *xfer;
bool keep_cs = false;
int ret = 0;
unsigned long long ms = 1;
- struct spi_statistics *statm = &master->statistics;
+ struct spi_statistics *statm = &ctlr->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
spi_set_cs(msg->spi, true);
@@ -1018,13 +1017,13 @@ static int spi_transfer_one_message(struct spi_master *master,
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
trace_spi_transfer_start(msg, xfer);
- spi_statistics_add_transfer_stats(statm, xfer, master);
- spi_statistics_add_transfer_stats(stats, xfer, master);
+ spi_statistics_add_transfer_stats(statm, xfer, ctlr);
+ spi_statistics_add_transfer_stats(stats, xfer, ctlr);
if (xfer->tx_buf || xfer->rx_buf) {
- reinit_completion(&master->xfer_completion);
+ reinit_completion(&ctlr->xfer_completion);
- ret = master->transfer_one(master, msg->spi, xfer);
+ ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
if (ret < 0) {
SPI_STATISTICS_INCREMENT_FIELD(statm,
errors);
@@ -1044,7 +1043,7 @@ static int spi_transfer_one_message(struct spi_master *master,
if (ms > UINT_MAX)
ms = UINT_MAX;
- ms = wait_for_completion_timeout(&master->xfer_completion,
+ ms = wait_for_completion_timeout(&ctlr->xfer_completion,
msecs_to_jiffies(ms));
}
@@ -1099,33 +1098,33 @@ out:
if (msg->status == -EINPROGRESS)
msg->status = ret;
- if (msg->status && master->handle_err)
- master->handle_err(master, msg);
+ if (msg->status && ctlr->handle_err)
+ ctlr->handle_err(ctlr, msg);
- spi_res_release(master, msg);
+ spi_res_release(ctlr, msg);
- spi_finalize_current_message(master);
+ spi_finalize_current_message(ctlr);
return ret;
}
/**
* spi_finalize_current_transfer - report completion of a transfer
- * @master: the master reporting completion
+ * @ctlr: the controller reporting completion
*
* Called by SPI drivers using the core transfer_one_message()
* implementation to notify it that the current interrupt driven
* transfer has finished and the next one may be scheduled.
*/
-void spi_finalize_current_transfer(struct spi_master *master)
+void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
- complete(&master->xfer_completion);
+ complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
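A transfer_one() style driver typically starts the hardware, returns a positive value, and lets its interrupt handler call spi_finalize_current_transfer(); a rough sketch (the example_* names and the omitted register programming are placeholders, not part of this patch):

#include <linux/interrupt.h>
#include <linux/spi/spi.h>

/* Illustrative only: dev_id was set to the controller when the IRQ was
 * requested; the handler reports completion back to the core. */
static irqreturn_t example_spi_irq(int irq, void *dev_id)
{
        struct spi_controller *ctlr = dev_id;

        /* ... drain FIFOs, check for errors ... */
        spi_finalize_current_transfer(ctlr);    /* wakes transfer_one_message() */
        return IRQ_HANDLED;
}

static int example_transfer_one(struct spi_controller *ctlr,
                                struct spi_device *spi,
                                struct spi_transfer *xfer)
{
        /* ... program clock, bits-per-word, start DMA or FIFO fill ... */

        /* Returning a positive value tells the core to wait for completion. */
        return 1;
}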
/**
* __spi_pump_messages - function which processes spi message queue
- * @master: master to process queue for
+ * @ctlr: controller to process queue for
* @in_kthread: true if we are in the context of the message pump thread
*
* This function checks if there is any spi message in the queue that
@@ -1136,136 +1135,136 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
* inside spi_sync(); the queue extraction handling at the top of the
* function should deal with this safely.
*/
-static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
+static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
unsigned long flags;
bool was_busy = false;
int ret;
/* Lock queue */
- spin_lock_irqsave(&master->queue_lock, flags);
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
/* Make sure we are not already running a message */
- if (master->cur_msg) {
- spin_unlock_irqrestore(&master->queue_lock, flags);
+ if (ctlr->cur_msg) {
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
/* If another context is idling the device then defer */
- if (master->idling) {
- kthread_queue_work(&master->kworker, &master->pump_messages);
- spin_unlock_irqrestore(&master->queue_lock, flags);
+ if (ctlr->idling) {
+ kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
/* Check if the queue is idle */
- if (list_empty(&master->queue) || !master->running) {
- if (!master->busy) {
- spin_unlock_irqrestore(&master->queue_lock, flags);
+ if (list_empty(&ctlr->queue) || !ctlr->running) {
+ if (!ctlr->busy) {
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
/* Only do teardown in the thread */
if (!in_kthread) {
- kthread_queue_work(&master->kworker,
- &master->pump_messages);
- spin_unlock_irqrestore(&master->queue_lock, flags);
+ kthread_queue_work(&ctlr->kworker,
+ &ctlr->pump_messages);
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
- master->busy = false;
- master->idling = true;
- spin_unlock_irqrestore(&master->queue_lock, flags);
-
- kfree(master->dummy_rx);
- master->dummy_rx = NULL;
- kfree(master->dummy_tx);
- master->dummy_tx = NULL;
- if (master->unprepare_transfer_hardware &&
- master->unprepare_transfer_hardware(master))
- dev_err(&master->dev,
+ ctlr->busy = false;
+ ctlr->idling = true;
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+
+ kfree(ctlr->dummy_rx);
+ ctlr->dummy_rx = NULL;
+ kfree(ctlr->dummy_tx);
+ ctlr->dummy_tx = NULL;
+ if (ctlr->unprepare_transfer_hardware &&
+ ctlr->unprepare_transfer_hardware(ctlr))
+ dev_err(&ctlr->dev,
"failed to unprepare transfer hardware\n");
- if (master->auto_runtime_pm) {
- pm_runtime_mark_last_busy(master->dev.parent);
- pm_runtime_put_autosuspend(master->dev.parent);
+ if (ctlr->auto_runtime_pm) {
+ pm_runtime_mark_last_busy(ctlr->dev.parent);
+ pm_runtime_put_autosuspend(ctlr->dev.parent);
}
- trace_spi_master_idle(master);
+ trace_spi_controller_idle(ctlr);
- spin_lock_irqsave(&master->queue_lock, flags);
- master->idling = false;
- spin_unlock_irqrestore(&master->queue_lock, flags);
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
+ ctlr->idling = false;
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
/* Extract head of queue */
- master->cur_msg =
- list_first_entry(&master->queue, struct spi_message, queue);
+ ctlr->cur_msg =
+ list_first_entry(&ctlr->queue, struct spi_message, queue);
- list_del_init(&master->cur_msg->queue);
- if (master->busy)
+ list_del_init(&ctlr->cur_msg->queue);
+ if (ctlr->busy)
was_busy = true;
else
- master->busy = true;
- spin_unlock_irqrestore(&master->queue_lock, flags);
+ ctlr->busy = true;
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- mutex_lock(&master->io_mutex);
+ mutex_lock(&ctlr->io_mutex);
- if (!was_busy && master->auto_runtime_pm) {
- ret = pm_runtime_get_sync(master->dev.parent);
+ if (!was_busy && ctlr->auto_runtime_pm) {
+ ret = pm_runtime_get_sync(ctlr->dev.parent);
if (ret < 0) {
- dev_err(&master->dev, "Failed to power device: %d\n",
+ dev_err(&ctlr->dev, "Failed to power device: %d\n",
ret);
- mutex_unlock(&master->io_mutex);
+ mutex_unlock(&ctlr->io_mutex);
return;
}
}
if (!was_busy)
- trace_spi_master_busy(master);
+ trace_spi_controller_busy(ctlr);
- if (!was_busy && master->prepare_transfer_hardware) {
- ret = master->prepare_transfer_hardware(master);
+ if (!was_busy && ctlr->prepare_transfer_hardware) {
+ ret = ctlr->prepare_transfer_hardware(ctlr);
if (ret) {
- dev_err(&master->dev,
+ dev_err(&ctlr->dev,
"failed to prepare transfer hardware\n");
- if (master->auto_runtime_pm)
- pm_runtime_put(master->dev.parent);
- mutex_unlock(&master->io_mutex);
+ if (ctlr->auto_runtime_pm)
+ pm_runtime_put(ctlr->dev.parent);
+ mutex_unlock(&ctlr->io_mutex);
return;
}
}
- trace_spi_message_start(master->cur_msg);
+ trace_spi_message_start(ctlr->cur_msg);
- if (master->prepare_message) {
- ret = master->prepare_message(master, master->cur_msg);
+ if (ctlr->prepare_message) {
+ ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
if (ret) {
- dev_err(&master->dev,
- "failed to prepare message: %d\n", ret);
- master->cur_msg->status = ret;
- spi_finalize_current_message(master);
+ dev_err(&ctlr->dev, "failed to prepare message: %d\n",
+ ret);
+ ctlr->cur_msg->status = ret;
+ spi_finalize_current_message(ctlr);
goto out;
}
- master->cur_msg_prepared = true;
+ ctlr->cur_msg_prepared = true;
}
- ret = spi_map_msg(master, master->cur_msg);
+ ret = spi_map_msg(ctlr, ctlr->cur_msg);
if (ret) {
- master->cur_msg->status = ret;
- spi_finalize_current_message(master);
+ ctlr->cur_msg->status = ret;
+ spi_finalize_current_message(ctlr);
goto out;
}
- ret = master->transfer_one_message(master, master->cur_msg);
+ ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
if (ret) {
- dev_err(&master->dev,
+ dev_err(&ctlr->dev,
"failed to transfer one message from queue\n");
goto out;
}
out:
- mutex_unlock(&master->io_mutex);
+ mutex_unlock(&ctlr->io_mutex);
/* Prod the scheduler in case transfer_one() was busy waiting */
if (!ret)
@@ -1274,44 +1273,43 @@ out:
/**
* spi_pump_messages - kthread work function which processes spi message queue
- * @work: pointer to kthread work struct contained in the master struct
+ * @work: pointer to kthread work struct contained in the controller struct
*/
static void spi_pump_messages(struct kthread_work *work)
{
- struct spi_master *master =
- container_of(work, struct spi_master, pump_messages);
+ struct spi_controller *ctlr =
+ container_of(work, struct spi_controller, pump_messages);
- __spi_pump_messages(master, true);
+ __spi_pump_messages(ctlr, true);
}
-static int spi_init_queue(struct spi_master *master)
+static int spi_init_queue(struct spi_controller *ctlr)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
- master->running = false;
- master->busy = false;
+ ctlr->running = false;
+ ctlr->busy = false;
- kthread_init_worker(&master->kworker);
- master->kworker_task = kthread_run(kthread_worker_fn,
- &master->kworker, "%s",
- dev_name(&master->dev));
- if (IS_ERR(master->kworker_task)) {
- dev_err(&master->dev, "failed to create message pump task\n");
- return PTR_ERR(master->kworker_task);
+ kthread_init_worker(&ctlr->kworker);
+ ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
+ "%s", dev_name(&ctlr->dev));
+ if (IS_ERR(ctlr->kworker_task)) {
+ dev_err(&ctlr->dev, "failed to create message pump task\n");
+ return PTR_ERR(ctlr->kworker_task);
}
- kthread_init_work(&master->pump_messages, spi_pump_messages);
+ kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
/*
- * Master config will indicate if this controller should run the
+ * Controller config will indicate if this controller should run the
* message pump with high (realtime) priority to reduce the transfer
* latency on the bus by minimising the delay between a transfer
* request and the scheduling of the message pump thread. Without this
* setting the message pump thread will remain at default priority.
*/
- if (master->rt) {
- dev_info(&master->dev,
+ if (ctlr->rt) {
+ dev_info(&ctlr->dev,
"will run message pump with realtime priority\n");
- sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
+ sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
}
return 0;
@@ -1320,23 +1318,23 @@ static int spi_init_queue(struct spi_master *master)
/**
* spi_get_next_queued_message() - called by driver to check for queued
* messages
- * @master: the master to check for queued messages
+ * @ctlr: the controller to check for queued messages
*
* If there are more messages in the queue, the next message is returned from
* this call.
*
* Return: the next message in the queue, else NULL if the queue is empty.
*/
-struct spi_message *spi_get_next_queued_message(struct spi_master *master)
+struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
struct spi_message *next;
unsigned long flags;
/* get a pointer to the next message, if any */
- spin_lock_irqsave(&master->queue_lock, flags);
- next = list_first_entry_or_null(&master->queue, struct spi_message,
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
+ next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
queue);
- spin_unlock_irqrestore(&master->queue_lock, flags);
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return next;
}
@@ -1344,36 +1342,36 @@ EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
/**
* spi_finalize_current_message() - the current message is complete
- * @master: the master to return the message to
+ * @ctlr: the controller to return the message to
*
* Called by the driver to notify the core that the message in the front of the
* queue is complete and can be removed from the queue.
*/
-void spi_finalize_current_message(struct spi_master *master)
+void spi_finalize_current_message(struct spi_controller *ctlr)
{
struct spi_message *mesg;
unsigned long flags;
int ret;
- spin_lock_irqsave(&master->queue_lock, flags);
- mesg = master->cur_msg;
- spin_unlock_irqrestore(&master->queue_lock, flags);
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
+ mesg = ctlr->cur_msg;
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- spi_unmap_msg(master, mesg);
+ spi_unmap_msg(ctlr, mesg);
- if (master->cur_msg_prepared && master->unprepare_message) {
- ret = master->unprepare_message(master, mesg);
+ if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
+ ret = ctlr->unprepare_message(ctlr, mesg);
if (ret) {
- dev_err(&master->dev,
- "failed to unprepare message: %d\n", ret);
+ dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
+ ret);
}
}
- spin_lock_irqsave(&master->queue_lock, flags);
- master->cur_msg = NULL;
- master->cur_msg_prepared = false;
- kthread_queue_work(&master->kworker, &master->pump_messages);
- spin_unlock_irqrestore(&master->queue_lock, flags);
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
+ ctlr->cur_msg = NULL;
+ ctlr->cur_msg_prepared = false;
+ kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
trace_spi_message_done(mesg);
@@ -1383,66 +1381,65 @@ void spi_finalize_current_message(struct spi_master *master)
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
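Drivers that provide their own transfer_one_message() are still expected to hand the finished message back with spi_finalize_current_message(); a hedged sketch of that obligation (example_transfer_one_message() is invented for illustration):

#include <linux/list.h>
#include <linux/spi/spi.h>

/* Illustrative only: a driver overriding transfer_one_message() must still
 * finalize the message so the queue can move on. */
static int example_transfer_one_message(struct spi_controller *ctlr,
                                        struct spi_message *msg)
{
        struct spi_transfer *xfer;
        int ret = 0;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /* ... perform xfer->len bytes of I/O ... */
                msg->actual_length += xfer->len;
        }

        msg->status = ret;
        spi_finalize_current_message(ctlr);     /* removes msg from the queue */
        return ret;
}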
-static int spi_start_queue(struct spi_master *master)
+static int spi_start_queue(struct spi_controller *ctlr)
{
unsigned long flags;
- spin_lock_irqsave(&master->queue_lock, flags);
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
- if (master->running || master->busy) {
- spin_unlock_irqrestore(&master->queue_lock, flags);
+ if (ctlr->running || ctlr->busy) {
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return -EBUSY;
}
- master->running = true;
- master->cur_msg = NULL;
- spin_unlock_irqrestore(&master->queue_lock, flags);
+ ctlr->running = true;
+ ctlr->cur_msg = NULL;
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- kthread_queue_work(&master->kworker, &master->pump_messages);
+ kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
return 0;
}
-static int spi_stop_queue(struct spi_master *master)
+static int spi_stop_queue(struct spi_controller *ctlr)
{
unsigned long flags;
unsigned limit = 500;
int ret = 0;
- spin_lock_irqsave(&master->queue_lock, flags);
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
/*
* This is a bit lame, but is optimized for the common execution path.
- * A wait_queue on the master->busy could be used, but then the common
+ * A wait_queue on the ctlr->busy could be used, but then the common
* execution path (pump_messages) would be required to call wake_up or
* friends on every SPI message. Do this instead.
*/
- while ((!list_empty(&master->queue) || master->busy) && limit--) {
- spin_unlock_irqrestore(&master->queue_lock, flags);
+ while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
usleep_range(10000, 11000);
- spin_lock_irqsave(&master->queue_lock, flags);
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
}
- if (!list_empty(&master->queue) || master->busy)
+ if (!list_empty(&ctlr->queue) || ctlr->busy)
ret = -EBUSY;
else
- master->running = false;
+ ctlr->running = false;
- spin_unlock_irqrestore(&master->queue_lock, flags);
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
if (ret) {
- dev_warn(&master->dev,
- "could not stop message queue\n");
+ dev_warn(&ctlr->dev, "could not stop message queue\n");
return ret;
}
return ret;
}
-static int spi_destroy_queue(struct spi_master *master)
+static int spi_destroy_queue(struct spi_controller *ctlr)
{
int ret;
- ret = spi_stop_queue(master);
+ ret = spi_stop_queue(ctlr);
/*
* kthread_flush_worker will block until all work is done.
@@ -1451,12 +1448,12 @@ static int spi_destroy_queue(struct spi_master *master)
* return anyway.
*/
if (ret) {
- dev_err(&master->dev, "problem destroying queue\n");
+ dev_err(&ctlr->dev, "problem destroying queue\n");
return ret;
}
- kthread_flush_worker(&master->kworker);
- kthread_stop(master->kworker_task);
+ kthread_flush_worker(&ctlr->kworker);
+ kthread_stop(ctlr->kworker_task);
return 0;
}
@@ -1465,23 +1462,23 @@ static int __spi_queued_transfer(struct spi_device *spi,
struct spi_message *msg,
bool need_pump)
{
- struct spi_master *master = spi->master;
+ struct spi_controller *ctlr = spi->controller;
unsigned long flags;
- spin_lock_irqsave(&master->queue_lock, flags);
+ spin_lock_irqsave(&ctlr->queue_lock, flags);
- if (!master->running) {
- spin_unlock_irqrestore(&master->queue_lock, flags);
+ if (!ctlr->running) {
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return -ESHUTDOWN;
}
msg->actual_length = 0;
msg->status = -EINPROGRESS;
- list_add_tail(&msg->queue, &master->queue);
- if (!master->busy && need_pump)
- kthread_queue_work(&master->kworker, &master->pump_messages);
+ list_add_tail(&msg->queue, &ctlr->queue);
+ if (!ctlr->busy && need_pump)
+ kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
- spin_unlock_irqrestore(&master->queue_lock, flags);
+ spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return 0;
}
@@ -1497,31 +1494,31 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
return __spi_queued_transfer(spi, msg, true);
}
-static int spi_master_initialize_queue(struct spi_master *master)
+static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
int ret;
- master->transfer = spi_queued_transfer;
- if (!master->transfer_one_message)
- master->transfer_one_message = spi_transfer_one_message;
+ ctlr->transfer = spi_queued_transfer;
+ if (!ctlr->transfer_one_message)
+ ctlr->transfer_one_message = spi_transfer_one_message;
/* Initialize and start queue */
- ret = spi_init_queue(master);
+ ret = spi_init_queue(ctlr);
if (ret) {
- dev_err(&master->dev, "problem initializing queue\n");
+ dev_err(&ctlr->dev, "problem initializing queue\n");
goto err_init_queue;
}
- master->queued = true;
- ret = spi_start_queue(master);
+ ctlr->queued = true;
+ ret = spi_start_queue(ctlr);
if (ret) {
- dev_err(&master->dev, "problem starting queue\n");
+ dev_err(&ctlr->dev, "problem starting queue\n");
goto err_start_queue;
}
return 0;
err_start_queue:
- spi_destroy_queue(master);
+ spi_destroy_queue(ctlr);
err_init_queue:
return ret;
}
@@ -1529,21 +1526,12 @@ err_init_queue:
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OF)
-static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
+static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
struct device_node *nc)
{
u32 value;
int rc;
- /* Device address */
- rc = of_property_read_u32(nc, "reg", &value);
- if (rc) {
- dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
- nc->full_name, rc);
- return rc;
- }
- spi->chip_select = value;
-
/* Mode (clock phase/polarity/etc.) */
if (of_find_property(nc, "spi-cpha", NULL))
spi->mode |= SPI_CPHA;
@@ -1568,7 +1556,7 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
spi->mode |= SPI_TX_QUAD;
break;
default:
- dev_warn(&master->dev,
+ dev_warn(&ctlr->dev,
"spi-tx-bus-width %d not supported\n",
value);
break;
@@ -1586,17 +1574,36 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
spi->mode |= SPI_RX_QUAD;
break;
default:
- dev_warn(&master->dev,
+ dev_warn(&ctlr->dev,
"spi-rx-bus-width %d not supported\n",
value);
break;
}
}
+ if (spi_controller_is_slave(ctlr)) {
+ if (strcmp(nc->name, "slave")) {
+ dev_err(&ctlr->dev, "%s is not called 'slave'\n",
+ nc->full_name);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ /* Device address */
+ rc = of_property_read_u32(nc, "reg", &value);
+ if (rc) {
+ dev_err(&ctlr->dev, "%s has no valid 'reg' property (%d)\n",
+ nc->full_name, rc);
+ return rc;
+ }
+ spi->chip_select = value;
+
/* Device speed */
rc = of_property_read_u32(nc, "spi-max-frequency", &value);
if (rc) {
- dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
+ dev_err(&ctlr->dev,
+ "%s has no valid 'spi-max-frequency' property (%d)\n",
nc->full_name, rc);
return rc;
}
@@ -1606,15 +1613,15 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
}
static struct spi_device *
-of_register_spi_device(struct spi_master *master, struct device_node *nc)
+of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
{
struct spi_device *spi;
int rc;
/* Alloc an spi_device */
- spi = spi_alloc_device(master);
+ spi = spi_alloc_device(ctlr);
if (!spi) {
- dev_err(&master->dev, "spi_device alloc error for %s\n",
+ dev_err(&ctlr->dev, "spi_device alloc error for %s\n",
nc->full_name);
rc = -ENOMEM;
goto err_out;
@@ -1624,12 +1631,12 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
rc = of_modalias_node(nc, spi->modalias,
sizeof(spi->modalias));
if (rc < 0) {
- dev_err(&master->dev, "cannot find modalias for %s\n",
+ dev_err(&ctlr->dev, "cannot find modalias for %s\n",
nc->full_name);
goto err_out;
}
- rc = of_spi_parse_dt(master, spi, nc);
+ rc = of_spi_parse_dt(ctlr, spi, nc);
if (rc)
goto err_out;
@@ -1640,7 +1647,7 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
/* Register the new device */
rc = spi_add_device(spi);
if (rc) {
- dev_err(&master->dev, "spi_device register error %s\n",
+ dev_err(&ctlr->dev, "spi_device register error %s\n",
nc->full_name);
goto err_of_node_put;
}
@@ -1656,39 +1663,40 @@ err_out:
/**
* of_register_spi_devices() - Register child devices onto the SPI bus
- * @master: Pointer to spi_master device
+ * @ctlr: Pointer to spi_controller device
*
- * Registers an spi_device for each child node of master node which has a 'reg'
- * property.
+ * Registers an spi_device for each child node of controller node which
+ * represents a valid SPI slave.
*/
-static void of_register_spi_devices(struct spi_master *master)
+static void of_register_spi_devices(struct spi_controller *ctlr)
{
struct spi_device *spi;
struct device_node *nc;
- if (!master->dev.of_node)
+ if (!ctlr->dev.of_node)
return;
- for_each_available_child_of_node(master->dev.of_node, nc) {
+ for_each_available_child_of_node(ctlr->dev.of_node, nc) {
if (of_node_test_and_set_flag(nc, OF_POPULATED))
continue;
- spi = of_register_spi_device(master, nc);
+ spi = of_register_spi_device(ctlr, nc);
if (IS_ERR(spi)) {
- dev_warn(&master->dev, "Failed to create SPI device for %s\n",
- nc->full_name);
+ dev_warn(&ctlr->dev,
+ "Failed to create SPI device for %s\n",
+ nc->full_name);
of_node_clear_flag(nc, OF_POPULATED);
}
}
}
#else
-static void of_register_spi_devices(struct spi_master *master) { }
+static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif
#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
struct spi_device *spi = data;
- struct spi_master *master = spi->master;
+ struct spi_controller *ctlr = spi->controller;
if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
struct acpi_resource_spi_serialbus *sb;
@@ -1702,8 +1710,8 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
* 0 .. max - 1 so we need to ask the driver to
* translate between the two schemes.
*/
- if (master->fw_translate_cs) {
- int cs = master->fw_translate_cs(master,
+ if (ctlr->fw_translate_cs) {
+ int cs = ctlr->fw_translate_cs(ctlr,
sb->device_selection);
if (cs < 0)
return cs;
@@ -1732,7 +1740,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
return 1;
}
-static acpi_status acpi_register_spi_device(struct spi_master *master,
+static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
struct acpi_device *adev)
{
struct list_head resource_list;
@@ -1743,9 +1751,9 @@ static acpi_status acpi_register_spi_device(struct spi_master *master,
acpi_device_enumerated(adev))
return AE_OK;
- spi = spi_alloc_device(master);
+ spi = spi_alloc_device(ctlr);
if (!spi) {
- dev_err(&master->dev, "failed to allocate SPI device for %s\n",
+ dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
dev_name(&adev->dev));
return AE_NO_MEMORY;
}
@@ -1774,7 +1782,7 @@ static acpi_status acpi_register_spi_device(struct spi_master *master,
adev->power.flags.ignore_parent = true;
if (spi_add_device(spi)) {
adev->power.flags.ignore_parent = false;
- dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
+ dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
dev_name(&adev->dev));
spi_dev_put(spi);
}
@@ -1785,104 +1793,211 @@ static acpi_status acpi_register_spi_device(struct spi_master *master,
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
void *data, void **return_value)
{
- struct spi_master *master = data;
+ struct spi_controller *ctlr = data;
struct acpi_device *adev;
if (acpi_bus_get_device(handle, &adev))
return AE_OK;
- return acpi_register_spi_device(master, adev);
+ return acpi_register_spi_device(ctlr, adev);
}
-static void acpi_register_spi_devices(struct spi_master *master)
+static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
acpi_status status;
acpi_handle handle;
- handle = ACPI_HANDLE(master->dev.parent);
+ handle = ACPI_HANDLE(ctlr->dev.parent);
if (!handle)
return;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
- acpi_spi_add_device, NULL,
- master, NULL);
+ acpi_spi_add_device, NULL, ctlr, NULL);
if (ACPI_FAILURE(status))
- dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
+ dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
}
#else
-static inline void acpi_register_spi_devices(struct spi_master *master) {}
+static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
#endif /* CONFIG_ACPI */
-static void spi_master_release(struct device *dev)
+static void spi_controller_release(struct device *dev)
{
- struct spi_master *master;
+ struct spi_controller *ctlr;
- master = container_of(dev, struct spi_master, dev);
- kfree(master);
+ ctlr = container_of(dev, struct spi_controller, dev);
+ kfree(ctlr);
}
static struct class spi_master_class = {
.name = "spi_master",
.owner = THIS_MODULE,
- .dev_release = spi_master_release,
+ .dev_release = spi_controller_release,
.dev_groups = spi_master_groups,
};
+#ifdef CONFIG_SPI_SLAVE
+/**
+ * spi_slave_abort - abort the ongoing transfer request on an SPI slave
+ * controller
+ * @spi: device used for the current transfer
+ */
+int spi_slave_abort(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+
+ if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
+ return ctlr->slave_abort(ctlr);
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(spi_slave_abort);
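A slave protocol handler would normally call this when it is torn down while a request is still pending; a sketch under the assumption of a driver-private completion (struct example_priv and its field are hypothetical):

#include <linux/completion.h>
#include <linux/spi/spi.h>

struct example_priv {
        struct completion finished;     /* completed by the message callback */
};

/* Illustrative only: cancel the outstanding slave request before unbinding. */
static int example_slave_remove(struct spi_device *spi)
{
        struct example_priv *priv = spi_get_drvdata(spi);

        spi_slave_abort(spi);
        wait_for_completion(&priv->finished);
        return 0;
}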
+
+static int match_true(struct device *dev, void *data)
+{
+ return 1;
+}
+
+static ssize_t spi_slave_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct spi_controller *ctlr = container_of(dev, struct spi_controller,
+ dev);
+ struct device *child;
+
+ child = device_find_child(&ctlr->dev, NULL, match_true);
+ return sprintf(buf, "%s\n",
+ child ? to_spi_device(child)->modalias : NULL);
+}
+
+static ssize_t spi_slave_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct spi_controller *ctlr = container_of(dev, struct spi_controller,
+ dev);
+ struct spi_device *spi;
+ struct device *child;
+ char name[32];
+ int rc;
+
+ rc = sscanf(buf, "%31s", name);
+ if (rc != 1 || !name[0])
+ return -EINVAL;
+
+ child = device_find_child(&ctlr->dev, NULL, match_true);
+ if (child) {
+ /* Remove registered slave */
+ device_unregister(child);
+ put_device(child);
+ }
+
+ if (strcmp(name, "(null)")) {
+ /* Register new slave */
+ spi = spi_alloc_device(ctlr);
+ if (!spi)
+ return -ENOMEM;
+
+ strlcpy(spi->modalias, name, sizeof(spi->modalias));
+
+ rc = spi_add_device(spi);
+ if (rc) {
+ spi_dev_put(spi);
+ return rc;
+ }
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
+
+static struct attribute *spi_slave_attrs[] = {
+ &dev_attr_slave.attr,
+ NULL,
+};
+
+static const struct attribute_group spi_slave_group = {
+ .attrs = spi_slave_attrs,
+};
+
+static const struct attribute_group *spi_slave_groups[] = {
+ &spi_controller_statistics_group,
+ &spi_slave_group,
+ NULL,
+};
+
+static struct class spi_slave_class = {
+ .name = "spi_slave",
+ .owner = THIS_MODULE,
+ .dev_release = spi_controller_release,
+ .dev_groups = spi_slave_groups,
+};
+#else
+extern struct class spi_slave_class; /* dummy */
+#endif
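From userspace, the new "slave" attribute binds or unbinds a protocol handler by modalias; writing "(null)" removes the current one. A small illustration (the spi2 bus number is only an example; spi-slave-time is one of the handlers added in this series):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Path and handler name are examples; write "(null)" to unbind. */
        const char *attr = "/sys/class/spi_slave/spi2/slave";
        const char *handler = "spi-slave-time";
        int fd = open(attr, O_WRONLY);

        if (fd < 0) {
                perror(attr);
                return 1;
        }
        if (write(fd, handler, strlen(handler)) < 0)
                perror("write");
        close(fd);
        return 0;
}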
/**
- * spi_alloc_master - allocate SPI master controller
+ * __spi_alloc_controller - allocate an SPI master or slave controller
* @dev: the controller, possibly using the platform_bus
* @size: how much zeroed driver-private data to allocate; the pointer to this
* memory is in the driver_data field of the returned device,
- * accessible with spi_master_get_devdata().
+ * accessible with spi_controller_get_devdata().
+ * @slave: flag indicating whether to allocate an SPI master (false) or SPI
+ * slave (true) controller
* Context: can sleep
*
- * This call is used only by SPI master controller drivers, which are the
+ * This call is used only by SPI controller drivers, which are the
* only ones directly touching chip registers. It's how they allocate
- * an spi_master structure, prior to calling spi_register_master().
+ * an spi_controller structure, prior to calling spi_register_controller().
*
* This must be called from context that can sleep.
*
- * The caller is responsible for assigning the bus number and initializing
- * the master's methods before calling spi_register_master(); and (after errors
- * adding the device) calling spi_master_put() to prevent a memory leak.
+ * The caller is responsible for assigning the bus number and initializing the
+ * controller's methods before calling spi_register_controller(); and (after
+ * errors adding the device) calling spi_controller_put() to prevent a memory
+ * leak.
*
- * Return: the SPI master structure on success, else NULL.
+ * Return: the SPI controller structure on success, else NULL.
*/
-struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
+struct spi_controller *__spi_alloc_controller(struct device *dev,
+ unsigned int size, bool slave)
{
- struct spi_master *master;
+ struct spi_controller *ctlr;
if (!dev)
return NULL;
- master = kzalloc(size + sizeof(*master), GFP_KERNEL);
- if (!master)
+ ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
+ if (!ctlr)
return NULL;
- device_initialize(&master->dev);
- master->bus_num = -1;
- master->num_chipselect = 1;
- master->dev.class = &spi_master_class;
- master->dev.parent = dev;
- pm_suspend_ignore_children(&master->dev, true);
- spi_master_set_devdata(master, &master[1]);
+ device_initialize(&ctlr->dev);
+ ctlr->bus_num = -1;
+ ctlr->num_chipselect = 1;
+ ctlr->slave = slave;
+ if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
+ ctlr->dev.class = &spi_slave_class;
+ else
+ ctlr->dev.class = &spi_master_class;
+ ctlr->dev.parent = dev;
+ pm_suspend_ignore_children(&ctlr->dev, true);
+ spi_controller_set_devdata(ctlr, &ctlr[1]);
- return master;
+ return ctlr;
}
-EXPORT_SYMBOL_GPL(spi_alloc_master);
+EXPORT_SYMBOL_GPL(__spi_alloc_controller);
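The header-side helpers are expected to reduce to thin wrappers around this, roughly as below (a sketch of the <linux/spi/spi.h> inlines; the slave variant falls back to NULL when CONFIG_SPI_SLAVE is disabled):

static inline struct spi_controller *spi_alloc_master(struct device *host,
                                                      unsigned int size)
{
        return __spi_alloc_controller(host, size, false);
}

static inline struct spi_controller *spi_alloc_slave(struct device *host,
                                                     unsigned int size)
{
        if (!IS_ENABLED(CONFIG_SPI_SLAVE))
                return NULL;

        return __spi_alloc_controller(host, size, true);
}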
#ifdef CONFIG_OF
-static int of_spi_register_master(struct spi_master *master)
+static int of_spi_register_master(struct spi_controller *ctlr)
{
int nb, i, *cs;
- struct device_node *np = master->dev.of_node;
+ struct device_node *np = ctlr->dev.of_node;
if (!np)
return 0;
nb = of_gpio_named_count(np, "cs-gpios");
- master->num_chipselect = max_t(int, nb, master->num_chipselect);
+ ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
/* Return error only for an incorrectly formed cs-gpios property */
if (nb == 0 || nb == -ENOENT)
@@ -1890,15 +2005,14 @@ static int of_spi_register_master(struct spi_master *master)
else if (nb < 0)
return nb;
- cs = devm_kzalloc(&master->dev,
- sizeof(int) * master->num_chipselect,
+ cs = devm_kzalloc(&ctlr->dev, sizeof(int) * ctlr->num_chipselect,
GFP_KERNEL);
- master->cs_gpios = cs;
+ ctlr->cs_gpios = cs;
- if (!master->cs_gpios)
+ if (!ctlr->cs_gpios)
return -ENOMEM;
- for (i = 0; i < master->num_chipselect; i++)
+ for (i = 0; i < ctlr->num_chipselect; i++)
cs[i] = -ENOENT;
for (i = 0; i < nb; i++)
@@ -1907,20 +2021,21 @@ static int of_spi_register_master(struct spi_master *master)
return 0;
}
#else
-static int of_spi_register_master(struct spi_master *master)
+static int of_spi_register_master(struct spi_controller *ctlr)
{
return 0;
}
#endif
/**
- * spi_register_master - register SPI master controller
- * @master: initialized master, originally from spi_alloc_master()
+ * spi_register_controller - register SPI master or slave controller
+ * @ctlr: initialized master, originally from spi_alloc_master() or
+ * spi_alloc_slave()
* Context: can sleep
*
- * SPI master controllers connect to their drivers using some non-SPI bus,
+ * SPI controllers connect to their drivers using some non-SPI bus,
* such as the platform bus. The final stage of probe() in that code
- * includes calling spi_register_master() to hook up to this SPI bus glue.
+ * includes calling spi_register_controller() to hook up to this SPI bus glue.
*
* SPI controllers use board specific (often SOC specific) bus numbers,
* and board-specific addressing for SPI devices combines those numbers
@@ -1929,16 +2044,16 @@ static int of_spi_register_master(struct spi_master *master)
* chip is at which address.
*
* This must be called from context that can sleep. It returns zero on
- * success, else a negative error code (dropping the master's refcount).
+ * success, else a negative error code (dropping the controller's refcount).
* After a successful return, the caller is responsible for calling
- * spi_unregister_master().
+ * spi_unregister_controller().
*
* Return: zero on success, else a negative error code.
*/
-int spi_register_master(struct spi_master *master)
+int spi_register_controller(struct spi_controller *ctlr)
{
static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
- struct device *dev = master->dev.parent;
+ struct device *dev = ctlr->dev.parent;
struct boardinfo *bi;
int status = -ENODEV;
int dynamic = 0;
@@ -1946,103 +2061,109 @@ int spi_register_master(struct spi_master *master)
if (!dev)
return -ENODEV;
- status = of_spi_register_master(master);
- if (status)
- return status;
+ if (!spi_controller_is_slave(ctlr)) {
+ status = of_spi_register_master(ctlr);
+ if (status)
+ return status;
+ }
/* even if it's just one always-selected device, there must
* be at least one chipselect
*/
- if (master->num_chipselect == 0)
+ if (ctlr->num_chipselect == 0)
return -EINVAL;
- if ((master->bus_num < 0) && master->dev.of_node)
- master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
+ if ((ctlr->bus_num < 0) && ctlr->dev.of_node)
+ ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
/* convention: dynamically assigned bus IDs count down from the max */
- if (master->bus_num < 0) {
+ if (ctlr->bus_num < 0) {
/* FIXME switch to an IDR based scheme, something like
* I2C now uses, so we can't run out of "dynamic" IDs
*/
- master->bus_num = atomic_dec_return(&dyn_bus_id);
+ ctlr->bus_num = atomic_dec_return(&dyn_bus_id);
dynamic = 1;
}
- INIT_LIST_HEAD(&master->queue);
- spin_lock_init(&master->queue_lock);
- spin_lock_init(&master->bus_lock_spinlock);
- mutex_init(&master->bus_lock_mutex);
- mutex_init(&master->io_mutex);
- master->bus_lock_flag = 0;
- init_completion(&master->xfer_completion);
- if (!master->max_dma_len)
- master->max_dma_len = INT_MAX;
+ INIT_LIST_HEAD(&ctlr->queue);
+ spin_lock_init(&ctlr->queue_lock);
+ spin_lock_init(&ctlr->bus_lock_spinlock);
+ mutex_init(&ctlr->bus_lock_mutex);
+ mutex_init(&ctlr->io_mutex);
+ ctlr->bus_lock_flag = 0;
+ init_completion(&ctlr->xfer_completion);
+ if (!ctlr->max_dma_len)
+ ctlr->max_dma_len = INT_MAX;
/* register the device, then userspace will see it.
* registration fails if the bus ID is in use.
*/
- dev_set_name(&master->dev, "spi%u", master->bus_num);
- status = device_add(&master->dev);
+ dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
+ status = device_add(&ctlr->dev);
if (status < 0)
goto done;
- dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
- dynamic ? " (dynamic)" : "");
+ dev_dbg(dev, "registered %s %s%s\n",
+ spi_controller_is_slave(ctlr) ? "slave" : "master",
+ dev_name(&ctlr->dev), dynamic ? " (dynamic)" : "");
/* If we're using a queued driver, start the queue */
- if (master->transfer)
- dev_info(dev, "master is unqueued, this is deprecated\n");
+ if (ctlr->transfer)
+ dev_info(dev, "controller is unqueued, this is deprecated\n");
else {
- status = spi_master_initialize_queue(master);
+ status = spi_controller_initialize_queue(ctlr);
if (status) {
- device_del(&master->dev);
+ device_del(&ctlr->dev);
goto done;
}
}
/* add statistics */
- spin_lock_init(&master->statistics.lock);
+ spin_lock_init(&ctlr->statistics.lock);
mutex_lock(&board_lock);
- list_add_tail(&master->list, &spi_master_list);
+ list_add_tail(&ctlr->list, &spi_controller_list);
list_for_each_entry(bi, &board_list, list)
- spi_match_master_to_boardinfo(master, &bi->board_info);
+ spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
mutex_unlock(&board_lock);
/* Register devices from the device tree and ACPI */
- of_register_spi_devices(master);
- acpi_register_spi_devices(master);
+ of_register_spi_devices(ctlr);
+ acpi_register_spi_devices(ctlr);
done:
return status;
}
-EXPORT_SYMBOL_GPL(spi_register_master);
+EXPORT_SYMBOL_GPL(spi_register_controller);
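Put together, a controller driver's probe() now looks roughly like this (a sketch only: example_hw, example_transfer_one and the chip-select count are invented for illustration):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

/* Hypothetical driver-private state. */
struct example_hw {
        void __iomem *regs;
};

static int example_transfer_one(struct spi_controller *ctlr,
                                struct spi_device *spi,
                                struct spi_transfer *xfer);

static int example_spi_probe(struct platform_device *pdev)
{
        struct spi_controller *ctlr;
        struct example_hw *hw;
        int ret;

        ctlr = spi_alloc_master(&pdev->dev, sizeof(*hw));
        if (!ctlr)
                return -ENOMEM;

        hw = spi_controller_get_devdata(ctlr);
        ctlr->bus_num = pdev->id;
        ctlr->num_chipselect = 4;
        ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
        ctlr->transfer_one = example_transfer_one;      /* see earlier sketch */
        ctlr->dev.of_node = pdev->dev.of_node;

        ret = spi_register_controller(ctlr);
        if (ret)
                spi_controller_put(ctlr);       /* drop the refcount on error */
        else
                platform_set_drvdata(pdev, ctlr);
        return ret;
}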
static void devm_spi_unregister(struct device *dev, void *res)
{
- spi_unregister_master(*(struct spi_master **)res);
+ spi_unregister_controller(*(struct spi_controller **)res);
}
/**
- * dev_spi_register_master - register managed SPI master controller
- * @dev: device managing SPI master
- * @master: initialized master, originally from spi_alloc_master()
+ * devm_spi_register_controller - register managed SPI master or slave
+ * controller
+ * @dev: device managing SPI controller
+ * @ctlr: initialized controller, originally from spi_alloc_master() or
+ * spi_alloc_slave()
* Context: can sleep
*
- * Register a SPI device as with spi_register_master() which will
+ * Register a SPI device as with spi_register_controller() which will
 * automatically be unregistered
*
* Return: zero on success, else a negative error code.
*/
-int devm_spi_register_master(struct device *dev, struct spi_master *master)
+int devm_spi_register_controller(struct device *dev,
+ struct spi_controller *ctlr)
{
- struct spi_master **ptr;
+ struct spi_controller **ptr;
int ret;
ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return -ENOMEM;
- ret = spi_register_master(master);
+ ret = spi_register_controller(ctlr);
if (!ret) {
- *ptr = master;
+ *ptr = ctlr;
devres_add(dev, ptr);
} else {
devres_free(ptr);
@@ -2050,7 +2171,7 @@ int devm_spi_register_master(struct device *dev, struct spi_master *master)
return ret;
}
-EXPORT_SYMBOL_GPL(devm_spi_register_master);
+EXPORT_SYMBOL_GPL(devm_spi_register_controller);
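With the managed variant the unregister step is tied to the parent device's lifetime; continuing the probe sketch above, the tail end would reduce to something like this (example_register_managed() is invented for illustration):

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

/* Illustrative only: managed tail of a probe() that already allocated and
 * configured ctlr as in the sketch above. */
static int example_register_managed(struct platform_device *pdev,
                                    struct spi_controller *ctlr)
{
        int ret = devm_spi_register_controller(&pdev->dev, ctlr);

        if (ret)
                spi_controller_put(ctlr);       /* registration failed, drop it */
        return ret;                             /* no remove() hook is needed */
}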
static int __unregister(struct device *dev, void *null)
{
@@ -2059,71 +2180,71 @@ static int __unregister(struct device *dev, void *null)
}
/**
- * spi_unregister_master - unregister SPI master controller
- * @master: the master being unregistered
+ * spi_unregister_controller - unregister SPI master or slave controller
+ * @ctlr: the controller being unregistered
* Context: can sleep
*
- * This call is used only by SPI master controller drivers, which are the
+ * This call is used only by SPI controller drivers, which are the
* only ones directly touching chip registers.
*
* This must be called from context that can sleep.
*/
-void spi_unregister_master(struct spi_master *master)
+void spi_unregister_controller(struct spi_controller *ctlr)
{
int dummy;
- if (master->queued) {
- if (spi_destroy_queue(master))
- dev_err(&master->dev, "queue remove failed\n");
+ if (ctlr->queued) {
+ if (spi_destroy_queue(ctlr))
+ dev_err(&ctlr->dev, "queue remove failed\n");
}
mutex_lock(&board_lock);
- list_del(&master->list);
+ list_del(&ctlr->list);
mutex_unlock(&board_lock);
- dummy = device_for_each_child(&master->dev, NULL, __unregister);
- device_unregister(&master->dev);
+ dummy = device_for_each_child(&ctlr->dev, NULL, __unregister);
+ device_unregister(&ctlr->dev);
}
-EXPORT_SYMBOL_GPL(spi_unregister_master);
+EXPORT_SYMBOL_GPL(spi_unregister_controller);
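The matching remove() for a driver that used the non-managed registration is short; a sketch (the clock and PM teardown is only hinted at):

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

/* Illustrative only: tear-down for a driver that called
 * spi_register_controller() in probe(). */
static int example_spi_remove(struct platform_device *pdev)
{
        struct spi_controller *ctlr = platform_get_drvdata(pdev);

        spi_unregister_controller(ctlr);        /* removes child spi_devices too */
        /* clk_disable_unprepare(...), pm_runtime_disable(...) would follow */
        return 0;
}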
-int spi_master_suspend(struct spi_master *master)
+int spi_controller_suspend(struct spi_controller *ctlr)
{
int ret;
- /* Basically no-ops for non-queued masters */
- if (!master->queued)
+ /* Basically no-ops for non-queued controllers */
+ if (!ctlr->queued)
return 0;
- ret = spi_stop_queue(master);
+ ret = spi_stop_queue(ctlr);
if (ret)
- dev_err(&master->dev, "queue stop failed\n");
+ dev_err(&ctlr->dev, "queue stop failed\n");
return ret;
}
-EXPORT_SYMBOL_GPL(spi_master_suspend);
+EXPORT_SYMBOL_GPL(spi_controller_suspend);
-int spi_master_resume(struct spi_master *master)
+int spi_controller_resume(struct spi_controller *ctlr)
{
int ret;
- if (!master->queued)
+ if (!ctlr->queued)
return 0;
- ret = spi_start_queue(master);
+ ret = spi_start_queue(ctlr);
if (ret)
- dev_err(&master->dev, "queue restart failed\n");
+ dev_err(&ctlr->dev, "queue restart failed\n");
return ret;
}
-EXPORT_SYMBOL_GPL(spi_master_resume);
+EXPORT_SYMBOL_GPL(spi_controller_resume);
-static int __spi_master_match(struct device *dev, const void *data)
+static int __spi_controller_match(struct device *dev, const void *data)
{
- struct spi_master *m;
+ struct spi_controller *ctlr;
const u16 *bus_num = data;
- m = container_of(dev, struct spi_master, dev);
- return m->bus_num == *bus_num;
+ ctlr = container_of(dev, struct spi_controller, dev);
+ return ctlr->bus_num == *bus_num;
}
/**
@@ -2133,22 +2254,22 @@ static int __spi_master_match(struct device *dev, const void *data)
*
* This call may be used with devices that are registered after
* arch init time. It returns a refcounted pointer to the relevant
- * spi_master (which the caller must release), or NULL if there is
+ * spi_controller (which the caller must release), or NULL if there is
* no such master registered.
*
* Return: the SPI master structure on success, else NULL.
*/
-struct spi_master *spi_busnum_to_master(u16 bus_num)
+struct spi_controller *spi_busnum_to_master(u16 bus_num)
{
struct device *dev;
- struct spi_master *master = NULL;
+ struct spi_controller *ctlr = NULL;
dev = class_find_device(&spi_master_class, NULL, &bus_num,
- __spi_master_match);
+ __spi_controller_match);
if (dev)
- master = container_of(dev, struct spi_master, dev);
+ ctlr = container_of(dev, struct spi_controller, dev);
/* reference got in class_find_device */
- return master;
+ return ctlr;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
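Legacy board code that looks controllers up by bus number must drop the reference it gets back; an illustrative sketch (bus number 2 and the helper name are arbitrary):

#include <linux/spi/spi.h>

/* Illustrative only: hot-add a described device to an already-known bus. */
static struct spi_device *example_attach_to_bus2(struct spi_board_info *chip)
{
        struct spi_controller *ctlr = spi_busnum_to_master(2);
        struct spi_device *spi;

        if (!ctlr)
                return NULL;

        spi = spi_new_device(ctlr, chip);
        spi_controller_put(ctlr);       /* release the lookup's reference */
        return spi;
}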
@@ -2168,7 +2289,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
* Return: the pointer to the allocated data
*
* This may get enhanced in the future to allocate from a memory pool
- * of the @spi_device or @spi_master to avoid repeated allocations.
+ * of the @spi_device or @spi_controller to avoid repeated allocations.
*/
void *spi_res_alloc(struct spi_device *spi,
spi_res_release_t release,
@@ -2220,11 +2341,10 @@ EXPORT_SYMBOL_GPL(spi_res_add);
/**
* spi_res_release - release all spi resources for this message
- * @master: the @spi_master
+ * @ctlr: the @spi_controller
* @message: the @spi_message
*/
-void spi_res_release(struct spi_master *master,
- struct spi_message *message)
+void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
struct spi_res *res;
@@ -2233,7 +2353,7 @@ void spi_res_release(struct spi_master *master,
struct spi_res, entry);
if (res->release)
- res->release(master, message, res->data);
+ res->release(ctlr, message, res->data);
list_del(&res->entry);
@@ -2246,7 +2366,7 @@ EXPORT_SYMBOL_GPL(spi_res_release);
/* Core methods for spi_message alterations */
-static void __spi_replace_transfers_release(struct spi_master *master,
+static void __spi_replace_transfers_release(struct spi_controller *ctlr,
struct spi_message *msg,
void *res)
{
@@ -2255,7 +2375,7 @@ static void __spi_replace_transfers_release(struct spi_master *master,
/* call extra callback if requested */
if (rxfer->release)
- rxfer->release(master, msg, res);
+ rxfer->release(ctlr, msg, res);
/* insert replaced transfers back into the message */
list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
@@ -2375,7 +2495,7 @@ struct spi_replaced_transfers *spi_replace_transfers(
}
EXPORT_SYMBOL_GPL(spi_replace_transfers);
-static int __spi_split_transfer_maxsize(struct spi_master *master,
+static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
struct spi_message *msg,
struct spi_transfer **xferp,
size_t maxsize,
@@ -2437,7 +2557,7 @@ static int __spi_split_transfer_maxsize(struct spi_master *master,
*xferp = &xfers[count - 1];
/* increment statistics counters */
- SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
+ SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
transfers_split_maxsize);
SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
transfers_split_maxsize);
@@ -2449,14 +2569,14 @@ static int __spi_split_transfer_maxsize(struct spi_master *master,
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
* when an individual transfer exceeds a
* certain size
- * @master: the @spi_master for this transfer
+ * @ctlr: the @spi_controller for this transfer
* @msg: the @spi_message to transform
 * @maxsize: the maximum length a single transfer may have before it is split
* @gfp: GFP allocation flags
*
* Return: status of transformation
*/
-int spi_split_transfers_maxsize(struct spi_master *master,
+int spi_split_transfers_maxsize(struct spi_controller *ctlr,
struct spi_message *msg,
size_t maxsize,
gfp_t gfp)
@@ -2472,8 +2592,8 @@ int spi_split_transfers_maxsize(struct spi_master *master,
*/
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->len > maxsize) {
- ret = __spi_split_transfer_maxsize(
- master, msg, &xfer, maxsize, gfp);
+ ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
+ maxsize, gfp);
if (ret)
return ret;
}
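A common use of this helper is a prepare_message() hook that caps transfers at whatever the FIFO or DMA engine can take in one go; a sketch with an invented limit:

#include <linux/gfp.h>
#include <linux/spi/spi.h>

#define EXAMPLE_MAX_XFER_LEN    65535   /* illustrative hardware limit */

/* Illustrative only: split oversized transfers before they reach
 * transfer_one(). */
static int example_prepare_message(struct spi_controller *ctlr,
                                   struct spi_message *msg)
{
        return spi_split_transfers_maxsize(ctlr, msg, EXAMPLE_MAX_XFER_LEN,
                                           GFP_KERNEL);
}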
@@ -2485,18 +2605,18 @@ EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
/*-------------------------------------------------------------------------*/
-/* Core methods for SPI master protocol drivers. Some of the
+/* Core methods for SPI controller protocol drivers. Some of the
* other core methods are currently defined as inline functions.
*/
-static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
+static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
+ u8 bits_per_word)
{
- if (master->bits_per_word_mask) {
+ if (ctlr->bits_per_word_mask) {
/* Only 32 bits fit in the mask */
if (bits_per_word > 32)
return -EINVAL;
- if (!(master->bits_per_word_mask &
- SPI_BPW_MASK(bits_per_word)))
+ if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
return -EINVAL;
}
@@ -2542,9 +2662,9 @@ int spi_setup(struct spi_device *spi)
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
return -EINVAL;
/* help drivers fail *cleanly* when they need options
- * that aren't supported with their current master
+ * that aren't supported with their current controller
*/
- bad_bits = spi->mode & ~spi->master->mode_bits;
+ bad_bits = spi->mode & ~spi->controller->mode_bits;
ugly_bits = bad_bits &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
if (ugly_bits) {
@@ -2563,15 +2683,16 @@ int spi_setup(struct spi_device *spi)
if (!spi->bits_per_word)
spi->bits_per_word = 8;
- status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
+ status = __spi_validate_bits_per_word(spi->controller,
+ spi->bits_per_word);
if (status)
return status;
if (!spi->max_speed_hz)
- spi->max_speed_hz = spi->master->max_speed_hz;
+ spi->max_speed_hz = spi->controller->max_speed_hz;
- if (spi->master->setup)
- status = spi->master->setup(spi);
+ if (spi->controller->setup)
+ status = spi->controller->setup(spi);
spi_set_cs(spi, false);
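From the client side, a protocol driver fills in mode, bits_per_word and max_speed_hz and lets spi_setup() validate them against the controller, as implemented above. A minimal sketch; the probe function and the chosen values are hypothetical.

#include <linux/spi/spi.h>

static int my_client_probe(struct spi_device *spi)
{
	spi->mode = SPI_MODE_3;		/* CPOL = 1, CPHA = 1 */
	spi->bits_per_word = 16;	/* checked against bits_per_word_mask */
	spi->max_speed_hz = 1000000;	/* default speed for this device's transfers */

	/* Fails cleanly if the controller cannot support these options. */
	return spi_setup(spi);
}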
@@ -2590,7 +2711,7 @@ EXPORT_SYMBOL_GPL(spi_setup);
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
- struct spi_master *master = spi->master;
+ struct spi_controller *ctlr = spi->controller;
struct spi_transfer *xfer;
int w_size;
@@ -2602,16 +2723,16 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
* either MOSI or MISO is missing. They can also be caused by
* software limitations.
*/
- if ((master->flags & SPI_MASTER_HALF_DUPLEX)
- || (spi->mode & SPI_3WIRE)) {
- unsigned flags = master->flags;
+ if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
+ (spi->mode & SPI_3WIRE)) {
+ unsigned flags = ctlr->flags;
list_for_each_entry(xfer, &message->transfers, transfer_list) {
if (xfer->rx_buf && xfer->tx_buf)
return -EINVAL;
- if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
+ if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
return -EINVAL;
- if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
+ if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
return -EINVAL;
}
}
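To illustrate the rule being enforced above: on a half-duplex or 3-wire link, a command/response exchange must be expressed as separate TX-only and RX-only transfers rather than one full-duplex transfer, or __spi_validate() returns -EINVAL. A hypothetical register-read sketch (opcode and function name are assumptions; buffers live on the stack purely for brevity, real drivers use DMA-safe buffers).

#include <linux/kernel.h>
#include <linux/spi/spi.h>

static int my_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	struct spi_transfer xfers[] = {
		{ .tx_buf = &reg, .len = 1 },	/* TX-only transfer */
		{ .rx_buf = val,  .len = 1 },	/* RX-only transfer */
	};

	/* No transfer sets both buffers, so the half-duplex checks pass. */
	return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
}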
@@ -2631,13 +2752,12 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (!xfer->speed_hz)
xfer->speed_hz = spi->max_speed_hz;
if (!xfer->speed_hz)
- xfer->speed_hz = master->max_speed_hz;
+ xfer->speed_hz = ctlr->max_speed_hz;
- if (master->max_speed_hz &&
- xfer->speed_hz > master->max_speed_hz)
- xfer->speed_hz = master->max_speed_hz;
+ if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
+ xfer->speed_hz = ctlr->max_speed_hz;
- if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
+ if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
return -EINVAL;
/*
@@ -2655,8 +2775,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (xfer->len % w_size)
return -EINVAL;
- if (xfer->speed_hz && master->min_speed_hz &&
- xfer->speed_hz < master->min_speed_hz)
+ if (xfer->speed_hz && ctlr->min_speed_hz &&
+ xfer->speed_hz < ctlr->min_speed_hz)
return -EINVAL;
if (xfer->tx_buf && !xfer->tx_nbits)
@@ -2701,16 +2821,16 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
- struct spi_master *master = spi->master;
+ struct spi_controller *ctlr = spi->controller;
message->spi = spi;
- SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
+ SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
trace_spi_message_submit(message);
- return master->transfer(spi, message);
+ return ctlr->transfer(spi, message);
}
/**
@@ -2746,7 +2866,7 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
*/
int spi_async(struct spi_device *spi, struct spi_message *message)
{
- struct spi_master *master = spi->master;
+ struct spi_controller *ctlr = spi->controller;
int ret;
unsigned long flags;
@@ -2754,14 +2874,14 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
if (ret != 0)
return ret;
- spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+ spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
- if (master->bus_lock_flag)
+ if (ctlr->bus_lock_flag)
ret = -EBUSY;
else
ret = __spi_async(spi, message);
- spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+ spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
return ret;
}
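A sketch of how a caller uses the asynchronous path above: submit with spi_async() and learn about completion through msg->complete, which runs in the controller's context. The function names are hypothetical; spi_message_init_with_transfers() and the completion API are existing kernel facilities.

#include <linux/completion.h>
#include <linux/spi/spi.h>

static void my_complete(void *context)
{
	complete(context);		/* runs from controller/queue context */
}

static int my_async_write(struct spi_device *spi, const void *buf, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct spi_transfer t = { .tx_buf = buf, .len = len };
	struct spi_message m;
	int ret;

	spi_message_init_with_transfers(&m, &t, 1);
	m.complete = my_complete;
	m.context = &done;

	ret = spi_async(spi, &m);	/* submission itself may not block */
	if (ret)
		return ret;

	wait_for_completion(&done);	/* this caller chooses to wait */
	return m.status;
}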
@@ -2800,7 +2920,7 @@ EXPORT_SYMBOL_GPL(spi_async);
*/
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
- struct spi_master *master = spi->master;
+ struct spi_controller *ctlr = spi->controller;
int ret;
unsigned long flags;
@@ -2808,11 +2928,11 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message)
if (ret != 0)
return ret;
- spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+ spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
ret = __spi_async(spi, message);
- spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+ spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
return ret;
@@ -2824,7 +2944,7 @@ int spi_flash_read(struct spi_device *spi,
struct spi_flash_read_message *msg)
{
- struct spi_master *master = spi->master;
+ struct spi_controller *master = spi->controller;
struct device *rx_dev = NULL;
int ret;
@@ -2878,7 +2998,7 @@ EXPORT_SYMBOL_GPL(spi_flash_read);
/*-------------------------------------------------------------------------*/
-/* Utility methods for SPI master protocol drivers, layered on
+/* Utility methods for SPI protocol drivers, layered on
* top of the core. Some other utility methods are defined as
* inline functions.
*/
@@ -2892,7 +3012,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
DECLARE_COMPLETION_ONSTACK(done);
int status;
- struct spi_master *master = spi->master;
+ struct spi_controller *ctlr = spi->controller;
unsigned long flags;
status = __spi_validate(spi, message);
@@ -2903,7 +3023,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
message->context = &done;
message->spi = spi;
- SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
+ SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
/* If we're not using the legacy transfer method then we will
@@ -2911,14 +3031,14 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
* This code would be less tricky if we could remove the
* support for driver implemented message queues.
*/
- if (master->transfer == spi_queued_transfer) {
- spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+ if (ctlr->transfer == spi_queued_transfer) {
+ spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
trace_spi_message_submit(message);
status = __spi_queued_transfer(spi, message, false);
- spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+ spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
} else {
status = spi_async_locked(spi, message);
}
@@ -2927,12 +3047,12 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
/* Push out the messages in the calling context if we
* can.
*/
- if (master->transfer == spi_queued_transfer) {
- SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
+ if (ctlr->transfer == spi_queued_transfer) {
+ SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
spi_sync_immediate);
SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
spi_sync_immediate);
- __spi_pump_messages(master, false);
+ __spi_pump_messages(ctlr, false);
}
wait_for_completion(&done);
@@ -2967,9 +3087,9 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
{
int ret;
- mutex_lock(&spi->master->bus_lock_mutex);
+ mutex_lock(&spi->controller->bus_lock_mutex);
ret = __spi_sync(spi, message);
- mutex_unlock(&spi->master->bus_lock_mutex);
+ mutex_unlock(&spi->controller->bus_lock_mutex);
return ret;
}
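For completeness, most simple synchronous I/O does not open-code spi_sync(): helpers such as spi_write_then_read() build the message and funnel into the path above. A hypothetical ID-read sketch (the 0x9f opcode and function name are illustrative assumptions).

#include <linux/spi/spi.h>

static int my_read_id(struct spi_device *spi, u8 *id, unsigned int len)
{
	u8 cmd = 0x9f;			/* hypothetical "read ID" opcode */

	/* May sleep; the helper copies through an internal buffer, so the
	 * caller's buffers need not be DMA-safe. */
	return spi_write_then_read(spi, &cmd, 1, id, len);
}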
@@ -2999,7 +3119,7 @@ EXPORT_SYMBOL_GPL(spi_sync_locked);
/**
* spi_bus_lock - obtain a lock for exclusive SPI bus usage
- * @master: SPI bus master that should be locked for exclusive bus access
+ * @ctlr: SPI bus controller that should be locked for exclusive bus access
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
@@ -3012,15 +3132,15 @@ EXPORT_SYMBOL_GPL(spi_sync_locked);
*
* Return: always zero.
*/
-int spi_bus_lock(struct spi_master *master)
+int spi_bus_lock(struct spi_controller *ctlr)
{
unsigned long flags;
- mutex_lock(&master->bus_lock_mutex);
+ mutex_lock(&ctlr->bus_lock_mutex);
- spin_lock_irqsave(&master->bus_lock_spinlock, flags);
- master->bus_lock_flag = 1;
- spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+ spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+ ctlr->bus_lock_flag = 1;
+ spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
/* mutex remains locked until spi_bus_unlock is called */
@@ -3030,7 +3150,7 @@ EXPORT_SYMBOL_GPL(spi_bus_lock);
/**
* spi_bus_unlock - release the lock for exclusive SPI bus usage
- * @master: SPI bus master that was locked for exclusive bus access
+ * @ctlr: SPI bus controller that was locked for exclusive bus access
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
@@ -3041,11 +3161,11 @@ EXPORT_SYMBOL_GPL(spi_bus_lock);
*
* Return: always zero.
*/
-int spi_bus_unlock(struct spi_master *master)
+int spi_bus_unlock(struct spi_controller *ctlr)
{
- master->bus_lock_flag = 0;
+ ctlr->bus_lock_flag = 0;
- mutex_unlock(&master->bus_lock_mutex);
+ mutex_unlock(&ctlr->bus_lock_mutex);
return 0;
}
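Putting the two calls above together, the intended usage is to bracket a sequence of messages that must not be interleaved with other clients on the same bus: while the lock is held, other callers' spi_sync() blocks on bus_lock_mutex and spi_async() fails with -EBUSY. A hypothetical sketch.

#include <linux/spi/spi.h>

static int my_locked_sequence(struct spi_device *spi,
			      struct spi_message *first,
			      struct spi_message *second)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;

	spi_bus_lock(ctlr);		/* take bus_lock_mutex, set bus_lock_flag */

	ret = spi_sync_locked(spi, first);
	if (!ret)
		ret = spi_sync_locked(spi, second);

	spi_bus_unlock(ctlr);		/* clear the flag, release the mutex */
	return ret;
}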
@@ -3147,45 +3267,48 @@ static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
return dev ? to_spi_device(dev) : NULL;
}
-static int __spi_of_master_match(struct device *dev, const void *data)
+static int __spi_of_controller_match(struct device *dev, const void *data)
{
return dev->of_node == data;
}
-/* the spi masters are not using spi_bus, so we find it with another way */
-static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
+/* the spi controllers are not using spi_bus, so we find them another way */
+static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
struct device *dev;
dev = class_find_device(&spi_master_class, NULL, node,
- __spi_of_master_match);
+ __spi_of_controller_match);
+ if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
+ dev = class_find_device(&spi_slave_class, NULL, node,
+ __spi_of_controller_match);
if (!dev)
return NULL;
/* reference got in class_find_device */
- return container_of(dev, struct spi_master, dev);
+ return container_of(dev, struct spi_controller, dev);
}
static int of_spi_notify(struct notifier_block *nb, unsigned long action,
void *arg)
{
struct of_reconfig_data *rd = arg;
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct spi_device *spi;
switch (of_reconfig_get_state_change(action, arg)) {
case OF_RECONFIG_CHANGE_ADD:
- master = of_find_spi_master_by_node(rd->dn->parent);
- if (master == NULL)
+ ctlr = of_find_spi_controller_by_node(rd->dn->parent);
+ if (ctlr == NULL)
return NOTIFY_OK; /* not for us */
if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
- put_device(&master->dev);
+ put_device(&ctlr->dev);
return NOTIFY_OK;
}
- spi = of_register_spi_device(master, rd->dn);
- put_device(&master->dev);
+ spi = of_register_spi_device(ctlr, rd->dn);
+ put_device(&ctlr->dev);
if (IS_ERR(spi)) {
pr_err("%s: failed to create for '%s'\n",
@@ -3224,7 +3347,7 @@ extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
#if IS_ENABLED(CONFIG_ACPI)
-static int spi_acpi_master_match(struct device *dev, const void *data)
+static int spi_acpi_controller_match(struct device *dev, const void *data)
{
return ACPI_COMPANION(dev->parent) == data;
}
@@ -3234,16 +3357,19 @@ static int spi_acpi_device_match(struct device *dev, void *data)
return ACPI_COMPANION(dev) == data;
}
-static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev)
+static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
struct device *dev;
dev = class_find_device(&spi_master_class, NULL, adev,
- spi_acpi_master_match);
+ spi_acpi_controller_match);
+ if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
+ dev = class_find_device(&spi_slave_class, NULL, adev,
+ spi_acpi_controller_match);
if (!dev)
return NULL;
- return container_of(dev, struct spi_master, dev);
+ return container_of(dev, struct spi_controller, dev);
}
static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
@@ -3259,17 +3385,17 @@ static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
void *arg)
{
struct acpi_device *adev = arg;
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct spi_device *spi;
switch (value) {
case ACPI_RECONFIG_DEVICE_ADD:
- master = acpi_spi_find_master_by_adev(adev->parent);
- if (!master)
+ ctlr = acpi_spi_find_controller_by_adev(adev->parent);
+ if (!ctlr)
break;
- acpi_register_spi_device(master, adev);
- put_device(&master->dev);
+ acpi_register_spi_device(ctlr, adev);
+ put_device(&ctlr->dev);
break;
case ACPI_RECONFIG_DEVICE_REMOVE:
if (!acpi_device_enumerated(adev))
@@ -3312,6 +3438,12 @@ static int __init spi_init(void)
if (status < 0)
goto err2;
+ if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
+ status = class_register(&spi_slave_class);
+ if (status < 0)
+ goto err3;
+ }
+
if (IS_ENABLED(CONFIG_OF_DYNAMIC))
WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
if (IS_ENABLED(CONFIG_ACPI))
@@ -3319,6 +3451,8 @@ static int __init spi_init(void)
return 0;
+err3:
+ class_unregister(&spi_master_class);
err2:
bus_unregister(&spi_bus_type);
err1:
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 9a2a79a871ba..d4d2d8d9f3e7 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -99,7 +99,6 @@ MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
- DECLARE_COMPLETION_ONSTACK(done);
int status;
struct spi_device *spi;
@@ -325,7 +324,6 @@ static struct spi_ioc_transfer *
spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
unsigned *n_ioc)
{
- struct spi_ioc_transfer *ioc;
u32 tmp;
/* Check type, command number and direction */
@@ -342,14 +340,7 @@ spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
return NULL;
/* copy into scratch area */
- ioc = kmalloc(tmp, GFP_KERNEL);
- if (!ioc)
- return ERR_PTR(-ENOMEM);
- if (__copy_from_user(ioc, u_ioc, tmp)) {
- kfree(ioc);
- return ERR_PTR(-EFAULT);
- }
- return ioc;
+ return memdup_user(u_ioc, tmp);
}
static long
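The spidev change above is the common refactoring of an open-coded kmalloc() plus copy_from_user() pair into memdup_user(), which allocates, copies and encodes failure as an ERR_PTR in one step. A generic caller sketch; the function name and its arguments are hypothetical.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static long my_copy_args(void __user *uarg, size_t len)
{
	void *args;

	args = memdup_user(uarg, len);	/* kmalloc(len) + copy_from_user() */
	if (IS_ERR(args))
		return PTR_ERR(args);	/* -ENOMEM or -EFAULT */

	/* ... act on the kernel copy of the user buffer ... */

	kfree(args);
	return 0;
}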