Diffstat (limited to 'drivers')
34 files changed, 4800 insertions(+), 2376 deletions(-)
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index a6e1891217e2..c34eb9d9c59a 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -125,7 +125,7 @@ static void of_gpio_flags_quirks(struct device_node *np, for_each_child_of_node(np, child) { ret = of_property_read_u32(child, "reg", &cs); - if (!ret) + if (ret) continue; if (cs == index) { /* diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig index 44fe8018733c..b433e5f91069 100644 --- a/drivers/mtd/spi-nor/Kconfig +++ b/drivers/mtd/spi-nor/Kconfig @@ -50,15 +50,6 @@ config SPI_CADENCE_QUADSPI device with a Cadence QSPI controller and want to access the Flash as an MTD device. -config SPI_FSL_QUADSPI - tristate "Freescale Quad SPI controller" - depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST - depends on HAS_IOMEM - help - This enables support for the Quad SPI controller in master mode. - This controller does not support generic SPI. It only supports - SPI NOR. - config SPI_HISI_SFC tristate "Hisilicon SPI-NOR Flash Controller(SFC)" depends on ARCH_HISI || COMPILE_TEST diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile index a552efd22958..2adedbe07662 100644 --- a/drivers/mtd/spi-nor/Makefile +++ b/drivers/mtd/spi-nor/Makefile @@ -2,7 +2,6 @@ obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o -obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c deleted file mode 100644 index 1ff3430f82c8..000000000000 --- a/drivers/mtd/spi-nor/fsl-quadspi.c +++ /dev/null @@ -1,1224 +0,0 @@ -/* - * Freescale QuadSPI driver. - * - * Copyright (C) 2013 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/interrupt.h> -#include <linux/errno.h> -#include <linux/platform_device.h> -#include <linux/sched.h> -#include <linux/delay.h> -#include <linux/io.h> -#include <linux/clk.h> -#include <linux/err.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/timer.h> -#include <linux/jiffies.h> -#include <linux/completion.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/partitions.h> -#include <linux/mtd/spi-nor.h> -#include <linux/mutex.h> -#include <linux/pm_qos.h> -#include <linux/sizes.h> - -/* Controller needs driver to swap endian */ -#define QUADSPI_QUIRK_SWAP_ENDIAN (1 << 0) -/* Controller needs 4x internal clock */ -#define QUADSPI_QUIRK_4X_INT_CLK (1 << 1) -/* - * TKT253890, Controller needs driver to fill txfifo till 16 byte to - * trigger data transfer even though extern data will not transferred. 
- */ -#define QUADSPI_QUIRK_TKT253890 (1 << 2) -/* Controller cannot wake up from wait mode, TKT245618 */ -#define QUADSPI_QUIRK_TKT245618 (1 << 3) - -/* The registers */ -#define QUADSPI_MCR 0x00 -#define QUADSPI_MCR_RESERVED_SHIFT 16 -#define QUADSPI_MCR_RESERVED_MASK (0xF << QUADSPI_MCR_RESERVED_SHIFT) -#define QUADSPI_MCR_MDIS_SHIFT 14 -#define QUADSPI_MCR_MDIS_MASK (1 << QUADSPI_MCR_MDIS_SHIFT) -#define QUADSPI_MCR_CLR_TXF_SHIFT 11 -#define QUADSPI_MCR_CLR_TXF_MASK (1 << QUADSPI_MCR_CLR_TXF_SHIFT) -#define QUADSPI_MCR_CLR_RXF_SHIFT 10 -#define QUADSPI_MCR_CLR_RXF_MASK (1 << QUADSPI_MCR_CLR_RXF_SHIFT) -#define QUADSPI_MCR_DDR_EN_SHIFT 7 -#define QUADSPI_MCR_DDR_EN_MASK (1 << QUADSPI_MCR_DDR_EN_SHIFT) -#define QUADSPI_MCR_END_CFG_SHIFT 2 -#define QUADSPI_MCR_END_CFG_MASK (3 << QUADSPI_MCR_END_CFG_SHIFT) -#define QUADSPI_MCR_SWRSTHD_SHIFT 1 -#define QUADSPI_MCR_SWRSTHD_MASK (1 << QUADSPI_MCR_SWRSTHD_SHIFT) -#define QUADSPI_MCR_SWRSTSD_SHIFT 0 -#define QUADSPI_MCR_SWRSTSD_MASK (1 << QUADSPI_MCR_SWRSTSD_SHIFT) - -#define QUADSPI_IPCR 0x08 -#define QUADSPI_IPCR_SEQID_SHIFT 24 -#define QUADSPI_IPCR_SEQID_MASK (0xF << QUADSPI_IPCR_SEQID_SHIFT) - -#define QUADSPI_BUF0CR 0x10 -#define QUADSPI_BUF1CR 0x14 -#define QUADSPI_BUF2CR 0x18 -#define QUADSPI_BUFXCR_INVALID_MSTRID 0xe - -#define QUADSPI_BUF3CR 0x1c -#define QUADSPI_BUF3CR_ALLMST_SHIFT 31 -#define QUADSPI_BUF3CR_ALLMST_MASK (1 << QUADSPI_BUF3CR_ALLMST_SHIFT) -#define QUADSPI_BUF3CR_ADATSZ_SHIFT 8 -#define QUADSPI_BUF3CR_ADATSZ_MASK (0xFF << QUADSPI_BUF3CR_ADATSZ_SHIFT) - -#define QUADSPI_BFGENCR 0x20 -#define QUADSPI_BFGENCR_PAR_EN_SHIFT 16 -#define QUADSPI_BFGENCR_PAR_EN_MASK (1 << (QUADSPI_BFGENCR_PAR_EN_SHIFT)) -#define QUADSPI_BFGENCR_SEQID_SHIFT 12 -#define QUADSPI_BFGENCR_SEQID_MASK (0xF << QUADSPI_BFGENCR_SEQID_SHIFT) - -#define QUADSPI_BUF0IND 0x30 -#define QUADSPI_BUF1IND 0x34 -#define QUADSPI_BUF2IND 0x38 -#define QUADSPI_SFAR 0x100 - -#define QUADSPI_SMPR 0x108 -#define QUADSPI_SMPR_DDRSMP_SHIFT 16 -#define QUADSPI_SMPR_DDRSMP_MASK (7 << QUADSPI_SMPR_DDRSMP_SHIFT) -#define QUADSPI_SMPR_FSDLY_SHIFT 6 -#define QUADSPI_SMPR_FSDLY_MASK (1 << QUADSPI_SMPR_FSDLY_SHIFT) -#define QUADSPI_SMPR_FSPHS_SHIFT 5 -#define QUADSPI_SMPR_FSPHS_MASK (1 << QUADSPI_SMPR_FSPHS_SHIFT) -#define QUADSPI_SMPR_HSENA_SHIFT 0 -#define QUADSPI_SMPR_HSENA_MASK (1 << QUADSPI_SMPR_HSENA_SHIFT) - -#define QUADSPI_RBSR 0x10c -#define QUADSPI_RBSR_RDBFL_SHIFT 8 -#define QUADSPI_RBSR_RDBFL_MASK (0x3F << QUADSPI_RBSR_RDBFL_SHIFT) - -#define QUADSPI_RBCT 0x110 -#define QUADSPI_RBCT_WMRK_MASK 0x1F -#define QUADSPI_RBCT_RXBRD_SHIFT 8 -#define QUADSPI_RBCT_RXBRD_USEIPS (0x1 << QUADSPI_RBCT_RXBRD_SHIFT) - -#define QUADSPI_TBSR 0x150 -#define QUADSPI_TBDR 0x154 -#define QUADSPI_SR 0x15c -#define QUADSPI_SR_IP_ACC_SHIFT 1 -#define QUADSPI_SR_IP_ACC_MASK (0x1 << QUADSPI_SR_IP_ACC_SHIFT) -#define QUADSPI_SR_AHB_ACC_SHIFT 2 -#define QUADSPI_SR_AHB_ACC_MASK (0x1 << QUADSPI_SR_AHB_ACC_SHIFT) - -#define QUADSPI_FR 0x160 -#define QUADSPI_FR_TFF_MASK 0x1 - -#define QUADSPI_SFA1AD 0x180 -#define QUADSPI_SFA2AD 0x184 -#define QUADSPI_SFB1AD 0x188 -#define QUADSPI_SFB2AD 0x18c -#define QUADSPI_RBDR 0x200 - -#define QUADSPI_LUTKEY 0x300 -#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0 - -#define QUADSPI_LCKCR 0x304 -#define QUADSPI_LCKER_LOCK 0x1 -#define QUADSPI_LCKER_UNLOCK 0x2 - -#define QUADSPI_RSER 0x164 -#define QUADSPI_RSER_TFIE (0x1 << 0) - -#define QUADSPI_LUT_BASE 0x310 - -/* - * The definition of the LUT register shows below: - * - * 
--------------------------------------------------- - * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 | - * --------------------------------------------------- - */ -#define OPRND0_SHIFT 0 -#define PAD0_SHIFT 8 -#define INSTR0_SHIFT 10 -#define OPRND1_SHIFT 16 - -/* Instruction set for the LUT register. */ -#define LUT_STOP 0 -#define LUT_CMD 1 -#define LUT_ADDR 2 -#define LUT_DUMMY 3 -#define LUT_MODE 4 -#define LUT_MODE2 5 -#define LUT_MODE4 6 -#define LUT_FSL_READ 7 -#define LUT_FSL_WRITE 8 -#define LUT_JMP_ON_CS 9 -#define LUT_ADDR_DDR 10 -#define LUT_MODE_DDR 11 -#define LUT_MODE2_DDR 12 -#define LUT_MODE4_DDR 13 -#define LUT_FSL_READ_DDR 14 -#define LUT_FSL_WRITE_DDR 15 -#define LUT_DATA_LEARN 16 - -/* - * The PAD definitions for LUT register. - * - * The pad stands for the lines number of IO[0:3]. - * For example, the Quad read need four IO lines, so you should - * set LUT_PAD4 which means we use four IO lines. - */ -#define LUT_PAD1 0 -#define LUT_PAD2 1 -#define LUT_PAD4 2 - -/* Oprands for the LUT register. */ -#define ADDR24BIT 0x18 -#define ADDR32BIT 0x20 - -/* Macros for constructing the LUT register. */ -#define LUT0(ins, pad, opr) \ - (((opr) << OPRND0_SHIFT) | ((LUT_##pad) << PAD0_SHIFT) | \ - ((LUT_##ins) << INSTR0_SHIFT)) - -#define LUT1(ins, pad, opr) (LUT0(ins, pad, opr) << OPRND1_SHIFT) - -/* other macros for LUT register. */ -#define QUADSPI_LUT(x) (QUADSPI_LUT_BASE + (x) * 4) -#define QUADSPI_LUT_NUM 64 - -/* SEQID -- we can have 16 seqids at most. */ -#define SEQID_READ 0 -#define SEQID_WREN 1 -#define SEQID_WRDI 2 -#define SEQID_RDSR 3 -#define SEQID_SE 4 -#define SEQID_CHIP_ERASE 5 -#define SEQID_PP 6 -#define SEQID_RDID 7 -#define SEQID_WRSR 8 -#define SEQID_RDCR 9 -#define SEQID_EN4B 10 -#define SEQID_BRWR 11 - -#define QUADSPI_MIN_IOMAP SZ_4M - -enum fsl_qspi_devtype { - FSL_QUADSPI_VYBRID, - FSL_QUADSPI_IMX6SX, - FSL_QUADSPI_IMX7D, - FSL_QUADSPI_IMX6UL, - FSL_QUADSPI_LS1021A, - FSL_QUADSPI_LS2080A, -}; - -struct fsl_qspi_devtype_data { - enum fsl_qspi_devtype devtype; - int rxfifo; - int txfifo; - int ahb_buf_size; - int driver_data; -}; - -static const struct fsl_qspi_devtype_data vybrid_data = { - .devtype = FSL_QUADSPI_VYBRID, - .rxfifo = 128, - .txfifo = 64, - .ahb_buf_size = 1024, - .driver_data = QUADSPI_QUIRK_SWAP_ENDIAN, -}; - -static const struct fsl_qspi_devtype_data imx6sx_data = { - .devtype = FSL_QUADSPI_IMX6SX, - .rxfifo = 128, - .txfifo = 512, - .ahb_buf_size = 1024, - .driver_data = QUADSPI_QUIRK_4X_INT_CLK - | QUADSPI_QUIRK_TKT245618, -}; - -static const struct fsl_qspi_devtype_data imx7d_data = { - .devtype = FSL_QUADSPI_IMX7D, - .rxfifo = 512, - .txfifo = 512, - .ahb_buf_size = 1024, - .driver_data = QUADSPI_QUIRK_TKT253890 - | QUADSPI_QUIRK_4X_INT_CLK, -}; - -static const struct fsl_qspi_devtype_data imx6ul_data = { - .devtype = FSL_QUADSPI_IMX6UL, - .rxfifo = 128, - .txfifo = 512, - .ahb_buf_size = 1024, - .driver_data = QUADSPI_QUIRK_TKT253890 - | QUADSPI_QUIRK_4X_INT_CLK, -}; - -static struct fsl_qspi_devtype_data ls1021a_data = { - .devtype = FSL_QUADSPI_LS1021A, - .rxfifo = 128, - .txfifo = 64, - .ahb_buf_size = 1024, - .driver_data = 0, -}; - -static const struct fsl_qspi_devtype_data ls2080a_data = { - .devtype = FSL_QUADSPI_LS2080A, - .rxfifo = 128, - .txfifo = 64, - .ahb_buf_size = 1024, - .driver_data = QUADSPI_QUIRK_TKT253890, -}; - - -#define FSL_QSPI_MAX_CHIP 4 -struct fsl_qspi { - struct spi_nor nor[FSL_QSPI_MAX_CHIP]; - void __iomem *iobase; - void __iomem *ahb_addr; - u32 memmap_phy; - u32 memmap_offs; - u32 memmap_len; 
- struct clk *clk, *clk_en; - struct device *dev; - struct completion c; - const struct fsl_qspi_devtype_data *devtype_data; - u32 nor_size; - u32 nor_num; - u32 clk_rate; - unsigned int chip_base_addr; /* We may support two chips. */ - bool has_second_chip; - bool big_endian; - struct mutex lock; - struct pm_qos_request pm_qos_req; -}; - -static inline int needs_swap_endian(struct fsl_qspi *q) -{ - return q->devtype_data->driver_data & QUADSPI_QUIRK_SWAP_ENDIAN; -} - -static inline int needs_4x_clock(struct fsl_qspi *q) -{ - return q->devtype_data->driver_data & QUADSPI_QUIRK_4X_INT_CLK; -} - -static inline int needs_fill_txfifo(struct fsl_qspi *q) -{ - return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT253890; -} - -static inline int needs_wakeup_wait_mode(struct fsl_qspi *q) -{ - return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT245618; -} - -/* - * R/W functions for big- or little-endian registers: - * The qSPI controller's endian is independent of the CPU core's endian. - * So far, although the CPU core is little-endian but the qSPI have two - * versions for big-endian and little-endian. - */ -static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr) -{ - if (q->big_endian) - iowrite32be(val, addr); - else - iowrite32(val, addr); -} - -static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr) -{ - if (q->big_endian) - return ioread32be(addr); - else - return ioread32(addr); -} - -/* - * An IC bug makes us to re-arrange the 32-bit data. - * The following chips, such as IMX6SLX, have fixed this bug. - */ -static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a) -{ - return needs_swap_endian(q) ? __swab32(a) : a; -} - -static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q) -{ - qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); - qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR); -} - -static inline void fsl_qspi_lock_lut(struct fsl_qspi *q) -{ - qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); - qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR); -} - -static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id) -{ - struct fsl_qspi *q = dev_id; - u32 reg; - - /* clear interrupt */ - reg = qspi_readl(q, q->iobase + QUADSPI_FR); - qspi_writel(q, reg, q->iobase + QUADSPI_FR); - - if (reg & QUADSPI_FR_TFF_MASK) - complete(&q->c); - - dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", q->chip_base_addr, reg); - return IRQ_HANDLED; -} - -static void fsl_qspi_init_lut(struct fsl_qspi *q) -{ - void __iomem *base = q->iobase; - int rxfifo = q->devtype_data->rxfifo; - u32 lut_base; - int i; - - struct spi_nor *nor = &q->nor[0]; - u8 addrlen = (nor->addr_width == 3) ? 
ADDR24BIT : ADDR32BIT; - u8 read_op = nor->read_opcode; - u8 read_dm = nor->read_dummy; - - fsl_qspi_unlock_lut(q); - - /* Clear all the LUT table */ - for (i = 0; i < QUADSPI_LUT_NUM; i++) - qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4); - - /* Read */ - lut_base = SEQID_READ * 4; - - qspi_writel(q, LUT0(CMD, PAD1, read_op) | LUT1(ADDR, PAD1, addrlen), - base + QUADSPI_LUT(lut_base)); - qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) | - LUT1(FSL_READ, PAD4, rxfifo), - base + QUADSPI_LUT(lut_base + 1)); - - /* Write enable */ - lut_base = SEQID_WREN * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WREN), - base + QUADSPI_LUT(lut_base)); - - /* Page Program */ - lut_base = SEQID_PP * 4; - - qspi_writel(q, LUT0(CMD, PAD1, nor->program_opcode) | - LUT1(ADDR, PAD1, addrlen), - base + QUADSPI_LUT(lut_base)); - qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0), - base + QUADSPI_LUT(lut_base + 1)); - - /* Read Status */ - lut_base = SEQID_RDSR * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDSR) | - LUT1(FSL_READ, PAD1, 0x1), - base + QUADSPI_LUT(lut_base)); - - /* Erase a sector */ - lut_base = SEQID_SE * 4; - - qspi_writel(q, LUT0(CMD, PAD1, nor->erase_opcode) | - LUT1(ADDR, PAD1, addrlen), - base + QUADSPI_LUT(lut_base)); - - /* Erase the whole chip */ - lut_base = SEQID_CHIP_ERASE * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE), - base + QUADSPI_LUT(lut_base)); - - /* READ ID */ - lut_base = SEQID_RDID * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDID) | - LUT1(FSL_READ, PAD1, 0x8), - base + QUADSPI_LUT(lut_base)); - - /* Write Register */ - lut_base = SEQID_WRSR * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRSR) | - LUT1(FSL_WRITE, PAD1, 0x2), - base + QUADSPI_LUT(lut_base)); - - /* Read Configuration Register */ - lut_base = SEQID_RDCR * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDCR) | - LUT1(FSL_READ, PAD1, 0x1), - base + QUADSPI_LUT(lut_base)); - - /* Write disable */ - lut_base = SEQID_WRDI * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRDI), - base + QUADSPI_LUT(lut_base)); - - /* Enter 4 Byte Mode (Micron) */ - lut_base = SEQID_EN4B * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_EN4B), - base + QUADSPI_LUT(lut_base)); - - /* Enter 4 Byte Mode (Spansion) */ - lut_base = SEQID_BRWR * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_BRWR), - base + QUADSPI_LUT(lut_base)); - - fsl_qspi_lock_lut(q); -} - -/* Get the SEQID for the command */ -static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd) -{ - switch (cmd) { - case SPINOR_OP_READ_1_1_4: - case SPINOR_OP_READ_1_1_4_4B: - return SEQID_READ; - case SPINOR_OP_WREN: - return SEQID_WREN; - case SPINOR_OP_WRDI: - return SEQID_WRDI; - case SPINOR_OP_RDSR: - return SEQID_RDSR; - case SPINOR_OP_SE: - return SEQID_SE; - case SPINOR_OP_CHIP_ERASE: - return SEQID_CHIP_ERASE; - case SPINOR_OP_PP: - return SEQID_PP; - case SPINOR_OP_RDID: - return SEQID_RDID; - case SPINOR_OP_WRSR: - return SEQID_WRSR; - case SPINOR_OP_RDCR: - return SEQID_RDCR; - case SPINOR_OP_EN4B: - return SEQID_EN4B; - case SPINOR_OP_BRWR: - return SEQID_BRWR; - default: - if (cmd == q->nor[0].erase_opcode) - return SEQID_SE; - dev_err(q->dev, "Unsupported cmd 0x%.2x\n", cmd); - break; - } - return -EINVAL; -} - -static int -fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len) -{ - void __iomem *base = q->iobase; - int seqid; - u32 reg, reg2; - int err; - - init_completion(&q->c); - dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len:%d, cmd:%.2x\n", - q->chip_base_addr, addr, len, cmd); - - /* save the reg */ - reg = qspi_readl(q, base + QUADSPI_MCR); - 
- qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr, - base + QUADSPI_SFAR); - qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS, - base + QUADSPI_RBCT); - qspi_writel(q, reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR); - - do { - reg2 = qspi_readl(q, base + QUADSPI_SR); - if (reg2 & (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK)) { - udelay(1); - dev_dbg(q->dev, "The controller is busy, 0x%x\n", reg2); - continue; - } - break; - } while (1); - - /* trigger the LUT now */ - seqid = fsl_qspi_get_seqid(q, cmd); - if (seqid < 0) - return seqid; - - qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, - base + QUADSPI_IPCR); - - /* Wait for the interrupt. */ - if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) { - dev_err(q->dev, - "cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n", - cmd, addr, qspi_readl(q, base + QUADSPI_FR), - qspi_readl(q, base + QUADSPI_SR)); - err = -ETIMEDOUT; - } else { - err = 0; - } - - /* restore the MCR */ - qspi_writel(q, reg, base + QUADSPI_MCR); - - return err; -} - -/* Read out the data from the QUADSPI_RBDR buffer registers. */ -static void fsl_qspi_read_data(struct fsl_qspi *q, int len, u8 *rxbuf) -{ - u32 tmp; - int i = 0; - - while (len > 0) { - tmp = qspi_readl(q, q->iobase + QUADSPI_RBDR + i * 4); - tmp = fsl_qspi_endian_xchg(q, tmp); - dev_dbg(q->dev, "chip addr:0x%.8x, rcv:0x%.8x\n", - q->chip_base_addr, tmp); - - if (len >= 4) { - *((u32 *)rxbuf) = tmp; - rxbuf += 4; - } else { - memcpy(rxbuf, &tmp, len); - break; - } - - len -= 4; - i++; - } -} - -/* - * If we have changed the content of the flash by writing or erasing, - * we need to invalidate the AHB buffer. If we do not do so, we may read out - * the wrong data. The spec tells us reset the AHB domain and Serial Flash - * domain at the same time. - */ -static inline void fsl_qspi_invalid(struct fsl_qspi *q) -{ - u32 reg; - - reg = qspi_readl(q, q->iobase + QUADSPI_MCR); - reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK; - qspi_writel(q, reg, q->iobase + QUADSPI_MCR); - - /* - * The minimum delay : 1 AHB + 2 SFCK clocks. - * Delay 1 us is enough. - */ - udelay(1); - - reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK); - qspi_writel(q, reg, q->iobase + QUADSPI_MCR); -} - -static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, - u8 opcode, unsigned int to, u32 *txbuf, - unsigned count) -{ - int ret, i, j; - u32 tmp; - - dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len : %d\n", - q->chip_base_addr, to, count); - - /* clear the TX FIFO. 
*/ - tmp = qspi_readl(q, q->iobase + QUADSPI_MCR); - qspi_writel(q, tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR); - - /* fill the TX data to the FIFO */ - for (j = 0, i = ((count + 3) / 4); j < i; j++) { - tmp = fsl_qspi_endian_xchg(q, *txbuf); - qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR); - txbuf++; - } - - /* fill the TXFIFO upto 16 bytes for i.MX7d */ - if (needs_fill_txfifo(q)) - for (; i < 4; i++) - qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR); - - /* Trigger it */ - ret = fsl_qspi_runcmd(q, opcode, to, count); - - if (ret == 0) - return count; - - return ret; -} - -static void fsl_qspi_set_map_addr(struct fsl_qspi *q) -{ - int nor_size = q->nor_size; - void __iomem *base = q->iobase; - - qspi_writel(q, nor_size + q->memmap_phy, base + QUADSPI_SFA1AD); - qspi_writel(q, nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD); - qspi_writel(q, nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD); - qspi_writel(q, nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD); -} - -/* - * There are two different ways to read out the data from the flash: - * the "IP Command Read" and the "AHB Command Read". - * - * The IC guy suggests we use the "AHB Command Read" which is faster - * then the "IP Command Read". (What's more is that there is a bug in - * the "IP Command Read" in the Vybrid.) - * - * After we set up the registers for the "AHB Command Read", we can use - * the memcpy to read the data directly. A "missed" access to the buffer - * causes the controller to clear the buffer, and use the sequence pointed - * by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash. - */ -static int fsl_qspi_init_ahb_read(struct fsl_qspi *q) -{ - void __iomem *base = q->iobase; - int seqid; - - /* AHB configuration for access buffer 0/1/2 .*/ - qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR); - qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR); - qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR); - /* - * Set ADATSZ with the maximum AHB buffer size to improve the - * read performance. - */ - qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK | - ((q->devtype_data->ahb_buf_size / 8) - << QUADSPI_BUF3CR_ADATSZ_SHIFT), - base + QUADSPI_BUF3CR); - - /* We only use the buffer3 */ - qspi_writel(q, 0, base + QUADSPI_BUF0IND); - qspi_writel(q, 0, base + QUADSPI_BUF1IND); - qspi_writel(q, 0, base + QUADSPI_BUF2IND); - - /* Set the default lut sequence for AHB Read. */ - seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode); - if (seqid < 0) - return seqid; - - qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT, - q->iobase + QUADSPI_BFGENCR); - - return 0; -} - -/* This function was used to prepare and enable QSPI clock */ -static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q) -{ - int ret; - - ret = clk_prepare_enable(q->clk_en); - if (ret) - return ret; - - ret = clk_prepare_enable(q->clk); - if (ret) { - clk_disable_unprepare(q->clk_en); - return ret; - } - - if (needs_wakeup_wait_mode(q)) - pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0); - - return 0; -} - -/* This function was used to disable and unprepare QSPI clock */ -static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q) -{ - if (needs_wakeup_wait_mode(q)) - pm_qos_remove_request(&q->pm_qos_req); - - clk_disable_unprepare(q->clk); - clk_disable_unprepare(q->clk_en); - -} - -/* We use this function to do some basic init for spi_nor_scan(). 
*/ -static int fsl_qspi_nor_setup(struct fsl_qspi *q) -{ - void __iomem *base = q->iobase; - u32 reg; - int ret; - - /* disable and unprepare clock to avoid glitch pass to controller */ - fsl_qspi_clk_disable_unprep(q); - - /* the default frequency, we will change it in the future. */ - ret = clk_set_rate(q->clk, 66000000); - if (ret) - return ret; - - ret = fsl_qspi_clk_prep_enable(q); - if (ret) - return ret; - - /* Reset the module */ - qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK, - base + QUADSPI_MCR); - udelay(1); - - /* Init the LUT table. */ - fsl_qspi_init_lut(q); - - /* Disable the module */ - qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK, - base + QUADSPI_MCR); - - reg = qspi_readl(q, base + QUADSPI_SMPR); - qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK - | QUADSPI_SMPR_FSPHS_MASK - | QUADSPI_SMPR_HSENA_MASK - | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR); - - /* Enable the module */ - qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK, - base + QUADSPI_MCR); - - /* clear all interrupt status */ - qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR); - - /* enable the interrupt */ - qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER); - - return 0; -} - -static int fsl_qspi_nor_setup_last(struct fsl_qspi *q) -{ - unsigned long rate = q->clk_rate; - int ret; - - if (needs_4x_clock(q)) - rate *= 4; - - /* disable and unprepare clock to avoid glitch pass to controller */ - fsl_qspi_clk_disable_unprep(q); - - ret = clk_set_rate(q->clk, rate); - if (ret) - return ret; - - ret = fsl_qspi_clk_prep_enable(q); - if (ret) - return ret; - - /* Init the LUT table again. */ - fsl_qspi_init_lut(q); - - /* Init for AHB read */ - return fsl_qspi_init_ahb_read(q); -} - -static const struct of_device_id fsl_qspi_dt_ids[] = { - { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, }, - { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, }, - { .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, }, - { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, }, - { .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, }, - { .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids); - -static void fsl_qspi_set_base_addr(struct fsl_qspi *q, struct spi_nor *nor) -{ - q->chip_base_addr = q->nor_size * (nor - q->nor); -} - -static int fsl_qspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) -{ - int ret; - struct fsl_qspi *q = nor->priv; - - ret = fsl_qspi_runcmd(q, opcode, 0, len); - if (ret) - return ret; - - fsl_qspi_read_data(q, len, buf); - return 0; -} - -static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) -{ - struct fsl_qspi *q = nor->priv; - int ret; - - if (!buf) { - ret = fsl_qspi_runcmd(q, opcode, 0, 1); - if (ret) - return ret; - - if (opcode == SPINOR_OP_CHIP_ERASE) - fsl_qspi_invalid(q); - - } else if (len > 0) { - ret = fsl_qspi_nor_write(q, nor, opcode, 0, - (u32 *)buf, len); - if (ret > 0) - return 0; - } else { - dev_err(q->dev, "invalid cmd %d\n", opcode); - ret = -EINVAL; - } - - return ret; -} - -static ssize_t fsl_qspi_write(struct spi_nor *nor, loff_t to, - size_t len, const u_char *buf) -{ - struct fsl_qspi *q = nor->priv; - ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to, - (u32 *)buf, len); - - /* invalid the data in the AHB buffer. 
*/ - fsl_qspi_invalid(q); - return ret; -} - -static ssize_t fsl_qspi_read(struct spi_nor *nor, loff_t from, - size_t len, u_char *buf) -{ - struct fsl_qspi *q = nor->priv; - u8 cmd = nor->read_opcode; - - /* if necessary,ioremap buffer before AHB read, */ - if (!q->ahb_addr) { - q->memmap_offs = q->chip_base_addr + from; - q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP; - - q->ahb_addr = ioremap_nocache( - q->memmap_phy + q->memmap_offs, - q->memmap_len); - if (!q->ahb_addr) { - dev_err(q->dev, "ioremap failed\n"); - return -ENOMEM; - } - /* ioremap if the data requested is out of range */ - } else if (q->chip_base_addr + from < q->memmap_offs - || q->chip_base_addr + from + len > - q->memmap_offs + q->memmap_len) { - iounmap(q->ahb_addr); - - q->memmap_offs = q->chip_base_addr + from; - q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP; - q->ahb_addr = ioremap_nocache( - q->memmap_phy + q->memmap_offs, - q->memmap_len); - if (!q->ahb_addr) { - dev_err(q->dev, "ioremap failed\n"); - return -ENOMEM; - } - } - - dev_dbg(q->dev, "cmd [%x],read from %p, len:%zd\n", - cmd, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs, - len); - - /* Read out the data directly from the AHB buffer.*/ - memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs, - len); - - return len; -} - -static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs) -{ - struct fsl_qspi *q = nor->priv; - int ret; - - dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n", - nor->mtd.erasesize / 1024, q->chip_base_addr, (u32)offs); - - ret = fsl_qspi_runcmd(q, nor->erase_opcode, offs, 0); - if (ret) - return ret; - - fsl_qspi_invalid(q); - return 0; -} - -static int fsl_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops) -{ - struct fsl_qspi *q = nor->priv; - int ret; - - mutex_lock(&q->lock); - - ret = fsl_qspi_clk_prep_enable(q); - if (ret) - goto err_mutex; - - fsl_qspi_set_base_addr(q, nor); - return 0; - -err_mutex: - mutex_unlock(&q->lock); - return ret; -} - -static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops) -{ - struct fsl_qspi *q = nor->priv; - - fsl_qspi_clk_disable_unprep(q); - mutex_unlock(&q->lock); -} - -static int fsl_qspi_probe(struct platform_device *pdev) -{ - const struct spi_nor_hwcaps hwcaps = { - .mask = SNOR_HWCAPS_READ_1_1_4 | - SNOR_HWCAPS_PP, - }; - struct device_node *np = pdev->dev.of_node; - struct device *dev = &pdev->dev; - struct fsl_qspi *q; - struct resource *res; - struct spi_nor *nor; - struct mtd_info *mtd; - int ret, i = 0; - - q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL); - if (!q) - return -ENOMEM; - - q->nor_num = of_get_child_count(dev->of_node); - if (!q->nor_num || q->nor_num > FSL_QSPI_MAX_CHIP) - return -ENODEV; - - q->dev = dev; - q->devtype_data = of_device_get_match_data(dev); - if (!q->devtype_data) - return -ENODEV; - platform_set_drvdata(pdev, q); - - /* find the resources */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI"); - q->iobase = devm_ioremap_resource(dev, res); - if (IS_ERR(q->iobase)) - return PTR_ERR(q->iobase); - - q->big_endian = of_property_read_bool(np, "big-endian"); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "QuadSPI-memory"); - if (!devm_request_mem_region(dev, res->start, resource_size(res), - res->name)) { - dev_err(dev, "can't request region for resource %pR\n", res); - return -EBUSY; - } - - q->memmap_phy = res->start; - - /* find the clocks */ - q->clk_en = devm_clk_get(dev, "qspi_en"); - if (IS_ERR(q->clk_en)) - return PTR_ERR(q->clk_en); - - q->clk 
= devm_clk_get(dev, "qspi"); - if (IS_ERR(q->clk)) - return PTR_ERR(q->clk); - - ret = fsl_qspi_clk_prep_enable(q); - if (ret) { - dev_err(dev, "can not enable the clock\n"); - goto clk_failed; - } - - /* find the irq */ - ret = platform_get_irq(pdev, 0); - if (ret < 0) { - dev_err(dev, "failed to get the irq: %d\n", ret); - goto irq_failed; - } - - ret = devm_request_irq(dev, ret, - fsl_qspi_irq_handler, 0, pdev->name, q); - if (ret) { - dev_err(dev, "failed to request irq: %d\n", ret); - goto irq_failed; - } - - ret = fsl_qspi_nor_setup(q); - if (ret) - goto irq_failed; - - if (of_get_property(np, "fsl,qspi-has-second-chip", NULL)) - q->has_second_chip = true; - - mutex_init(&q->lock); - - /* iterate the subnodes. */ - for_each_available_child_of_node(dev->of_node, np) { - /* skip the holes */ - if (!q->has_second_chip) - i *= 2; - - nor = &q->nor[i]; - mtd = &nor->mtd; - - nor->dev = dev; - spi_nor_set_flash_node(nor, np); - nor->priv = q; - - if (q->nor_num > 1 && !mtd->name) { - int spiflash_idx; - - ret = of_property_read_u32(np, "reg", &spiflash_idx); - if (!ret) { - mtd->name = devm_kasprintf(dev, GFP_KERNEL, - "%s-%d", - dev_name(dev), - spiflash_idx); - if (!mtd->name) { - ret = -ENOMEM; - goto mutex_failed; - } - } else { - dev_warn(dev, "reg property is missing\n"); - } - } - - /* fill the hooks */ - nor->read_reg = fsl_qspi_read_reg; - nor->write_reg = fsl_qspi_write_reg; - nor->read = fsl_qspi_read; - nor->write = fsl_qspi_write; - nor->erase = fsl_qspi_erase; - - nor->prepare = fsl_qspi_prep; - nor->unprepare = fsl_qspi_unprep; - - ret = of_property_read_u32(np, "spi-max-frequency", - &q->clk_rate); - if (ret < 0) - goto mutex_failed; - - /* set the chip address for READID */ - fsl_qspi_set_base_addr(q, nor); - - ret = spi_nor_scan(nor, NULL, &hwcaps); - if (ret) - goto mutex_failed; - - ret = mtd_device_register(mtd, NULL, 0); - if (ret) - goto mutex_failed; - - /* Set the correct NOR size now. */ - if (q->nor_size == 0) { - q->nor_size = mtd->size; - - /* Map the SPI NOR to accessiable address */ - fsl_qspi_set_map_addr(q); - } - - /* - * The TX FIFO is 64 bytes in the Vybrid, but the Page Program - * may writes 265 bytes per time. The write is working in the - * unit of the TX FIFO, not in the unit of the SPI NOR's page - * size. - * - * So shrink the spi_nor->page_size if it is larger then the - * TX FIFO. - */ - if (nor->page_size > q->devtype_data->txfifo) - nor->page_size = q->devtype_data->txfifo; - - i++; - } - - /* finish the rest init. 
*/ - ret = fsl_qspi_nor_setup_last(q); - if (ret) - goto last_init_failed; - - fsl_qspi_clk_disable_unprep(q); - return 0; - -last_init_failed: - for (i = 0; i < q->nor_num; i++) { - /* skip the holes */ - if (!q->has_second_chip) - i *= 2; - mtd_device_unregister(&q->nor[i].mtd); - } -mutex_failed: - mutex_destroy(&q->lock); -irq_failed: - fsl_qspi_clk_disable_unprep(q); -clk_failed: - dev_err(dev, "Freescale QuadSPI probe failed\n"); - return ret; -} - -static int fsl_qspi_remove(struct platform_device *pdev) -{ - struct fsl_qspi *q = platform_get_drvdata(pdev); - int i; - - for (i = 0; i < q->nor_num; i++) { - /* skip the holes */ - if (!q->has_second_chip) - i *= 2; - mtd_device_unregister(&q->nor[i].mtd); - } - - /* disable the hardware */ - qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); - qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER); - - mutex_destroy(&q->lock); - - if (q->ahb_addr) - iounmap(q->ahb_addr); - - return 0; -} - -static int fsl_qspi_suspend(struct platform_device *pdev, pm_message_t state) -{ - return 0; -} - -static int fsl_qspi_resume(struct platform_device *pdev) -{ - int ret; - struct fsl_qspi *q = platform_get_drvdata(pdev); - - ret = fsl_qspi_clk_prep_enable(q); - if (ret) - return ret; - - fsl_qspi_nor_setup(q); - fsl_qspi_set_map_addr(q); - fsl_qspi_nor_setup_last(q); - - fsl_qspi_clk_disable_unprep(q); - - return 0; -} - -static struct platform_driver fsl_qspi_driver = { - .driver = { - .name = "fsl-quadspi", - .of_match_table = fsl_qspi_dt_ids, - }, - .probe = fsl_qspi_probe, - .remove = fsl_qspi_remove, - .suspend = fsl_qspi_suspend, - .resume = fsl_qspi_resume, -}; -module_platform_driver(fsl_qspi_driver); - -MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver"); -MODULE_AUTHOR("Freescale Semiconductor Inc."); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 9f89cb134549..f761655e2a36 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -63,7 +63,7 @@ config SPI_ALTERA config SPI_ATH79 tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver" - depends on ATH79 && GPIOLIB + depends on ATH79 || COMPILE_TEST select SPI_BITBANG help This enables support for the SPI controller present on the @@ -268,6 +268,27 @@ config SPI_FSL_LPSPI help This enables Freescale i.MX LPSPI controllers in master mode. +config SPI_FSL_QUADSPI + tristate "Freescale QSPI controller" + depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST + depends on HAS_IOMEM + help + This enables support for the Quad SPI controller in master mode. + Up to four flash chips can be connected on two buses with two + chipselects each. + This controller does not support generic SPI messages. It only + supports the high-level SPI memory interface. + +config SPI_NXP_FLEXSPI + tristate "NXP Flex SPI controller" + depends on ARCH_LAYERSCAPE || HAS_IOMEM + help + This enables support for the Flex SPI controller in master mode. + Up to four slave devices can be connected on two buses with two + chipselects each. + This controller does not support generic SPI messages and only + supports the high-level SPI memory interface. + config SPI_GPIO tristate "GPIO-based bitbanging SPI Master" depends on GPIOLIB || COMPILE_TEST @@ -296,8 +317,7 @@ config SPI_IMX depends on ARCH_MXC || COMPILE_TEST select SPI_BITBANG help - This enables using the Freescale i.MX SPI controllers in master - mode. + This enables support for the Freescale i.MX SPI controllers. 
config SPI_JCORE tristate "J-Core SPI Master" @@ -372,7 +392,7 @@ config SPI_FSL_DSPI depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || M5441x || COMPILE_TEST help This enables support for the Freescale DSPI controller in master - mode. VF610 platform uses the controller. + mode. VF610, LS1021A and ColdFire platforms uses the controller. config SPI_FSL_ESPI tristate "Freescale eSPI controller" @@ -631,6 +651,12 @@ config SPI_SH_HSPI help SPI driver for SuperH HSPI blocks. +config SPI_SIFIVE + tristate "SiFive SPI controller" + depends on HAS_IOMEM + help + This exposes the SPI controller IP from SiFive. + config SPI_SIRF tristate "CSR SiRFprimaII SPI controller" depends on SIRF_DMA @@ -665,7 +691,7 @@ config SPI_STM32 tristate "STMicroelectronics STM32 SPI controller" depends on ARCH_STM32 || COMPILE_TEST help - SPI driver for STMicroelectonics STM32 SoCs. + SPI driver for STMicroelectronics STM32 SoCs. STM32 SPI controller supports DMA and PIO modes. When DMA is not available, the driver automatically falls back to diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index f29627040dfb..d8fc03c9faa2 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o +obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o obj-$(CONFIG_SPI_GPIO) += spi-gpio.o obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o @@ -63,6 +64,7 @@ obj-$(CONFIG_SPI_MXIC) += spi-mxic.o obj-$(CONFIG_SPI_MXS) += spi-mxs.o obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o +obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o @@ -93,6 +95,7 @@ obj-$(CONFIG_SPI_SH) += spi-sh.o obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o +obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o obj-$(CONFIG_SPI_SIRF) += spi-sirf.o obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o obj-$(CONFIG_SPI_SPRD) += spi-sprd.o diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index ddc712410812..fffc21cd5f79 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Driver for Atmel QSPI Controller * @@ -7,31 +8,19 @@ * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com> * Author: Piotr Bugalski <bugalski.piotr@gmail.com> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. - * * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale. 
*/ -#include <linux/kernel.h> #include <linux/clk.h> -#include <linux/module.h> -#include <linux/platform_device.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/interrupt.h> -#include <linux/of.h> - #include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/spi/spi-mem.h> /* QSPI register offsets */ @@ -47,7 +36,9 @@ #define QSPI_IAR 0x0030 /* Instruction Address Register */ #define QSPI_ICR 0x0034 /* Instruction Code Register */ +#define QSPI_WICR 0x0034 /* Write Instruction Code Register */ #define QSPI_IFR 0x0038 /* Instruction Frame Register */ +#define QSPI_RICR 0x003C /* Read Instruction Code Register */ #define QSPI_SMR 0x0040 /* Scrambling Mode Register */ #define QSPI_SKR 0x0044 /* Scrambling Key Register */ @@ -100,7 +91,7 @@ #define QSPI_SCR_DLYBS_MASK GENMASK(23, 16) #define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK) -/* Bitfields in QSPI_ICR (Instruction Code Register) */ +/* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */ #define QSPI_ICR_INST_MASK GENMASK(7, 0) #define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK) #define QSPI_ICR_OPT_MASK GENMASK(23, 16) @@ -125,14 +116,12 @@ #define QSPI_IFR_OPTL_4BIT (2 << 8) #define QSPI_IFR_OPTL_8BIT (3 << 8) #define QSPI_IFR_ADDRL BIT(10) -#define QSPI_IFR_TFRTYP_MASK GENMASK(13, 12) -#define QSPI_IFR_TFRTYP_TRSFR_READ (0 << 12) -#define QSPI_IFR_TFRTYP_TRSFR_READ_MEM (1 << 12) -#define QSPI_IFR_TFRTYP_TRSFR_WRITE (2 << 12) -#define QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM (3 << 13) +#define QSPI_IFR_TFRTYP_MEM BIT(12) +#define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13) #define QSPI_IFR_CRM BIT(14) #define QSPI_IFR_NBDUM_MASK GENMASK(20, 16) #define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK) +#define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */ /* Bitfields in QSPI_SMR (Scrambling Mode Register) */ #define QSPI_SMR_SCREN BIT(0) @@ -148,24 +137,31 @@ #define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8) #define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC) +struct atmel_qspi_caps { + bool has_qspick; + bool has_ricr; +}; struct atmel_qspi { void __iomem *regs; void __iomem *mem; - struct clk *clk; + struct clk *pclk; + struct clk *qspick; struct platform_device *pdev; + const struct atmel_qspi_caps *caps; u32 pending; + u32 mr; struct completion cmd_completion; }; -struct qspi_mode { +struct atmel_qspi_mode { u8 cmd_buswidth; u8 addr_buswidth; u8 data_buswidth; u32 config; }; -static const struct qspi_mode sama5d2_qspi_modes[] = { +static const struct atmel_qspi_mode atmel_qspi_modes[] = { { 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI }, { 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT }, { 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT }, @@ -175,19 +171,8 @@ static const struct qspi_mode sama5d2_qspi_modes[] = { { 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD }, }; -/* Register access functions */ -static inline u32 qspi_readl(struct atmel_qspi *aq, u32 reg) -{ - return readl_relaxed(aq->regs + reg); -} - -static inline void qspi_writel(struct atmel_qspi *aq, u32 reg, u32 value) -{ - writel_relaxed(value, aq->regs + reg); -} - -static inline bool is_compatible(const struct spi_mem_op *op, - const struct qspi_mode *mode) +static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op, + const struct atmel_qspi_mode *mode) { if (op->cmd.buswidth != mode->cmd_buswidth) return false; @@ -201,21 +186,21 @@ static inline bool is_compatible(const struct spi_mem_op *op, return true; } 
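The mode lookup above matches an spi_mem_op's command, address and data bus widths against a small table of supported single/dual/quad configurations. Below is a minimal standalone sketch of that matching logic; the struct layouts, field names and the main() harness are invented here for illustration and are not the kernel's spi-mem API, only the table values mirror atmel_qspi_modes[] from the hunk above.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the op/mode structures, illustration only. */
struct op {
	unsigned int cmd_width;
	unsigned int addr_width, addr_nbytes;
	unsigned int data_width, data_nbytes;
};

struct mode {
	unsigned int cmd_width, addr_width, data_width;
	const char *name;
};

/* Same shape as atmel_qspi_modes[]: 1-1-1 up to 4-4-4. */
static const struct mode modes[] = {
	{ 1, 1, 1, "single-bit SPI" },
	{ 1, 1, 2, "dual output" },
	{ 1, 1, 4, "quad output" },
	{ 1, 2, 2, "dual I/O" },
	{ 1, 4, 4, "quad I/O" },
	{ 2, 2, 2, "dual command" },
	{ 4, 4, 4, "quad command" },
};

/*
 * A phase only has to match when it is actually present in the
 * operation, mirroring the is_compatible() checks above.
 */
static bool compatible(const struct op *op, const struct mode *m)
{
	if (op->cmd_width != m->cmd_width)
		return false;
	if (op->addr_nbytes && op->addr_width != m->addr_width)
		return false;
	if (op->data_nbytes && op->data_width != m->data_width)
		return false;
	return true;
}

int main(void)
{
	/* A 1-1-4 fast read: 1-bit opcode, 1-bit 3-byte address, 4-bit data. */
	const struct op read_1_1_4 = { 1, 1, 3, 4, 256 };
	size_t i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
		if (compatible(&read_1_1_4, &modes[i])) {
			printf("matched mode %zu: %s\n", i, modes[i].name);
			return 0;
		}
	}
	printf("no match, the driver would return -ENOTSUPP\n");
	return 1;
}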
-static int find_mode(const struct spi_mem_op *op) +static int atmel_qspi_find_mode(const struct spi_mem_op *op) { u32 i; - for (i = 0; i < ARRAY_SIZE(sama5d2_qspi_modes); i++) - if (is_compatible(op, &sama5d2_qspi_modes[i])) + for (i = 0; i < ARRAY_SIZE(atmel_qspi_modes); i++) + if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i])) return i; - return -1; + return -ENOTSUPP; } static bool atmel_qspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) { - if (find_mode(op) < 0) + if (atmel_qspi_find_mode(op) < 0) return false; /* special case not supported by hardware */ @@ -226,29 +211,37 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem, return true; } -static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +static int atmel_qspi_set_cfg(struct atmel_qspi *aq, + const struct spi_mem_op *op, u32 *offset) { - struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master); - int mode; + u32 iar, icr, ifr; u32 dummy_cycles = 0; - u32 iar, icr, ifr, sr; - int err = 0; + int mode; iar = 0; icr = QSPI_ICR_INST(op->cmd.opcode); ifr = QSPI_IFR_INSTEN; - qspi_writel(aq, QSPI_MR, QSPI_MR_SMM); - - mode = find_mode(op); + mode = atmel_qspi_find_mode(op); if (mode < 0) - return -ENOTSUPP; - - ifr |= sama5d2_qspi_modes[mode].config; + return mode; + ifr |= atmel_qspi_modes[mode].config; if (op->dummy.buswidth && op->dummy.nbytes) dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth; + /* + * The controller allows 24 and 32-bit addressing while NAND-flash + * requires 16-bit long. Handling 8-bit long addresses is done using + * the option field. For the 16-bit addresses, the workaround depends + * of the number of requested dummy bits. If there are 8 or more dummy + * cycles, the address is shifted and sent with the first dummy byte. + * Otherwise opcode is disabled and the first byte of the address + * contains the command opcode (works only if the opcode and address + * use the same buswidth). The limitation is when the 16-bit address is + * used without enough dummy cycles and the opcode is using a different + * buswidth than the address. + */ if (op->addr.buswidth) { switch (op->addr.nbytes) { case 0: @@ -282,6 +275,9 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) } } + /* offset of the data access in the QSPI memory space */ + *offset = iar; + /* Set number of dummy cycles */ if (dummy_cycles) ifr |= QSPI_IFR_NBDUM(dummy_cycles); @@ -290,49 +286,82 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) if (op->data.nbytes) ifr |= QSPI_IFR_DATAEN; - if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes) - ifr |= QSPI_IFR_TFRTYP_TRSFR_READ; - else - ifr |= QSPI_IFR_TFRTYP_TRSFR_WRITE; + /* + * If the QSPI controller is set in regular SPI mode, set it in + * Serial Memory Mode (SMM). 
+ */ + if (aq->mr != QSPI_MR_SMM) { + writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR); + aq->mr = QSPI_MR_SMM; + } /* Clear pending interrupts */ - (void)qspi_readl(aq, QSPI_SR); + (void)readl_relaxed(aq->regs + QSPI_SR); + + if (aq->caps->has_ricr) { + if (!op->addr.nbytes && op->data.dir == SPI_MEM_DATA_IN) + ifr |= QSPI_IFR_APBTFRTYP_READ; - /* Set QSPI Instruction Frame registers */ - qspi_writel(aq, QSPI_IAR, iar); - qspi_writel(aq, QSPI_ICR, icr); - qspi_writel(aq, QSPI_IFR, ifr); + /* Set QSPI Instruction Frame registers */ + writel_relaxed(iar, aq->regs + QSPI_IAR); + if (op->data.dir == SPI_MEM_DATA_IN) + writel_relaxed(icr, aq->regs + QSPI_RICR); + else + writel_relaxed(icr, aq->regs + QSPI_WICR); + writel_relaxed(ifr, aq->regs + QSPI_IFR); + } else { + if (op->data.dir == SPI_MEM_DATA_OUT) + ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR; + + /* Set QSPI Instruction Frame registers */ + writel_relaxed(iar, aq->regs + QSPI_IAR); + writel_relaxed(icr, aq->regs + QSPI_ICR); + writel_relaxed(ifr, aq->regs + QSPI_IFR); + } + + return 0; +} + +static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master); + u32 sr, offset; + int err; + + err = atmel_qspi_set_cfg(aq, op, &offset); + if (err) + return err; /* Skip to the final steps if there is no data */ if (op->data.nbytes) { /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */ - (void)qspi_readl(aq, QSPI_IFR); + (void)readl_relaxed(aq->regs + QSPI_IFR); /* Send/Receive data */ if (op->data.dir == SPI_MEM_DATA_IN) - _memcpy_fromio(op->data.buf.in, - aq->mem + iar, op->data.nbytes); + _memcpy_fromio(op->data.buf.in, aq->mem + offset, + op->data.nbytes); else - _memcpy_toio(aq->mem + iar, - op->data.buf.out, op->data.nbytes); + _memcpy_toio(aq->mem + offset, op->data.buf.out, + op->data.nbytes); /* Release the chip-select */ - qspi_writel(aq, QSPI_CR, QSPI_CR_LASTXFER); + writel_relaxed(QSPI_CR_LASTXFER, aq->regs + QSPI_CR); } /* Poll INSTRuction End status */ - sr = qspi_readl(aq, QSPI_SR); + sr = readl_relaxed(aq->regs + QSPI_SR); if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED) return err; /* Wait for INSTRuction End interrupt */ reinit_completion(&aq->cmd_completion); aq->pending = sr & QSPI_SR_CMD_COMPLETED; - qspi_writel(aq, QSPI_IER, QSPI_SR_CMD_COMPLETED); + writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IER); if (!wait_for_completion_timeout(&aq->cmd_completion, msecs_to_jiffies(1000))) err = -ETIMEDOUT; - qspi_writel(aq, QSPI_IDR, QSPI_SR_CMD_COMPLETED); + writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IDR); return err; } @@ -361,7 +390,7 @@ static int atmel_qspi_setup(struct spi_device *spi) if (!spi->max_speed_hz) return -EINVAL; - src_rate = clk_get_rate(aq->clk); + src_rate = clk_get_rate(aq->pclk); if (!src_rate) return -EINVAL; @@ -371,7 +400,7 @@ static int atmel_qspi_setup(struct spi_device *spi) scbr--; scr = QSPI_SCR_SCBR(scbr); - qspi_writel(aq, QSPI_SCR, scr); + writel_relaxed(scr, aq->regs + QSPI_SCR); return 0; } @@ -379,21 +408,25 @@ static int atmel_qspi_setup(struct spi_device *spi) static int atmel_qspi_init(struct atmel_qspi *aq) { /* Reset the QSPI controller */ - qspi_writel(aq, QSPI_CR, QSPI_CR_SWRST); + writel_relaxed(QSPI_CR_SWRST, aq->regs + QSPI_CR); + + /* Set the QSPI controller by default in Serial Memory Mode */ + writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR); + aq->mr = QSPI_MR_SMM; /* Enable the QSPI controller */ - qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIEN); + 
writel_relaxed(QSPI_CR_QSPIEN, aq->regs + QSPI_CR); return 0; } static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id) { - struct atmel_qspi *aq = (struct atmel_qspi *)dev_id; + struct atmel_qspi *aq = dev_id; u32 status, mask, pending; - status = qspi_readl(aq, QSPI_SR); - mask = qspi_readl(aq, QSPI_IMR); + status = readl_relaxed(aq->regs + QSPI_SR); + mask = readl_relaxed(aq->regs + QSPI_IMR); pending = status & mask; if (!pending) @@ -449,44 +482,74 @@ static int atmel_qspi_probe(struct platform_device *pdev) } /* Get the peripheral clock */ - aq->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(aq->clk)) { + aq->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(aq->pclk)) + aq->pclk = devm_clk_get(&pdev->dev, NULL); + + if (IS_ERR(aq->pclk)) { dev_err(&pdev->dev, "missing peripheral clock\n"); - err = PTR_ERR(aq->clk); + err = PTR_ERR(aq->pclk); goto exit; } /* Enable the peripheral clock */ - err = clk_prepare_enable(aq->clk); + err = clk_prepare_enable(aq->pclk); if (err) { dev_err(&pdev->dev, "failed to enable the peripheral clock\n"); goto exit; } + aq->caps = of_device_get_match_data(&pdev->dev); + if (!aq->caps) { + dev_err(&pdev->dev, "Could not retrieve QSPI caps\n"); + err = -EINVAL; + goto exit; + } + + if (aq->caps->has_qspick) { + /* Get the QSPI system clock */ + aq->qspick = devm_clk_get(&pdev->dev, "qspick"); + if (IS_ERR(aq->qspick)) { + dev_err(&pdev->dev, "missing system clock\n"); + err = PTR_ERR(aq->qspick); + goto disable_pclk; + } + + /* Enable the QSPI system clock */ + err = clk_prepare_enable(aq->qspick); + if (err) { + dev_err(&pdev->dev, + "failed to enable the QSPI system clock\n"); + goto disable_pclk; + } + } + /* Request the IRQ */ irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "missing IRQ\n"); err = irq; - goto disable_clk; + goto disable_qspick; } err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt, 0, dev_name(&pdev->dev), aq); if (err) - goto disable_clk; + goto disable_qspick; err = atmel_qspi_init(aq); if (err) - goto disable_clk; + goto disable_qspick; err = spi_register_controller(ctrl); if (err) - goto disable_clk; + goto disable_qspick; return 0; -disable_clk: - clk_disable_unprepare(aq->clk); +disable_qspick: + clk_disable_unprepare(aq->qspick); +disable_pclk: + clk_disable_unprepare(aq->pclk); exit: spi_controller_put(ctrl); @@ -499,8 +562,9 @@ static int atmel_qspi_remove(struct platform_device *pdev) struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); spi_unregister_controller(ctrl); - qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIDIS); - clk_disable_unprepare(aq->clk); + writel_relaxed(QSPI_CR_QSPIDIS, aq->regs + QSPI_CR); + clk_disable_unprepare(aq->qspick); + clk_disable_unprepare(aq->pclk); return 0; } @@ -508,7 +572,8 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev) { struct atmel_qspi *aq = dev_get_drvdata(dev); - clk_disable_unprepare(aq->clk); + clk_disable_unprepare(aq->qspick); + clk_disable_unprepare(aq->pclk); return 0; } @@ -517,7 +582,8 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev) { struct atmel_qspi *aq = dev_get_drvdata(dev); - clk_prepare_enable(aq->clk); + clk_prepare_enable(aq->pclk); + clk_prepare_enable(aq->qspick); return atmel_qspi_init(aq); } @@ -525,8 +591,22 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend, atmel_qspi_resume); +static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {}; + +static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps 
= { + .has_qspick = true, + .has_ricr = true, +}; + static const struct of_device_id atmel_qspi_dt_ids[] = { - { .compatible = "atmel,sama5d2-qspi" }, + { + .compatible = "atmel,sama5d2-qspi", + .data = &atmel_sama5d2_qspi_caps, + }, + { + .compatible = "microchip,sam9x60-qspi", + .data = &atmel_sam9x60_qspi_caps, + }, { /* sentinel */ } }; diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c index 3f6b657394de..847f354ebef1 100644 --- a/drivers/spi/spi-ath79.c +++ b/drivers/spi/spi-ath79.c @@ -21,18 +21,26 @@ #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/bitops.h> -#include <linux/gpio.h> #include <linux/clk.h> #include <linux/err.h> - -#include <asm/mach-ath79/ar71xx_regs.h> -#include <asm/mach-ath79/ath79_spi_platform.h> +#include <linux/platform_data/spi-ath79.h> #define DRV_NAME "ath79-spi" #define ATH79_SPI_RRW_DELAY_FACTOR 12000 #define MHZ (1000 * 1000) +#define AR71XX_SPI_REG_FS 0x00 /* Function Select */ +#define AR71XX_SPI_REG_CTRL 0x04 /* SPI Control */ +#define AR71XX_SPI_REG_IOC 0x08 /* SPI I/O Control */ +#define AR71XX_SPI_REG_RDS 0x0c /* Read Data Shift */ + +#define AR71XX_SPI_FS_GPIO BIT(0) /* Enable GPIO mode */ + +#define AR71XX_SPI_IOC_DO BIT(0) /* Data Out pin */ +#define AR71XX_SPI_IOC_CLK BIT(8) /* CLK pin */ +#define AR71XX_SPI_IOC_CS(n) BIT(16 + (n)) + struct ath79_spi { struct spi_bitbang bitbang; u32 ioc_base; @@ -67,31 +75,14 @@ static void ath79_spi_chipselect(struct spi_device *spi, int is_active) { struct ath79_spi *sp = ath79_spidev_to_sp(spi); int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active; + u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select); - if (is_active) { - /* set initial clock polarity */ - if (spi->mode & SPI_CPOL) - sp->ioc_base |= AR71XX_SPI_IOC_CLK; - else - sp->ioc_base &= ~AR71XX_SPI_IOC_CLK; - - ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); - } - - if (gpio_is_valid(spi->cs_gpio)) { - /* SPI is normally active-low */ - gpio_set_value_cansleep(spi->cs_gpio, cs_high); - } else { - u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select); - - if (cs_high) - sp->ioc_base |= cs_bit; - else - sp->ioc_base &= ~cs_bit; - - ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); - } + if (cs_high) + sp->ioc_base |= cs_bit; + else + sp->ioc_base &= ~cs_bit; + ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); } static void ath79_spi_enable(struct ath79_spi *sp) @@ -103,6 +94,9 @@ static void ath79_spi_enable(struct ath79_spi *sp) sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL); sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC); + /* clear clk and mosi in the base state */ + sp->ioc_base &= ~(AR71XX_SPI_IOC_DO | AR71XX_SPI_IOC_CLK); + /* TODO: setup speed? 
*/ ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43); } @@ -115,66 +109,6 @@ static void ath79_spi_disable(struct ath79_spi *sp) ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0); } -static int ath79_spi_setup_cs(struct spi_device *spi) -{ - struct ath79_spi *sp = ath79_spidev_to_sp(spi); - int status; - - status = 0; - if (gpio_is_valid(spi->cs_gpio)) { - unsigned long flags; - - flags = GPIOF_DIR_OUT; - if (spi->mode & SPI_CS_HIGH) - flags |= GPIOF_INIT_LOW; - else - flags |= GPIOF_INIT_HIGH; - - status = gpio_request_one(spi->cs_gpio, flags, - dev_name(&spi->dev)); - } else { - u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select); - - if (spi->mode & SPI_CS_HIGH) - sp->ioc_base &= ~cs_bit; - else - sp->ioc_base |= cs_bit; - - ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); - } - - return status; -} - -static void ath79_spi_cleanup_cs(struct spi_device *spi) -{ - if (gpio_is_valid(spi->cs_gpio)) - gpio_free(spi->cs_gpio); -} - -static int ath79_spi_setup(struct spi_device *spi) -{ - int status = 0; - - if (!spi->controller_state) { - status = ath79_spi_setup_cs(spi); - if (status) - return status; - } - - status = spi_bitbang_setup(spi); - if (status && !spi->controller_state) - ath79_spi_cleanup_cs(spi); - - return status; -} - -static void ath79_spi_cleanup(struct spi_device *spi) -{ - ath79_spi_cleanup_cs(spi); - spi_bitbang_cleanup(spi); -} - static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned int nsecs, u32 word, u8 bits, unsigned flags) { @@ -225,9 +159,10 @@ static int ath79_spi_probe(struct platform_device *pdev) pdata = dev_get_platdata(&pdev->dev); + master->use_gpio_descriptors = true; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); - master->setup = ath79_spi_setup; - master->cleanup = ath79_spi_cleanup; + master->setup = spi_bitbang_setup; + master->cleanup = spi_bitbang_cleanup; if (pdata) { master->bus_num = pdata->bus_num; master->num_chipselect = pdata->num_chipselect; @@ -236,7 +171,6 @@ static int ath79_spi_probe(struct platform_device *pdev) sp->bitbang.master = master; sp->bitbang.chipselect = ath79_spi_chipselect; sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0; - sp->bitbang.setup_transfer = spi_bitbang_setup_transfer; sp->bitbang.flags = SPI_CS_HIGH; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 74fddcd3282b..4954f0ab1606 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -23,8 +23,7 @@ #include <linux/of.h> #include <linux/io.h> -#include <linux/gpio.h> -#include <linux/of_gpio.h> +#include <linux/gpio/consumer.h> #include <linux/pinctrl/consumer.h> #include <linux/pm_runtime.h> @@ -312,7 +311,7 @@ struct atmel_spi { /* Controller-specific per-slave state */ struct atmel_spi_device { - unsigned int npcs_pin; + struct gpio_desc *npcs_pin; u32 csr; }; @@ -355,7 +354,6 @@ static bool atmel_spi_is_v2(struct atmel_spi *as) static void cs_activate(struct atmel_spi *as, struct spi_device *spi) { struct atmel_spi_device *asd = spi->controller_state; - unsigned active = spi->mode & SPI_CS_HIGH; u32 mr; if (atmel_spi_is_v2(as)) { @@ -379,7 +377,7 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi) mr = spi_readl(as, MR); if (as->use_cs_gpios) - gpio_set_value(asd->npcs_pin, active); + gpiod_set_value(asd->npcs_pin, 1); } else { u32 cpol = (spi->mode & SPI_CPOL) ? 
SPI_BIT(CPOL) : 0; int i; @@ -396,19 +394,16 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi) mr = spi_readl(as, MR); mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr); if (as->use_cs_gpios && spi->chip_select != 0) - gpio_set_value(asd->npcs_pin, active); + gpiod_set_value(asd->npcs_pin, 1); spi_writel(as, MR, mr); } - dev_dbg(&spi->dev, "activate %u%s, mr %08x\n", - asd->npcs_pin, active ? " (high)" : "", - mr); + dev_dbg(&spi->dev, "activate NPCS, mr %08x\n", mr); } static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi) { struct atmel_spi_device *asd = spi->controller_state; - unsigned active = spi->mode & SPI_CS_HIGH; u32 mr; /* only deactivate *this* device; sometimes transfers to @@ -420,14 +415,12 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi) spi_writel(as, MR, mr); } - dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n", - asd->npcs_pin, active ? " (low)" : "", - mr); + dev_dbg(&spi->dev, "DEactivate NPCS, mr %08x\n", mr); if (!as->use_cs_gpios) spi_writel(as, CR, SPI_BIT(LASTXFER)); else if (atmel_spi_is_v2(as) || spi->chip_select != 0) - gpio_set_value(asd->npcs_pin, !active); + gpiod_set_value(asd->npcs_pin, 0); } static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock) @@ -1188,7 +1181,6 @@ static int atmel_spi_setup(struct spi_device *spi) struct atmel_spi_device *asd; u32 csr; unsigned int bits = spi->bits_per_word; - unsigned int npcs_pin; as = spi_master_get_devdata(spi->master); @@ -1209,21 +1201,14 @@ static int atmel_spi_setup(struct spi_device *spi) csr |= SPI_BIT(CSAAT); /* DLYBS is mostly irrelevant since we manage chipselect using GPIOs. - * - * DLYBCT would add delays between words, slowing down transfers. - * It could potentially be useful to cope with DMA bottlenecks, but - * in those cases it's probably best to just use a lower bitrate. */ csr |= SPI_BF(DLYBS, 0); - csr |= SPI_BF(DLYBCT, 0); - - /* chipselect must have been muxed as GPIO (e.g. in board setup) */ - npcs_pin = (unsigned long)spi->controller_data; - if (!as->use_cs_gpios) - npcs_pin = spi->chip_select; - else if (gpio_is_valid(spi->cs_gpio)) - npcs_pin = spi->cs_gpio; + /* DLYBCT adds delays between words. This is useful for slow devices + * that need a bit of time to setup the next transfer. + */ + csr |= SPI_BF(DLYBCT, + (as->spi_clk / 1000000 * spi->word_delay_usecs) >> 5); asd = spi->controller_state; if (!asd) { @@ -1231,11 +1216,21 @@ static int atmel_spi_setup(struct spi_device *spi) if (!asd) return -ENOMEM; - if (as->use_cs_gpios) - gpio_direction_output(npcs_pin, - !(spi->mode & SPI_CS_HIGH)); + /* + * If use_cs_gpios is true this means that we have "cs-gpios" + * defined in the device tree node so we should have + * gotten the GPIO lines from the device tree inside the + * SPI core. Warn if this is not the case but continue since + * CS GPIOs are after all optional. 
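+ *
+ * The descriptor itself is requested and configured by the SPI core
+ * (this controller sets use_gpio_descriptors in probe), so the driver
+ * only caches spi->cs_gpiod here and drives it with gpiod_set_value()
+ * from cs_activate()/cs_deactivate().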
+ */ + if (as->use_cs_gpios) { + if (!spi->cs_gpiod) { + dev_err(&spi->dev, + "host claims to use CS GPIOs but no CS found in DT by the SPI core\n"); + } + asd->npcs_pin = spi->cs_gpiod; + } - asd->npcs_pin = npcs_pin; spi->controller_state = asd; } @@ -1473,41 +1468,6 @@ static void atmel_get_caps(struct atmel_spi *as) as->caps.has_pdc_support = version < 0x212; } -/*-------------------------------------------------------------------------*/ -static int atmel_spi_gpio_cs(struct platform_device *pdev) -{ - struct spi_master *master = platform_get_drvdata(pdev); - struct atmel_spi *as = spi_master_get_devdata(master); - struct device_node *np = master->dev.of_node; - int i; - int ret = 0; - int nb = 0; - - if (!as->use_cs_gpios) - return 0; - - if (!np) - return 0; - - nb = of_gpio_named_count(np, "cs-gpios"); - for (i = 0; i < nb; i++) { - int cs_gpio = of_get_named_gpio(pdev->dev.of_node, - "cs-gpios", i); - - if (cs_gpio == -EPROBE_DEFER) - return cs_gpio; - - if (gpio_is_valid(cs_gpio)) { - ret = devm_gpio_request(&pdev->dev, cs_gpio, - dev_name(&pdev->dev)); - if (ret) - return ret; - } - } - - return 0; -} - static void atmel_spi_init(struct atmel_spi *as) { spi_writel(as, CR, SPI_BIT(SWRST)); @@ -1560,6 +1520,7 @@ static int atmel_spi_probe(struct platform_device *pdev) goto out_free; /* the spi->mode bits understood by this driver: */ + master->use_gpio_descriptors = true; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16); master->dev.of_node = pdev->dev.of_node; @@ -1592,6 +1553,11 @@ static int atmel_spi_probe(struct platform_device *pdev) atmel_get_caps(as); + /* + * If there are chip selects in the device tree, those will be + * discovered by the SPI core when registering the SPI master + * and assigned to each SPI device. + */ as->use_cs_gpios = true; if (atmel_spi_is_v2(as) && pdev->dev.of_node && @@ -1600,10 +1566,6 @@ static int atmel_spi_probe(struct platform_device *pdev) master->num_chipselect = 4; } - ret = atmel_spi_gpio_cs(pdev); - if (ret) - goto out_unmap_regs; - as->use_dma = false; as->use_pdc = false; if (as->caps.has_dma_support) { diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c index 671e374e1b01..f7e054848ca5 100644 --- a/drivers/spi/spi-bcm2835aux.c +++ b/drivers/spi/spi-bcm2835aux.c @@ -456,7 +456,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) } bs->clk = devm_clk_get(&pdev->dev, NULL); - if ((!bs->clk) || (IS_ERR(bs->clk))) { + if (IS_ERR(bs->clk)) { err = PTR_ERR(bs->clk); dev_err(&pdev->dev, "could not get clk: %d\n", err); goto out_master_put; diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index f29176000b8d..dd9a8c54a693 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c @@ -213,19 +213,6 @@ int spi_bitbang_setup(struct spi_device *spi) dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs); - /* NOTE we _need_ to call chipselect() early, ideally with adapter - * setup, unless the hardware defaults cooperate to avoid confusion - * between normal (active low) and inverted chipselects. 
- */ - - /* deselect chip (low or high) */ - mutex_lock(&bitbang->lock); - if (!bitbang->busy) { - bitbang->chipselect(spi, BITBANG_CS_INACTIVE); - ndelay(cs->nsecs); - } - mutex_unlock(&bitbang->lock); - return 0; } EXPORT_SYMBOL_GPL(spi_bitbang_setup); diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 7c88f74f7f47..43d0e79842ac 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -13,7 +13,7 @@ #include <linux/clk.h> #include <linux/delay.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> @@ -128,10 +128,6 @@ struct cdns_spi { u32 is_decoded_cs; }; -struct cdns_spi_device_data { - bool gpio_requested; -}; - /* Macros for the SPI controller read/write */ static inline u32 cdns_spi_read(struct cdns_spi *xspi, u32 offset) { @@ -176,16 +172,16 @@ static void cdns_spi_init_hw(struct cdns_spi *xspi) /** * cdns_spi_chipselect - Select or deselect the chip select line * @spi: Pointer to the spi_device structure - * @is_high: Select(0) or deselect (1) the chip select line + * @enable: Select (1) or deselect (0) the chip select line */ -static void cdns_spi_chipselect(struct spi_device *spi, bool is_high) +static void cdns_spi_chipselect(struct spi_device *spi, bool enable) { struct cdns_spi *xspi = spi_master_get_devdata(spi->master); u32 ctrl_reg; ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); - if (is_high) { + if (!enable) { /* Deselect the slave */ ctrl_reg |= CDNS_SPI_CR_SSCTRL; } else { @@ -469,64 +465,6 @@ static int cdns_unprepare_transfer_hardware(struct spi_master *master) return 0; } -static int cdns_spi_setup(struct spi_device *spi) -{ - - int ret = -EINVAL; - struct cdns_spi_device_data *cdns_spi_data = spi_get_ctldata(spi); - - /* this is a pin managed by the controller, leave it alone */ - if (spi->cs_gpio == -ENOENT) - return 0; - - /* this seems to be the first time we're here */ - if (!cdns_spi_data) { - cdns_spi_data = kzalloc(sizeof(*cdns_spi_data), GFP_KERNEL); - if (!cdns_spi_data) - return -ENOMEM; - cdns_spi_data->gpio_requested = false; - spi_set_ctldata(spi, cdns_spi_data); - } - - /* if we haven't done so, grab the gpio */ - if (!cdns_spi_data->gpio_requested && gpio_is_valid(spi->cs_gpio)) { - ret = gpio_request_one(spi->cs_gpio, - (spi->mode & SPI_CS_HIGH) ? - GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH, - dev_name(&spi->dev)); - if (ret) - dev_err(&spi->dev, "can't request chipselect gpio %d\n", - spi->cs_gpio); - else - cdns_spi_data->gpio_requested = true; - } else { - if (gpio_is_valid(spi->cs_gpio)) { - int mode = ((spi->mode & SPI_CS_HIGH) ? 
- GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH); - - ret = gpio_direction_output(spi->cs_gpio, mode); - if (ret) - dev_err(&spi->dev, "chipselect gpio %d setup failed (%d)\n", - spi->cs_gpio, ret); - } - } - - return ret; -} - -static void cdns_spi_cleanup(struct spi_device *spi) -{ - struct cdns_spi_device_data *cdns_spi_data = spi_get_ctldata(spi); - - if (cdns_spi_data) { - if (cdns_spi_data->gpio_requested) - gpio_free(spi->cs_gpio); - kfree(cdns_spi_data); - spi_set_ctldata(spi, NULL); - } - -} - /** * cdns_spi_probe - Probe method for the SPI driver * @pdev: Pointer to the platform_device structure @@ -584,11 +522,6 @@ static int cdns_spi_probe(struct platform_device *pdev) goto clk_dis_apb; } - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); - ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs); if (ret < 0) master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS; @@ -603,8 +536,10 @@ static int cdns_spi_probe(struct platform_device *pdev) /* SPI controller initializations */ cdns_spi_init_hw(xspi); - pm_runtime_mark_last_busy(&pdev->dev); - pm_runtime_put_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); irq = platform_get_irq(pdev, 0); if (irq <= 0) { @@ -621,13 +556,12 @@ static int cdns_spi_probe(struct platform_device *pdev) goto clk_dis_all; } + master->use_gpio_descriptors = true; master->prepare_transfer_hardware = cdns_prepare_transfer_hardware; master->prepare_message = cdns_prepare_message; master->transfer_one = cdns_transfer_one; master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware; master->set_cs = cdns_spi_chipselect; - master->setup = cdns_spi_setup; - master->cleanup = cdns_spi_cleanup; master->auto_runtime_pm = true; master->mode_bits = SPI_CPOL | SPI_CPHA; diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c index 18193df2eba8..8c03c409fc07 100644 --- a/drivers/spi/spi-clps711x.c +++ b/drivers/spi/spi-clps711x.c @@ -11,7 +11,7 @@ #include <linux/io.h> #include <linux/clk.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/platform_device.h> @@ -36,25 +36,6 @@ struct spi_clps711x_data { int len; }; -static int spi_clps711x_setup(struct spi_device *spi) -{ - if (!spi->controller_state) { - int ret; - - ret = devm_gpio_request(&spi->master->dev, spi->cs_gpio, - dev_name(&spi->master->dev)); - if (ret) - return ret; - - spi->controller_state = spi; - } - - /* We are expect that SPI-device is not selected */ - gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); - - return 0; -} - static int spi_clps711x_prepare_message(struct spi_master *master, struct spi_message *msg) { @@ -125,11 +106,11 @@ static int spi_clps711x_probe(struct platform_device *pdev) if (!master) return -ENOMEM; + master->use_gpio_descriptors = true; master->bus_num = -1; master->mode_bits = SPI_CPHA | SPI_CS_HIGH; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8); master->dev.of_node = pdev->dev.of_node; - master->setup = spi_clps711x_setup; master->prepare_message = spi_clps711x_prepare_message; master->transfer_one = spi_clps711x_transfer_one; diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 56adec83f8fc..eb246ebcfa3a 100644 --- 
a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -15,7 +15,7 @@ #include <linux/interrupt.h> #include <linux/io.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/platform_device.h> @@ -25,7 +25,6 @@ #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/of_gpio.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/slab.h> @@ -222,12 +221,17 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value) * Board specific chip select logic decides the polarity and cs * line for the controller */ - if (spi->cs_gpio >= 0) { + if (spi->cs_gpiod) { + /* + * FIXME: is this code ever executed? This host does not + * set SPI_MASTER_GPIO_SS so this chipselect callback should + * not get called from the SPI core when we are using + * GPIOs for chip select. + */ if (value == BITBANG_CS_ACTIVE) - gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH); + gpiod_set_value(spi->cs_gpiod, 1); else - gpio_set_value(spi->cs_gpio, - !(spi->mode & SPI_CS_HIGH)); + gpiod_set_value(spi->cs_gpiod, 0); } else { if (value == BITBANG_CS_ACTIVE) { if (!(spi->mode & SPI_CS_WORD)) @@ -418,30 +422,18 @@ static int davinci_spi_of_setup(struct spi_device *spi) */ static int davinci_spi_setup(struct spi_device *spi) { - int retval = 0; struct davinci_spi *dspi; - struct spi_master *master = spi->master; struct device_node *np = spi->dev.of_node; bool internal_cs = true; dspi = spi_master_get_devdata(spi->master); if (!(spi->mode & SPI_NO_CS)) { - if (np && (master->cs_gpios != NULL) && (spi->cs_gpio >= 0)) { - retval = gpio_direction_output( - spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); + if (np && spi->cs_gpiod) internal_cs = false; - } - - if (retval) { - dev_err(&spi->dev, "GPIO %d setup failed (%d)\n", - spi->cs_gpio, retval); - return retval; - } - if (internal_cs) { + if (internal_cs) set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select); - } } if (spi->mode & SPI_READY) @@ -962,6 +954,7 @@ static int davinci_spi_probe(struct platform_device *pdev) if (ret) goto free_master; + master->use_gpio_descriptors = true; master->dev.of_node = pdev->dev.of_node; master->bus_num = pdev->id; master->num_chipselect = pdata->num_chipselect; @@ -980,27 +973,6 @@ static int davinci_spi_probe(struct platform_device *pdev) if (dspi->version == SPI_VERSION_2) dspi->bitbang.flags |= SPI_READY; - if (pdev->dev.of_node) { - int i; - - for (i = 0; i < pdata->num_chipselect; i++) { - int cs_gpio = of_get_named_gpio(pdev->dev.of_node, - "cs-gpios", i); - - if (cs_gpio == -EPROBE_DEFER) { - ret = cs_gpio; - goto free_clk; - } - - if (gpio_is_valid(cs_gpio)) { - ret = devm_gpio_request(&pdev->dev, cs_gpio, - dev_name(&pdev->dev)); - if (ret) - goto free_clk; - } - } - } - dspi->bitbang.txrx_bufs = davinci_spi_bufs; ret = davinci_spi_request_dma(dspi); diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index d0dd7814e997..4bd59a93d988 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -18,7 +18,6 @@ #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/of_platform.h> #include <linux/acpi.h> #include <linux/property.h> @@ -185,27 +184,6 @@ static int dw_spi_mmio_probe(struct platform_device *pdev) dws->num_cs = num_cs; - if (pdev->dev.of_node) { - int i; - - for (i = 0; i < dws->num_cs; i++) { - int cs_gpio = of_get_named_gpio(pdev->dev.of_node, - "cs-gpios", i); - - if 
(cs_gpio == -EPROBE_DEFER) { - ret = cs_gpio; - goto out; - } - - if (gpio_is_valid(cs_gpio)) { - ret = devm_gpio_request(&pdev->dev, cs_gpio, - dev_name(&pdev->dev)); - if (ret) - goto out; - } - } - } - init_func = device_get_match_data(&pdev->dev); if (init_func) { ret = init_func(pdev, dwsmmio); diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 4c9deb434b3a..ac81025f86ab 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -20,7 +20,6 @@ #include <linux/delay.h> #include <linux/slab.h> #include <linux/spi/spi.h> -#include <linux/gpio.h> #include "spi-dw.h" @@ -138,11 +137,10 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable) struct dw_spi *dws = spi_controller_get_devdata(spi->controller); struct chip_data *chip = spi_get_ctldata(spi); - /* Chip select logic is inverted from spi_set_cs() */ if (chip && chip->cs_control) - chip->cs_control(!enable); + chip->cs_control(enable); - if (!enable) + if (enable) dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select)); else if (dws->cs_override) dw_writel(dws, DW_SPI_SER, 0); @@ -317,7 +315,8 @@ static int dw_spi_transfer_one(struct spi_controller *master, /* Default SPI mode is SCPOL = 0, SCPH = 0 */ cr0 = (transfer->bits_per_word - 1) | (chip->type << SPI_FRF_OFFSET) - | (spi->mode << SPI_MODE_OFFSET) + | ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) | + (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET)) | (chip->tmode << SPI_TMOD_OFFSET); /* @@ -397,7 +396,6 @@ static int dw_spi_setup(struct spi_device *spi) { struct dw_spi_chip *chip_info = NULL; struct chip_data *chip; - int ret; /* Only alloc on first setup */ chip = spi_get_ctldata(spi); @@ -425,13 +423,6 @@ static int dw_spi_setup(struct spi_device *spi) chip->tmode = SPI_TMOD_TR; - if (gpio_is_valid(spi->cs_gpio)) { - ret = gpio_direction_output(spi->cs_gpio, - !(spi->mode & SPI_CS_HIGH)); - if (ret) - return ret; - } - return 0; } @@ -496,6 +487,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) goto err_free_master; } + master->use_gpio_descriptors = true; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); master->bus_num = dws->bus_num; diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 7b605f95dbef..53335ccc98f6 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -233,6 +233,9 @@ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi) { u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi); + if (spi_controller_is_slave(dspi->master)) + return data; + if (dspi->len > 0) cmd |= SPI_PUSHR_CMD_CONT; return cmd << 16 | data; @@ -329,6 +332,11 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi) dma_async_issue_pending(dma->chan_rx); dma_async_issue_pending(dma->chan_tx); + if (spi_controller_is_slave(dspi->master)) { + wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete); + return 0; + } + time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete, DMA_COMPLETION_TIMEOUT); if (time_left == 0) { @@ -798,14 +806,18 @@ static int dspi_setup(struct spi_device *spi) ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate); chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0) - | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0) - | SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0) - | SPI_CTAR_PCSSCK(pcssck) - | SPI_CTAR_CSSCK(cssck) - | SPI_CTAR_PASC(pasc) - | SPI_CTAR_ASC(asc) - | SPI_CTAR_PBR(pbr) - | SPI_CTAR_BR(br); + | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 
1 : 0); + + if (!spi_controller_is_slave(dspi->master)) { + chip->ctar_val |= SPI_CTAR_LSBFE(spi->mode & + SPI_LSB_FIRST ? 1 : 0) + | SPI_CTAR_PCSSCK(pcssck) + | SPI_CTAR_CSSCK(cssck) + | SPI_CTAR_PASC(pasc) + | SPI_CTAR_ASC(asc) + | SPI_CTAR_PBR(pbr) + | SPI_CTAR_BR(br); + } spi_set_ctldata(spi, chip); @@ -970,8 +982,13 @@ static const struct regmap_config dspi_xspi_regmap_config[] = { static void dspi_init(struct fsl_dspi *dspi) { - regmap_write(dspi->regmap, SPI_MCR, SPI_MCR_MASTER | SPI_MCR_PCSIS | - (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0)); + unsigned int mcr = SPI_MCR_PCSIS | + (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0); + + if (!spi_controller_is_slave(dspi->master)) + mcr |= SPI_MCR_MASTER; + + regmap_write(dspi->regmap, SPI_MCR, mcr); regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); if (dspi->devtype_data->xspi_mode) regmap_write(dspi->regmap, SPI_CTARE(0), @@ -1027,6 +1044,9 @@ static int dspi_probe(struct platform_device *pdev) } master->bus_num = bus_num; + if (of_property_read_bool(np, "spi-slave")) + master->slave = true; + dspi->devtype_data = of_device_get_match_data(&pdev->dev); if (!dspi->devtype_data) { dev_err(&pdev->dev, "can't get devtype_data\n"); diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c new file mode 100644 index 000000000000..6a713f78a62e --- /dev/null +++ b/drivers/spi/spi-fsl-qspi.c @@ -0,0 +1,966 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Freescale QuadSPI driver. + * + * Copyright (C) 2013 Freescale Semiconductor, Inc. + * Copyright (C) 2018 Bootlin + * Copyright (C) 2018 exceet electronics GmbH + * Copyright (C) 2018 Kontron Electronics GmbH + * + * Transition to SPI MEM interface: + * Authors: + * Boris Brezillon <bbrezillon@kernel.org> + * Frieder Schrempf <frieder.schrempf@kontron.de> + * Yogesh Gaur <yogeshnarayan.gaur@nxp.com> + * Suresh Gupta <suresh.gupta@nxp.com> + * + * Based on the original fsl-quadspi.c spi-nor driver: + * Author: Freescale Semiconductor, Inc. + * + */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_qos.h> +#include <linux/sizes.h> + +#include <linux/spi/spi.h> +#include <linux/spi/spi-mem.h> + +/* + * The driver only uses one single LUT entry, that is updated on + * each call of exec_op(). Index 0 is preset at boot with a basic + * read operation, so let's use the last entry (15). 
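+ *
+ * Each LUT sequence is made of four 32-bit LUT registers, so entry 15
+ * starts at QUADSPI_LUT_BASE + 15 * 4 * 4 (see QUADSPI_LUT_OFFSET and
+ * QUADSPI_LUT_REG() below).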
+ */ +#define SEQID_LUT 15 + +/* Registers used by the driver */ +#define QUADSPI_MCR 0x00 +#define QUADSPI_MCR_RESERVED_MASK GENMASK(19, 16) +#define QUADSPI_MCR_MDIS_MASK BIT(14) +#define QUADSPI_MCR_CLR_TXF_MASK BIT(11) +#define QUADSPI_MCR_CLR_RXF_MASK BIT(10) +#define QUADSPI_MCR_DDR_EN_MASK BIT(7) +#define QUADSPI_MCR_END_CFG_MASK GENMASK(3, 2) +#define QUADSPI_MCR_SWRSTHD_MASK BIT(1) +#define QUADSPI_MCR_SWRSTSD_MASK BIT(0) + +#define QUADSPI_IPCR 0x08 +#define QUADSPI_IPCR_SEQID(x) ((x) << 24) + +#define QUADSPI_BUF3CR 0x1c +#define QUADSPI_BUF3CR_ALLMST_MASK BIT(31) +#define QUADSPI_BUF3CR_ADATSZ(x) ((x) << 8) +#define QUADSPI_BUF3CR_ADATSZ_MASK GENMASK(15, 8) + +#define QUADSPI_BFGENCR 0x20 +#define QUADSPI_BFGENCR_SEQID(x) ((x) << 12) + +#define QUADSPI_BUF0IND 0x30 +#define QUADSPI_BUF1IND 0x34 +#define QUADSPI_BUF2IND 0x38 +#define QUADSPI_SFAR 0x100 + +#define QUADSPI_SMPR 0x108 +#define QUADSPI_SMPR_DDRSMP_MASK GENMASK(18, 16) +#define QUADSPI_SMPR_FSDLY_MASK BIT(6) +#define QUADSPI_SMPR_FSPHS_MASK BIT(5) +#define QUADSPI_SMPR_HSENA_MASK BIT(0) + +#define QUADSPI_RBCT 0x110 +#define QUADSPI_RBCT_WMRK_MASK GENMASK(4, 0) +#define QUADSPI_RBCT_RXBRD_USEIPS BIT(8) + +#define QUADSPI_TBDR 0x154 + +#define QUADSPI_SR 0x15c +#define QUADSPI_SR_IP_ACC_MASK BIT(1) +#define QUADSPI_SR_AHB_ACC_MASK BIT(2) + +#define QUADSPI_FR 0x160 +#define QUADSPI_FR_TFF_MASK BIT(0) + +#define QUADSPI_SPTRCLR 0x16c +#define QUADSPI_SPTRCLR_IPPTRC BIT(8) +#define QUADSPI_SPTRCLR_BFPTRC BIT(0) + +#define QUADSPI_SFA1AD 0x180 +#define QUADSPI_SFA2AD 0x184 +#define QUADSPI_SFB1AD 0x188 +#define QUADSPI_SFB2AD 0x18c +#define QUADSPI_RBDR(x) (0x200 + ((x) * 4)) + +#define QUADSPI_LUTKEY 0x300 +#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0 + +#define QUADSPI_LCKCR 0x304 +#define QUADSPI_LCKER_LOCK BIT(0) +#define QUADSPI_LCKER_UNLOCK BIT(1) + +#define QUADSPI_RSER 0x164 +#define QUADSPI_RSER_TFIE BIT(0) + +#define QUADSPI_LUT_BASE 0x310 +#define QUADSPI_LUT_OFFSET (SEQID_LUT * 4 * 4) +#define QUADSPI_LUT_REG(idx) \ + (QUADSPI_LUT_BASE + QUADSPI_LUT_OFFSET + (idx) * 4) + +/* Instruction set for the LUT register */ +#define LUT_STOP 0 +#define LUT_CMD 1 +#define LUT_ADDR 2 +#define LUT_DUMMY 3 +#define LUT_MODE 4 +#define LUT_MODE2 5 +#define LUT_MODE4 6 +#define LUT_FSL_READ 7 +#define LUT_FSL_WRITE 8 +#define LUT_JMP_ON_CS 9 +#define LUT_ADDR_DDR 10 +#define LUT_MODE_DDR 11 +#define LUT_MODE2_DDR 12 +#define LUT_MODE4_DDR 13 +#define LUT_FSL_READ_DDR 14 +#define LUT_FSL_WRITE_DDR 15 +#define LUT_DATA_LEARN 16 + +/* + * The PAD definitions for LUT register. + * + * The pad stands for the number of IO lines [0:3]. + * For example, the quad read needs four IO lines, + * so you should use LUT_PAD(4). + */ +#define LUT_PAD(x) (fls(x) - 1) + +/* + * Macro for constructing the LUT entries with the following + * register layout: + * + * --------------------------------------------------- + * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 | + * --------------------------------------------------- + */ +#define LUT_DEF(idx, ins, pad, opr) \ + ((((ins) << 10) | ((pad) << 8) | (opr)) << (((idx) % 2) * 16)) + +/* Controller needs driver to swap endianness */ +#define QUADSPI_QUIRK_SWAP_ENDIAN BIT(0) + +/* Controller needs 4x internal clock */ +#define QUADSPI_QUIRK_4X_INT_CLK BIT(1) + +/* + * TKT253890, the controller needs the driver to fill the txfifo with + * 16 bytes at least to trigger a data transfer, even though the extra + * data won't be transferred. 
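+ *
+ * The workaround lives in fsl_qspi_fill_txfifo(), which pads the TX
+ * FIFO with zero words up to 16 bytes when this quirk is set.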
+ */ +#define QUADSPI_QUIRK_TKT253890 BIT(2) + +/* TKT245618, the controller cannot wake up from wait mode */ +#define QUADSPI_QUIRK_TKT245618 BIT(3) + +/* + * Controller adds QSPI_AMBA_BASE (base address of the mapped memory) + * internally. No need to add it when setting SFXXAD and SFAR registers + */ +#define QUADSPI_QUIRK_BASE_INTERNAL BIT(4) + +struct fsl_qspi_devtype_data { + unsigned int rxfifo; + unsigned int txfifo; + unsigned int ahb_buf_size; + unsigned int quirks; + bool little_endian; +}; + +static const struct fsl_qspi_devtype_data vybrid_data = { + .rxfifo = SZ_128, + .txfifo = SZ_64, + .ahb_buf_size = SZ_1K, + .quirks = QUADSPI_QUIRK_SWAP_ENDIAN, + .little_endian = true, +}; + +static const struct fsl_qspi_devtype_data imx6sx_data = { + .rxfifo = SZ_128, + .txfifo = SZ_512, + .ahb_buf_size = SZ_1K, + .quirks = QUADSPI_QUIRK_4X_INT_CLK | QUADSPI_QUIRK_TKT245618, + .little_endian = true, +}; + +static const struct fsl_qspi_devtype_data imx7d_data = { + .rxfifo = SZ_512, + .txfifo = SZ_512, + .ahb_buf_size = SZ_1K, + .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK, + .little_endian = true, +}; + +static const struct fsl_qspi_devtype_data imx6ul_data = { + .rxfifo = SZ_128, + .txfifo = SZ_512, + .ahb_buf_size = SZ_1K, + .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK, + .little_endian = true, +}; + +static const struct fsl_qspi_devtype_data ls1021a_data = { + .rxfifo = SZ_128, + .txfifo = SZ_64, + .ahb_buf_size = SZ_1K, + .quirks = 0, + .little_endian = false, +}; + +static const struct fsl_qspi_devtype_data ls2080a_data = { + .rxfifo = SZ_128, + .txfifo = SZ_64, + .ahb_buf_size = SZ_1K, + .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_BASE_INTERNAL, + .little_endian = true, +}; + +struct fsl_qspi { + void __iomem *iobase; + void __iomem *ahb_addr; + u32 memmap_phy; + struct clk *clk, *clk_en; + struct device *dev; + struct completion c; + const struct fsl_qspi_devtype_data *devtype_data; + struct mutex lock; + struct pm_qos_request pm_qos_req; + int selected; +}; + +static inline int needs_swap_endian(struct fsl_qspi *q) +{ + return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN; +} + +static inline int needs_4x_clock(struct fsl_qspi *q) +{ + return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK; +} + +static inline int needs_fill_txfifo(struct fsl_qspi *q) +{ + return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890; +} + +static inline int needs_wakeup_wait_mode(struct fsl_qspi *q) +{ + return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618; +} + +static inline int needs_amba_base_offset(struct fsl_qspi *q) +{ + return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL); +} + +/* + * An IC bug makes it necessary to rearrange the 32-bit data. + * Later chips, such as IMX6SLX, have fixed this bug. + */ +static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a) +{ + return needs_swap_endian(q) ? __swab32(a) : a; +} + +/* + * R/W functions for big- or little-endian registers: + * The QSPI controller's endianness is independent of + * the CPU core's endianness. So far, although the CPU + * core is little-endian the QSPI controller can use + * big-endian or little-endian. 
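+ *
+ * The per-SoC devtype_data->little_endian flag selects between the
+ * ioread32()/iowrite32() and ioread32be()/iowrite32be() accessors in
+ * qspi_readl()/qspi_writel() below.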
+ */ +static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr) +{ + if (q->devtype_data->little_endian) + iowrite32(val, addr); + else + iowrite32be(val, addr); +} + +static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr) +{ + if (q->devtype_data->little_endian) + return ioread32(addr); + + return ioread32be(addr); +} + +static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id) +{ + struct fsl_qspi *q = dev_id; + u32 reg; + + /* clear interrupt */ + reg = qspi_readl(q, q->iobase + QUADSPI_FR); + qspi_writel(q, reg, q->iobase + QUADSPI_FR); + + if (reg & QUADSPI_FR_TFF_MASK) + complete(&q->c); + + dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", 0, reg); + return IRQ_HANDLED; +} + +static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width) +{ + switch (width) { + case 1: + case 2: + case 4: + return 0; + } + + return -ENOTSUPP; +} + +static bool fsl_qspi_supports_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master); + int ret; + + ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth); + + if (op->addr.nbytes) + ret |= fsl_qspi_check_buswidth(q, op->addr.buswidth); + + if (op->dummy.nbytes) + ret |= fsl_qspi_check_buswidth(q, op->dummy.buswidth); + + if (op->data.nbytes) + ret |= fsl_qspi_check_buswidth(q, op->data.buswidth); + + if (ret) + return false; + + /* + * The number of instructions needed for the op, needs + * to fit into a single LUT entry. + */ + if (op->addr.nbytes + + (op->dummy.nbytes ? 1:0) + + (op->data.nbytes ? 1:0) > 6) + return false; + + /* Max 64 dummy clock cycles supported */ + if (op->dummy.nbytes && + (op->dummy.nbytes * 8 / op->dummy.buswidth > 64)) + return false; + + /* Max data length, check controller limits and alignment */ + if (op->data.dir == SPI_MEM_DATA_IN && + (op->data.nbytes > q->devtype_data->ahb_buf_size || + (op->data.nbytes > q->devtype_data->rxfifo - 4 && + !IS_ALIGNED(op->data.nbytes, 8)))) + return false; + + if (op->data.dir == SPI_MEM_DATA_OUT && + op->data.nbytes > q->devtype_data->txfifo) + return false; + + return true; +} + +static void fsl_qspi_prepare_lut(struct fsl_qspi *q, + const struct spi_mem_op *op) +{ + void __iomem *base = q->iobase; + u32 lutval[4] = {}; + int lutidx = 1, i; + + lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth), + op->cmd.opcode); + + /* + * For some unknown reason, using LUT_ADDR doesn't work in some + * cases (at least with only one byte long addresses), so + * let's use LUT_MODE to write the address bytes one by one + */ + for (i = 0; i < op->addr.nbytes; i++) { + u8 addrbyte = op->addr.val >> (8 * (op->addr.nbytes - i - 1)); + + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_MODE, + LUT_PAD(op->addr.buswidth), + addrbyte); + lutidx++; + } + + if (op->dummy.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY, + LUT_PAD(op->dummy.buswidth), + op->dummy.nbytes * 8 / + op->dummy.buswidth); + lutidx++; + } + + if (op->data.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, + op->data.dir == SPI_MEM_DATA_IN ? 
+ LUT_FSL_READ : LUT_FSL_WRITE, + LUT_PAD(op->data.buswidth), + 0); + lutidx++; + } + + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0); + + /* unlock LUT */ + qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); + qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR); + + /* fill LUT */ + for (i = 0; i < ARRAY_SIZE(lutval); i++) + qspi_writel(q, lutval[i], base + QUADSPI_LUT_REG(i)); + + /* lock LUT */ + qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); + qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR); +} + +static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q) +{ + int ret; + + ret = clk_prepare_enable(q->clk_en); + if (ret) + return ret; + + ret = clk_prepare_enable(q->clk); + if (ret) { + clk_disable_unprepare(q->clk_en); + return ret; + } + + if (needs_wakeup_wait_mode(q)) + pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0); + + return 0; +} + +static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q) +{ + if (needs_wakeup_wait_mode(q)) + pm_qos_remove_request(&q->pm_qos_req); + + clk_disable_unprepare(q->clk); + clk_disable_unprepare(q->clk_en); +} + +/* + * If we have changed the content of the flash by writing or erasing, or if we + * read from flash with a different offset into the page buffer, we need to + * invalidate the AHB buffer. If we do not do so, we may read out the wrong + * data. The spec tells us reset the AHB domain and Serial Flash domain at + * the same time. + */ +static void fsl_qspi_invalidate(struct fsl_qspi *q) +{ + u32 reg; + + reg = qspi_readl(q, q->iobase + QUADSPI_MCR); + reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK; + qspi_writel(q, reg, q->iobase + QUADSPI_MCR); + + /* + * The minimum delay : 1 AHB + 2 SFCK clocks. + * Delay 1 us is enough. 
+ */ + udelay(1); + + reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK); + qspi_writel(q, reg, q->iobase + QUADSPI_MCR); +} + +static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi) +{ + unsigned long rate = spi->max_speed_hz; + int ret; + + if (q->selected == spi->chip_select) + return; + + if (needs_4x_clock(q)) + rate *= 4; + + fsl_qspi_clk_disable_unprep(q); + + ret = clk_set_rate(q->clk, rate); + if (ret) + return; + + ret = fsl_qspi_clk_prep_enable(q); + if (ret) + return; + + q->selected = spi->chip_select; + + fsl_qspi_invalidate(q); +} + +static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op) +{ + memcpy_fromio(op->data.buf.in, + q->ahb_addr + q->selected * q->devtype_data->ahb_buf_size, + op->data.nbytes); +} + +static void fsl_qspi_fill_txfifo(struct fsl_qspi *q, + const struct spi_mem_op *op) +{ + void __iomem *base = q->iobase; + int i; + u32 val; + + for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) { + memcpy(&val, op->data.buf.out + i, 4); + val = fsl_qspi_endian_xchg(q, val); + qspi_writel(q, val, base + QUADSPI_TBDR); + } + + if (i < op->data.nbytes) { + memcpy(&val, op->data.buf.out + i, op->data.nbytes - i); + val = fsl_qspi_endian_xchg(q, val); + qspi_writel(q, val, base + QUADSPI_TBDR); + } + + if (needs_fill_txfifo(q)) { + for (i = op->data.nbytes; i < 16; i += 4) + qspi_writel(q, 0, base + QUADSPI_TBDR); + } +} + +static void fsl_qspi_read_rxfifo(struct fsl_qspi *q, + const struct spi_mem_op *op) +{ + void __iomem *base = q->iobase; + int i; + u8 *buf = op->data.buf.in; + u32 val; + + for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) { + val = qspi_readl(q, base + QUADSPI_RBDR(i / 4)); + val = fsl_qspi_endian_xchg(q, val); + memcpy(buf + i, &val, 4); + } + + if (i < op->data.nbytes) { + val = qspi_readl(q, base + QUADSPI_RBDR(i / 4)); + val = fsl_qspi_endian_xchg(q, val); + memcpy(buf + i, &val, op->data.nbytes - i); + } +} + +static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op) +{ + void __iomem *base = q->iobase; + int err = 0; + + init_completion(&q->c); + + /* + * Always start the sequence at the same index since we update + * the LUT at each exec_op() call. And also specify the DATA + * length, since it's has not been specified in the LUT. + */ + qspi_writel(q, op->data.nbytes | QUADSPI_IPCR_SEQID(SEQID_LUT), + base + QUADSPI_IPCR); + + /* Wait for the interrupt. 
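+ * The transaction-finished flag (QUADSPI_FR_TFF_MASK) raises this
+ * interrupt, and fsl_qspi_irq_handler() then completes &q->c.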
*/ + if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) + err = -ETIMEDOUT; + + if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN) + fsl_qspi_read_rxfifo(q, op); + + return err; +} + +static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base, + u32 mask, u32 delay_us, u32 timeout_us) +{ + u32 reg; + + if (!q->devtype_data->little_endian) + mask = (u32)cpu_to_be32(mask); + + return readl_poll_timeout(base, reg, !(reg & mask), delay_us, + timeout_us); +} + +static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master); + void __iomem *base = q->iobase; + u32 addr_offset = 0; + int err = 0; + + mutex_lock(&q->lock); + + /* wait for the controller being ready */ + fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK | + QUADSPI_SR_AHB_ACC_MASK), 10, 1000); + + fsl_qspi_select_mem(q, mem->spi); + + if (needs_amba_base_offset(q)) + addr_offset = q->memmap_phy; + + qspi_writel(q, + q->selected * q->devtype_data->ahb_buf_size + addr_offset, + base + QUADSPI_SFAR); + + qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) | + QUADSPI_MCR_CLR_RXF_MASK | QUADSPI_MCR_CLR_TXF_MASK, + base + QUADSPI_MCR); + + qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC, + base + QUADSPI_SPTRCLR); + + fsl_qspi_prepare_lut(q, op); + + /* + * If we have large chunks of data, we read them through the AHB bus + * by accessing the mapped memory. In all other cases we use + * IP commands to access the flash. + */ + if (op->data.nbytes > (q->devtype_data->rxfifo - 4) && + op->data.dir == SPI_MEM_DATA_IN) { + fsl_qspi_read_ahb(q, op); + } else { + qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | + QUADSPI_RBCT_RXBRD_USEIPS, base + QUADSPI_RBCT); + + if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) + fsl_qspi_fill_txfifo(q, op); + + err = fsl_qspi_do_op(q, op); + } + + /* Invalidate the data in the AHB buffer. */ + fsl_qspi_invalidate(q); + + mutex_unlock(&q->lock); + + return err; +} + +static int fsl_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) +{ + struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master); + + if (op->data.dir == SPI_MEM_DATA_OUT) { + if (op->data.nbytes > q->devtype_data->txfifo) + op->data.nbytes = q->devtype_data->txfifo; + } else { + if (op->data.nbytes > q->devtype_data->ahb_buf_size) + op->data.nbytes = q->devtype_data->ahb_buf_size; + else if (op->data.nbytes > (q->devtype_data->rxfifo - 4)) + op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8); + } + + return 0; +} + +static int fsl_qspi_default_setup(struct fsl_qspi *q) +{ + void __iomem *base = q->iobase; + u32 reg, addr_offset = 0; + int ret; + + /* disable and unprepare clock to avoid glitch pass to controller */ + fsl_qspi_clk_disable_unprep(q); + + /* the default frequency, we will change it later if necessary. 
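+ * fsl_qspi_select_mem() re-programs the rate from spi->max_speed_hz
+ * whenever a different chip select is accessed.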
*/ + ret = clk_set_rate(q->clk, 66000000); + if (ret) + return ret; + + ret = fsl_qspi_clk_prep_enable(q); + if (ret) + return ret; + + /* Reset the module */ + qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK, + base + QUADSPI_MCR); + udelay(1); + + /* Disable the module */ + qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK, + base + QUADSPI_MCR); + + reg = qspi_readl(q, base + QUADSPI_SMPR); + qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK + | QUADSPI_SMPR_FSPHS_MASK + | QUADSPI_SMPR_HSENA_MASK + | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR); + + /* We only use the buffer3 for AHB read */ + qspi_writel(q, 0, base + QUADSPI_BUF0IND); + qspi_writel(q, 0, base + QUADSPI_BUF1IND); + qspi_writel(q, 0, base + QUADSPI_BUF2IND); + + qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT), + q->iobase + QUADSPI_BFGENCR); + qspi_writel(q, QUADSPI_RBCT_WMRK_MASK, base + QUADSPI_RBCT); + qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK | + QUADSPI_BUF3CR_ADATSZ(q->devtype_data->ahb_buf_size / 8), + base + QUADSPI_BUF3CR); + + if (needs_amba_base_offset(q)) + addr_offset = q->memmap_phy; + + /* + * In HW there can be a maximum of four chips on two buses with + * two chip selects on each bus. We use four chip selects in SW + * to differentiate between the four chips. + * We use ahb_buf_size for each chip and set SFA1AD, SFA2AD, SFB1AD, + * SFB2AD accordingly. + */ + qspi_writel(q, q->devtype_data->ahb_buf_size + addr_offset, + base + QUADSPI_SFA1AD); + qspi_writel(q, q->devtype_data->ahb_buf_size * 2 + addr_offset, + base + QUADSPI_SFA2AD); + qspi_writel(q, q->devtype_data->ahb_buf_size * 3 + addr_offset, + base + QUADSPI_SFB1AD); + qspi_writel(q, q->devtype_data->ahb_buf_size * 4 + addr_offset, + base + QUADSPI_SFB2AD); + + q->selected = -1; + + /* Enable the module */ + qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK, + base + QUADSPI_MCR); + + /* clear all interrupt status */ + qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR); + + /* enable the interrupt */ + qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER); + + return 0; +} + +static const char *fsl_qspi_get_name(struct spi_mem *mem) +{ + struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master); + struct device *dev = &mem->spi->dev; + const char *name; + + /* + * In order to keep mtdparts compatible with the old MTD driver at + * mtd/spi-nor/fsl-quadspi.c, we set a custom name derived from the + * platform_device of the controller. 
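+ *
+ * With a single flash child node the controller name is used as-is;
+ * with several, the chip select is appended ("%s-%d" below).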
+ */ + if (of_get_available_child_count(q->dev->of_node) == 1) + return dev_name(q->dev); + + name = devm_kasprintf(dev, GFP_KERNEL, + "%s-%d", dev_name(q->dev), + mem->spi->chip_select); + + if (!name) { + dev_err(dev, "failed to get memory for custom flash name\n"); + return ERR_PTR(-ENOMEM); + } + + return name; +} + +static const struct spi_controller_mem_ops fsl_qspi_mem_ops = { + .adjust_op_size = fsl_qspi_adjust_op_size, + .supports_op = fsl_qspi_supports_op, + .exec_op = fsl_qspi_exec_op, + .get_name = fsl_qspi_get_name, +}; + +static int fsl_qspi_probe(struct platform_device *pdev) +{ + struct spi_controller *ctlr; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct resource *res; + struct fsl_qspi *q; + int ret; + + ctlr = spi_alloc_master(&pdev->dev, sizeof(*q)); + if (!ctlr) + return -ENOMEM; + + ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | + SPI_TX_DUAL | SPI_TX_QUAD; + + q = spi_controller_get_devdata(ctlr); + q->dev = dev; + q->devtype_data = of_device_get_match_data(dev); + if (!q->devtype_data) { + ret = -ENODEV; + goto err_put_ctrl; + } + + platform_set_drvdata(pdev, q); + + /* find the resources */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI"); + q->iobase = devm_ioremap_resource(dev, res); + if (IS_ERR(q->iobase)) { + ret = PTR_ERR(q->iobase); + goto err_put_ctrl; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "QuadSPI-memory"); + q->ahb_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(q->ahb_addr)) { + ret = PTR_ERR(q->ahb_addr); + goto err_put_ctrl; + } + + q->memmap_phy = res->start; + + /* find the clocks */ + q->clk_en = devm_clk_get(dev, "qspi_en"); + if (IS_ERR(q->clk_en)) { + ret = PTR_ERR(q->clk_en); + goto err_put_ctrl; + } + + q->clk = devm_clk_get(dev, "qspi"); + if (IS_ERR(q->clk)) { + ret = PTR_ERR(q->clk); + goto err_put_ctrl; + } + + ret = fsl_qspi_clk_prep_enable(q); + if (ret) { + dev_err(dev, "can not enable the clock\n"); + goto err_put_ctrl; + } + + /* find the irq */ + ret = platform_get_irq(pdev, 0); + if (ret < 0) { + dev_err(dev, "failed to get the irq: %d\n", ret); + goto err_disable_clk; + } + + ret = devm_request_irq(dev, ret, + fsl_qspi_irq_handler, 0, pdev->name, q); + if (ret) { + dev_err(dev, "failed to request irq: %d\n", ret); + goto err_disable_clk; + } + + mutex_init(&q->lock); + + ctlr->bus_num = -1; + ctlr->num_chipselect = 4; + ctlr->mem_ops = &fsl_qspi_mem_ops; + + fsl_qspi_default_setup(q); + + ctlr->dev.of_node = np; + + ret = spi_register_controller(ctlr); + if (ret) + goto err_destroy_mutex; + + return 0; + +err_destroy_mutex: + mutex_destroy(&q->lock); + +err_disable_clk: + fsl_qspi_clk_disable_unprep(q); + +err_put_ctrl: + spi_controller_put(ctlr); + + dev_err(dev, "Freescale QuadSPI probe failed\n"); + return ret; +} + +static int fsl_qspi_remove(struct platform_device *pdev) +{ + struct fsl_qspi *q = platform_get_drvdata(pdev); + + /* disable the hardware */ + qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); + qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER); + + fsl_qspi_clk_disable_unprep(q); + + mutex_destroy(&q->lock); + + return 0; +} + +static int fsl_qspi_suspend(struct device *dev) +{ + return 0; +} + +static int fsl_qspi_resume(struct device *dev) +{ + struct fsl_qspi *q = dev_get_drvdata(dev); + + fsl_qspi_default_setup(q); + + return 0; +} + +static const struct of_device_id fsl_qspi_dt_ids[] = { + { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, }, + { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, }, + 
{ .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, }, + { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, }, + { .compatible = "fsl,ls1021a-qspi", .data = &ls1021a_data, }, + { .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids); + +static const struct dev_pm_ops fsl_qspi_pm_ops = { + .suspend = fsl_qspi_suspend, + .resume = fsl_qspi_resume, +}; + +static struct platform_driver fsl_qspi_driver = { + .driver = { + .name = "fsl-quadspi", + .of_match_table = fsl_qspi_dt_ids, + .pm = &fsl_qspi_pm_ops, + }, + .probe = fsl_qspi_probe, + .remove = fsl_qspi_remove, +}; +module_platform_driver(fsl_qspi_driver); + +MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver"); +MODULE_AUTHOR("Freescale Semiconductor Inc."); +MODULE_AUTHOR("Boris Brezillon <bbrezillon@kernel.org>"); +MODULE_AUTHOR("Frieder Schrempf <frieder.schrempf@kontron.de>"); +MODULE_AUTHOR("Yogesh Gaur <yogeshnarayan.gaur@nxp.com>"); +MODULE_AUTHOR("Suresh Gupta <suresh.gupta@nxp.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index fdb7cb88fb56..5f0b0d5bfef4 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -89,9 +89,6 @@ struct spi_geni_master { int irq; }; -static void handle_fifo_timeout(struct spi_master *spi, - struct spi_message *msg); - static int get_spi_clk_cfg(unsigned int speed_hz, struct spi_geni_master *mas, unsigned int *clk_idx, @@ -122,6 +119,32 @@ static int get_spi_clk_cfg(unsigned int speed_hz, return ret; } +static void handle_fifo_timeout(struct spi_master *spi, + struct spi_message *msg) +{ + struct spi_geni_master *mas = spi_master_get_devdata(spi); + unsigned long time_left, flags; + struct geni_se *se = &mas->se; + + spin_lock_irqsave(&mas->lock, flags); + reinit_completion(&mas->xfer_done); + mas->cur_mcmd = CMD_CANCEL; + geni_se_cancel_m_cmd(se); + writel(0, se->base + SE_GENI_TX_WATERMARK_REG); + spin_unlock_irqrestore(&mas->lock, flags); + time_left = wait_for_completion_timeout(&mas->xfer_done, HZ); + if (time_left) + return; + + spin_lock_irqsave(&mas->lock, flags); + reinit_completion(&mas->xfer_done); + geni_se_abort_m_cmd(se); + spin_unlock_irqrestore(&mas->lock, flags); + time_left = wait_for_completion_timeout(&mas->xfer_done, HZ); + if (!time_left) + dev_err(mas->dev, "Failed to cancel/abort m_cmd\n"); +} + static void spi_geni_set_cs(struct spi_device *slv, bool set_flag) { struct spi_geni_master *mas = spi_master_get_devdata(slv->master); @@ -233,7 +256,6 @@ static int spi_geni_prepare_message(struct spi_master *spi, struct geni_se *se = &mas->se; geni_se_select_mode(se, GENI_SE_FIFO); - reinit_completion(&mas->xfer_done); ret = setup_fifo_params(spi_msg->spi, spi); if (ret) dev_err(mas->dev, "Couldn't select mode %d\n", ret); @@ -357,32 +379,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer, writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG); } -static void handle_fifo_timeout(struct spi_master *spi, - struct spi_message *msg) -{ - struct spi_geni_master *mas = spi_master_get_devdata(spi); - unsigned long time_left, flags; - struct geni_se *se = &mas->se; - - spin_lock_irqsave(&mas->lock, flags); - reinit_completion(&mas->xfer_done); - mas->cur_mcmd = CMD_CANCEL; - geni_se_cancel_m_cmd(se); - writel(0, se->base + SE_GENI_TX_WATERMARK_REG); - spin_unlock_irqrestore(&mas->lock, flags); - time_left = wait_for_completion_timeout(&mas->xfer_done, HZ); - if (time_left) - return; - - spin_lock_irqsave(&mas->lock, 
flags); - reinit_completion(&mas->xfer_done); - geni_se_abort_m_cmd(se); - spin_unlock_irqrestore(&mas->lock, flags); - time_left = wait_for_completion_timeout(&mas->xfer_done, HZ); - if (!time_left) - dev_err(mas->dev, "Failed to cancel/abort m_cmd\n"); -} - static int spi_geni_transfer_one(struct spi_master *spi, struct spi_device *slv, struct spi_transfer *xfer) diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index 9487c9cd68bd..a4d8d19ecff9 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -552,6 +552,75 @@ void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc) } EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy); +static void devm_spi_mem_dirmap_release(struct device *dev, void *res) +{ + struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res; + + spi_mem_dirmap_destroy(desc); +} + +/** + * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach + * it to a device + * @dev: device the dirmap desc will be attached to + * @mem: SPI mem device this direct mapping should be created for + * @info: direct mapping information + * + * devm_ variant of the spi_mem_dirmap_create() function. See + * spi_mem_dirmap_create() for more details. + * + * Return: a valid pointer in case of success, and ERR_PTR() otherwise. + */ +struct spi_mem_dirmap_desc * +devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem, + const struct spi_mem_dirmap_info *info) +{ + struct spi_mem_dirmap_desc **ptr, *desc; + + ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr), + GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + desc = spi_mem_dirmap_create(mem, info); + if (IS_ERR(desc)) { + devres_free(ptr); + } else { + *ptr = desc; + devres_add(dev, ptr); + } + + return desc; +} +EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create); + +static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data) +{ + struct spi_mem_dirmap_desc **ptr = res; + + if (WARN_ON(!ptr || !*ptr)) + return 0; + + return *ptr == data; +} + +/** + * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached + * to a device + * @dev: device the dirmap desc is attached to + * @desc: the direct mapping descriptor to destroy + * + * devm_ variant of the spi_mem_dirmap_destroy() function. See + * spi_mem_dirmap_destroy() for more details. 
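+ *
+ * Calling this is normally not required: a descriptor created with
+ * devm_spi_mem_dirmap_create() is released automatically when @dev is
+ * unbound, via the devres release registered there.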
+ */ +void devm_spi_mem_dirmap_destroy(struct device *dev, + struct spi_mem_dirmap_desc *desc) +{ + devres_release(dev, devm_spi_mem_dirmap_release, + devm_spi_mem_dirmap_match, desc); +} +EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy); + /** * spi_mem_dirmap_dirmap_read() - Read data through a direct mapping * @desc: direct mapping descriptor diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 6ac95a2a21ce..7bf53cfc25d6 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -39,6 +39,7 @@ #include <linux/stmp_device.h> #include <linux/spi/spi.h> #include <linux/spi/mxs-spi.h> +#include <trace/events/spi.h> #define DRIVER_NAME "mxs-spi" @@ -374,6 +375,8 @@ static int mxs_spi_transfer_one(struct spi_master *master, list_for_each_entry(t, &m->transfers, transfer_list) { + trace_spi_transfer_start(m, t); + status = mxs_spi_setup_transfer(m->spi, t); if (status) break; @@ -419,6 +422,8 @@ static int mxs_spi_transfer_one(struct spi_master *master, flag); } + trace_spi_transfer_stop(m, t); + if (status) { stmp_reset_block(ssp->base); break; diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c new file mode 100644 index 000000000000..8894f98cc99c --- /dev/null +++ b/drivers/spi/spi-nxp-fspi.c @@ -0,0 +1,1106 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * NXP FlexSPI(FSPI) controller driver. + * + * Copyright 2019 NXP. + * + * FlexSPI is a flexsible SPI host controller which supports two SPI + * channels and up to 4 external devices. Each channel supports + * Single/Dual/Quad/Octal mode data transfer (1/2/4/8 bidirectional + * data lines). + * + * FlexSPI controller is driven by the LUT(Look-up Table) registers + * LUT registers are a look-up-table for sequences of instructions. + * A valid sequence consists of four LUT registers. + * Maximum 32 LUT sequences can be programmed simultaneously. + * + * LUTs are being created at run-time based on the commands passed + * from the spi-mem framework, thus using single LUT index. + * + * Software triggered Flash read/write access by IP Bus. + * + * Memory mapped read access by AHB Bus. + * + * Based on SPI MEM interface and spi-fsl-qspi.c driver. + * + * Author: + * Yogesh Narayan Gaur <yogeshnarayan.gaur@nxp.com> + * Boris Brezillon <bbrezillon@kernel.org> + * Frieder Schrempf <frieder.schrempf@kontron.de> + */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_qos.h> +#include <linux/sizes.h> + +#include <linux/spi/spi.h> +#include <linux/spi/spi-mem.h> + +/* + * The driver only uses one single LUT entry, that is updated on + * each call of exec_op(). Index 0 is preset at boot with a basic + * read operation, so let's use the last entry (31). 
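+ *
+ * As in spi-fsl-qspi.c, one LUT sequence spans four 32-bit registers,
+ * so entry 31 starts at FSPI_LUT_BASE + 31 * 4 * 4 (see FSPI_LUT_OFFSET
+ * and FSPI_LUT_REG() below).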
+ */ +#define SEQID_LUT 31 + +/* Registers used by the driver */ +#define FSPI_MCR0 0x00 +#define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24) +#define FSPI_MCR0_IP_TIMEOUT(x) ((x) << 16) +#define FSPI_MCR0_LEARN_EN BIT(15) +#define FSPI_MCR0_SCRFRUN_EN BIT(14) +#define FSPI_MCR0_OCTCOMB_EN BIT(13) +#define FSPI_MCR0_DOZE_EN BIT(12) +#define FSPI_MCR0_HSEN BIT(11) +#define FSPI_MCR0_SERCLKDIV BIT(8) +#define FSPI_MCR0_ATDF_EN BIT(7) +#define FSPI_MCR0_ARDF_EN BIT(6) +#define FSPI_MCR0_RXCLKSRC(x) ((x) << 4) +#define FSPI_MCR0_END_CFG(x) ((x) << 2) +#define FSPI_MCR0_MDIS BIT(1) +#define FSPI_MCR0_SWRST BIT(0) + +#define FSPI_MCR1 0x04 +#define FSPI_MCR1_SEQ_TIMEOUT(x) ((x) << 16) +#define FSPI_MCR1_AHB_TIMEOUT(x) (x) + +#define FSPI_MCR2 0x08 +#define FSPI_MCR2_IDLE_WAIT(x) ((x) << 24) +#define FSPI_MCR2_SAMEDEVICEEN BIT(15) +#define FSPI_MCR2_CLRLRPHS BIT(14) +#define FSPI_MCR2_ABRDATSZ BIT(8) +#define FSPI_MCR2_ABRLEARN BIT(7) +#define FSPI_MCR2_ABR_READ BIT(6) +#define FSPI_MCR2_ABRWRITE BIT(5) +#define FSPI_MCR2_ABRDUMMY BIT(4) +#define FSPI_MCR2_ABR_MODE BIT(3) +#define FSPI_MCR2_ABRCADDR BIT(2) +#define FSPI_MCR2_ABRRADDR BIT(1) +#define FSPI_MCR2_ABR_CMD BIT(0) + +#define FSPI_AHBCR 0x0c +#define FSPI_AHBCR_RDADDROPT BIT(6) +#define FSPI_AHBCR_PREF_EN BIT(5) +#define FSPI_AHBCR_BUFF_EN BIT(4) +#define FSPI_AHBCR_CACH_EN BIT(3) +#define FSPI_AHBCR_CLRTXBUF BIT(2) +#define FSPI_AHBCR_CLRRXBUF BIT(1) +#define FSPI_AHBCR_PAR_EN BIT(0) + +#define FSPI_INTEN 0x10 +#define FSPI_INTEN_SCLKSBWR BIT(9) +#define FSPI_INTEN_SCLKSBRD BIT(8) +#define FSPI_INTEN_DATALRNFL BIT(7) +#define FSPI_INTEN_IPTXWE BIT(6) +#define FSPI_INTEN_IPRXWA BIT(5) +#define FSPI_INTEN_AHBCMDERR BIT(4) +#define FSPI_INTEN_IPCMDERR BIT(3) +#define FSPI_INTEN_AHBCMDGE BIT(2) +#define FSPI_INTEN_IPCMDGE BIT(1) +#define FSPI_INTEN_IPCMDDONE BIT(0) + +#define FSPI_INTR 0x14 +#define FSPI_INTR_SCLKSBWR BIT(9) +#define FSPI_INTR_SCLKSBRD BIT(8) +#define FSPI_INTR_DATALRNFL BIT(7) +#define FSPI_INTR_IPTXWE BIT(6) +#define FSPI_INTR_IPRXWA BIT(5) +#define FSPI_INTR_AHBCMDERR BIT(4) +#define FSPI_INTR_IPCMDERR BIT(3) +#define FSPI_INTR_AHBCMDGE BIT(2) +#define FSPI_INTR_IPCMDGE BIT(1) +#define FSPI_INTR_IPCMDDONE BIT(0) + +#define FSPI_LUTKEY 0x18 +#define FSPI_LUTKEY_VALUE 0x5AF05AF0 + +#define FSPI_LCKCR 0x1C + +#define FSPI_LCKER_LOCK 0x1 +#define FSPI_LCKER_UNLOCK 0x2 + +#define FSPI_BUFXCR_INVALID_MSTRID 0xE +#define FSPI_AHBRX_BUF0CR0 0x20 +#define FSPI_AHBRX_BUF1CR0 0x24 +#define FSPI_AHBRX_BUF2CR0 0x28 +#define FSPI_AHBRX_BUF3CR0 0x2C +#define FSPI_AHBRX_BUF4CR0 0x30 +#define FSPI_AHBRX_BUF5CR0 0x34 +#define FSPI_AHBRX_BUF6CR0 0x38 +#define FSPI_AHBRX_BUF7CR0 0x3C +#define FSPI_AHBRXBUF0CR7_PREF BIT(31) + +#define FSPI_AHBRX_BUF0CR1 0x40 +#define FSPI_AHBRX_BUF1CR1 0x44 +#define FSPI_AHBRX_BUF2CR1 0x48 +#define FSPI_AHBRX_BUF3CR1 0x4C +#define FSPI_AHBRX_BUF4CR1 0x50 +#define FSPI_AHBRX_BUF5CR1 0x54 +#define FSPI_AHBRX_BUF6CR1 0x58 +#define FSPI_AHBRX_BUF7CR1 0x5C + +#define FSPI_FLSHA1CR0 0x60 +#define FSPI_FLSHA2CR0 0x64 +#define FSPI_FLSHB1CR0 0x68 +#define FSPI_FLSHB2CR0 0x6C +#define FSPI_FLSHXCR0_SZ_KB 10 +#define FSPI_FLSHXCR0_SZ(x) ((x) >> FSPI_FLSHXCR0_SZ_KB) + +#define FSPI_FLSHA1CR1 0x70 +#define FSPI_FLSHA2CR1 0x74 +#define FSPI_FLSHB1CR1 0x78 +#define FSPI_FLSHB2CR1 0x7C +#define FSPI_FLSHXCR1_CSINTR(x) ((x) << 16) +#define FSPI_FLSHXCR1_CAS(x) ((x) << 11) +#define FSPI_FLSHXCR1_WA BIT(10) +#define FSPI_FLSHXCR1_TCSH(x) ((x) << 5) +#define FSPI_FLSHXCR1_TCSS(x) (x) + +#define FSPI_FLSHA1CR2 0x80 +#define 
FSPI_FLSHA2CR2 0x84 +#define FSPI_FLSHB1CR2 0x88 +#define FSPI_FLSHB2CR2 0x8C +#define FSPI_FLSHXCR2_CLRINSP BIT(24) +#define FSPI_FLSHXCR2_AWRWAIT BIT(16) +#define FSPI_FLSHXCR2_AWRSEQN_SHIFT 13 +#define FSPI_FLSHXCR2_AWRSEQI_SHIFT 8 +#define FSPI_FLSHXCR2_ARDSEQN_SHIFT 5 +#define FSPI_FLSHXCR2_ARDSEQI_SHIFT 0 + +#define FSPI_IPCR0 0xA0 + +#define FSPI_IPCR1 0xA4 +#define FSPI_IPCR1_IPAREN BIT(31) +#define FSPI_IPCR1_SEQNUM_SHIFT 24 +#define FSPI_IPCR1_SEQID_SHIFT 16 +#define FSPI_IPCR1_IDATSZ(x) (x) + +#define FSPI_IPCMD 0xB0 +#define FSPI_IPCMD_TRG BIT(0) + +#define FSPI_DLPR 0xB4 + +#define FSPI_IPRXFCR 0xB8 +#define FSPI_IPRXFCR_CLR BIT(0) +#define FSPI_IPRXFCR_DMA_EN BIT(1) +#define FSPI_IPRXFCR_WMRK(x) ((x) << 2) + +#define FSPI_IPTXFCR 0xBC +#define FSPI_IPTXFCR_CLR BIT(0) +#define FSPI_IPTXFCR_DMA_EN BIT(1) +#define FSPI_IPTXFCR_WMRK(x) ((x) << 2) + +#define FSPI_DLLACR 0xC0 +#define FSPI_DLLACR_OVRDEN BIT(8) + +#define FSPI_DLLBCR 0xC4 +#define FSPI_DLLBCR_OVRDEN BIT(8) + +#define FSPI_STS0 0xE0 +#define FSPI_STS0_DLPHB(x) ((x) << 8) +#define FSPI_STS0_DLPHA(x) ((x) << 4) +#define FSPI_STS0_CMD_SRC(x) ((x) << 2) +#define FSPI_STS0_ARB_IDLE BIT(1) +#define FSPI_STS0_SEQ_IDLE BIT(0) + +#define FSPI_STS1 0xE4 +#define FSPI_STS1_IP_ERRCD(x) ((x) << 24) +#define FSPI_STS1_IP_ERRID(x) ((x) << 16) +#define FSPI_STS1_AHB_ERRCD(x) ((x) << 8) +#define FSPI_STS1_AHB_ERRID(x) (x) + +#define FSPI_AHBSPNST 0xEC +#define FSPI_AHBSPNST_DATLFT(x) ((x) << 16) +#define FSPI_AHBSPNST_BUFID(x) ((x) << 1) +#define FSPI_AHBSPNST_ACTIVE BIT(0) + +#define FSPI_IPRXFSTS 0xF0 +#define FSPI_IPRXFSTS_RDCNTR(x) ((x) << 16) +#define FSPI_IPRXFSTS_FILL(x) (x) + +#define FSPI_IPTXFSTS 0xF4 +#define FSPI_IPTXFSTS_WRCNTR(x) ((x) << 16) +#define FSPI_IPTXFSTS_FILL(x) (x) + +#define FSPI_RFDR 0x100 +#define FSPI_TFDR 0x180 + +#define FSPI_LUT_BASE 0x200 +#define FSPI_LUT_OFFSET (SEQID_LUT * 4 * 4) +#define FSPI_LUT_REG(idx) \ + (FSPI_LUT_BASE + FSPI_LUT_OFFSET + (idx) * 4) + +/* register map end */ + +/* Instruction set for the LUT register. */ +#define LUT_STOP 0x00 +#define LUT_CMD 0x01 +#define LUT_ADDR 0x02 +#define LUT_CADDR_SDR 0x03 +#define LUT_MODE 0x04 +#define LUT_MODE2 0x05 +#define LUT_MODE4 0x06 +#define LUT_MODE8 0x07 +#define LUT_NXP_WRITE 0x08 +#define LUT_NXP_READ 0x09 +#define LUT_LEARN_SDR 0x0A +#define LUT_DATSZ_SDR 0x0B +#define LUT_DUMMY 0x0C +#define LUT_DUMMY_RWDS_SDR 0x0D +#define LUT_JMP_ON_CS 0x1F +#define LUT_CMD_DDR 0x21 +#define LUT_ADDR_DDR 0x22 +#define LUT_CADDR_DDR 0x23 +#define LUT_MODE_DDR 0x24 +#define LUT_MODE2_DDR 0x25 +#define LUT_MODE4_DDR 0x26 +#define LUT_MODE8_DDR 0x27 +#define LUT_WRITE_DDR 0x28 +#define LUT_READ_DDR 0x29 +#define LUT_LEARN_DDR 0x2A +#define LUT_DATSZ_DDR 0x2B +#define LUT_DUMMY_DDR 0x2C +#define LUT_DUMMY_RWDS_DDR 0x2D + +/* + * Calculate number of required PAD bits for LUT register. + * + * The pad stands for the number of IO lines [0:7]. + * For example, the octal read needs eight IO lines, + * so you should use LUT_PAD(8). This macro + * returns 3 i.e. use eight (2^3) IP lines for read. + */ +#define LUT_PAD(x) (fls(x) - 1) + +/* + * Macro for constructing the LUT entries with the following + * register layout: + * + * --------------------------------------------------- + * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 | + * --------------------------------------------------- + */ +#define PAD_SHIFT 8 +#define INSTR_SHIFT 10 +#define OPRND_SHIFT 16 + +/* Macros for constructing the LUT register. 
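+ * For example (purely illustrative values, not taken from elsewhere in
+ * this driver): a single-line command byte 0x03 in slot 0 encodes as
+ *   LUT_DEF(0, LUT_CMD, LUT_PAD(1), 0x03)
+ *     = (0x01 << 10) | (0 << 8) | 0x03 = 0x0403,
+ * while a 24-bit address in slot 1 lands in the upper half-word:
+ *   LUT_DEF(1, LUT_ADDR, LUT_PAD(1), 24)
+ *     = ((0x02 << 10) | 0x18) << 16 = 0x08180000,
+ * giving lutval[0] = 0x08180403 for that sequence.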
*/ +#define LUT_DEF(idx, ins, pad, opr) \ + ((((ins) << INSTR_SHIFT) | ((pad) << PAD_SHIFT) | \ + (opr)) << (((idx) % 2) * OPRND_SHIFT)) + +#define POLL_TOUT 5000 +#define NXP_FSPI_MAX_CHIPSELECT 4 + +struct nxp_fspi_devtype_data { + unsigned int rxfifo; + unsigned int txfifo; + unsigned int ahb_buf_size; + unsigned int quirks; + bool little_endian; +}; + +static const struct nxp_fspi_devtype_data lx2160a_data = { + .rxfifo = SZ_512, /* (64 * 64 bits) */ + .txfifo = SZ_1K, /* (128 * 64 bits) */ + .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ + .quirks = 0, + .little_endian = true, /* little-endian */ +}; + +struct nxp_fspi { + void __iomem *iobase; + void __iomem *ahb_addr; + u32 memmap_phy; + u32 memmap_phy_size; + struct clk *clk, *clk_en; + struct device *dev; + struct completion c; + const struct nxp_fspi_devtype_data *devtype_data; + struct mutex lock; + struct pm_qos_request pm_qos_req; + int selected; +}; + +/* + * R/W functions for big- or little-endian registers: + * The FSPI controller's endianness is independent of + * the CPU core's endianness. So far, although the CPU + * core is little-endian the FSPI controller can use + * big-endian or little-endian. + */ +static void fspi_writel(struct nxp_fspi *f, u32 val, void __iomem *addr) +{ + if (f->devtype_data->little_endian) + iowrite32(val, addr); + else + iowrite32be(val, addr); +} + +static u32 fspi_readl(struct nxp_fspi *f, void __iomem *addr) +{ + if (f->devtype_data->little_endian) + return ioread32(addr); + else + return ioread32be(addr); +} + +static irqreturn_t nxp_fspi_irq_handler(int irq, void *dev_id) +{ + struct nxp_fspi *f = dev_id; + u32 reg; + + /* clear interrupt */ + reg = fspi_readl(f, f->iobase + FSPI_INTR); + fspi_writel(f, FSPI_INTR_IPCMDDONE, f->iobase + FSPI_INTR); + + if (reg & FSPI_INTR_IPCMDDONE) + complete(&f->c); + + return IRQ_HANDLED; +} + +static int nxp_fspi_check_buswidth(struct nxp_fspi *f, u8 width) +{ + switch (width) { + case 1: + case 2: + case 4: + case 8: + return 0; + } + + return -ENOTSUPP; +} + +static bool nxp_fspi_supports_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master); + int ret; + + ret = nxp_fspi_check_buswidth(f, op->cmd.buswidth); + + if (op->addr.nbytes) + ret |= nxp_fspi_check_buswidth(f, op->addr.buswidth); + + if (op->dummy.nbytes) + ret |= nxp_fspi_check_buswidth(f, op->dummy.buswidth); + + if (op->data.nbytes) + ret |= nxp_fspi_check_buswidth(f, op->data.buswidth); + + if (ret) + return false; + + /* + * The number of address bytes should be equal to or less than 4 bytes. + */ + if (op->addr.nbytes > 4) + return false; + + /* + * If requested address value is greater than controller assigned + * memory mapped space, return error as it didn't fit in the range + * of assigned address space. + */ + if (op->addr.val >= f->memmap_phy_size) + return false; + + /* Max 64 dummy clock cycles supported */ + if (op->dummy.buswidth && + (op->dummy.nbytes * 8 / op->dummy.buswidth > 64)) + return false; + + /* Max data length, check controller limits and alignment */ + if (op->data.dir == SPI_MEM_DATA_IN && + (op->data.nbytes > f->devtype_data->ahb_buf_size || + (op->data.nbytes > f->devtype_data->rxfifo - 4 && + !IS_ALIGNED(op->data.nbytes, 8)))) + return false; + + if (op->data.dir == SPI_MEM_DATA_OUT && + op->data.nbytes > f->devtype_data->txfifo) + return false; + + return true; +} + +/* Instead of busy looping invoke readl_poll_timeout functionality. 
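+ * readl_poll_timeout() always performs a raw little-endian read, so for
+ * a big-endian mapped controller the mask is byte-swapped once up front
+ * rather than swapping every polled value.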
*/ +static int fspi_readl_poll_tout(struct nxp_fspi *f, void __iomem *base, + u32 mask, u32 delay_us, + u32 timeout_us, bool c) +{ + u32 reg; + + if (!f->devtype_data->little_endian) + mask = (u32)cpu_to_be32(mask); + + if (c) + return readl_poll_timeout(base, reg, (reg & mask), + delay_us, timeout_us); + else + return readl_poll_timeout(base, reg, !(reg & mask), + delay_us, timeout_us); +} + +/* + * If the slave device content being changed by Write/Erase, need to + * invalidate the AHB buffer. This can be achieved by doing the reset + * of controller after setting MCR0[SWRESET] bit. + */ +static inline void nxp_fspi_invalid(struct nxp_fspi *f) +{ + u32 reg; + int ret; + + reg = fspi_readl(f, f->iobase + FSPI_MCR0); + fspi_writel(f, reg | FSPI_MCR0_SWRST, f->iobase + FSPI_MCR0); + + /* w1c register, wait unit clear */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0, + FSPI_MCR0_SWRST, 0, POLL_TOUT, false); + WARN_ON(ret); +} + +static void nxp_fspi_prepare_lut(struct nxp_fspi *f, + const struct spi_mem_op *op) +{ + void __iomem *base = f->iobase; + u32 lutval[4] = {}; + int lutidx = 1, i; + + /* cmd */ + lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth), + op->cmd.opcode); + + /* addr bytes */ + if (op->addr.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_ADDR, + LUT_PAD(op->addr.buswidth), + op->addr.nbytes * 8); + lutidx++; + } + + /* dummy bytes, if needed */ + if (op->dummy.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY, + /* + * Due to FlexSPI controller limitation number of PAD for dummy + * buswidth needs to be programmed as equal to data buswidth. + */ + LUT_PAD(op->data.buswidth), + op->dummy.nbytes * 8 / + op->dummy.buswidth); + lutidx++; + } + + /* read/write data bytes */ + if (op->data.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, + op->data.dir == SPI_MEM_DATA_IN ? + LUT_NXP_READ : LUT_NXP_WRITE, + LUT_PAD(op->data.buswidth), + 0); + lutidx++; + } + + /* stop condition. */ + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0); + + /* unlock LUT */ + fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY); + fspi_writel(f, FSPI_LCKER_UNLOCK, f->iobase + FSPI_LCKCR); + + /* fill LUT */ + for (i = 0; i < ARRAY_SIZE(lutval); i++) + fspi_writel(f, lutval[i], base + FSPI_LUT_REG(i)); + + dev_dbg(f->dev, "CMD[%x] lutval[0:%x \t 1:%x \t 2:%x \t 3:%x]\n", + op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3]); + + /* lock LUT */ + fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY); + fspi_writel(f, FSPI_LCKER_LOCK, f->iobase + FSPI_LCKCR); +} + +static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f) +{ + int ret; + + ret = clk_prepare_enable(f->clk_en); + if (ret) + return ret; + + ret = clk_prepare_enable(f->clk); + if (ret) { + clk_disable_unprepare(f->clk_en); + return ret; + } + + return 0; +} + +static void nxp_fspi_clk_disable_unprep(struct nxp_fspi *f) +{ + clk_disable_unprepare(f->clk); + clk_disable_unprepare(f->clk_en); +} + +/* + * In FlexSPI controller, flash access is based on value of FSPI_FLSHXXCR0 + * register and start base address of the slave device. 
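+ * The four chip-select windows are stacked in the memory-mapped (AHB)
+ * address space as shown below: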
+ * + * (Higher address) + * -------- <-- FLSHB2CR0 + * | B2 | + * | | + * B2 start address --> -------- <-- FLSHB1CR0 + * | B1 | + * | | + * B1 start address --> -------- <-- FLSHA2CR0 + * | A2 | + * | | + * A2 start address --> -------- <-- FLSHA1CR0 + * | A1 | + * | | + * A1 start address --> -------- (Lower address) + * + * + * Start base address defines the starting address range for given CS and + * FSPI_FLSHXXCR0 defines the size of the slave device connected at given CS. + * + * But, different targets are having different combinations of number of CS, + * some targets only have single CS or two CS covering controller's full + * memory mapped space area. + * Thus, implementation is being done as independent of the size and number + * of the connected slave device. + * Assign controller memory mapped space size as the size to the connected + * slave device. + * Mark FLSHxxCR0 as zero initially and then assign value only to the selected + * chip-select Flash configuration register. + * + * For e.g. to access CS2 (B1), FLSHB1CR0 register would be equal to the + * memory mapped size of the controller. + * Value for rest of the CS FLSHxxCR0 register would be zero. + * + */ +static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi) +{ + unsigned long rate = spi->max_speed_hz; + int ret; + uint64_t size_kb; + + /* + * Return, if previously selected slave device is same as current + * requested slave device. + */ + if (f->selected == spi->chip_select) + return; + + /* Reset FLSHxxCR0 registers */ + fspi_writel(f, 0, f->iobase + FSPI_FLSHA1CR0); + fspi_writel(f, 0, f->iobase + FSPI_FLSHA2CR0); + fspi_writel(f, 0, f->iobase + FSPI_FLSHB1CR0); + fspi_writel(f, 0, f->iobase + FSPI_FLSHB2CR0); + + /* Assign controller memory mapped space as size, KBytes, of flash. */ + size_kb = FSPI_FLSHXCR0_SZ(f->memmap_phy_size); + + fspi_writel(f, size_kb, f->iobase + FSPI_FLSHA1CR0 + + 4 * spi->chip_select); + + dev_dbg(f->dev, "Slave device [CS:%x] selected\n", spi->chip_select); + + nxp_fspi_clk_disable_unprep(f); + + ret = clk_set_rate(f->clk, rate); + if (ret) + return; + + ret = nxp_fspi_clk_prep_enable(f); + if (ret) + return; + + f->selected = spi->chip_select; +} + +static void nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op) +{ + u32 len = op->data.nbytes; + + /* Read out the data directly from the AHB buffer. */ + memcpy_fromio(op->data.buf.in, (f->ahb_addr + op->addr.val), len); +} + +static void nxp_fspi_fill_txfifo(struct nxp_fspi *f, + const struct spi_mem_op *op) +{ + void __iomem *base = f->iobase; + int i, ret; + u8 *buf = (u8 *) op->data.buf.out; + + /* clear the TX FIFO. */ + fspi_writel(f, FSPI_IPTXFCR_CLR, base + FSPI_IPTXFCR); + + /* + * Default value of water mark level is 8 bytes, hence in single + * write request controller can write max 8 bytes of data. 
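+ * The loop below therefore pushes the payload in aligned 8-byte chunks
+ * (two 32-bit writes to the TX FIFO data register), waiting for the
+ * IPTXWE watermark flag before each chunk; a tail shorter than 8 bytes
+ * is handled separately afterwards.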
+ */ + + for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 8); i += 8) { + /* Wait for TXFIFO empty */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR, + FSPI_INTR_IPTXWE, 0, + POLL_TOUT, true); + WARN_ON(ret); + + fspi_writel(f, *(u32 *) (buf + i), base + FSPI_TFDR); + fspi_writel(f, *(u32 *) (buf + i + 4), base + FSPI_TFDR + 4); + fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR); + } + + if (i < op->data.nbytes) { + u32 data = 0; + int j; + /* Wait for TXFIFO empty */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR, + FSPI_INTR_IPTXWE, 0, + POLL_TOUT, true); + WARN_ON(ret); + + for (j = 0; j < ALIGN(op->data.nbytes - i, 4); j += 4) { + memcpy(&data, buf + i + j, 4); + fspi_writel(f, data, base + FSPI_TFDR + j); + } + fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR); + } +} + +static void nxp_fspi_read_rxfifo(struct nxp_fspi *f, + const struct spi_mem_op *op) +{ + void __iomem *base = f->iobase; + int i, ret; + int len = op->data.nbytes; + u8 *buf = (u8 *) op->data.buf.in; + + /* + * Default value of water mark level is 8 bytes, hence in single + * read request controller can read max 8 bytes of data. + */ + for (i = 0; i < ALIGN_DOWN(len, 8); i += 8) { + /* Wait for RXFIFO available */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR, + FSPI_INTR_IPRXWA, 0, + POLL_TOUT, true); + WARN_ON(ret); + + *(u32 *)(buf + i) = fspi_readl(f, base + FSPI_RFDR); + *(u32 *)(buf + i + 4) = fspi_readl(f, base + FSPI_RFDR + 4); + /* move the FIFO pointer */ + fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR); + } + + if (i < len) { + u32 tmp; + int size, j; + + buf = op->data.buf.in + i; + /* Wait for RXFIFO available */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR, + FSPI_INTR_IPRXWA, 0, + POLL_TOUT, true); + WARN_ON(ret); + + len = op->data.nbytes - i; + for (j = 0; j < op->data.nbytes - i; j += 4) { + tmp = fspi_readl(f, base + FSPI_RFDR + j); + size = min(len, 4); + memcpy(buf + j, &tmp, size); + len -= size; + } + } + + /* invalid the RXFIFO */ + fspi_writel(f, FSPI_IPRXFCR_CLR, base + FSPI_IPRXFCR); + /* move the FIFO pointer */ + fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR); +} + +static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op) +{ + void __iomem *base = f->iobase; + int seqnum = 0; + int err = 0; + u32 reg; + + reg = fspi_readl(f, base + FSPI_IPRXFCR); + /* invalid RXFIFO first */ + reg &= ~FSPI_IPRXFCR_DMA_EN; + reg = reg | FSPI_IPRXFCR_CLR; + fspi_writel(f, reg, base + FSPI_IPRXFCR); + + init_completion(&f->c); + + fspi_writel(f, op->addr.val, base + FSPI_IPCR0); + /* + * Always start the sequence at the same index since we update + * the LUT at each exec_op() call. And also specify the DATA + * length, since it's has not been specified in the LUT. + */ + fspi_writel(f, op->data.nbytes | + (SEQID_LUT << FSPI_IPCR1_SEQID_SHIFT) | + (seqnum << FSPI_IPCR1_SEQNUM_SHIFT), + base + FSPI_IPCR1); + + /* Trigger the LUT now. */ + fspi_writel(f, FSPI_IPCMD_TRG, base + FSPI_IPCMD); + + /* Wait for the interrupt. */ + if (!wait_for_completion_timeout(&f->c, msecs_to_jiffies(1000))) + err = -ETIMEDOUT; + + /* Invoke IP data read, if request is of data read. */ + if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN) + nxp_fspi_read_rxfifo(f, op); + + return err; +} + +static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master); + int err = 0; + + mutex_lock(&f->lock); + + /* Wait for controller being ready. 
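+ * ARB_IDLE in the STS0 register is polled below before the LUT is
+ * reprogrammed and the next IP command or AHB read is issued.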
*/ + err = fspi_readl_poll_tout(f, f->iobase + FSPI_STS0, + FSPI_STS0_ARB_IDLE, 1, POLL_TOUT, true); + WARN_ON(err); + + nxp_fspi_select_mem(f, mem->spi); + + nxp_fspi_prepare_lut(f, op); + /* + * If we have large chunks of data, we read them through the AHB bus + * by accessing the mapped memory. In all other cases we use + * IP commands to access the flash. + */ + if (op->data.nbytes > (f->devtype_data->rxfifo - 4) && + op->data.dir == SPI_MEM_DATA_IN) { + nxp_fspi_read_ahb(f, op); + } else { + if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) + nxp_fspi_fill_txfifo(f, op); + + err = nxp_fspi_do_op(f, op); + } + + /* Invalidate the data in the AHB buffer. */ + nxp_fspi_invalid(f); + + mutex_unlock(&f->lock); + + return err; +} + +static int nxp_fspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) +{ + struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master); + + if (op->data.dir == SPI_MEM_DATA_OUT) { + if (op->data.nbytes > f->devtype_data->txfifo) + op->data.nbytes = f->devtype_data->txfifo; + } else { + if (op->data.nbytes > f->devtype_data->ahb_buf_size) + op->data.nbytes = f->devtype_data->ahb_buf_size; + else if (op->data.nbytes > (f->devtype_data->rxfifo - 4)) + op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8); + } + + return 0; +} + +static int nxp_fspi_default_setup(struct nxp_fspi *f) +{ + void __iomem *base = f->iobase; + int ret, i; + u32 reg; + + /* disable and unprepare clock to avoid glitch pass to controller */ + nxp_fspi_clk_disable_unprep(f); + + /* the default frequency, we will change it later if necessary. */ + ret = clk_set_rate(f->clk, 20000000); + if (ret) + return ret; + + ret = nxp_fspi_clk_prep_enable(f); + if (ret) + return ret; + + /* Reset the module */ + /* w1c register, wait unit clear */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0, + FSPI_MCR0_SWRST, 0, POLL_TOUT, false); + WARN_ON(ret); + + /* Disable the module */ + fspi_writel(f, FSPI_MCR0_MDIS, base + FSPI_MCR0); + + /* Reset the DLL register to default value */ + fspi_writel(f, FSPI_DLLACR_OVRDEN, base + FSPI_DLLACR); + fspi_writel(f, FSPI_DLLBCR_OVRDEN, base + FSPI_DLLBCR); + + /* enable module */ + fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) | FSPI_MCR0_IP_TIMEOUT(0xFF), + base + FSPI_MCR0); + + /* + * Disable same device enable bit and configure all slave devices + * independently. + */ + reg = fspi_readl(f, f->iobase + FSPI_MCR2); + reg = reg & ~(FSPI_MCR2_SAMEDEVICEEN); + fspi_writel(f, reg, base + FSPI_MCR2); + + /* AHB configuration for access buffer 0~7. */ + for (i = 0; i < 7; i++) + fspi_writel(f, 0, base + FSPI_AHBRX_BUF0CR0 + 4 * i); + + /* + * Set ADATSZ with the maximum AHB buffer size to improve the read + * performance. + */ + fspi_writel(f, (f->devtype_data->ahb_buf_size / 8 | + FSPI_AHBRXBUF0CR7_PREF), base + FSPI_AHBRX_BUF7CR0); + + /* prefetch and no start address alignment limitation */ + fspi_writel(f, FSPI_AHBCR_PREF_EN | FSPI_AHBCR_RDADDROPT, + base + FSPI_AHBCR); + + /* AHB Read - Set lut sequence ID for all CS. 
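+ * All four FLSHxxCR2 registers are pointed at the same sequence index
+ * (SEQID_LUT), so memory-mapped AHB reads use whatever read sequence
+ * exec_op() last programmed into that LUT slot.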
*/ + fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2); + fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2); + fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB1CR2); + fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB2CR2); + + f->selected = -1; + + /* enable the interrupt */ + fspi_writel(f, FSPI_INTEN_IPCMDDONE, base + FSPI_INTEN); + + return 0; +} + +static const char *nxp_fspi_get_name(struct spi_mem *mem) +{ + struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master); + struct device *dev = &mem->spi->dev; + const char *name; + + // Set custom name derived from the platform_device of the controller. + if (of_get_available_child_count(f->dev->of_node) == 1) + return dev_name(f->dev); + + name = devm_kasprintf(dev, GFP_KERNEL, + "%s-%d", dev_name(f->dev), + mem->spi->chip_select); + + if (!name) { + dev_err(dev, "failed to get memory for custom flash name\n"); + return ERR_PTR(-ENOMEM); + } + + return name; +} + +static const struct spi_controller_mem_ops nxp_fspi_mem_ops = { + .adjust_op_size = nxp_fspi_adjust_op_size, + .supports_op = nxp_fspi_supports_op, + .exec_op = nxp_fspi_exec_op, + .get_name = nxp_fspi_get_name, +}; + +static int nxp_fspi_probe(struct platform_device *pdev) +{ + struct spi_controller *ctlr; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct resource *res; + struct nxp_fspi *f; + int ret; + + ctlr = spi_alloc_master(&pdev->dev, sizeof(*f)); + if (!ctlr) + return -ENOMEM; + + ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL | + SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL; + + f = spi_controller_get_devdata(ctlr); + f->dev = dev; + f->devtype_data = of_device_get_match_data(dev); + if (!f->devtype_data) { + ret = -ENODEV; + goto err_put_ctrl; + } + + platform_set_drvdata(pdev, f); + + /* find the resources - configuration register address space */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_base"); + f->iobase = devm_ioremap_resource(dev, res); + if (IS_ERR(f->iobase)) { + ret = PTR_ERR(f->iobase); + goto err_put_ctrl; + } + + /* find the resources - controller memory mapped space */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap"); + f->ahb_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(f->ahb_addr)) { + ret = PTR_ERR(f->ahb_addr); + goto err_put_ctrl; + } + + /* assign memory mapped starting address and mapped size. 
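+ * (The mapped size recorded here later bounds both the FLSHxxCR0 window
+ * programmed in nxp_fspi_select_mem() and the address range accepted by
+ * nxp_fspi_supports_op().)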
*/ + f->memmap_phy = res->start; + f->memmap_phy_size = resource_size(res); + + /* find the clocks */ + f->clk_en = devm_clk_get(dev, "fspi_en"); + if (IS_ERR(f->clk_en)) { + ret = PTR_ERR(f->clk_en); + goto err_put_ctrl; + } + + f->clk = devm_clk_get(dev, "fspi"); + if (IS_ERR(f->clk)) { + ret = PTR_ERR(f->clk); + goto err_put_ctrl; + } + + ret = nxp_fspi_clk_prep_enable(f); + if (ret) { + dev_err(dev, "can not enable the clock\n"); + goto err_put_ctrl; + } + + /* find the irq */ + ret = platform_get_irq(pdev, 0); + if (ret < 0) { + dev_err(dev, "failed to get the irq: %d\n", ret); + goto err_disable_clk; + } + + ret = devm_request_irq(dev, ret, + nxp_fspi_irq_handler, 0, pdev->name, f); + if (ret) { + dev_err(dev, "failed to request irq: %d\n", ret); + goto err_disable_clk; + } + + mutex_init(&f->lock); + + ctlr->bus_num = -1; + ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT; + ctlr->mem_ops = &nxp_fspi_mem_ops; + + nxp_fspi_default_setup(f); + + ctlr->dev.of_node = np; + + ret = spi_register_controller(ctlr); + if (ret) + goto err_destroy_mutex; + + return 0; + +err_destroy_mutex: + mutex_destroy(&f->lock); + +err_disable_clk: + nxp_fspi_clk_disable_unprep(f); + +err_put_ctrl: + spi_controller_put(ctlr); + + dev_err(dev, "NXP FSPI probe failed\n"); + return ret; +} + +static int nxp_fspi_remove(struct platform_device *pdev) +{ + struct nxp_fspi *f = platform_get_drvdata(pdev); + + /* disable the hardware */ + fspi_writel(f, FSPI_MCR0_MDIS, f->iobase + FSPI_MCR0); + + nxp_fspi_clk_disable_unprep(f); + + mutex_destroy(&f->lock); + + return 0; +} + +static int nxp_fspi_suspend(struct device *dev) +{ + return 0; +} + +static int nxp_fspi_resume(struct device *dev) +{ + struct nxp_fspi *f = dev_get_drvdata(dev); + + nxp_fspi_default_setup(f); + + return 0; +} + +static const struct of_device_id nxp_fspi_dt_ids[] = { + { .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids); + +static const struct dev_pm_ops nxp_fspi_pm_ops = { + .suspend = nxp_fspi_suspend, + .resume = nxp_fspi_resume, +}; + +static struct platform_driver nxp_fspi_driver = { + .driver = { + .name = "nxp-fspi", + .of_match_table = nxp_fspi_dt_ids, + .pm = &nxp_fspi_pm_ops, + }, + .probe = nxp_fspi_probe, + .remove = nxp_fspi_remove, +}; +module_platform_driver(nxp_fspi_driver); + +MODULE_DESCRIPTION("NXP FSPI Controller Driver"); +MODULE_AUTHOR("NXP Semiconductor"); +MODULE_AUTHOR("Yogesh Narayan Gaur <yogeshnarayan.gaur@nxp.com>"); +MODULE_AUTHOR("Boris Brezillon <bbrezillon@kernel.org>"); +MODULE_AUTHOR("Frieder Schrempf <frieder.schrempf@kontron.de>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 0c793e31d60f..26684178786f 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -253,6 +253,7 @@ #define STATE_RUNNING ((void *) 1) #define STATE_DONE ((void *) 2) #define STATE_ERROR ((void *) -1) +#define STATE_TIMEOUT ((void *) -2) /* * SSP State - Whether Enabled or Disabled @@ -1484,6 +1485,30 @@ err_config_dma: writew(irqflags, SSP_IMSC(pl022->virtbase)); } +static void print_current_status(struct pl022 *pl022) +{ + u32 read_cr0; + u16 read_cr1, read_dmacr, read_sr; + + if (pl022->vendor->extended_cr) + read_cr0 = readl(SSP_CR0(pl022->virtbase)); + else + read_cr0 = readw(SSP_CR0(pl022->virtbase)); + read_cr1 = readw(SSP_CR1(pl022->virtbase)); + read_dmacr = readw(SSP_DMACR(pl022->virtbase)); + read_sr = readw(SSP_SR(pl022->virtbase)); + + dev_warn(&pl022->adev->dev, "spi-pl022 
CR0: %x\n", read_cr0); + dev_warn(&pl022->adev->dev, "spi-pl022 CR1: %x\n", read_cr1); + dev_warn(&pl022->adev->dev, "spi-pl022 DMACR: %x\n", read_dmacr); + dev_warn(&pl022->adev->dev, "spi-pl022 SR: %x\n", read_sr); + dev_warn(&pl022->adev->dev, + "spi-pl022 exp_fifo_level/fifodepth: %u/%d\n", + pl022->exp_fifo_level, + pl022->vendor->fifodepth); + +} + static void do_polling_transfer(struct pl022 *pl022) { struct spi_message *message = NULL; @@ -1535,7 +1560,8 @@ static void do_polling_transfer(struct pl022 *pl022) if (time_after(time, timeout)) { dev_warn(&pl022->adev->dev, "%s: timeout!\n", __func__); - message->state = STATE_ERROR; + message->state = STATE_TIMEOUT; + print_current_status(pl022); goto out; } cpu_relax(); @@ -1553,6 +1579,8 @@ out: /* Handle end of message */ if (message->state == STATE_DONE) message->status = 0; + else if (message->state == STATE_TIMEOUT) + message->status = -EAGAIN; else message->status = -EIO; diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c index 2fa7f4b43492..15592598273e 100644 --- a/drivers/spi/spi-pxa2xx-dma.c +++ b/drivers/spi/spi-pxa2xx-dma.c @@ -23,7 +23,7 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, bool error) { - struct spi_message *msg = drv_data->master->cur_msg; + struct spi_message *msg = drv_data->controller->cur_msg; /* * It is possible that one CPU is handling ROR interrupt and other @@ -59,7 +59,7 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, msg->status = -EIO; } - spi_finalize_current_transfer(drv_data->master); + spi_finalize_current_transfer(drv_data->controller); } } @@ -74,7 +74,7 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data, struct spi_transfer *xfer) { struct chip_data *chip = - spi_get_ctldata(drv_data->master->cur_msg->spi); + spi_get_ctldata(drv_data->controller->cur_msg->spi); enum dma_slave_buswidth width; struct dma_slave_config cfg; struct dma_chan *chan; @@ -102,14 +102,14 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data, cfg.dst_maxburst = chip->dma_burst_size; sgt = &xfer->tx_sg; - chan = drv_data->master->dma_tx; + chan = drv_data->controller->dma_tx; } else { cfg.src_addr = drv_data->ssdr_physical; cfg.src_addr_width = width; cfg.src_maxburst = chip->dma_burst_size; sgt = &xfer->rx_sg; - chan = drv_data->master->dma_rx; + chan = drv_data->controller->dma_rx; } ret = dmaengine_slave_config(chan, &cfg); @@ -130,8 +130,8 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) if (status & SSSR_ROR) { dev_err(&drv_data->pdev->dev, "FIFO overrun\n"); - dmaengine_terminate_async(drv_data->master->dma_rx); - dmaengine_terminate_async(drv_data->master->dma_tx); + dmaengine_terminate_async(drv_data->controller->dma_rx); + dmaengine_terminate_async(drv_data->controller->dma_tx); pxa2xx_spi_dma_transfer_complete(drv_data, true); return IRQ_HANDLED; @@ -171,15 +171,15 @@ int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, return 0; err_rx: - dmaengine_terminate_async(drv_data->master->dma_tx); + dmaengine_terminate_async(drv_data->controller->dma_tx); err_tx: return err; } void pxa2xx_spi_dma_start(struct driver_data *drv_data) { - dma_async_issue_pending(drv_data->master->dma_rx); - dma_async_issue_pending(drv_data->master->dma_tx); + dma_async_issue_pending(drv_data->controller->dma_rx); + dma_async_issue_pending(drv_data->controller->dma_tx); atomic_set(&drv_data->dma_running, 1); } @@ -187,30 +187,30 @@ void pxa2xx_spi_dma_start(struct driver_data *drv_data) void pxa2xx_spi_dma_stop(struct 
driver_data *drv_data) { atomic_set(&drv_data->dma_running, 0); - dmaengine_terminate_sync(drv_data->master->dma_rx); - dmaengine_terminate_sync(drv_data->master->dma_tx); + dmaengine_terminate_sync(drv_data->controller->dma_rx); + dmaengine_terminate_sync(drv_data->controller->dma_tx); } int pxa2xx_spi_dma_setup(struct driver_data *drv_data) { - struct pxa2xx_spi_master *pdata = drv_data->master_info; + struct pxa2xx_spi_controller *pdata = drv_data->controller_info; struct device *dev = &drv_data->pdev->dev; - struct spi_controller *master = drv_data->master; + struct spi_controller *controller = drv_data->controller; dma_cap_mask_t mask; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - master->dma_tx = dma_request_slave_channel_compat(mask, + controller->dma_tx = dma_request_slave_channel_compat(mask, pdata->dma_filter, pdata->tx_param, dev, "tx"); - if (!master->dma_tx) + if (!controller->dma_tx) return -ENODEV; - master->dma_rx = dma_request_slave_channel_compat(mask, + controller->dma_rx = dma_request_slave_channel_compat(mask, pdata->dma_filter, pdata->rx_param, dev, "rx"); - if (!master->dma_rx) { - dma_release_channel(master->dma_tx); - master->dma_tx = NULL; + if (!controller->dma_rx) { + dma_release_channel(controller->dma_tx); + controller->dma_tx = NULL; return -ENODEV; } @@ -219,17 +219,17 @@ int pxa2xx_spi_dma_setup(struct driver_data *drv_data) void pxa2xx_spi_dma_release(struct driver_data *drv_data) { - struct spi_controller *master = drv_data->master; + struct spi_controller *controller = drv_data->controller; - if (master->dma_rx) { - dmaengine_terminate_sync(master->dma_rx); - dma_release_channel(master->dma_rx); - master->dma_rx = NULL; + if (controller->dma_rx) { + dmaengine_terminate_sync(controller->dma_rx); + dma_release_channel(controller->dma_rx); + controller->dma_rx = NULL; } - if (master->dma_tx) { - dmaengine_terminate_sync(master->dma_tx); - dma_release_channel(master->dma_tx); - master->dma_tx = NULL; + if (controller->dma_tx) { + dmaengine_terminate_sync(controller->dma_tx); + dma_release_channel(controller->dma_tx); + controller->dma_tx = NULL; } } diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index 869f188b02eb..1727fdfbac28 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c @@ -197,7 +197,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, struct platform_device_info pi; int ret; struct platform_device *pdev; - struct pxa2xx_spi_master spi_pdata; + struct pxa2xx_spi_controller spi_pdata; struct ssp_device *ssp; struct pxa_spi_info *c; char buf[40]; @@ -265,7 +265,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, static void pxa2xx_spi_pci_remove(struct pci_dev *dev) { struct platform_device *pdev = pci_get_drvdata(dev); - struct pxa2xx_spi_master *spi_pdata; + struct pxa2xx_spi_controller *spi_pdata; spi_pdata = dev_get_platdata(&pdev->dev); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 3e82eaad0f2d..b6ddba833d02 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -328,7 +328,7 @@ static void lpss_ssp_setup(struct driver_data *drv_data) __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value); /* Enable multiblock DMA transfers */ - if (drv_data->master_info->enable_dma) { + if (drv_data->controller_info->enable_dma) { __lpss_ssp_write_priv(drv_data, config->reg_ssp, 1); if (config->reg_general >= 0) { @@ -368,7 +368,7 @@ static void lpss_ssp_select_cs(struct spi_device *spi, __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value); 
ndelay(1000000000 / - (drv_data->master->max_speed_hz / 2)); + (drv_data->controller->max_speed_hz / 2)); } } @@ -567,7 +567,7 @@ static int u32_reader(struct driver_data *drv_data) static void reset_sccr1(struct driver_data *drv_data) { struct chip_data *chip = - spi_get_ctldata(drv_data->master->cur_msg->spi); + spi_get_ctldata(drv_data->controller->cur_msg->spi); u32 sccr1_reg; sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1; @@ -599,8 +599,8 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg) dev_err(&drv_data->pdev->dev, "%s\n", msg); - drv_data->master->cur_msg->status = -EIO; - spi_finalize_current_transfer(drv_data->master); + drv_data->controller->cur_msg->status = -EIO; + spi_finalize_current_transfer(drv_data->controller); } static void int_transfer_complete(struct driver_data *drv_data) @@ -611,7 +611,7 @@ static void int_transfer_complete(struct driver_data *drv_data) if (!pxa25x_ssp_comp(drv_data)) pxa2xx_spi_write(drv_data, SSTO, 0); - spi_finalize_current_transfer(drv_data->master); + spi_finalize_current_transfer(drv_data->controller); } static irqreturn_t interrupt_transfer(struct driver_data *drv_data) @@ -747,7 +747,7 @@ static irqreturn_t ssp_int(int irq, void *dev_id) pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg & ~drv_data->int_cr1); pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg); - if (!drv_data->master->cur_msg) { + if (!drv_data->controller->cur_msg) { handle_bad_msg(drv_data); /* Never fail */ return IRQ_HANDLED; @@ -879,7 +879,7 @@ static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds) static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) { - unsigned long ssp_clk = drv_data->master->max_speed_hz; + unsigned long ssp_clk = drv_data->controller->max_speed_hz; const struct ssp_device *ssp = drv_data->ssp; rate = min_t(int, ssp_clk, rate); @@ -894,7 +894,7 @@ static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data, int rate) { struct chip_data *chip = - spi_get_ctldata(drv_data->master->cur_msg->spi); + spi_get_ctldata(drv_data->controller->cur_msg->spi); unsigned int clk_div; switch (drv_data->ssp_type) { @@ -908,7 +908,7 @@ static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data, return clk_div << 8; } -static bool pxa2xx_spi_can_dma(struct spi_controller *master, +static bool pxa2xx_spi_can_dma(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *xfer) { @@ -919,12 +919,12 @@ static bool pxa2xx_spi_can_dma(struct spi_controller *master, xfer->len >= chip->dma_burst_size; } -static int pxa2xx_spi_transfer_one(struct spi_controller *master, +static int pxa2xx_spi_transfer_one(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *transfer) { - struct driver_data *drv_data = spi_controller_get_devdata(master); - struct spi_message *message = master->cur_msg; + struct driver_data *drv_data = spi_controller_get_devdata(controller); + struct spi_message *message = controller->cur_msg; struct chip_data *chip = spi_get_ctldata(message->spi); u32 dma_thresh = chip->dma_threshold; u32 dma_burst = chip->dma_burst_size; @@ -1006,9 +1006,9 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master, "DMA burst size reduced to match bits_per_word\n"); } - dma_mapped = master->can_dma && - master->can_dma(master, message->spi, transfer) && - master->cur_msg_mapped; + dma_mapped = controller->can_dma && + controller->can_dma(controller, message->spi, transfer) && + controller->cur_msg_mapped; if (dma_mapped) { /* 
Ensure we have the correct interrupt handler */ @@ -1036,12 +1036,12 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master, cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits); if (!pxa25x_ssp_comp(drv_data)) dev_dbg(&message->spi->dev, "%u Hz actual, %s\n", - master->max_speed_hz + controller->max_speed_hz / (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)), dma_mapped ? "DMA" : "PIO"); else dev_dbg(&message->spi->dev, "%u Hz actual, %s\n", - master->max_speed_hz / 2 + controller->max_speed_hz / 2 / (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)), dma_mapped ? "DMA" : "PIO"); @@ -1092,7 +1092,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master, } } - if (spi_controller_is_slave(master)) { + if (spi_controller_is_slave(controller)) { while (drv_data->write(drv_data)) ; if (drv_data->gpiod_ready) { @@ -1111,9 +1111,9 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master, return 1; } -static int pxa2xx_spi_slave_abort(struct spi_master *master) +static int pxa2xx_spi_slave_abort(struct spi_controller *controller) { - struct driver_data *drv_data = spi_controller_get_devdata(master); + struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Stop and reset SSP */ write_SSSR_CS(drv_data, drv_data->clear_sr); @@ -1126,16 +1126,16 @@ static int pxa2xx_spi_slave_abort(struct spi_master *master) dev_dbg(&drv_data->pdev->dev, "transfer aborted\n"); - drv_data->master->cur_msg->status = -EINTR; - spi_finalize_current_transfer(drv_data->master); + drv_data->controller->cur_msg->status = -EINTR; + spi_finalize_current_transfer(drv_data->controller); return 0; } -static void pxa2xx_spi_handle_err(struct spi_controller *master, +static void pxa2xx_spi_handle_err(struct spi_controller *controller, struct spi_message *msg) { - struct driver_data *drv_data = spi_controller_get_devdata(master); + struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Disable the SSP */ pxa2xx_spi_write(drv_data, SSCR0, @@ -1159,9 +1159,9 @@ static void pxa2xx_spi_handle_err(struct spi_controller *master, pxa2xx_spi_dma_stop(drv_data); } -static int pxa2xx_spi_unprepare_transfer(struct spi_controller *master) +static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller) { - struct driver_data *drv_data = spi_controller_get_devdata(master); + struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Disable the SSP now */ pxa2xx_spi_write(drv_data, SSCR0, @@ -1260,7 +1260,7 @@ static int setup(struct spi_device *spi) break; default: tx_hi_thres = 0; - if (spi_controller_is_slave(drv_data->master)) { + if (spi_controller_is_slave(drv_data->controller)) { tx_thres = 1; rx_thres = 2; } else { @@ -1287,7 +1287,7 @@ static int setup(struct spi_device *spi) chip->frm = spi->chip_select; } - chip->enable_dma = drv_data->master_info->enable_dma; + chip->enable_dma = drv_data->controller_info->enable_dma; chip->timeout = TIMOUT_DFLT; } @@ -1310,7 +1310,7 @@ static int setup(struct spi_device *spi) if (chip_info->enable_loopback) chip->cr1 = SSCR1_LBM; } - if (spi_controller_is_slave(drv_data->master)) { + if (spi_controller_is_slave(drv_data->controller)) { chip->cr1 |= SSCR1_SCFR; chip->cr1 |= SSCR1_SCLKDIR; chip->cr1 |= SSCR1_SFRMDIR; @@ -1497,10 +1497,10 @@ static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param) #endif /* CONFIG_PCI */ -static struct pxa2xx_spi_master * +static struct pxa2xx_spi_controller * pxa2xx_spi_init_pdata(struct platform_device *pdev) { - struct pxa2xx_spi_master *pdata; + struct pxa2xx_spi_controller 
*pdata; struct acpi_device *adev; struct ssp_device *ssp; struct resource *res; @@ -1568,10 +1568,10 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev) return pdata; } -static int pxa2xx_spi_fw_translate_cs(struct spi_controller *master, +static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller, unsigned int cs) { - struct driver_data *drv_data = spi_controller_get_devdata(master); + struct driver_data *drv_data = spi_controller_get_devdata(controller); if (has_acpi_companion(&drv_data->pdev->dev)) { switch (drv_data->ssp_type) { @@ -1595,8 +1595,8 @@ static int pxa2xx_spi_fw_translate_cs(struct spi_controller *master, static int pxa2xx_spi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct pxa2xx_spi_master *platform_info; - struct spi_controller *master; + struct pxa2xx_spi_controller *platform_info; + struct spi_controller *controller; struct driver_data *drv_data; struct ssp_device *ssp; const struct lpss_config *config; @@ -1622,37 +1622,37 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) } if (platform_info->is_slave) - master = spi_alloc_slave(dev, sizeof(struct driver_data)); + controller = spi_alloc_slave(dev, sizeof(struct driver_data)); else - master = spi_alloc_master(dev, sizeof(struct driver_data)); + controller = spi_alloc_master(dev, sizeof(struct driver_data)); - if (!master) { - dev_err(&pdev->dev, "cannot alloc spi_master\n"); + if (!controller) { + dev_err(&pdev->dev, "cannot alloc spi_controller\n"); pxa_ssp_free(ssp); return -ENOMEM; } - drv_data = spi_controller_get_devdata(master); - drv_data->master = master; - drv_data->master_info = platform_info; + drv_data = spi_controller_get_devdata(controller); + drv_data->controller = controller; + drv_data->controller_info = platform_info; drv_data->pdev = pdev; drv_data->ssp = ssp; - master->dev.of_node = pdev->dev.of_node; + controller->dev.of_node = pdev->dev.of_node; /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; - - master->bus_num = ssp->port_id; - master->dma_alignment = DMA_ALIGNMENT; - master->cleanup = cleanup; - master->setup = setup; - master->set_cs = pxa2xx_spi_set_cs; - master->transfer_one = pxa2xx_spi_transfer_one; - master->slave_abort = pxa2xx_spi_slave_abort; - master->handle_err = pxa2xx_spi_handle_err; - master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer; - master->fw_translate_cs = pxa2xx_spi_fw_translate_cs; - master->auto_runtime_pm = true; - master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; + controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; + + controller->bus_num = ssp->port_id; + controller->dma_alignment = DMA_ALIGNMENT; + controller->cleanup = cleanup; + controller->setup = setup; + controller->set_cs = pxa2xx_spi_set_cs; + controller->transfer_one = pxa2xx_spi_transfer_one; + controller->slave_abort = pxa2xx_spi_slave_abort; + controller->handle_err = pxa2xx_spi_handle_err; + controller->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer; + controller->fw_translate_cs = pxa2xx_spi_fw_translate_cs; + controller->auto_runtime_pm = true; + controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; drv_data->ssp_type = ssp->type; @@ -1661,10 +1661,10 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) if (pxa25x_ssp_comp(drv_data)) { switch (drv_data->ssp_type) { case QUARK_X1000_SSP: - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); + controller->bits_per_word_mask = 
SPI_BPW_RANGE_MASK(4, 32); break; default: - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); + controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); break; } @@ -1673,7 +1673,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) drv_data->clear_sr = SSSR_ROR; drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR; } else { - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); + controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE; drv_data->dma_cr1 = DEFAULT_DMA_CR1; drv_data->clear_sr = SSSR_ROR | SSSR_TINT; @@ -1685,7 +1685,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) drv_data); if (status < 0) { dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq); - goto out_error_master_alloc; + goto out_error_controller_alloc; } /* Setup DMA if requested */ @@ -1695,8 +1695,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) dev_dbg(dev, "no DMA channels available, using PIO\n"); platform_info->enable_dma = false; } else { - master->can_dma = pxa2xx_spi_can_dma; - master->max_dma_len = MAX_DMA_LEN; + controller->can_dma = pxa2xx_spi_can_dma; + controller->max_dma_len = MAX_DMA_LEN; } } @@ -1705,7 +1705,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) if (status) goto out_error_dma_irq_alloc; - master->max_speed_hz = clk_get_rate(ssp->clk); + controller->max_speed_hz = clk_get_rate(ssp->clk); /* Load default SSP configuration */ pxa2xx_spi_write(drv_data, SSCR0, 0); @@ -1728,7 +1728,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) break; default: - if (spi_controller_is_slave(master)) { + if (spi_controller_is_slave(controller)) { tmp = SSCR1_SCFR | SSCR1_SCLKDIR | SSCR1_SFRMDIR | @@ -1741,7 +1741,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) } pxa2xx_spi_write(drv_data, SSCR1, tmp); tmp = SSCR0_Motorola | SSCR0_DataSize(8); - if (!spi_controller_is_slave(master)) + if (!spi_controller_is_slave(controller)) tmp |= SSCR0_SCR(2); pxa2xx_spi_write(drv_data, SSCR0, tmp); break; @@ -1766,24 +1766,24 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) platform_info->num_chipselect = config->cs_num; } } - master->num_chipselect = platform_info->num_chipselect; + controller->num_chipselect = platform_info->num_chipselect; count = gpiod_count(&pdev->dev, "cs"); if (count > 0) { int i; - master->num_chipselect = max_t(int, count, - master->num_chipselect); + controller->num_chipselect = max_t(int, count, + controller->num_chipselect); drv_data->cs_gpiods = devm_kcalloc(&pdev->dev, - master->num_chipselect, sizeof(struct gpio_desc *), + controller->num_chipselect, sizeof(struct gpio_desc *), GFP_KERNEL); if (!drv_data->cs_gpiods) { status = -ENOMEM; goto out_error_clock_enabled; } - for (i = 0; i < master->num_chipselect; i++) { + for (i = 0; i < controller->num_chipselect; i++) { struct gpio_desc *gpiod; gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS); @@ -1816,9 +1816,9 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) /* Register with the SPI framework */ platform_set_drvdata(pdev, drv_data); - status = devm_spi_register_controller(&pdev->dev, master); + status = devm_spi_register_controller(&pdev->dev, controller); if (status != 0) { - dev_err(&pdev->dev, "problem registering spi master\n"); + dev_err(&pdev->dev, "problem registering spi controller\n"); goto out_error_clock_enabled; } @@ -1833,8 +1833,8 @@ out_error_dma_irq_alloc: pxa2xx_spi_dma_release(drv_data); free_irq(ssp->irq, drv_data); -out_error_master_alloc: - 
spi_controller_put(master); +out_error_controller_alloc: + spi_controller_put(controller); pxa_ssp_free(ssp); return status; } @@ -1855,7 +1855,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) clk_disable_unprepare(ssp->clk); /* Release DMA */ - if (drv_data->master_info->enable_dma) + if (drv_data->controller_info->enable_dma) pxa2xx_spi_dma_release(drv_data); pm_runtime_put_noidle(&pdev->dev); @@ -1877,7 +1877,7 @@ static int pxa2xx_spi_suspend(struct device *dev) struct ssp_device *ssp = drv_data->ssp; int status; - status = spi_controller_suspend(drv_data->master); + status = spi_controller_suspend(drv_data->controller); if (status != 0) return status; pxa2xx_spi_write(drv_data, SSCR0, 0); @@ -1902,7 +1902,7 @@ static int pxa2xx_spi_resume(struct device *dev) } /* Start the queue running */ - return spi_controller_resume(drv_data->master); + return spi_controller_resume(drv_data->controller); } #endif diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h index 4e324da66ef7..aba777b4502d 100644 --- a/drivers/spi/spi-pxa2xx.h +++ b/drivers/spi/spi-pxa2xx.h @@ -31,10 +31,10 @@ struct driver_data { /* SPI framework hookup */ enum pxa_ssp_type ssp_type; - struct spi_controller *master; + struct spi_controller *controller; /* PXA hookup */ - struct pxa2xx_spi_master *master_info; + struct pxa2xx_spi_controller *controller_info; /* SSP register addresses */ void __iomem *ioaddr; diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index a4ef641b5227..556870dcdf79 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -180,7 +180,7 @@ struct rspi_data { void __iomem *addr; u32 max_speed_hz; - struct spi_master *master; + struct spi_controller *ctlr; wait_queue_head_t wait; struct clk *clk; u16 spcmd; @@ -237,8 +237,8 @@ static u16 rspi_read_data(const struct rspi_data *rspi) /* optional functions */ struct spi_ops { int (*set_config_register)(struct rspi_data *rspi, int access_size); - int (*transfer_one)(struct spi_master *master, struct spi_device *spi, - struct spi_transfer *xfer); + int (*transfer_one)(struct spi_controller *ctlr, + struct spi_device *spi, struct spi_transfer *xfer); u16 mode_bits; u16 flags; u16 fifo_size; @@ -466,7 +466,7 @@ static int rspi_data_out(struct rspi_data *rspi, u8 data) { int error = rspi_wait_for_tx_empty(rspi); if (error < 0) { - dev_err(&rspi->master->dev, "transmit timeout\n"); + dev_err(&rspi->ctlr->dev, "transmit timeout\n"); return error; } rspi_write_data(rspi, data); @@ -480,7 +480,7 @@ static int rspi_data_in(struct rspi_data *rspi) error = rspi_wait_for_rx_full(rspi); if (error < 0) { - dev_err(&rspi->master->dev, "receive timeout\n"); + dev_err(&rspi->ctlr->dev, "receive timeout\n"); return error; } data = rspi_read_data(rspi); @@ -526,8 +526,8 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, /* First prepare and submit the DMA request(s), as this may fail */ if (rx) { - desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx, - rx->sgl, rx->nents, DMA_DEV_TO_MEM, + desc_rx = dmaengine_prep_slave_sg(rspi->ctlr->dma_rx, rx->sgl, + rx->nents, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_rx) { ret = -EAGAIN; @@ -546,8 +546,8 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, } if (tx) { - desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx, - tx->sgl, tx->nents, DMA_MEM_TO_DEV, + desc_tx = dmaengine_prep_slave_sg(rspi->ctlr->dma_tx, tx->sgl, + tx->nents, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_tx) { ret = 
-EAGAIN; @@ -584,9 +584,9 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, /* Now start DMA */ if (rx) - dma_async_issue_pending(rspi->master->dma_rx); + dma_async_issue_pending(rspi->ctlr->dma_rx); if (tx) - dma_async_issue_pending(rspi->master->dma_tx); + dma_async_issue_pending(rspi->ctlr->dma_tx); ret = wait_event_interruptible_timeout(rspi->wait, rspi->dma_callbacked, HZ); @@ -594,13 +594,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, ret = 0; } else { if (!ret) { - dev_err(&rspi->master->dev, "DMA timeout\n"); + dev_err(&rspi->ctlr->dev, "DMA timeout\n"); ret = -ETIMEDOUT; } if (tx) - dmaengine_terminate_all(rspi->master->dma_tx); + dmaengine_terminate_all(rspi->ctlr->dma_tx); if (rx) - dmaengine_terminate_all(rspi->master->dma_rx); + dmaengine_terminate_all(rspi->ctlr->dma_rx); } rspi_disable_irq(rspi, irq_mask); @@ -614,12 +614,12 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, no_dma_tx: if (rx) - dmaengine_terminate_all(rspi->master->dma_rx); + dmaengine_terminate_all(rspi->ctlr->dma_rx); no_dma_rx: if (ret == -EAGAIN) { pr_warn_once("%s %s: DMA not available, falling back to PIO\n", - dev_driver_string(&rspi->master->dev), - dev_name(&rspi->master->dev)); + dev_driver_string(&rspi->ctlr->dev), + dev_name(&rspi->ctlr->dev)); } return ret; } @@ -660,10 +660,10 @@ static bool __rspi_can_dma(const struct rspi_data *rspi, return xfer->len > rspi->ops->fifo_size; } -static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi, +static bool rspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer) { - struct rspi_data *rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); return __rspi_can_dma(rspi, xfer); } @@ -671,7 +671,7 @@ static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi, static int rspi_dma_check_then_transfer(struct rspi_data *rspi, struct spi_transfer *xfer) { - if (!rspi->master->can_dma || !__rspi_can_dma(rspi, xfer)) + if (!rspi->ctlr->can_dma || !__rspi_can_dma(rspi, xfer)) return -EAGAIN; /* rx_buf can be NULL on RSPI on SH in TX-only Mode */ @@ -698,10 +698,10 @@ static int rspi_common_transfer(struct rspi_data *rspi, return 0; } -static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi, - struct spi_transfer *xfer) +static int rspi_transfer_one(struct spi_controller *ctlr, + struct spi_device *spi, struct spi_transfer *xfer) { - struct rspi_data *rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); u8 spcr; spcr = rspi_read8(rspi, RSPI_SPCR); @@ -716,11 +716,11 @@ static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi, return rspi_common_transfer(rspi, xfer); } -static int rspi_rz_transfer_one(struct spi_master *master, +static int rspi_rz_transfer_one(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer) { - struct rspi_data *rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); rspi_rz_receive_init(rspi); @@ -739,7 +739,7 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx, if (n == QSPI_BUFFER_SIZE) { ret = rspi_wait_for_tx_empty(rspi); if (ret < 0) { - dev_err(&rspi->master->dev, "transmit timeout\n"); + dev_err(&rspi->ctlr->dev, "transmit timeout\n"); return ret; } for (i = 0; i < n; i++) @@ -747,7 +747,7 @@ static int qspi_trigger_transfer_out_in(struct rspi_data 
*rspi, const u8 *tx, ret = rspi_wait_for_rx_full(rspi); if (ret < 0) { - dev_err(&rspi->master->dev, "receive timeout\n"); + dev_err(&rspi->ctlr->dev, "receive timeout\n"); return ret; } for (i = 0; i < n; i++) @@ -785,7 +785,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer) unsigned int i, len; int ret; - if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) { + if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) { ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL); if (ret != -EAGAIN) return ret; @@ -796,7 +796,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer) if (len == QSPI_BUFFER_SIZE) { ret = rspi_wait_for_tx_empty(rspi); if (ret < 0) { - dev_err(&rspi->master->dev, "transmit timeout\n"); + dev_err(&rspi->ctlr->dev, "transmit timeout\n"); return ret; } for (i = 0; i < len; i++) @@ -822,7 +822,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer) unsigned int i, len; int ret; - if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) { + if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) { int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg); if (ret != -EAGAIN) return ret; @@ -833,7 +833,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer) if (len == QSPI_BUFFER_SIZE) { ret = rspi_wait_for_rx_full(rspi); if (ret < 0) { - dev_err(&rspi->master->dev, "receive timeout\n"); + dev_err(&rspi->ctlr->dev, "receive timeout\n"); return ret; } for (i = 0; i < len; i++) @@ -849,10 +849,10 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer) return 0; } -static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi, - struct spi_transfer *xfer) +static int qspi_transfer_one(struct spi_controller *ctlr, + struct spi_device *spi, struct spi_transfer *xfer) { - struct rspi_data *rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); if (spi->mode & SPI_LOOP) { return qspi_transfer_out_in(rspi, xfer); @@ -870,7 +870,7 @@ static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi, static int rspi_setup(struct spi_device *spi) { - struct rspi_data *rspi = spi_master_get_devdata(spi->master); + struct rspi_data *rspi = spi_controller_get_devdata(spi->controller); rspi->max_speed_hz = spi->max_speed_hz; @@ -955,10 +955,10 @@ static int qspi_setup_sequencer(struct rspi_data *rspi, return 0; } -static int rspi_prepare_message(struct spi_master *master, +static int rspi_prepare_message(struct spi_controller *ctlr, struct spi_message *msg) { - struct rspi_data *rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); int ret; if (msg->spi->mode & @@ -974,10 +974,10 @@ static int rspi_prepare_message(struct spi_master *master, return 0; } -static int rspi_unprepare_message(struct spi_master *master, +static int rspi_unprepare_message(struct spi_controller *ctlr, struct spi_message *msg) { - struct rspi_data *rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); /* Disable SPI function */ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR); @@ -1081,7 +1081,7 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev, return chan; } -static int rspi_request_dma(struct device *dev, struct spi_master *master, +static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr, const struct resource *res) { const struct rspi_plat_data *rspi_pd 
= dev_get_platdata(dev); @@ -1099,37 +1099,37 @@ static int rspi_request_dma(struct device *dev, struct spi_master *master, return 0; } - master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id, - res->start + RSPI_SPDR); - if (!master->dma_tx) + ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id, + res->start + RSPI_SPDR); + if (!ctlr->dma_tx) return -ENODEV; - master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id, - res->start + RSPI_SPDR); - if (!master->dma_rx) { - dma_release_channel(master->dma_tx); - master->dma_tx = NULL; + ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id, + res->start + RSPI_SPDR); + if (!ctlr->dma_rx) { + dma_release_channel(ctlr->dma_tx); + ctlr->dma_tx = NULL; return -ENODEV; } - master->can_dma = rspi_can_dma; + ctlr->can_dma = rspi_can_dma; dev_info(dev, "DMA available"); return 0; } -static void rspi_release_dma(struct spi_master *master) +static void rspi_release_dma(struct spi_controller *ctlr) { - if (master->dma_tx) - dma_release_channel(master->dma_tx); - if (master->dma_rx) - dma_release_channel(master->dma_rx); + if (ctlr->dma_tx) + dma_release_channel(ctlr->dma_tx); + if (ctlr->dma_rx) + dma_release_channel(ctlr->dma_rx); } static int rspi_remove(struct platform_device *pdev) { struct rspi_data *rspi = platform_get_drvdata(pdev); - rspi_release_dma(rspi->master); + rspi_release_dma(rspi->ctlr); pm_runtime_disable(&pdev->dev); return 0; @@ -1139,7 +1139,7 @@ static const struct spi_ops rspi_ops = { .set_config_register = rspi_set_config_register, .transfer_one = rspi_transfer_one, .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, - .flags = SPI_MASTER_MUST_TX, + .flags = SPI_CONTROLLER_MUST_TX, .fifo_size = 8, }; @@ -1147,7 +1147,7 @@ static const struct spi_ops rspi_rz_ops = { .set_config_register = rspi_rz_set_config_register, .transfer_one = rspi_rz_transfer_one, .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, - .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX, + .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .fifo_size = 8, /* 8 for TX, 32 for RX */ }; @@ -1157,7 +1157,7 @@ static const struct spi_ops qspi_ops = { .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP | SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD, - .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX, + .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .fifo_size = 32, }; @@ -1174,7 +1174,7 @@ static const struct of_device_id rspi_of_match[] = { MODULE_DEVICE_TABLE(of, rspi_of_match); -static int rspi_parse_dt(struct device *dev, struct spi_master *master) +static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr) { u32 num_cs; int error; @@ -1186,12 +1186,12 @@ static int rspi_parse_dt(struct device *dev, struct spi_master *master) return error; } - master->num_chipselect = num_cs; + ctlr->num_chipselect = num_cs; return 0; } #else #define rspi_of_match NULL -static inline int rspi_parse_dt(struct device *dev, struct spi_master *master) +static inline int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr) { return -EINVAL; } @@ -1212,28 +1212,28 @@ static int rspi_request_irq(struct device *dev, unsigned int irq, static int rspi_probe(struct platform_device *pdev) { struct resource *res; - struct spi_master *master; + struct spi_controller *ctlr; struct rspi_data *rspi; int ret; const struct rspi_plat_data *rspi_pd; const struct spi_ops *ops; - master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data)); - if (master == NULL) + ctlr = spi_alloc_master(&pdev->dev, sizeof(struct 
rspi_data)); + if (ctlr == NULL) return -ENOMEM; ops = of_device_get_match_data(&pdev->dev); if (ops) { - ret = rspi_parse_dt(&pdev->dev, master); + ret = rspi_parse_dt(&pdev->dev, ctlr); if (ret) goto error1; } else { ops = (struct spi_ops *)pdev->id_entry->driver_data; rspi_pd = dev_get_platdata(&pdev->dev); if (rspi_pd && rspi_pd->num_chipselect) - master->num_chipselect = rspi_pd->num_chipselect; + ctlr->num_chipselect = rspi_pd->num_chipselect; else - master->num_chipselect = 2; /* default */ + ctlr->num_chipselect = 2; /* default */ } /* ops parameter check */ @@ -1243,10 +1243,10 @@ static int rspi_probe(struct platform_device *pdev) goto error1; } - rspi = spi_master_get_devdata(master); + rspi = spi_controller_get_devdata(ctlr); platform_set_drvdata(pdev, rspi); rspi->ops = ops; - rspi->master = master; + rspi->ctlr = ctlr; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); rspi->addr = devm_ioremap_resource(&pdev->dev, res); @@ -1266,15 +1266,15 @@ static int rspi_probe(struct platform_device *pdev) init_waitqueue_head(&rspi->wait); - master->bus_num = pdev->id; - master->setup = rspi_setup; - master->auto_runtime_pm = true; - master->transfer_one = ops->transfer_one; - master->prepare_message = rspi_prepare_message; - master->unprepare_message = rspi_unprepare_message; - master->mode_bits = ops->mode_bits; - master->flags = ops->flags; - master->dev.of_node = pdev->dev.of_node; + ctlr->bus_num = pdev->id; + ctlr->setup = rspi_setup; + ctlr->auto_runtime_pm = true; + ctlr->transfer_one = ops->transfer_one; + ctlr->prepare_message = rspi_prepare_message; + ctlr->unprepare_message = rspi_unprepare_message; + ctlr->mode_bits = ops->mode_bits; + ctlr->flags = ops->flags; + ctlr->dev.of_node = pdev->dev.of_node; ret = platform_get_irq_byname(pdev, "rx"); if (ret < 0) { @@ -1311,13 +1311,13 @@ static int rspi_probe(struct platform_device *pdev) goto error2; } - ret = rspi_request_dma(&pdev->dev, master, res); + ret = rspi_request_dma(&pdev->dev, ctlr, res); if (ret < 0) dev_warn(&pdev->dev, "DMA not available, using PIO\n"); - ret = devm_spi_register_master(&pdev->dev, master); + ret = devm_spi_register_controller(&pdev->dev, ctlr); if (ret < 0) { - dev_err(&pdev->dev, "spi_register_master error.\n"); + dev_err(&pdev->dev, "devm_spi_register_controller error.\n"); goto error3; } @@ -1326,11 +1326,11 @@ static int rspi_probe(struct platform_device *pdev) return 0; error3: - rspi_release_dma(master); + rspi_release_dma(ctlr); error2: pm_runtime_disable(&pdev->dev); error1: - spi_master_put(master); + spi_controller_put(ctlr); return ret; } @@ -1349,14 +1349,14 @@ static int rspi_suspend(struct device *dev) { struct rspi_data *rspi = dev_get_drvdata(dev); - return spi_master_suspend(rspi->master); + return spi_controller_suspend(rspi->ctlr); } static int rspi_resume(struct device *dev) { struct rspi_data *rspi = dev_get_drvdata(dev); - return spi_master_resume(rspi->master); + return spi_controller_resume(rspi->ctlr); } static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume); diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c index dc0926e43665..7f73f91d412a 100644 --- a/drivers/spi/spi-sh-hspi.c +++ b/drivers/spi/spi-sh-hspi.c @@ -35,7 +35,7 @@ struct hspi_priv { void __iomem *addr; - struct spi_master *master; + struct spi_controller *ctlr; struct device *dev; struct clk *clk; }; @@ -140,10 +140,10 @@ static void hspi_hw_setup(struct hspi_priv *hspi, hspi_write(hspi, SPSCR, 0x21); /* master mode / CS control */ } -static int hspi_transfer_one_message(struct 
spi_master *master, +static int hspi_transfer_one_message(struct spi_controller *ctlr, struct spi_message *msg) { - struct hspi_priv *hspi = spi_master_get_devdata(master); + struct hspi_priv *hspi = spi_controller_get_devdata(ctlr); struct spi_transfer *t; u32 tx; u32 rx; @@ -205,7 +205,7 @@ static int hspi_transfer_one_message(struct spi_master *master, ndelay(nsecs); hspi_hw_cs_disable(hspi); } - spi_finalize_current_message(master); + spi_finalize_current_message(ctlr); return ret; } @@ -213,7 +213,7 @@ static int hspi_transfer_one_message(struct spi_master *master, static int hspi_probe(struct platform_device *pdev) { struct resource *res; - struct spi_master *master; + struct spi_controller *ctlr; struct hspi_priv *hspi; struct clk *clk; int ret; @@ -225,11 +225,9 @@ static int hspi_probe(struct platform_device *pdev) return -EINVAL; } - master = spi_alloc_master(&pdev->dev, sizeof(*hspi)); - if (!master) { - dev_err(&pdev->dev, "spi_alloc_master error.\n"); + ctlr = spi_alloc_master(&pdev->dev, sizeof(*hspi)); + if (!ctlr) return -ENOMEM; - } clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { @@ -238,33 +236,32 @@ static int hspi_probe(struct platform_device *pdev) goto error0; } - hspi = spi_master_get_devdata(master); + hspi = spi_controller_get_devdata(ctlr); platform_set_drvdata(pdev, hspi); /* init hspi */ - hspi->master = master; + hspi->ctlr = ctlr; hspi->dev = &pdev->dev; hspi->clk = clk; hspi->addr = devm_ioremap(hspi->dev, res->start, resource_size(res)); if (!hspi->addr) { - dev_err(&pdev->dev, "ioremap error.\n"); ret = -ENOMEM; goto error1; } pm_runtime_enable(&pdev->dev); - master->bus_num = pdev->id; - master->mode_bits = SPI_CPOL | SPI_CPHA; - master->dev.of_node = pdev->dev.of_node; - master->auto_runtime_pm = true; - master->transfer_one_message = hspi_transfer_one_message; - master->bits_per_word_mask = SPI_BPW_MASK(8); + ctlr->bus_num = pdev->id; + ctlr->mode_bits = SPI_CPOL | SPI_CPHA; + ctlr->dev.of_node = pdev->dev.of_node; + ctlr->auto_runtime_pm = true; + ctlr->transfer_one_message = hspi_transfer_one_message; + ctlr->bits_per_word_mask = SPI_BPW_MASK(8); - ret = devm_spi_register_master(&pdev->dev, master); + ret = devm_spi_register_controller(&pdev->dev, ctlr); if (ret < 0) { - dev_err(&pdev->dev, "spi_register_master error.\n"); + dev_err(&pdev->dev, "devm_spi_register_controller error.\n"); goto error2; } @@ -275,7 +272,7 @@ static int hspi_probe(struct platform_device *pdev) error1: clk_put(clk); error0: - spi_master_put(master); + spi_controller_put(ctlr); return ret; } diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index d14b407cc800..e2eb466db10a 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * SuperH MSIOF SPI Master Interface + * SuperH MSIOF SPI Controller Interface * * Copyright (c) 2009 Magnus Damm * Copyright (C) 2014 Renesas Electronics Corporation @@ -32,14 +32,15 @@ #include <asm/unaligned.h> struct sh_msiof_chipdata { + u32 bits_per_word_mask; u16 tx_fifo_size; u16 rx_fifo_size; - u16 master_flags; + u16 ctlr_flags; u16 min_div_pow; }; struct sh_msiof_spi_priv { - struct spi_master *master; + struct spi_controller *ctlr; void __iomem *mapbase; struct clk *clk; struct platform_device *pdev; @@ -287,7 +288,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps); sh_msiof_write(p, TSCR, scr); - if (!(p->master->flags & SPI_MASTER_MUST_TX)) + if (!(p->ctlr->flags & 
SPI_CONTROLLER_MUST_TX)) sh_msiof_write(p, RSCR, scr); } @@ -351,14 +352,14 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss, tmp |= !cs_high << MDR1_SYNCAC_SHIFT; tmp |= lsb_first << MDR1_BITLSB_SHIFT; tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p); - if (spi_controller_is_slave(p->master)) { + if (spi_controller_is_slave(p->ctlr)) { sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON); } else { sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON | (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT); } - if (p->master->flags & SPI_MASTER_MUST_TX) { + if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) { /* These bits are reserved if RX needs TX */ tmp &= ~0x0000ffff; } @@ -382,7 +383,7 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, { u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words); - if (tx_buf || (p->master->flags & SPI_MASTER_MUST_TX)) + if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX)) sh_msiof_write(p, TMDR2, dr2); else sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1); @@ -539,8 +540,9 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p, static int sh_msiof_spi_setup(struct spi_device *spi) { - struct device_node *np = spi->master->dev.of_node; - struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); + struct device_node *np = spi->controller->dev.of_node; + struct sh_msiof_spi_priv *p = + spi_controller_get_devdata(spi->controller); u32 clr, set, tmp; if (!np) { @@ -556,7 +558,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi) return 0; } - if (spi_controller_is_slave(p->master)) + if (spi_controller_is_slave(p->ctlr)) return 0; if (p->native_cs_inited && @@ -581,10 +583,10 @@ static int sh_msiof_spi_setup(struct spi_device *spi) return 0; } -static int sh_msiof_prepare_message(struct spi_master *master, +static int sh_msiof_prepare_message(struct spi_controller *ctlr, struct spi_message *msg) { - struct sh_msiof_spi_priv *p = spi_master_get_devdata(master); + struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr); const struct spi_device *spi = msg->spi; u32 ss, cs_high; @@ -605,7 +607,7 @@ static int sh_msiof_prepare_message(struct spi_master *master, static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf) { - bool slave = spi_controller_is_slave(p->master); + bool slave = spi_controller_is_slave(p->ctlr); int ret = 0; /* setup clock and rx/tx signals */ @@ -625,7 +627,7 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf) static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf) { - bool slave = spi_controller_is_slave(p->master); + bool slave = spi_controller_is_slave(p->ctlr); int ret = 0; /* shut down frame, rx/tx and clock signals */ @@ -641,9 +643,9 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf) return ret; } -static int sh_msiof_slave_abort(struct spi_master *master) +static int sh_msiof_slave_abort(struct spi_controller *ctlr) { - struct sh_msiof_spi_priv *p = spi_master_get_devdata(master); + struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr); p->slave_aborted = true; complete(&p->done); @@ -654,7 +656,7 @@ static int sh_msiof_slave_abort(struct spi_master *master) static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p, struct completion *x) { - if (spi_controller_is_slave(p->master)) { + if (spi_controller_is_slave(p->ctlr)) { if (wait_for_completion_interruptible(x) || p->slave_aborted) { dev_dbg(&p->pdev->dev, "interrupted\n"); @@ -754,7 +756,7 @@ static int sh_msiof_dma_once(struct 
sh_msiof_spi_priv *p, const void *tx, /* First prepare and submit the DMA request(s), as this may fail */ if (rx) { ier_bits |= IER_RDREQE | IER_RDMAE; - desc_rx = dmaengine_prep_slave_single(p->master->dma_rx, + desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx, p->rx_dma_addr, len, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_rx) @@ -769,9 +771,9 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, if (tx) { ier_bits |= IER_TDREQE | IER_TDMAE; - dma_sync_single_for_device(p->master->dma_tx->device->dev, + dma_sync_single_for_device(p->ctlr->dma_tx->device->dev, p->tx_dma_addr, len, DMA_TO_DEVICE); - desc_tx = dmaengine_prep_slave_single(p->master->dma_tx, + desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx, p->tx_dma_addr, len, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_tx) { @@ -803,9 +805,9 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, /* Now start DMA */ if (rx) - dma_async_issue_pending(p->master->dma_rx); + dma_async_issue_pending(p->ctlr->dma_rx); if (tx) - dma_async_issue_pending(p->master->dma_tx); + dma_async_issue_pending(p->ctlr->dma_tx); ret = sh_msiof_spi_start(p, rx); if (ret) { @@ -845,9 +847,8 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, } if (rx) - dma_sync_single_for_cpu(p->master->dma_rx->device->dev, - p->rx_dma_addr, len, - DMA_FROM_DEVICE); + dma_sync_single_for_cpu(p->ctlr->dma_rx->device->dev, + p->rx_dma_addr, len, DMA_FROM_DEVICE); return 0; @@ -856,10 +857,10 @@ stop_reset: sh_msiof_spi_stop(p, rx); stop_dma: if (tx) - dmaengine_terminate_all(p->master->dma_tx); + dmaengine_terminate_all(p->ctlr->dma_tx); no_dma_tx: if (rx) - dmaengine_terminate_all(p->master->dma_rx); + dmaengine_terminate_all(p->ctlr->dma_rx); sh_msiof_write(p, IER, 0); return ret; } @@ -907,11 +908,11 @@ static void copy_plain32(u32 *dst, const u32 *src, unsigned int words) memcpy(dst, src, words * 4); } -static int sh_msiof_transfer_one(struct spi_master *master, +static int sh_msiof_transfer_one(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *t) { - struct sh_msiof_spi_priv *p = spi_master_get_devdata(master); + struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr); void (*copy32)(u32 *, const u32 *, unsigned int); void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int); void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int); @@ -926,10 +927,10 @@ static int sh_msiof_transfer_one(struct spi_master *master, int ret; /* setup clocks (clock already enabled in chipselect()) */ - if (!spi_controller_is_slave(p->master)) + if (!spi_controller_is_slave(p->ctlr)) sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz); - while (master->dma_tx && len > 15) { + while (ctlr->dma_tx && len > 15) { /* * DMA supports 32-bit words only, hence pack 8-bit and 16-bit * words, with byte resp. word swapping. 
@@ -937,17 +938,13 @@ static int sh_msiof_transfer_one(struct spi_master *master, unsigned int l = 0; if (tx_buf) - l = min(len, p->tx_fifo_size * 4); + l = min(round_down(len, 4), p->tx_fifo_size * 4); if (rx_buf) - l = min(len, p->rx_fifo_size * 4); + l = min(round_down(len, 4), p->rx_fifo_size * 4); if (bits <= 8) { - if (l & 3) - break; copy32 = copy_bswap32; } else if (bits <= 16) { - if (l & 3) - break; copy32 = copy_wswap32; } else { copy32 = copy_plain32; @@ -1052,23 +1049,28 @@ static int sh_msiof_transfer_one(struct spi_master *master, } static const struct sh_msiof_chipdata sh_data = { + .bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32), .tx_fifo_size = 64, .rx_fifo_size = 64, - .master_flags = 0, + .ctlr_flags = 0, .min_div_pow = 0, }; static const struct sh_msiof_chipdata rcar_gen2_data = { + .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | + SPI_BPW_MASK(24) | SPI_BPW_MASK(32), .tx_fifo_size = 64, .rx_fifo_size = 64, - .master_flags = SPI_MASTER_MUST_TX, + .ctlr_flags = SPI_CONTROLLER_MUST_TX, .min_div_pow = 0, }; static const struct sh_msiof_chipdata rcar_gen3_data = { + .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | + SPI_BPW_MASK(24) | SPI_BPW_MASK(32), .tx_fifo_size = 64, .rx_fifo_size = 64, - .master_flags = SPI_MASTER_MUST_TX, + .ctlr_flags = SPI_CONTROLLER_MUST_TX, .min_div_pow = 1, }; @@ -1136,7 +1138,7 @@ static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p) if (ret <= 0) return 0; - num_cs = max_t(unsigned int, ret, p->master->num_chipselect); + num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect); for (i = 0; i < num_cs; i++) { struct gpio_desc *gpiod; @@ -1206,10 +1208,10 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p) { struct platform_device *pdev = p->pdev; struct device *dev = &pdev->dev; - const struct sh_msiof_spi_info *info = dev_get_platdata(dev); + const struct sh_msiof_spi_info *info = p->info; unsigned int dma_tx_id, dma_rx_id; const struct resource *res; - struct spi_master *master; + struct spi_controller *ctlr; struct device *tx_dev, *rx_dev; if (dev->of_node) { @@ -1229,17 +1231,15 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p) if (!res) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - master = p->master; - master->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV, - dma_tx_id, - res->start + TFDR); - if (!master->dma_tx) + ctlr = p->ctlr; + ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV, + dma_tx_id, res->start + TFDR); + if (!ctlr->dma_tx) return -ENODEV; - master->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM, - dma_rx_id, - res->start + RFDR); - if (!master->dma_rx) + ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM, + dma_rx_id, res->start + RFDR); + if (!ctlr->dma_rx) goto free_tx_chan; p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA); @@ -1250,13 +1250,13 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p) if (!p->rx_dma_page) goto free_tx_page; - tx_dev = master->dma_tx->device->dev; + tx_dev = ctlr->dma_tx->device->dev; p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(tx_dev, p->tx_dma_addr)) goto free_rx_page; - rx_dev = master->dma_rx->device->dev; + rx_dev = ctlr->dma_rx->device->dev; p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(rx_dev, p->rx_dma_addr)) @@ -1272,34 +1272,34 @@ free_rx_page: free_tx_page: free_page((unsigned long)p->tx_dma_page); free_rx_chan: - 
dma_release_channel(master->dma_rx); + dma_release_channel(ctlr->dma_rx); free_tx_chan: - dma_release_channel(master->dma_tx); - master->dma_tx = NULL; + dma_release_channel(ctlr->dma_tx); + ctlr->dma_tx = NULL; return -ENODEV; } static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p) { - struct spi_master *master = p->master; + struct spi_controller *ctlr = p->ctlr; - if (!master->dma_tx) + if (!ctlr->dma_tx) return; - dma_unmap_single(master->dma_rx->device->dev, p->rx_dma_addr, - PAGE_SIZE, DMA_FROM_DEVICE); - dma_unmap_single(master->dma_tx->device->dev, p->tx_dma_addr, - PAGE_SIZE, DMA_TO_DEVICE); + dma_unmap_single(ctlr->dma_rx->device->dev, p->rx_dma_addr, PAGE_SIZE, + DMA_FROM_DEVICE); + dma_unmap_single(ctlr->dma_tx->device->dev, p->tx_dma_addr, PAGE_SIZE, + DMA_TO_DEVICE); free_page((unsigned long)p->rx_dma_page); free_page((unsigned long)p->tx_dma_page); - dma_release_channel(master->dma_rx); - dma_release_channel(master->dma_tx); + dma_release_channel(ctlr->dma_rx); + dma_release_channel(ctlr->dma_tx); } static int sh_msiof_spi_probe(struct platform_device *pdev) { struct resource *r; - struct spi_master *master; + struct spi_controller *ctlr; const struct sh_msiof_chipdata *chipdata; struct sh_msiof_spi_info *info; struct sh_msiof_spi_priv *p; @@ -1320,18 +1320,18 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) } if (info->mode == MSIOF_SPI_SLAVE) - master = spi_alloc_slave(&pdev->dev, - sizeof(struct sh_msiof_spi_priv)); + ctlr = spi_alloc_slave(&pdev->dev, + sizeof(struct sh_msiof_spi_priv)); else - master = spi_alloc_master(&pdev->dev, - sizeof(struct sh_msiof_spi_priv)); - if (master == NULL) + ctlr = spi_alloc_master(&pdev->dev, + sizeof(struct sh_msiof_spi_priv)); + if (ctlr == NULL) return -ENOMEM; - p = spi_master_get_devdata(master); + p = spi_controller_get_devdata(ctlr); platform_set_drvdata(pdev, p); - p->master = master; + p->ctlr = ctlr; p->info = info; p->min_div_pow = chipdata->min_div_pow; @@ -1378,31 +1378,31 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) p->rx_fifo_size = p->info->rx_fifo_override; /* Setup GPIO chip selects */ - master->num_chipselect = p->info->num_chipselect; + ctlr->num_chipselect = p->info->num_chipselect; ret = sh_msiof_get_cs_gpios(p); if (ret) goto err1; - /* init master code */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE; - master->flags = chipdata->master_flags; - master->bus_num = pdev->id; - master->dev.of_node = pdev->dev.of_node; - master->setup = sh_msiof_spi_setup; - master->prepare_message = sh_msiof_prepare_message; - master->slave_abort = sh_msiof_slave_abort; - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); - master->auto_runtime_pm = true; - master->transfer_one = sh_msiof_transfer_one; + /* init controller code */ + ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE; + ctlr->flags = chipdata->ctlr_flags; + ctlr->bus_num = pdev->id; + ctlr->dev.of_node = pdev->dev.of_node; + ctlr->setup = sh_msiof_spi_setup; + ctlr->prepare_message = sh_msiof_prepare_message; + ctlr->slave_abort = sh_msiof_slave_abort; + ctlr->bits_per_word_mask = chipdata->bits_per_word_mask; + ctlr->auto_runtime_pm = true; + ctlr->transfer_one = sh_msiof_transfer_one; ret = sh_msiof_request_dma(p); if (ret < 0) dev_warn(&pdev->dev, "DMA not available, using PIO\n"); - ret = devm_spi_register_master(&pdev->dev, master); + ret = devm_spi_register_controller(&pdev->dev, ctlr); if (ret < 0) { - 
dev_err(&pdev->dev, "spi_register_master error.\n"); + dev_err(&pdev->dev, "devm_spi_register_controller error.\n"); goto err2; } @@ -1412,7 +1412,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) sh_msiof_release_dma(p); pm_runtime_disable(&pdev->dev); err1: - spi_master_put(master); + spi_controller_put(ctlr); return ret; } @@ -1436,14 +1436,14 @@ static int sh_msiof_spi_suspend(struct device *dev) { struct sh_msiof_spi_priv *p = dev_get_drvdata(dev); - return spi_master_suspend(p->master); + return spi_controller_suspend(p->ctlr); } static int sh_msiof_spi_resume(struct device *dev) { struct sh_msiof_spi_priv *p = dev_get_drvdata(dev); - return spi_master_resume(p->master); + return spi_controller_resume(p->ctlr); } static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend, @@ -1465,7 +1465,7 @@ static struct platform_driver sh_msiof_spi_drv = { }; module_platform_driver(sh_msiof_spi_drv); -MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver"); +MODULE_DESCRIPTION("SuperH MSIOF SPI Controller Interface Driver"); MODULE_AUTHOR("Magnus Damm"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:spi_sh_msiof"); diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c new file mode 100644 index 000000000000..93ec2c6cdbfd --- /dev/null +++ b/drivers/spi/spi-sifive.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright 2018 SiFive, Inc. +// +// SiFive SPI controller driver (master mode only) +// +// Author: SiFive, Inc. +// sifive@sifive.com + +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/io.h> +#include <linux/log2.h> + +#define SIFIVE_SPI_DRIVER_NAME "sifive_spi" + +#define SIFIVE_SPI_MAX_CS 32 +#define SIFIVE_SPI_DEFAULT_DEPTH 8 +#define SIFIVE_SPI_DEFAULT_MAX_BITS 8 + +/* register offsets */ +#define SIFIVE_SPI_REG_SCKDIV 0x00 /* Serial clock divisor */ +#define SIFIVE_SPI_REG_SCKMODE 0x04 /* Serial clock mode */ +#define SIFIVE_SPI_REG_CSID 0x10 /* Chip select ID */ +#define SIFIVE_SPI_REG_CSDEF 0x14 /* Chip select default */ +#define SIFIVE_SPI_REG_CSMODE 0x18 /* Chip select mode */ +#define SIFIVE_SPI_REG_DELAY0 0x28 /* Delay control 0 */ +#define SIFIVE_SPI_REG_DELAY1 0x2c /* Delay control 1 */ +#define SIFIVE_SPI_REG_FMT 0x40 /* Frame format */ +#define SIFIVE_SPI_REG_TXDATA 0x48 /* Tx FIFO data */ +#define SIFIVE_SPI_REG_RXDATA 0x4c /* Rx FIFO data */ +#define SIFIVE_SPI_REG_TXMARK 0x50 /* Tx FIFO watermark */ +#define SIFIVE_SPI_REG_RXMARK 0x54 /* Rx FIFO watermark */ +#define SIFIVE_SPI_REG_FCTRL 0x60 /* SPI flash interface control */ +#define SIFIVE_SPI_REG_FFMT 0x64 /* SPI flash instruction format */ +#define SIFIVE_SPI_REG_IE 0x70 /* Interrupt Enable Register */ +#define SIFIVE_SPI_REG_IP 0x74 /* Interrupt Pendings Register */ + +/* sckdiv bits */ +#define SIFIVE_SPI_SCKDIV_DIV_MASK 0xfffU + +/* sckmode bits */ +#define SIFIVE_SPI_SCKMODE_PHA BIT(0) +#define SIFIVE_SPI_SCKMODE_POL BIT(1) +#define SIFIVE_SPI_SCKMODE_MODE_MASK (SIFIVE_SPI_SCKMODE_PHA | \ + SIFIVE_SPI_SCKMODE_POL) + +/* csmode bits */ +#define SIFIVE_SPI_CSMODE_MODE_AUTO 0U +#define SIFIVE_SPI_CSMODE_MODE_HOLD 2U +#define SIFIVE_SPI_CSMODE_MODE_OFF 3U + +/* delay0 bits */ +#define SIFIVE_SPI_DELAY0_CSSCK(x) ((u32)(x)) +#define SIFIVE_SPI_DELAY0_CSSCK_MASK 0xffU +#define SIFIVE_SPI_DELAY0_SCKCS(x) ((u32)(x) << 16) +#define SIFIVE_SPI_DELAY0_SCKCS_MASK (0xffU << 16) + +/* delay1 bits */ +#define 
SIFIVE_SPI_DELAY1_INTERCS(x) ((u32)(x)) +#define SIFIVE_SPI_DELAY1_INTERCS_MASK 0xffU +#define SIFIVE_SPI_DELAY1_INTERXFR(x) ((u32)(x) << 16) +#define SIFIVE_SPI_DELAY1_INTERXFR_MASK (0xffU << 16) + +/* fmt bits */ +#define SIFIVE_SPI_FMT_PROTO_SINGLE 0U +#define SIFIVE_SPI_FMT_PROTO_DUAL 1U +#define SIFIVE_SPI_FMT_PROTO_QUAD 2U +#define SIFIVE_SPI_FMT_PROTO_MASK 3U +#define SIFIVE_SPI_FMT_ENDIAN BIT(2) +#define SIFIVE_SPI_FMT_DIR BIT(3) +#define SIFIVE_SPI_FMT_LEN(x) ((u32)(x) << 16) +#define SIFIVE_SPI_FMT_LEN_MASK (0xfU << 16) + +/* txdata bits */ +#define SIFIVE_SPI_TXDATA_DATA_MASK 0xffU +#define SIFIVE_SPI_TXDATA_FULL BIT(31) + +/* rxdata bits */ +#define SIFIVE_SPI_RXDATA_DATA_MASK 0xffU +#define SIFIVE_SPI_RXDATA_EMPTY BIT(31) + +/* ie and ip bits */ +#define SIFIVE_SPI_IP_TXWM BIT(0) +#define SIFIVE_SPI_IP_RXWM BIT(1) + +struct sifive_spi { + void __iomem *regs; /* virt. address of control registers */ + struct clk *clk; /* bus clock */ + unsigned int fifo_depth; /* fifo depth in words */ + u32 cs_inactive; /* level of the CS pins when inactive */ + struct completion done; /* wake-up from interrupt */ +}; + +static void sifive_spi_write(struct sifive_spi *spi, int offset, u32 value) +{ + iowrite32(value, spi->regs + offset); +} + +static u32 sifive_spi_read(struct sifive_spi *spi, int offset) +{ + return ioread32(spi->regs + offset); +} + +static void sifive_spi_init(struct sifive_spi *spi) +{ + /* Watermark interrupts are disabled by default */ + sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0); + + /* Default watermark FIFO threshold values */ + sifive_spi_write(spi, SIFIVE_SPI_REG_TXMARK, 1); + sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK, 0); + + /* Set CS/SCK Delays and Inactive Time to defaults */ + sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY0, + SIFIVE_SPI_DELAY0_CSSCK(1) | + SIFIVE_SPI_DELAY0_SCKCS(1)); + sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY1, + SIFIVE_SPI_DELAY1_INTERCS(1) | + SIFIVE_SPI_DELAY1_INTERXFR(0)); + + /* Exit specialized memory-mapped SPI flash mode */ + sifive_spi_write(spi, SIFIVE_SPI_REG_FCTRL, 0); +} + +static int +sifive_spi_prepare_message(struct spi_master *master, struct spi_message *msg) +{ + struct sifive_spi *spi = spi_master_get_devdata(master); + struct spi_device *device = msg->spi; + + /* Update the chip select polarity */ + if (device->mode & SPI_CS_HIGH) + spi->cs_inactive &= ~BIT(device->chip_select); + else + spi->cs_inactive |= BIT(device->chip_select); + sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive); + + /* Select the correct device */ + sifive_spi_write(spi, SIFIVE_SPI_REG_CSID, device->chip_select); + + /* Set clock mode */ + sifive_spi_write(spi, SIFIVE_SPI_REG_SCKMODE, + device->mode & SIFIVE_SPI_SCKMODE_MODE_MASK); + + return 0; +} + +static void sifive_spi_set_cs(struct spi_device *device, bool is_high) +{ + struct sifive_spi *spi = spi_master_get_devdata(device->master); + + /* Reverse polarity is handled by SCMR/CPOL. Not inverted CS. */ + if (device->mode & SPI_CS_HIGH) + is_high = !is_high; + + sifive_spi_write(spi, SIFIVE_SPI_REG_CSMODE, is_high ? 
+ SIFIVE_SPI_CSMODE_MODE_AUTO : + SIFIVE_SPI_CSMODE_MODE_HOLD); +} + +static int +sifive_spi_prep_transfer(struct sifive_spi *spi, struct spi_device *device, + struct spi_transfer *t) +{ + u32 cr; + unsigned int mode; + + /* Calculate and program the clock rate */ + cr = DIV_ROUND_UP(clk_get_rate(spi->clk) >> 1, t->speed_hz) - 1; + cr &= SIFIVE_SPI_SCKDIV_DIV_MASK; + sifive_spi_write(spi, SIFIVE_SPI_REG_SCKDIV, cr); + + mode = max_t(unsigned int, t->rx_nbits, t->tx_nbits); + + /* Set frame format */ + cr = SIFIVE_SPI_FMT_LEN(t->bits_per_word); + switch (mode) { + case SPI_NBITS_QUAD: + cr |= SIFIVE_SPI_FMT_PROTO_QUAD; + break; + case SPI_NBITS_DUAL: + cr |= SIFIVE_SPI_FMT_PROTO_DUAL; + break; + default: + cr |= SIFIVE_SPI_FMT_PROTO_SINGLE; + break; + } + if (device->mode & SPI_LSB_FIRST) + cr |= SIFIVE_SPI_FMT_ENDIAN; + if (!t->rx_buf) + cr |= SIFIVE_SPI_FMT_DIR; + sifive_spi_write(spi, SIFIVE_SPI_REG_FMT, cr); + + /* We will want to poll if the time we need to wait is + * less than the context switching time. + * Let's call that threshold 5us. The operation will take: + * (8/mode) * fifo_depth / hz <= 5 * 10^-6 + * 1600000 * fifo_depth <= hz * mode + */ + return 1600000 * spi->fifo_depth <= t->speed_hz * mode; +} + +static irqreturn_t sifive_spi_irq(int irq, void *dev_id) +{ + struct sifive_spi *spi = dev_id; + u32 ip = sifive_spi_read(spi, SIFIVE_SPI_REG_IP); + + if (ip & (SIFIVE_SPI_IP_TXWM | SIFIVE_SPI_IP_RXWM)) { + /* Disable interrupts until next transfer */ + sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0); + complete(&spi->done); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static void sifive_spi_wait(struct sifive_spi *spi, u32 bit, int poll) +{ + if (poll) { + u32 cr; + + do { + cr = sifive_spi_read(spi, SIFIVE_SPI_REG_IP); + } while (!(cr & bit)); + } else { + reinit_completion(&spi->done); + sifive_spi_write(spi, SIFIVE_SPI_REG_IE, bit); + wait_for_completion(&spi->done); + } +} + +static void sifive_spi_tx(struct sifive_spi *spi, const u8 *tx_ptr) +{ + WARN_ON_ONCE((sifive_spi_read(spi, SIFIVE_SPI_REG_TXDATA) + & SIFIVE_SPI_TXDATA_FULL) != 0); + sifive_spi_write(spi, SIFIVE_SPI_REG_TXDATA, + *tx_ptr & SIFIVE_SPI_TXDATA_DATA_MASK); +} + +static void sifive_spi_rx(struct sifive_spi *spi, u8 *rx_ptr) +{ + u32 data = sifive_spi_read(spi, SIFIVE_SPI_REG_RXDATA); + + WARN_ON_ONCE((data & SIFIVE_SPI_RXDATA_EMPTY) != 0); + *rx_ptr = data & SIFIVE_SPI_RXDATA_DATA_MASK; +} + +static int +sifive_spi_transfer_one(struct spi_master *master, struct spi_device *device, + struct spi_transfer *t) +{ + struct sifive_spi *spi = spi_master_get_devdata(master); + int poll = sifive_spi_prep_transfer(spi, device, t); + const u8 *tx_ptr = t->tx_buf; + u8 *rx_ptr = t->rx_buf; + unsigned int remaining_words = t->len; + + while (remaining_words) { + unsigned int n_words = min(remaining_words, spi->fifo_depth); + unsigned int i; + + /* Enqueue n_words for transmission */ + for (i = 0; i < n_words; i++) + sifive_spi_tx(spi, tx_ptr++); + + if (rx_ptr) { + /* Wait for transmission + reception to complete */ + sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK, + n_words - 1); + sifive_spi_wait(spi, SIFIVE_SPI_IP_RXWM, poll); + + /* Read out all the data from the RX FIFO */ + for (i = 0; i < n_words; i++) + sifive_spi_rx(spi, rx_ptr++); + } else { + /* Wait for transmission to complete */ + sifive_spi_wait(spi, SIFIVE_SPI_IP_TXWM, poll); + } + + remaining_words -= n_words; + } + + return 0; +} + +static int sifive_spi_probe(struct platform_device *pdev) +{ + struct sifive_spi *spi; + struct resource *res; + int 
ret, irq, num_cs; + u32 cs_bits, max_bits_per_word; + struct spi_master *master; + + master = spi_alloc_master(&pdev->dev, sizeof(struct sifive_spi)); + if (!master) { + dev_err(&pdev->dev, "out of memory\n"); + return -ENOMEM; + } + + spi = spi_master_get_devdata(master); + init_completion(&spi->done); + platform_set_drvdata(pdev, master); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + spi->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(spi->regs)) { + ret = PTR_ERR(spi->regs); + goto put_master; + } + + spi->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(spi->clk)) { + dev_err(&pdev->dev, "Unable to find bus clock\n"); + ret = PTR_ERR(spi->clk); + goto put_master; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Unable to find interrupt\n"); + ret = irq; + goto put_master; + } + + /* Optional parameters */ + ret = + of_property_read_u32(pdev->dev.of_node, "sifive,fifo-depth", + &spi->fifo_depth); + if (ret < 0) + spi->fifo_depth = SIFIVE_SPI_DEFAULT_DEPTH; + + ret = + of_property_read_u32(pdev->dev.of_node, "sifive,max-bits-per-word", + &max_bits_per_word); + + if (!ret && max_bits_per_word < 8) { + dev_err(&pdev->dev, "Only 8bit SPI words supported by the driver\n"); + ret = -EINVAL; + goto put_master; + } + + /* Spin up the bus clock before hitting registers */ + ret = clk_prepare_enable(spi->clk); + if (ret) { + dev_err(&pdev->dev, "Unable to enable bus clock\n"); + goto put_master; + } + + /* probe the number of CS lines */ + spi->cs_inactive = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF); + sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, 0xffffffffU); + cs_bits = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF); + sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive); + if (!cs_bits) { + dev_err(&pdev->dev, "Could not auto probe CS lines\n"); + ret = -EINVAL; + goto put_master; + } + + num_cs = ilog2(cs_bits) + 1; + if (num_cs > SIFIVE_SPI_MAX_CS) { + dev_err(&pdev->dev, "Invalid number of spi slaves\n"); + ret = -EINVAL; + goto put_master; + } + + /* Define our master */ + master->dev.of_node = pdev->dev.of_node; + master->bus_num = pdev->id; + master->num_chipselect = num_cs; + master->mode_bits = SPI_CPHA | SPI_CPOL + | SPI_CS_HIGH | SPI_LSB_FIRST + | SPI_TX_DUAL | SPI_TX_QUAD + | SPI_RX_DUAL | SPI_RX_QUAD; + /* TODO: add driver support for bits_per_word < 8 + * we need to "left-align" the bits (unless SPI_LSB_FIRST) + */ + master->bits_per_word_mask = SPI_BPW_MASK(8); + master->flags = SPI_CONTROLLER_MUST_TX | SPI_MASTER_GPIO_SS; + master->prepare_message = sifive_spi_prepare_message; + master->set_cs = sifive_spi_set_cs; + master->transfer_one = sifive_spi_transfer_one; + + pdev->dev.dma_mask = NULL; + /* Configure the SPI master hardware */ + sifive_spi_init(spi); + + /* Register for SPI Interrupt */ + ret = devm_request_irq(&pdev->dev, irq, sifive_spi_irq, 0, + dev_name(&pdev->dev), spi); + if (ret) { + dev_err(&pdev->dev, "Unable to bind to interrupt\n"); + goto put_master; + } + + dev_info(&pdev->dev, "mapped; irq=%d, cs=%d\n", + irq, master->num_chipselect); + + ret = devm_spi_register_master(&pdev->dev, master); + if (ret < 0) { + dev_err(&pdev->dev, "spi_register_master failed\n"); + goto put_master; + } + + return 0; + +put_master: + spi_master_put(master); + + return ret; +} + +static int sifive_spi_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct sifive_spi *spi = spi_master_get_devdata(master); + + /* Disable all the interrupts just in case */ + 
sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0); + + return 0; +} + +static const struct of_device_id sifive_spi_of_match[] = { + { .compatible = "sifive,spi0", }, + {} +}; +MODULE_DEVICE_TABLE(of, sifive_spi_of_match); + +static struct platform_driver sifive_spi_driver = { + .probe = sifive_spi_probe, + .remove = sifive_spi_remove, + .driver = { + .name = SIFIVE_SPI_DRIVER_NAME, + .of_match_table = sifive_spi_of_match, + }, +}; +module_platform_driver(sifive_spi_driver); + +MODULE_AUTHOR("SiFive, Inc. <sifive@sifive.com>"); +MODULE_DESCRIPTION("SiFive SPI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c index fa324ce194b2..1b7eebb72c07 100644 --- a/drivers/spi/spi-sprd.c +++ b/drivers/spi/spi-sprd.c @@ -2,6 +2,9 @@ // Copyright (C) 2018 Spreadtrum Communications Inc. #include <linux/clk.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/dma/sprd-dma.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> @@ -9,6 +12,7 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/of_dma.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/spi/spi.h> @@ -128,11 +132,28 @@ #define SPRD_SPI_DEFAULT_SOURCE 26000000 #define SPRD_SPI_MAX_SPEED_HZ 48000000 #define SPRD_SPI_AUTOSUSPEND_DELAY 100 +#define SPRD_SPI_DMA_STEP 8 + +enum sprd_spi_dma_channel { + SPRD_SPI_RX, + SPRD_SPI_TX, + SPRD_SPI_MAX, +}; + +struct sprd_spi_dma { + bool enable; + struct dma_chan *dma_chan[SPRD_SPI_MAX]; + enum dma_slave_buswidth width; + u32 fragmens_len; + u32 rx_len; +}; struct sprd_spi { void __iomem *base; + phys_addr_t phy_base; struct device *dev; struct clk *clk; + int irq; u32 src_clk; u32 hw_mode; u32 trans_len; @@ -141,6 +162,8 @@ struct sprd_spi { u32 hw_speed_hz; u32 len; int status; + struct sprd_spi_dma dma; + struct completion xfer_completion; const void *tx_buf; void *rx_buf; int (*read_bufs)(struct sprd_spi *ss, u32 len); @@ -431,6 +454,208 @@ complete: return ret; } +static void sprd_spi_irq_enable(struct sprd_spi *ss) +{ + u32 val; + + /* Clear interrupt status before enabling interrupt. */ + writel_relaxed(SPRD_SPI_TX_END_CLR | SPRD_SPI_RX_END_CLR, + ss->base + SPRD_SPI_INT_CLR); + /* Enable SPI interrupt only in DMA mode. 
*/ + val = readl_relaxed(ss->base + SPRD_SPI_INT_EN); + writel_relaxed(val | SPRD_SPI_TX_END_INT_EN | + SPRD_SPI_RX_END_INT_EN, + ss->base + SPRD_SPI_INT_EN); +} + +static void sprd_spi_irq_disable(struct sprd_spi *ss) +{ + writel_relaxed(0, ss->base + SPRD_SPI_INT_EN); +} + +static void sprd_spi_dma_enable(struct sprd_spi *ss, bool enable) +{ + u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL2); + + if (enable) + val |= SPRD_SPI_DMA_EN; + else + val &= ~SPRD_SPI_DMA_EN; + + writel_relaxed(val, ss->base + SPRD_SPI_CTL2); +} + +static int sprd_spi_dma_submit(struct dma_chan *dma_chan, + struct dma_slave_config *c, + struct sg_table *sg, + enum dma_transfer_direction dir) +{ + struct dma_async_tx_descriptor *desc; + dma_cookie_t cookie; + unsigned long flags; + int ret; + + ret = dmaengine_slave_config(dma_chan, c); + if (ret < 0) + return ret; + + flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE, SPRD_DMA_NO_TRG, + SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT); + desc = dmaengine_prep_slave_sg(dma_chan, sg->sgl, sg->nents, dir, flags); + if (!desc) + return -ENODEV; + + cookie = dmaengine_submit(desc); + if (dma_submit_error(cookie)) + return dma_submit_error(cookie); + + dma_async_issue_pending(dma_chan); + + return 0; +} + +static int sprd_spi_dma_rx_config(struct sprd_spi *ss, struct spi_transfer *t) +{ + struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_RX]; + struct dma_slave_config config = { + .src_addr = ss->phy_base, + .src_addr_width = ss->dma.width, + .dst_addr_width = ss->dma.width, + .dst_maxburst = ss->dma.fragmens_len, + }; + int ret; + + ret = sprd_spi_dma_submit(dma_chan, &config, &t->rx_sg, DMA_DEV_TO_MEM); + if (ret) + return ret; + + return ss->dma.rx_len; +} + +static int sprd_spi_dma_tx_config(struct sprd_spi *ss, struct spi_transfer *t) +{ + struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_TX]; + struct dma_slave_config config = { + .dst_addr = ss->phy_base, + .src_addr_width = ss->dma.width, + .dst_addr_width = ss->dma.width, + .src_maxburst = ss->dma.fragmens_len, + }; + int ret; + + ret = sprd_spi_dma_submit(dma_chan, &config, &t->tx_sg, DMA_MEM_TO_DEV); + if (ret) + return ret; + + return t->len; +} + +static int sprd_spi_dma_request(struct sprd_spi *ss) +{ + ss->dma.dma_chan[SPRD_SPI_RX] = dma_request_chan(ss->dev, "rx_chn"); + if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_RX])) { + if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]) == -EPROBE_DEFER) + return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]); + + dev_err(ss->dev, "request RX DMA channel failed!\n"); + return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]); + } + + ss->dma.dma_chan[SPRD_SPI_TX] = dma_request_chan(ss->dev, "tx_chn"); + if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) { + if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]) == -EPROBE_DEFER) + return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]); + + dev_err(ss->dev, "request TX DMA channel failed!\n"); + dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]); + return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]); + } + + return 0; +} + +static void sprd_spi_dma_release(struct sprd_spi *ss) +{ + if (ss->dma.dma_chan[SPRD_SPI_RX]) + dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]); + + if (ss->dma.dma_chan[SPRD_SPI_TX]) + dma_release_channel(ss->dma.dma_chan[SPRD_SPI_TX]); +} + +static int sprd_spi_dma_txrx_bufs(struct spi_device *sdev, + struct spi_transfer *t) +{ + struct sprd_spi *ss = spi_master_get_devdata(sdev->master); + u32 trans_len = ss->trans_len; + int ret, write_size = 0; + + reinit_completion(&ss->xfer_completion); + sprd_spi_irq_enable(ss); + if (ss->trans_mode & 
SPRD_SPI_TX_MODE) { + write_size = sprd_spi_dma_tx_config(ss, t); + sprd_spi_set_tx_length(ss, trans_len); + + /* + * For our 3 wires mode or dual TX line mode, we need + * to request the controller to transfer. + */ + if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL) + sprd_spi_tx_req(ss); + } else { + sprd_spi_set_rx_length(ss, trans_len); + + /* + * For our 3 wires mode or dual TX line mode, we need + * to request the controller to read. + */ + if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL) + sprd_spi_rx_req(ss); + else + write_size = ss->write_bufs(ss, trans_len); + } + + if (write_size < 0) { + ret = write_size; + dev_err(ss->dev, "failed to write, ret = %d\n", ret); + goto trans_complete; + } + + if (ss->trans_mode & SPRD_SPI_RX_MODE) { + /* + * Set up the DMA receive data length, which must be an + * integral multiple of fragment length. But when the length + * of received data is less than fragment length, DMA can be + * configured to receive data according to the actual length + * of received data. + */ + ss->dma.rx_len = t->len > ss->dma.fragmens_len ? + (t->len - t->len % ss->dma.fragmens_len) : + t->len; + ret = sprd_spi_dma_rx_config(ss, t); + if (ret < 0) { + dev_err(&sdev->dev, + "failed to configure rx DMA, ret = %d\n", ret); + goto trans_complete; + } + } + + sprd_spi_dma_enable(ss, true); + wait_for_completion(&(ss->xfer_completion)); + + if (ss->trans_mode & SPRD_SPI_TX_MODE) + ret = write_size; + else + ret = ss->dma.rx_len; + +trans_complete: + sprd_spi_dma_enable(ss, false); + sprd_spi_enter_idle(ss); + sprd_spi_irq_disable(ss); + + return ret; +} + static void sprd_spi_set_speed(struct sprd_spi *ss, u32 speed_hz) { /* @@ -516,16 +741,22 @@ static int sprd_spi_setup_transfer(struct spi_device *sdev, ss->trans_len = t->len; ss->read_bufs = sprd_spi_read_bufs_u8; ss->write_bufs = sprd_spi_write_bufs_u8; + ss->dma.width = DMA_SLAVE_BUSWIDTH_1_BYTE; + ss->dma.fragmens_len = SPRD_SPI_DMA_STEP; break; case 16: ss->trans_len = t->len >> 1; ss->read_bufs = sprd_spi_read_bufs_u16; ss->write_bufs = sprd_spi_write_bufs_u16; + ss->dma.width = DMA_SLAVE_BUSWIDTH_2_BYTES; + ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 1; break; case 32: ss->trans_len = t->len >> 2; ss->read_bufs = sprd_spi_read_bufs_u32; ss->write_bufs = sprd_spi_write_bufs_u32; + ss->dma.width = DMA_SLAVE_BUSWIDTH_4_BYTES; + ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 2; break; default: return -EINVAL; @@ -563,7 +794,11 @@ static int sprd_spi_transfer_one(struct spi_controller *sctlr, if (ret) goto setup_err; - ret = sprd_spi_txrx_bufs(sdev, t); + if (sctlr->can_dma(sctlr, sdev, t)) + ret = sprd_spi_dma_txrx_bufs(sdev, t); + else + ret = sprd_spi_txrx_bufs(sdev, t); + if (ret == t->len) ret = 0; else if (ret >= 0) @@ -575,6 +810,53 @@ setup_err: return ret; } +static irqreturn_t sprd_spi_handle_irq(int irq, void *data) +{ + struct sprd_spi *ss = (struct sprd_spi *)data; + u32 val = readl_relaxed(ss->base + SPRD_SPI_INT_MASK_STS); + + if (val & SPRD_SPI_MASK_TX_END) { + writel_relaxed(SPRD_SPI_TX_END_CLR, ss->base + SPRD_SPI_INT_CLR); + if (!(ss->trans_mode & SPRD_SPI_RX_MODE)) + complete(&ss->xfer_completion); + + return IRQ_HANDLED; + } + + if (val & SPRD_SPI_MASK_RX_END) { + writel_relaxed(SPRD_SPI_RX_END_CLR, ss->base + SPRD_SPI_INT_CLR); + if (ss->dma.rx_len < ss->len) { + ss->rx_buf += ss->dma.rx_len; + ss->dma.rx_len += + ss->read_bufs(ss, ss->len - ss->dma.rx_len); + } + complete(&ss->xfer_completion); + + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int 
sprd_spi_irq_init(struct platform_device *pdev, struct sprd_spi *ss) +{ + int ret; + + ss->irq = platform_get_irq(pdev, 0); + if (ss->irq < 0) { + dev_err(&pdev->dev, "failed to get irq resource\n"); + return ss->irq; + } + + ret = devm_request_irq(&pdev->dev, ss->irq, sprd_spi_handle_irq, + 0, pdev->name, ss); + if (ret) + dev_err(&pdev->dev, "failed to request spi irq %d, ret = %d\n", + ss->irq, ret); + + return ret; +} + static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss) { struct clk *clk_spi, *clk_parent; @@ -605,6 +887,35 @@ static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss) return 0; } +static bool sprd_spi_can_dma(struct spi_controller *sctlr, + struct spi_device *spi, struct spi_transfer *t) +{ + struct sprd_spi *ss = spi_controller_get_devdata(sctlr); + + return ss->dma.enable && (t->len > SPRD_SPI_FIFO_SIZE); +} + +static int sprd_spi_dma_init(struct platform_device *pdev, struct sprd_spi *ss) +{ + int ret; + + ret = sprd_spi_dma_request(ss); + if (ret) { + if (ret == -EPROBE_DEFER) + return ret; + + dev_warn(&pdev->dev, + "failed to request dma, enter no dma mode, ret = %d\n", + ret); + + return 0; + } + + ss->dma.enable = true; + + return 0; +} + static int sprd_spi_probe(struct platform_device *pdev) { struct spi_controller *sctlr; @@ -625,25 +936,36 @@ static int sprd_spi_probe(struct platform_device *pdev) goto free_controller; } + ss->phy_base = res->start; ss->dev = &pdev->dev; sctlr->dev.of_node = pdev->dev.of_node; sctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE | SPI_TX_DUAL; sctlr->bus_num = pdev->id; sctlr->set_cs = sprd_spi_chipselect; sctlr->transfer_one = sprd_spi_transfer_one; + sctlr->can_dma = sprd_spi_can_dma; sctlr->auto_runtime_pm = true; sctlr->max_speed_hz = min_t(u32, ss->src_clk >> 1, SPRD_SPI_MAX_SPEED_HZ); + init_completion(&ss->xfer_completion); platform_set_drvdata(pdev, sctlr); ret = sprd_spi_clk_init(pdev, ss); if (ret) goto free_controller; - ret = clk_prepare_enable(ss->clk); + ret = sprd_spi_irq_init(pdev, ss); + if (ret) + goto free_controller; + + ret = sprd_spi_dma_init(pdev, ss); if (ret) goto free_controller; + ret = clk_prepare_enable(ss->clk); + if (ret) + goto release_dma; + ret = pm_runtime_set_active(&pdev->dev); if (ret < 0) goto disable_clk; @@ -672,6 +994,8 @@ err_rpm_put: pm_runtime_disable(&pdev->dev); disable_clk: clk_disable_unprepare(ss->clk); +release_dma: + sprd_spi_dma_release(ss); free_controller: spi_controller_put(sctlr); @@ -690,6 +1014,10 @@ static int sprd_spi_remove(struct platform_device *pdev) return ret; } + spi_controller_suspend(sctlr); + + if (ss->dma.enable) + sprd_spi_dma_release(ss); clk_disable_unprepare(ss->clk); pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); @@ -702,6 +1030,9 @@ static int __maybe_unused sprd_spi_runtime_suspend(struct device *dev) struct spi_controller *sctlr = dev_get_drvdata(dev); struct sprd_spi *ss = spi_controller_get_devdata(sctlr); + if (ss->dma.enable) + sprd_spi_dma_release(ss); + clk_disable_unprepare(ss->clk); return 0; @@ -717,7 +1048,14 @@ static int __maybe_unused sprd_spi_runtime_resume(struct device *dev) if (ret) return ret; - return 0; + if (!ss->dma.enable) + return 0; + + ret = sprd_spi_dma_request(ss); + if (ret) + clk_disable_unprepare(ss->clk); + + return ret; } static const struct dev_pm_ops sprd_spi_pm_ops = { diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index ad1e55d3d5d5..4186ed20d796 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -1,23 +1,10 
@@ -/* - * STMicroelectronics STM32 SPI Controller driver (master mode only) - * - * Copyright (C) 2017, STMicroelectronics - All Rights Reserved - * Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics. - * - * License terms: GPL V2.0. - * - * spi_stm32 driver is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * spi_stm32 driver is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along with - * spi_stm32 driver. If not, see <http://www.gnu.org/licenses/>. - */ +// SPDX-License-Identifier: GPL-2.0 +// +// STMicroelectronics STM32 SPI Controller driver (master mode only) +// +// Copyright (C) 2017, STMicroelectronics - All Rights Reserved +// Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics. + #include <linux/debugfs.h> #include <linux/clk.h> #include <linux/delay.h> @@ -33,99 +20,251 @@ #define DRIVER_NAME "spi_stm32" -/* STM32 SPI registers */ -#define STM32_SPI_CR1 0x00 -#define STM32_SPI_CR2 0x04 -#define STM32_SPI_CFG1 0x08 -#define STM32_SPI_CFG2 0x0C -#define STM32_SPI_IER 0x10 -#define STM32_SPI_SR 0x14 -#define STM32_SPI_IFCR 0x18 -#define STM32_SPI_TXDR 0x20 -#define STM32_SPI_RXDR 0x30 -#define STM32_SPI_I2SCFGR 0x50 - -/* STM32_SPI_CR1 bit fields */ -#define SPI_CR1_SPE BIT(0) -#define SPI_CR1_MASRX BIT(8) -#define SPI_CR1_CSTART BIT(9) -#define SPI_CR1_CSUSP BIT(10) -#define SPI_CR1_HDDIR BIT(11) -#define SPI_CR1_SSI BIT(12) - -/* STM32_SPI_CR2 bit fields */ -#define SPI_CR2_TSIZE_SHIFT 0 -#define SPI_CR2_TSIZE GENMASK(15, 0) - -/* STM32_SPI_CFG1 bit fields */ -#define SPI_CFG1_DSIZE_SHIFT 0 -#define SPI_CFG1_DSIZE GENMASK(4, 0) -#define SPI_CFG1_FTHLV_SHIFT 5 -#define SPI_CFG1_FTHLV GENMASK(8, 5) -#define SPI_CFG1_RXDMAEN BIT(14) -#define SPI_CFG1_TXDMAEN BIT(15) -#define SPI_CFG1_MBR_SHIFT 28 -#define SPI_CFG1_MBR GENMASK(30, 28) -#define SPI_CFG1_MBR_MIN 0 -#define SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28) - -/* STM32_SPI_CFG2 bit fields */ -#define SPI_CFG2_MIDI_SHIFT 4 -#define SPI_CFG2_MIDI GENMASK(7, 4) -#define SPI_CFG2_COMM_SHIFT 17 -#define SPI_CFG2_COMM GENMASK(18, 17) -#define SPI_CFG2_SP_SHIFT 19 -#define SPI_CFG2_SP GENMASK(21, 19) -#define SPI_CFG2_MASTER BIT(22) -#define SPI_CFG2_LSBFRST BIT(23) -#define SPI_CFG2_CPHA BIT(24) -#define SPI_CFG2_CPOL BIT(25) -#define SPI_CFG2_SSM BIT(26) -#define SPI_CFG2_AFCNTR BIT(31) - -/* STM32_SPI_IER bit fields */ -#define SPI_IER_RXPIE BIT(0) -#define SPI_IER_TXPIE BIT(1) -#define SPI_IER_DXPIE BIT(2) -#define SPI_IER_EOTIE BIT(3) -#define SPI_IER_TXTFIE BIT(4) -#define SPI_IER_OVRIE BIT(6) -#define SPI_IER_MODFIE BIT(9) -#define SPI_IER_ALL GENMASK(10, 0) - -/* STM32_SPI_SR bit fields */ -#define SPI_SR_RXP BIT(0) -#define SPI_SR_TXP BIT(1) -#define SPI_SR_EOT BIT(3) -#define SPI_SR_OVR BIT(6) -#define SPI_SR_MODF BIT(9) -#define SPI_SR_SUSP BIT(11) -#define SPI_SR_RXPLVL_SHIFT 13 -#define SPI_SR_RXPLVL GENMASK(14, 13) -#define SPI_SR_RXWNE BIT(15) - -/* STM32_SPI_IFCR bit fields */ -#define SPI_IFCR_ALL GENMASK(11, 3) - -/* STM32_SPI_I2SCFGR bit fields */ -#define SPI_I2SCFGR_I2SMOD BIT(0) - -/* SPI Master Baud Rate min/max divisor */ -#define SPI_MBR_DIV_MIN (2 << SPI_CFG1_MBR_MIN) -#define 
SPI_MBR_DIV_MAX (2 << SPI_CFG1_MBR_MAX) - -/* SPI Communication mode */ +/* STM32F4 SPI registers */ +#define STM32F4_SPI_CR1 0x00 +#define STM32F4_SPI_CR2 0x04 +#define STM32F4_SPI_SR 0x08 +#define STM32F4_SPI_DR 0x0C +#define STM32F4_SPI_I2SCFGR 0x1C + +/* STM32F4_SPI_CR1 bit fields */ +#define STM32F4_SPI_CR1_CPHA BIT(0) +#define STM32F4_SPI_CR1_CPOL BIT(1) +#define STM32F4_SPI_CR1_MSTR BIT(2) +#define STM32F4_SPI_CR1_BR_SHIFT 3 +#define STM32F4_SPI_CR1_BR GENMASK(5, 3) +#define STM32F4_SPI_CR1_SPE BIT(6) +#define STM32F4_SPI_CR1_LSBFRST BIT(7) +#define STM32F4_SPI_CR1_SSI BIT(8) +#define STM32F4_SPI_CR1_SSM BIT(9) +#define STM32F4_SPI_CR1_RXONLY BIT(10) +#define STM32F4_SPI_CR1_DFF BIT(11) +#define STM32F4_SPI_CR1_CRCNEXT BIT(12) +#define STM32F4_SPI_CR1_CRCEN BIT(13) +#define STM32F4_SPI_CR1_BIDIOE BIT(14) +#define STM32F4_SPI_CR1_BIDIMODE BIT(15) +#define STM32F4_SPI_CR1_BR_MIN 0 +#define STM32F4_SPI_CR1_BR_MAX (GENMASK(5, 3) >> 3) + +/* STM32F4_SPI_CR2 bit fields */ +#define STM32F4_SPI_CR2_RXDMAEN BIT(0) +#define STM32F4_SPI_CR2_TXDMAEN BIT(1) +#define STM32F4_SPI_CR2_SSOE BIT(2) +#define STM32F4_SPI_CR2_FRF BIT(4) +#define STM32F4_SPI_CR2_ERRIE BIT(5) +#define STM32F4_SPI_CR2_RXNEIE BIT(6) +#define STM32F4_SPI_CR2_TXEIE BIT(7) + +/* STM32F4_SPI_SR bit fields */ +#define STM32F4_SPI_SR_RXNE BIT(0) +#define STM32F4_SPI_SR_TXE BIT(1) +#define STM32F4_SPI_SR_CHSIDE BIT(2) +#define STM32F4_SPI_SR_UDR BIT(3) +#define STM32F4_SPI_SR_CRCERR BIT(4) +#define STM32F4_SPI_SR_MODF BIT(5) +#define STM32F4_SPI_SR_OVR BIT(6) +#define STM32F4_SPI_SR_BSY BIT(7) +#define STM32F4_SPI_SR_FRE BIT(8) + +/* STM32F4_SPI_I2SCFGR bit fields */ +#define STM32F4_SPI_I2SCFGR_I2SMOD BIT(11) + +/* STM32F4 SPI Baud Rate min/max divisor */ +#define STM32F4_SPI_BR_DIV_MIN (2 << STM32F4_SPI_CR1_BR_MIN) +#define STM32F4_SPI_BR_DIV_MAX (2 << STM32F4_SPI_CR1_BR_MAX) + +/* STM32H7 SPI registers */ +#define STM32H7_SPI_CR1 0x00 +#define STM32H7_SPI_CR2 0x04 +#define STM32H7_SPI_CFG1 0x08 +#define STM32H7_SPI_CFG2 0x0C +#define STM32H7_SPI_IER 0x10 +#define STM32H7_SPI_SR 0x14 +#define STM32H7_SPI_IFCR 0x18 +#define STM32H7_SPI_TXDR 0x20 +#define STM32H7_SPI_RXDR 0x30 +#define STM32H7_SPI_I2SCFGR 0x50 + +/* STM32H7_SPI_CR1 bit fields */ +#define STM32H7_SPI_CR1_SPE BIT(0) +#define STM32H7_SPI_CR1_MASRX BIT(8) +#define STM32H7_SPI_CR1_CSTART BIT(9) +#define STM32H7_SPI_CR1_CSUSP BIT(10) +#define STM32H7_SPI_CR1_HDDIR BIT(11) +#define STM32H7_SPI_CR1_SSI BIT(12) + +/* STM32H7_SPI_CR2 bit fields */ +#define STM32H7_SPI_CR2_TSIZE_SHIFT 0 +#define STM32H7_SPI_CR2_TSIZE GENMASK(15, 0) + +/* STM32H7_SPI_CFG1 bit fields */ +#define STM32H7_SPI_CFG1_DSIZE_SHIFT 0 +#define STM32H7_SPI_CFG1_DSIZE GENMASK(4, 0) +#define STM32H7_SPI_CFG1_FTHLV_SHIFT 5 +#define STM32H7_SPI_CFG1_FTHLV GENMASK(8, 5) +#define STM32H7_SPI_CFG1_RXDMAEN BIT(14) +#define STM32H7_SPI_CFG1_TXDMAEN BIT(15) +#define STM32H7_SPI_CFG1_MBR_SHIFT 28 +#define STM32H7_SPI_CFG1_MBR GENMASK(30, 28) +#define STM32H7_SPI_CFG1_MBR_MIN 0 +#define STM32H7_SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28) + +/* STM32H7_SPI_CFG2 bit fields */ +#define STM32H7_SPI_CFG2_MIDI_SHIFT 4 +#define STM32H7_SPI_CFG2_MIDI GENMASK(7, 4) +#define STM32H7_SPI_CFG2_COMM_SHIFT 17 +#define STM32H7_SPI_CFG2_COMM GENMASK(18, 17) +#define STM32H7_SPI_CFG2_SP_SHIFT 19 +#define STM32H7_SPI_CFG2_SP GENMASK(21, 19) +#define STM32H7_SPI_CFG2_MASTER BIT(22) +#define STM32H7_SPI_CFG2_LSBFRST BIT(23) +#define STM32H7_SPI_CFG2_CPHA BIT(24) +#define STM32H7_SPI_CFG2_CPOL BIT(25) +#define STM32H7_SPI_CFG2_SSM BIT(26) 
+#define STM32H7_SPI_CFG2_AFCNTR BIT(31) + +/* STM32H7_SPI_IER bit fields */ +#define STM32H7_SPI_IER_RXPIE BIT(0) +#define STM32H7_SPI_IER_TXPIE BIT(1) +#define STM32H7_SPI_IER_DXPIE BIT(2) +#define STM32H7_SPI_IER_EOTIE BIT(3) +#define STM32H7_SPI_IER_TXTFIE BIT(4) +#define STM32H7_SPI_IER_OVRIE BIT(6) +#define STM32H7_SPI_IER_MODFIE BIT(9) +#define STM32H7_SPI_IER_ALL GENMASK(10, 0) + +/* STM32H7_SPI_SR bit fields */ +#define STM32H7_SPI_SR_RXP BIT(0) +#define STM32H7_SPI_SR_TXP BIT(1) +#define STM32H7_SPI_SR_EOT BIT(3) +#define STM32H7_SPI_SR_OVR BIT(6) +#define STM32H7_SPI_SR_MODF BIT(9) +#define STM32H7_SPI_SR_SUSP BIT(11) +#define STM32H7_SPI_SR_RXPLVL_SHIFT 13 +#define STM32H7_SPI_SR_RXPLVL GENMASK(14, 13) +#define STM32H7_SPI_SR_RXWNE BIT(15) + +/* STM32H7_SPI_IFCR bit fields */ +#define STM32H7_SPI_IFCR_ALL GENMASK(11, 3) + +/* STM32H7_SPI_I2SCFGR bit fields */ +#define STM32H7_SPI_I2SCFGR_I2SMOD BIT(0) + +/* STM32H7 SPI Master Baud Rate min/max divisor */ +#define STM32H7_SPI_MBR_DIV_MIN (2 << STM32H7_SPI_CFG1_MBR_MIN) +#define STM32H7_SPI_MBR_DIV_MAX (2 << STM32H7_SPI_CFG1_MBR_MAX) + +/* STM32H7 SPI Communication mode */ +#define STM32H7_SPI_FULL_DUPLEX 0 +#define STM32H7_SPI_SIMPLEX_TX 1 +#define STM32H7_SPI_SIMPLEX_RX 2 +#define STM32H7_SPI_HALF_DUPLEX 3 + +/* SPI Communication type */ #define SPI_FULL_DUPLEX 0 #define SPI_SIMPLEX_TX 1 #define SPI_SIMPLEX_RX 2 -#define SPI_HALF_DUPLEX 3 +#define SPI_3WIRE_TX 3 +#define SPI_3WIRE_RX 4 #define SPI_1HZ_NS 1000000000 +/* + * use PIO for small transfers, avoiding DMA setup/teardown overhead for drivers + * without fifo buffers. + */ +#define SPI_DMA_MIN_BYTES 16 + +/** + * stm32_spi_reg - stm32 SPI register & bitfield desc + * @reg: register offset + * @mask: bitfield mask + * @shift: left shift + */ +struct stm32_spi_reg { + int reg; + int mask; + int shift; +}; + +/** + * stm32_spi_regspec - stm32 registers definition, compatible dependent data + * @en: enable register and SPI enable bit + * @dma_rx_en: SPI DMA RX enable register and SPI DMA RX enable bit + * @dma_tx_en: SPI DMA TX enable register and SPI DMA TX enable bit + * @cpol: clock polarity register and polarity bit + * @cpha: clock phase register and phase bit + * @lsb_first: LSB transmitted first register and bit + * @br: baud rate register and bitfields + * @rx: SPI RX data register + * @tx: SPI TX data register + */ +struct stm32_spi_regspec { + const struct stm32_spi_reg en; + const struct stm32_spi_reg dma_rx_en; + const struct stm32_spi_reg dma_tx_en; + const struct stm32_spi_reg cpol; + const struct stm32_spi_reg cpha; + const struct stm32_spi_reg lsb_first; + const struct stm32_spi_reg br; + const struct stm32_spi_reg rx; + const struct stm32_spi_reg tx; +}; + +struct stm32_spi; + +/** + * stm32_spi_cfg - stm32 compatible configuration data + * @regs: registers descriptions + * @get_fifo_size: routine to get fifo size + * @get_bpw_mask: routine to get bits per word mask + * @disable: routine to disable controller + * @config: routine to configure controller as SPI Master + * @set_bpw: routine to configure registers for bits per word + * @set_mode: routine to configure registers to desired mode + * @set_data_idleness: optional routine to configure registers to desired idle + * time between frames (if driver has this functionality) + * @set_number_of_data: optional routine to configure registers to desired + * number of data (if driver has this functionality) + * @can_dma: routine to determine if the transfer is eligible for DMA use + * @transfer_one_dma_start: routine 
to start the transfer of a single spi_transfer + * using DMA + * @dma_rx_cb: routine to call after DMA RX channel operation is complete + * @dma_tx_cb: routine to call after DMA TX channel operation is complete + * @transfer_one_irq: routine to configure interrupts for driver + * @irq_handler_event: Interrupt handler for SPI controller events + * @irq_handler_thread: thread of interrupt handler for SPI controller + * @baud_rate_div_min: minimum baud rate divisor + * @baud_rate_div_max: maximum baud rate divisor + * @has_fifo: boolean to know if fifo is used for driver + * @has_startbit: boolean to know if start bit is used to start transfer + */ +struct stm32_spi_cfg { + const struct stm32_spi_regspec *regs; + int (*get_fifo_size)(struct stm32_spi *spi); + int (*get_bpw_mask)(struct stm32_spi *spi); + void (*disable)(struct stm32_spi *spi); + int (*config)(struct stm32_spi *spi); + void (*set_bpw)(struct stm32_spi *spi); + int (*set_mode)(struct stm32_spi *spi, unsigned int comm_type); + void (*set_data_idleness)(struct stm32_spi *spi, u32 length); + int (*set_number_of_data)(struct stm32_spi *spi, u32 length); + void (*transfer_one_dma_start)(struct stm32_spi *spi); + void (*dma_rx_cb)(void *data); + void (*dma_tx_cb)(void *data); + int (*transfer_one_irq)(struct stm32_spi *spi); + irqreturn_t (*irq_handler_event)(int irq, void *dev_id); + irqreturn_t (*irq_handler_thread)(int irq, void *dev_id); + unsigned int baud_rate_div_min; + unsigned int baud_rate_div_max; + bool has_fifo; +}; + /** * struct stm32_spi - private data of the SPI controller * @dev: driver model representation of the controller * @master: controller master interface + * @cfg: compatible configuration data * @base: virtual memory area * @clk: hw kernel clock feeding the SPI clock generator * @clk_rate: rate of the hw kernel clock feeding the SPI clock generator @@ -151,6 +290,7 @@ struct stm32_spi { struct device *dev; struct spi_master *master; + const struct stm32_spi_cfg *cfg; void __iomem *base; struct clk *clk; u32 clk_rate; @@ -176,6 +316,40 @@ struct stm32_spi { dma_addr_t phys_addr; }; +static const struct stm32_spi_regspec stm32f4_spi_regspec = { + .en = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE }, + + .dma_rx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_RXDMAEN }, + .dma_tx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN }, + + .cpol = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPOL }, + .cpha = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPHA }, + .lsb_first = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_LSBFRST }, + .br = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_BR, STM32F4_SPI_CR1_BR_SHIFT }, + + .rx = { STM32F4_SPI_DR }, + .tx = { STM32F4_SPI_DR }, +}; + +static const struct stm32_spi_regspec stm32h7_spi_regspec = { + /* SPI data transfer is enabled but spi_ker_ck is idle. + * CFG1 and CFG2 registers are write protected when SPE is enabled. 
+ */ + .en = { STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE }, + + .dma_rx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_RXDMAEN }, + .dma_tx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN }, + + .cpol = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPOL }, + .cpha = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPHA }, + .lsb_first = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_LSBFRST }, + .br = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_MBR, + STM32H7_SPI_CFG1_MBR_SHIFT }, + + .rx = { STM32H7_SPI_RXDR }, + .tx = { STM32H7_SPI_TXDR }, +}; + static inline void stm32_spi_set_bits(struct stm32_spi *spi, u32 offset, u32 bits) { @@ -191,22 +365,22 @@ static inline void stm32_spi_clr_bits(struct stm32_spi *spi, } /** - * stm32_spi_get_fifo_size - Return fifo size + * stm32h7_spi_get_fifo_size - Return fifo size * @spi: pointer to the spi controller data structure */ -static int stm32_spi_get_fifo_size(struct stm32_spi *spi) +static int stm32h7_spi_get_fifo_size(struct stm32_spi *spi) { unsigned long flags; u32 count = 0; spin_lock_irqsave(&spi->lock, flags); - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE); + stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE); - while (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP) - writeb_relaxed(++count, spi->base + STM32_SPI_TXDR); + while (readl_relaxed(spi->base + STM32H7_SPI_SR) & STM32H7_SPI_SR_TXP) + writeb_relaxed(++count, spi->base + STM32H7_SPI_TXDR); - stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE); + stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE); spin_unlock_irqrestore(&spi->lock, flags); @@ -216,10 +390,20 @@ static int stm32_spi_get_fifo_size(struct stm32_spi *spi) } /** - * stm32_spi_get_bpw_mask - Return bits per word mask + * stm32f4_spi_get_bpw_mask - Return bits per word mask * @spi: pointer to the spi controller data structure */ -static int stm32_spi_get_bpw_mask(struct stm32_spi *spi) +static int stm32f4_spi_get_bpw_mask(struct stm32_spi *spi) +{ + dev_dbg(spi->dev, "8-bit or 16-bit data frame supported\n"); + return SPI_BPW_MASK(8) | SPI_BPW_MASK(16); +} + +/** + * stm32h7_spi_get_bpw_mask - Return bits per word mask + * @spi: pointer to the spi controller data structure + */ +static int stm32h7_spi_get_bpw_mask(struct stm32_spi *spi) { unsigned long flags; u32 cfg1, max_bpw; @@ -230,10 +414,11 @@ static int stm32_spi_get_bpw_mask(struct stm32_spi *spi) * The most significant bit at DSIZE bit field is reserved when the * maximum data size of periperal instances is limited to 16-bit */ - stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_DSIZE); + stm32_spi_set_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_DSIZE); - cfg1 = readl_relaxed(spi->base + STM32_SPI_CFG1); - max_bpw = (cfg1 & SPI_CFG1_DSIZE) >> SPI_CFG1_DSIZE_SHIFT; + cfg1 = readl_relaxed(spi->base + STM32H7_SPI_CFG1); + max_bpw = (cfg1 & STM32H7_SPI_CFG1_DSIZE) >> + STM32H7_SPI_CFG1_DSIZE_SHIFT; max_bpw += 1; spin_unlock_irqrestore(&spi->lock, flags); @@ -244,13 +429,16 @@ static int stm32_spi_get_bpw_mask(struct stm32_spi *spi) } /** - * stm32_spi_prepare_mbr - Determine SPI_CFG1.MBR value + * stm32_spi_prepare_mbr - Determine baud rate divisor value * @spi: pointer to the spi controller data structure * @speed_hz: requested speed + * @min_div: minimum baud rate divisor + * @max_div: maximum baud rate divisor * - * Return SPI_CFG1.MBR value in case of success or -EINVAL + * Return baud rate divisor value in case of success or -EINVAL */ -static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz) +static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz, 
+ u32 min_div, u32 max_div) { u32 div, mbrdiv; @@ -263,8 +451,7 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz) * no need to check it there. * However, we need to ensure the following calculations. */ - if (div < SPI_MBR_DIV_MIN || - div > SPI_MBR_DIV_MAX) + if ((div < min_div) || (div > max_div)) return -EINVAL; /* Determine the first power of 2 greater than or equal to div */ @@ -279,10 +466,10 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz) } /** - * stm32_spi_prepare_fthlv - Determine FIFO threshold level + * stm32h7_spi_prepare_fthlv - Determine FIFO threshold level * @spi: pointer to the spi controller data structure */ -static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi) +static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi) { u32 fthlv, half_fifo; @@ -306,32 +493,62 @@ static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi) } /** - * stm32_spi_write_txfifo - Write bytes in Transmit Data Register + * stm32f4_spi_write_tx - Write bytes to Transmit Data Register * @spi: pointer to the spi controller data structure * * Read from tx_buf depends on remaining bytes to avoid to read beyond * tx_buf end. */ -static void stm32_spi_write_txfifo(struct stm32_spi *spi) +static void stm32f4_spi_write_tx(struct stm32_spi *spi) +{ + if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) & + STM32F4_SPI_SR_TXE)) { + u32 offs = spi->cur_xferlen - spi->tx_len; + + if (spi->cur_bpw == 16) { + const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs); + + writew_relaxed(*tx_buf16, spi->base + STM32F4_SPI_DR); + spi->tx_len -= sizeof(u16); + } else { + const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs); + + writeb_relaxed(*tx_buf8, spi->base + STM32F4_SPI_DR); + spi->tx_len -= sizeof(u8); + } + } + + dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len); +} + +/** + * stm32h7_spi_write_txfifo - Write bytes in Transmit Data Register + * @spi: pointer to the spi controller data structure + * + * Read from tx_buf depends on remaining bytes to avoid to read beyond + * tx_buf end. + */ +static void stm32h7_spi_write_txfifo(struct stm32_spi *spi) { while ((spi->tx_len > 0) && - (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP)) { + (readl_relaxed(spi->base + STM32H7_SPI_SR) & + STM32H7_SPI_SR_TXP)) { u32 offs = spi->cur_xferlen - spi->tx_len; if (spi->tx_len >= sizeof(u32)) { const u32 *tx_buf32 = (const u32 *)(spi->tx_buf + offs); - writel_relaxed(*tx_buf32, spi->base + STM32_SPI_TXDR); + writel_relaxed(*tx_buf32, spi->base + STM32H7_SPI_TXDR); spi->tx_len -= sizeof(u32); } else if (spi->tx_len >= sizeof(u16)) { const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs); - writew_relaxed(*tx_buf16, spi->base + STM32_SPI_TXDR); + writew_relaxed(*tx_buf16, spi->base + STM32H7_SPI_TXDR); spi->tx_len -= sizeof(u16); } else { const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs); - writeb_relaxed(*tx_buf8, spi->base + STM32_SPI_TXDR); + writeb_relaxed(*tx_buf8, spi->base + STM32H7_SPI_TXDR); spi->tx_len -= sizeof(u8); } } @@ -340,43 +557,74 @@ static void stm32_spi_write_txfifo(struct stm32_spi *spi) } /** - * stm32_spi_read_rxfifo - Read bytes in Receive Data Register + * stm32f4_spi_read_rx - Read bytes from Receive Data Register + * @spi: pointer to the spi controller data structure + * + * Write in rx_buf depends on remaining bytes to avoid to write beyond + * rx_buf end. 
+ */ +static void stm32f4_spi_read_rx(struct stm32_spi *spi) +{ + if ((spi->rx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) & + STM32F4_SPI_SR_RXNE)) { + u32 offs = spi->cur_xferlen - spi->rx_len; + + if (spi->cur_bpw == 16) { + u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs); + + *rx_buf16 = readw_relaxed(spi->base + STM32F4_SPI_DR); + spi->rx_len -= sizeof(u16); + } else { + u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs); + + *rx_buf8 = readb_relaxed(spi->base + STM32F4_SPI_DR); + spi->rx_len -= sizeof(u8); + } + } + + dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->rx_len); +} + +/** + * stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register * @spi: pointer to the spi controller data structure * * Write in rx_buf depends on remaining bytes to avoid to write beyond * rx_buf end. */ -static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush) +static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush) { - u32 sr = readl_relaxed(spi->base + STM32_SPI_SR); - u32 rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT; + u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR); + u32 rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >> + STM32H7_SPI_SR_RXPLVL_SHIFT; while ((spi->rx_len > 0) && - ((sr & SPI_SR_RXP) || - (flush && ((sr & SPI_SR_RXWNE) || (rxplvl > 0))))) { + ((sr & STM32H7_SPI_SR_RXP) || + (flush && ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) { u32 offs = spi->cur_xferlen - spi->rx_len; if ((spi->rx_len >= sizeof(u32)) || - (flush && (sr & SPI_SR_RXWNE))) { + (flush && (sr & STM32H7_SPI_SR_RXWNE))) { u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs); - *rx_buf32 = readl_relaxed(spi->base + STM32_SPI_RXDR); + *rx_buf32 = readl_relaxed(spi->base + STM32H7_SPI_RXDR); spi->rx_len -= sizeof(u32); } else if ((spi->rx_len >= sizeof(u16)) || (flush && (rxplvl >= 2 || spi->cur_bpw > 8))) { u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs); - *rx_buf16 = readw_relaxed(spi->base + STM32_SPI_RXDR); + *rx_buf16 = readw_relaxed(spi->base + STM32H7_SPI_RXDR); spi->rx_len -= sizeof(u16); } else { u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs); - *rx_buf8 = readb_relaxed(spi->base + STM32_SPI_RXDR); + *rx_buf8 = readb_relaxed(spi->base + STM32H7_SPI_RXDR); spi->rx_len -= sizeof(u8); } - sr = readl_relaxed(spi->base + STM32_SPI_SR); - rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT; + sr = readl_relaxed(spi->base + STM32H7_SPI_SR); + rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >> + STM32H7_SPI_SR_RXPLVL_SHIFT; } dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__, @@ -386,26 +634,76 @@ static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush) /** * stm32_spi_enable - Enable SPI controller * @spi: pointer to the spi controller data structure - * - * SPI data transfer is enabled but spi_ker_ck is idle. - * SPI_CFG1 and SPI_CFG2 are now write protected. 
*/ static void stm32_spi_enable(struct stm32_spi *spi) { dev_dbg(spi->dev, "enable controller\n"); - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE); + stm32_spi_set_bits(spi, spi->cfg->regs->en.reg, + spi->cfg->regs->en.mask); } /** - * stm32_spi_disable - Disable SPI controller + * stm32f4_spi_disable - Disable SPI controller + * @spi: pointer to the spi controller data structure + */ +static void stm32f4_spi_disable(struct stm32_spi *spi) +{ + unsigned long flags; + u32 sr; + + dev_dbg(spi->dev, "disable controller\n"); + + spin_lock_irqsave(&spi->lock, flags); + + if (!(readl_relaxed(spi->base + STM32F4_SPI_CR1) & + STM32F4_SPI_CR1_SPE)) { + spin_unlock_irqrestore(&spi->lock, flags); + return; + } + + /* Disable interrupts */ + stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXEIE | + STM32F4_SPI_CR2_RXNEIE | + STM32F4_SPI_CR2_ERRIE); + + /* Wait until BSY = 0 */ + if (readl_relaxed_poll_timeout_atomic(spi->base + STM32F4_SPI_SR, + sr, !(sr & STM32F4_SPI_SR_BSY), + 10, 100000) < 0) { + dev_warn(spi->dev, "disabling condition timeout\n"); + } + + if (spi->cur_usedma && spi->dma_tx) + dmaengine_terminate_all(spi->dma_tx); + if (spi->cur_usedma && spi->dma_rx) + dmaengine_terminate_all(spi->dma_rx); + + stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE); + + stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN | + STM32F4_SPI_CR2_RXDMAEN); + + /* Sequence to clear OVR flag */ + readl_relaxed(spi->base + STM32F4_SPI_DR); + readl_relaxed(spi->base + STM32F4_SPI_SR); + + spin_unlock_irqrestore(&spi->lock, flags); +} + +/** + * stm32h7_spi_disable - Disable SPI controller * @spi: pointer to the spi controller data structure * * RX-Fifo is flushed when SPI controller is disabled. To prevent any data - * loss, use stm32_spi_read_rxfifo(flush) to read the remaining bytes in + * loss, use stm32h7_spi_read_rxfifo(flush) to read the remaining bytes in * RX-Fifo. + * Normally, if TSIZE has been configured, we should relax the hardware at the + * reception of the EOT interrupt. But in case of error, EOT will not be + * raised. So the subsystem unprepare_message call allows us to properly + * complete the transfer from an hardware point of view. 
*/ -static void stm32_spi_disable(struct stm32_spi *spi) +static void stm32h7_spi_disable(struct stm32_spi *spi) { unsigned long flags; u32 cr1, sr; @@ -414,23 +712,23 @@ static void stm32_spi_disable(struct stm32_spi *spi) spin_lock_irqsave(&spi->lock, flags); - cr1 = readl_relaxed(spi->base + STM32_SPI_CR1); + cr1 = readl_relaxed(spi->base + STM32H7_SPI_CR1); - if (!(cr1 & SPI_CR1_SPE)) { + if (!(cr1 & STM32H7_SPI_CR1_SPE)) { spin_unlock_irqrestore(&spi->lock, flags); return; } /* Wait on EOT or suspend the flow */ - if (readl_relaxed_poll_timeout_atomic(spi->base + STM32_SPI_SR, - sr, !(sr & SPI_SR_EOT), + if (readl_relaxed_poll_timeout_atomic(spi->base + STM32H7_SPI_SR, + sr, !(sr & STM32H7_SPI_SR_EOT), 10, 100000) < 0) { - if (cr1 & SPI_CR1_CSTART) { - writel_relaxed(cr1 | SPI_CR1_CSUSP, - spi->base + STM32_SPI_CR1); + if (cr1 & STM32H7_SPI_CR1_CSTART) { + writel_relaxed(cr1 | STM32H7_SPI_CR1_CSUSP, + spi->base + STM32H7_SPI_CR1); if (readl_relaxed_poll_timeout_atomic( - spi->base + STM32_SPI_SR, - sr, !(sr & SPI_SR_SUSP), + spi->base + STM32H7_SPI_SR, + sr, !(sr & STM32H7_SPI_SR_SUSP), 10, 100000) < 0) dev_warn(spi->dev, "Suspend request timeout\n"); @@ -438,21 +736,21 @@ static void stm32_spi_disable(struct stm32_spi *spi) } if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0)) - stm32_spi_read_rxfifo(spi, true); + stm32h7_spi_read_rxfifo(spi, true); - if (spi->cur_usedma && spi->tx_buf) + if (spi->cur_usedma && spi->dma_tx) dmaengine_terminate_all(spi->dma_tx); - if (spi->cur_usedma && spi->rx_buf) + if (spi->cur_usedma && spi->dma_rx) dmaengine_terminate_all(spi->dma_rx); - stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE); + stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE); - stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN | - SPI_CFG1_RXDMAEN); + stm32_spi_clr_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN | + STM32H7_SPI_CFG1_RXDMAEN); /* Disable interrupts and clear status flags */ - writel_relaxed(0, spi->base + STM32_SPI_IER); - writel_relaxed(SPI_IFCR_ALL, spi->base + STM32_SPI_IFCR); + writel_relaxed(0, spi->base + STM32H7_SPI_IER); + writel_relaxed(STM32H7_SPI_IFCR_ALL, spi->base + STM32H7_SPI_IFCR); spin_unlock_irqrestore(&spi->lock, flags); } @@ -460,26 +758,136 @@ static void stm32_spi_disable(struct stm32_spi *spi) /** * stm32_spi_can_dma - Determine if the transfer is eligible for DMA use * - * If the current transfer size is greater than fifo size, use DMA. + * If driver has fifo and the current transfer size is greater than fifo size, + * use DMA. Otherwise use DMA for transfer longer than defined DMA min bytes. */ static bool stm32_spi_can_dma(struct spi_master *master, struct spi_device *spi_dev, struct spi_transfer *transfer) { + unsigned int dma_size; struct stm32_spi *spi = spi_master_get_devdata(master); + if (spi->cfg->has_fifo) + dma_size = spi->fifo_size; + else + dma_size = SPI_DMA_MIN_BYTES; + dev_dbg(spi->dev, "%s: %s\n", __func__, - (transfer->len > spi->fifo_size) ? "true" : "false"); + (transfer->len > dma_size) ? 
"true" : "false"); + + return (transfer->len > dma_size); +} + +/** + * stm32f4_spi_irq_event - Interrupt handler for SPI controller events + * @irq: interrupt line + * @dev_id: SPI controller master interface + */ +static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id) +{ + struct spi_master *master = dev_id; + struct stm32_spi *spi = spi_master_get_devdata(master); + u32 sr, mask = 0; + unsigned long flags; + bool end = false; + + spin_lock_irqsave(&spi->lock, flags); + + sr = readl_relaxed(spi->base + STM32F4_SPI_SR); + /* + * BSY flag is not handled in interrupt but it is normal behavior when + * this flag is set. + */ + sr &= ~STM32F4_SPI_SR_BSY; + + if (!spi->cur_usedma && (spi->cur_comm == SPI_SIMPLEX_TX || + spi->cur_comm == SPI_3WIRE_TX)) { + /* OVR flag shouldn't be handled for TX only mode */ + sr &= ~STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE; + mask |= STM32F4_SPI_SR_TXE; + } + + if (!spi->cur_usedma && spi->cur_comm == SPI_FULL_DUPLEX) { + /* TXE flag is set and is handled when RXNE flag occurs */ + sr &= ~STM32F4_SPI_SR_TXE; + mask |= STM32F4_SPI_SR_RXNE | STM32F4_SPI_SR_OVR; + } + + if (!(sr & mask)) { + dev_dbg(spi->dev, "spurious IT (sr=0x%08x)\n", sr); + spin_unlock_irqrestore(&spi->lock, flags); + return IRQ_NONE; + } + + if (sr & STM32F4_SPI_SR_OVR) { + dev_warn(spi->dev, "Overrun: received value discarded\n"); + + /* Sequence to clear OVR flag */ + readl_relaxed(spi->base + STM32F4_SPI_DR); + readl_relaxed(spi->base + STM32F4_SPI_SR); + + /* + * If overrun is detected, it means that something went wrong, + * so stop the current transfer. Transfer can wait for next + * RXNE but DR is already read and end never happens. + */ + end = true; + goto end_irq; + } + + if (sr & STM32F4_SPI_SR_TXE) { + if (spi->tx_buf) + stm32f4_spi_write_tx(spi); + if (spi->tx_len == 0) + end = true; + } + + if (sr & STM32F4_SPI_SR_RXNE) { + stm32f4_spi_read_rx(spi); + if (spi->rx_len == 0) + end = true; + else /* Load data for discontinuous mode */ + stm32f4_spi_write_tx(spi); + } + +end_irq: + if (end) { + /* Immediately disable interrupts to do not generate new one */ + stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, + STM32F4_SPI_CR2_TXEIE | + STM32F4_SPI_CR2_RXNEIE | + STM32F4_SPI_CR2_ERRIE); + spin_unlock_irqrestore(&spi->lock, flags); + return IRQ_WAKE_THREAD; + } + + spin_unlock_irqrestore(&spi->lock, flags); + return IRQ_HANDLED; +} + +/** + * stm32f4_spi_irq_thread - Thread of interrupt handler for SPI controller + * @irq: interrupt line + * @dev_id: SPI controller master interface + */ +static irqreturn_t stm32f4_spi_irq_thread(int irq, void *dev_id) +{ + struct spi_master *master = dev_id; + struct stm32_spi *spi = spi_master_get_devdata(master); + + spi_finalize_current_transfer(master); + stm32f4_spi_disable(spi); - return (transfer->len > spi->fifo_size); + return IRQ_HANDLED; } /** - * stm32_spi_irq - Interrupt handler for SPI controller events + * stm32h7_spi_irq_thread - Thread of interrupt handler for SPI controller * @irq: interrupt line * @dev_id: SPI controller master interface */ -static irqreturn_t stm32_spi_irq(int irq, void *dev_id) +static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) { struct spi_master *master = dev_id; struct stm32_spi *spi = spi_master_get_devdata(master); @@ -489,19 +897,19 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id) spin_lock_irqsave(&spi->lock, flags); - sr = readl_relaxed(spi->base + STM32_SPI_SR); - ier = readl_relaxed(spi->base + STM32_SPI_IER); + sr = readl_relaxed(spi->base + STM32H7_SPI_SR); + ier = 
readl_relaxed(spi->base + STM32H7_SPI_IER); mask = ier; /* EOTIE is triggered on EOT, SUSP and TXC events. */ - mask |= SPI_SR_SUSP; + mask |= STM32H7_SPI_SR_SUSP; /* * When TXTF is set, DXPIE and TXPIE are cleared. So in case of * Full-Duplex, need to poll RXP event to know if there are remaining * data, before disabling SPI. */ if (spi->rx_buf && !spi->cur_usedma) - mask |= SPI_SR_RXP; + mask |= STM32H7_SPI_SR_RXP; if (!(sr & mask)) { dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n", @@ -510,10 +918,10 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id) return IRQ_NONE; } - if (sr & SPI_SR_SUSP) { + if (sr & STM32H7_SPI_SR_SUSP) { dev_warn(spi->dev, "Communication suspended\n"); if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) - stm32_spi_read_rxfifo(spi, false); + stm32h7_spi_read_rxfifo(spi, false); /* * If communication is suspended while using DMA, it means * that something went wrong, so stop the current transfer @@ -522,15 +930,15 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id) end = true; } - if (sr & SPI_SR_MODF) { + if (sr & STM32H7_SPI_SR_MODF) { dev_warn(spi->dev, "Mode fault: transfer aborted\n"); end = true; } - if (sr & SPI_SR_OVR) { + if (sr & STM32H7_SPI_SR_OVR) { dev_warn(spi->dev, "Overrun: received value discarded\n"); if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) - stm32_spi_read_rxfifo(spi, false); + stm32h7_spi_read_rxfifo(spi, false); /* * If overrun is detected while using DMA, it means that * something went wrong, so stop the current transfer @@ -539,27 +947,27 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id) end = true; } - if (sr & SPI_SR_EOT) { + if (sr & STM32H7_SPI_SR_EOT) { if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) - stm32_spi_read_rxfifo(spi, true); + stm32h7_spi_read_rxfifo(spi, true); end = true; } - if (sr & SPI_SR_TXP) + if (sr & STM32H7_SPI_SR_TXP) if (!spi->cur_usedma && (spi->tx_buf && (spi->tx_len > 0))) - stm32_spi_write_txfifo(spi); + stm32h7_spi_write_txfifo(spi); - if (sr & SPI_SR_RXP) + if (sr & STM32H7_SPI_SR_RXP) if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) - stm32_spi_read_rxfifo(spi, false); + stm32h7_spi_read_rxfifo(spi, false); - writel_relaxed(mask, spi->base + STM32_SPI_IFCR); + writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR); spin_unlock_irqrestore(&spi->lock, flags); if (end) { spi_finalize_current_transfer(master); - stm32_spi_disable(spi); + stm32h7_spi_disable(spi); } return IRQ_HANDLED; @@ -598,7 +1006,7 @@ static int stm32_spi_prepare_msg(struct spi_master *master, struct spi_device *spi_dev = msg->spi; struct device_node *np = spi_dev->dev.of_node; unsigned long flags; - u32 cfg2_clrb = 0, cfg2_setb = 0; + u32 clrb = 0, setb = 0; /* SPI slave device may need time between data frames */ spi->cur_midi = 0; @@ -606,19 +1014,19 @@ static int stm32_spi_prepare_msg(struct spi_master *master, dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi); if (spi_dev->mode & SPI_CPOL) - cfg2_setb |= SPI_CFG2_CPOL; + setb |= spi->cfg->regs->cpol.mask; else - cfg2_clrb |= SPI_CFG2_CPOL; + clrb |= spi->cfg->regs->cpol.mask; if (spi_dev->mode & SPI_CPHA) - cfg2_setb |= SPI_CFG2_CPHA; + setb |= spi->cfg->regs->cpha.mask; else - cfg2_clrb |= SPI_CFG2_CPHA; + clrb |= spi->cfg->regs->cpha.mask; if (spi_dev->mode & SPI_LSB_FIRST) - cfg2_setb |= SPI_CFG2_LSBFRST; + setb |= spi->cfg->regs->lsb_first.mask; else - cfg2_clrb |= SPI_CFG2_LSBFRST; + clrb |= spi->cfg->regs->lsb_first.mask; dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n", 
spi_dev->mode & SPI_CPOL, @@ -628,11 +1036,12 @@ static int stm32_spi_prepare_msg(struct spi_master *master, spin_lock_irqsave(&spi->lock, flags); - if (cfg2_clrb || cfg2_setb) + /* CPOL, CPHA and LSB FIRST bits have common register */ + if (clrb || setb) writel_relaxed( - (readl_relaxed(spi->base + STM32_SPI_CFG2) & - ~cfg2_clrb) | cfg2_setb, - spi->base + STM32_SPI_CFG2); + (readl_relaxed(spi->base + spi->cfg->regs->cpol.reg) & + ~clrb) | setb, + spi->base + spi->cfg->regs->cpol.reg); spin_unlock_irqrestore(&spi->lock, flags); @@ -640,12 +1049,40 @@ static int stm32_spi_prepare_msg(struct spi_master *master, } /** - * stm32_spi_dma_cb - dma callback + * stm32f4_spi_dma_tx_cb - dma callback + * + * DMA callback is called when the transfer is complete for DMA TX channel. + */ +static void stm32f4_spi_dma_tx_cb(void *data) +{ + struct stm32_spi *spi = data; + + if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) { + spi_finalize_current_transfer(spi->master); + stm32f4_spi_disable(spi); + } +} + +/** + * stm32f4_spi_dma_rx_cb - dma callback + * + * DMA callback is called when the transfer is complete for DMA RX channel. + */ +static void stm32f4_spi_dma_rx_cb(void *data) +{ + struct stm32_spi *spi = data; + + spi_finalize_current_transfer(spi->master); + stm32f4_spi_disable(spi); +} + +/** + * stm32h7_spi_dma_cb - dma callback * * DMA callback is called when the transfer is complete or when an error * occurs. If the transfer is complete, EOT flag is raised. */ -static void stm32_spi_dma_cb(void *data) +static void stm32h7_spi_dma_cb(void *data) { struct stm32_spi *spi = data; unsigned long flags; @@ -653,11 +1090,11 @@ static void stm32_spi_dma_cb(void *data) spin_lock_irqsave(&spi->lock, flags); - sr = readl_relaxed(spi->base + STM32_SPI_SR); + sr = readl_relaxed(spi->base + STM32H7_SPI_SR); spin_unlock_irqrestore(&spi->lock, flags); - if (!(sr & SPI_SR_EOT)) + if (!(sr & STM32H7_SPI_SR_EOT)) dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr); /* Now wait for EOT, or SUSP or OVR in case of error */ @@ -681,23 +1118,27 @@ static void stm32_spi_dma_config(struct stm32_spi *spi, else buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; - /* Valid for DMA Half or Full Fifo threshold */ - if (spi->cur_fthlv == 2) + if (spi->cfg->has_fifo) { + /* Valid for DMA Half or Full Fifo threshold */ + if (spi->cur_fthlv == 2) + maxburst = 1; + else + maxburst = spi->cur_fthlv; + } else { maxburst = 1; - else - maxburst = spi->cur_fthlv; + } memset(dma_conf, 0, sizeof(struct dma_slave_config)); dma_conf->direction = dir; if (dma_conf->direction == DMA_DEV_TO_MEM) { /* RX */ - dma_conf->src_addr = spi->phys_addr + STM32_SPI_RXDR; + dma_conf->src_addr = spi->phys_addr + spi->cfg->regs->rx.reg; dma_conf->src_addr_width = buswidth; dma_conf->src_maxburst = maxburst; dev_dbg(spi->dev, "Rx DMA config buswidth=%d, maxburst=%d\n", buswidth, maxburst); } else if (dma_conf->direction == DMA_MEM_TO_DEV) { /* TX */ - dma_conf->dst_addr = spi->phys_addr + STM32_SPI_TXDR; + dma_conf->dst_addr = spi->phys_addr + spi->cfg->regs->tx.reg; dma_conf->dst_addr_width = buswidth; dma_conf->dst_maxburst = maxburst; @@ -707,27 +1148,68 @@ static void stm32_spi_dma_config(struct stm32_spi *spi, } /** - * stm32_spi_transfer_one_irq - transfer a single spi_transfer using - * interrupts + * stm32f4_spi_transfer_one_irq - transfer a single spi_transfer using + * interrupts * * It must returns 0 if the transfer is finished or 1 if the transfer is still * in progress. 
*/ -static int stm32_spi_transfer_one_irq(struct stm32_spi *spi) +static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi) +{ + unsigned long flags; + u32 cr2 = 0; + + /* Enable the interrupts relative to the current communication mode */ + if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) { + cr2 |= STM32F4_SPI_CR2_TXEIE; + } else if (spi->cur_comm == SPI_FULL_DUPLEX) { + /* In transmit-only mode, the OVR flag is set in the SR register + * since the received data are never read. Therefore set OVR + * interrupt only when rx buffer is available. + */ + cr2 |= STM32F4_SPI_CR2_RXNEIE | STM32F4_SPI_CR2_ERRIE; + } else { + return -EINVAL; + } + + spin_lock_irqsave(&spi->lock, flags); + + stm32_spi_set_bits(spi, STM32F4_SPI_CR2, cr2); + + stm32_spi_enable(spi); + + /* starting data transfer when buffer is loaded */ + if (spi->tx_buf) + stm32f4_spi_write_tx(spi); + + spin_unlock_irqrestore(&spi->lock, flags); + + return 1; +} + +/** + * stm32h7_spi_transfer_one_irq - transfer a single spi_transfer using + * interrupts + * + * It must returns 0 if the transfer is finished or 1 if the transfer is still + * in progress. + */ +static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi) { unsigned long flags; u32 ier = 0; /* Enable the interrupts relative to the current communication mode */ if (spi->tx_buf && spi->rx_buf) /* Full Duplex */ - ier |= SPI_IER_DXPIE; + ier |= STM32H7_SPI_IER_DXPIE; else if (spi->tx_buf) /* Half-Duplex TX dir or Simplex TX */ - ier |= SPI_IER_TXPIE; + ier |= STM32H7_SPI_IER_TXPIE; else if (spi->rx_buf) /* Half-Duplex RX dir or Simplex RX */ - ier |= SPI_IER_RXPIE; + ier |= STM32H7_SPI_IER_RXPIE; /* Enable the interrupts relative to the end of transfer */ - ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE; + ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE | + STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE; spin_lock_irqsave(&spi->lock, flags); @@ -735,11 +1217,11 @@ static int stm32_spi_transfer_one_irq(struct stm32_spi *spi) /* Be sure to have data in fifo before starting data transfer */ if (spi->tx_buf) - stm32_spi_write_txfifo(spi); + stm32h7_spi_write_txfifo(spi); - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART); + stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART); - writel_relaxed(ier, spi->base + STM32_SPI_IER); + writel_relaxed(ier, spi->base + STM32H7_SPI_IER); spin_unlock_irqrestore(&spi->lock, flags); @@ -747,6 +1229,43 @@ static int stm32_spi_transfer_one_irq(struct stm32_spi *spi) } /** + * stm32f4_spi_transfer_one_dma_start - Set SPI driver registers to start + * transfer using DMA + */ +static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi) +{ + /* In DMA mode end of transfer is handled by DMA TX or RX callback. */ + if (spi->cur_comm == SPI_SIMPLEX_RX || spi->cur_comm == SPI_3WIRE_RX || + spi->cur_comm == SPI_FULL_DUPLEX) { + /* + * In transmit-only mode, the OVR flag is set in the SR register + * since the received data are never read. Therefore set OVR + * interrupt only when rx buffer is available. 
+ */ + stm32_spi_set_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_ERRIE); + } + + stm32_spi_enable(spi); +} + +/** + * stm32h7_spi_transfer_one_dma_start - Set SPI driver registers to start + * transfer using DMA + */ +static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi) +{ + /* Enable the interrupts relative to the end of transfer */ + stm32_spi_set_bits(spi, STM32H7_SPI_IER, STM32H7_SPI_IER_EOTIE | + STM32H7_SPI_IER_TXTFIE | + STM32H7_SPI_IER_OVRIE | + STM32H7_SPI_IER_MODFIE); + + stm32_spi_enable(spi); + + stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART); +} + +/** * stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA * * It must returns 0 if the transfer is finished or 1 if the transfer is still @@ -758,17 +1277,17 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, struct dma_slave_config tx_dma_conf, rx_dma_conf; struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc; unsigned long flags; - u32 ier = 0; spin_lock_irqsave(&spi->lock, flags); rx_dma_desc = NULL; - if (spi->rx_buf) { + if (spi->rx_buf && spi->dma_rx) { stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM); dmaengine_slave_config(spi->dma_rx, &rx_dma_conf); /* Enable Rx DMA request */ - stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN); + stm32_spi_set_bits(spi, spi->cfg->regs->dma_rx_en.reg, + spi->cfg->regs->dma_rx_en.mask); rx_dma_desc = dmaengine_prep_slave_sg( spi->dma_rx, xfer->rx_sg.sgl, @@ -778,7 +1297,7 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, } tx_dma_desc = NULL; - if (spi->tx_buf) { + if (spi->tx_buf && spi->dma_tx) { stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV); dmaengine_slave_config(spi->dma_tx, &tx_dma_conf); @@ -789,12 +1308,15 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, DMA_PREP_INTERRUPT); } - if ((spi->tx_buf && !tx_dma_desc) || - (spi->rx_buf && !rx_dma_desc)) + if ((spi->tx_buf && spi->dma_tx && !tx_dma_desc) || + (spi->rx_buf && spi->dma_rx && !rx_dma_desc)) + goto dma_desc_error; + + if (spi->cur_comm == SPI_FULL_DUPLEX && (!tx_dma_desc || !rx_dma_desc)) goto dma_desc_error; if (rx_dma_desc) { - rx_dma_desc->callback = stm32_spi_dma_cb; + rx_dma_desc->callback = spi->cfg->dma_rx_cb; rx_dma_desc->callback_param = spi; if (dma_submit_error(dmaengine_submit(rx_dma_desc))) { @@ -806,8 +1328,9 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, } if (tx_dma_desc) { - if (spi->cur_comm == SPI_SIMPLEX_TX) { - tx_dma_desc->callback = stm32_spi_dma_cb; + if (spi->cur_comm == SPI_SIMPLEX_TX || + spi->cur_comm == SPI_3WIRE_TX) { + tx_dma_desc->callback = spi->cfg->dma_tx_cb; tx_dma_desc->callback_param = spi; } @@ -819,130 +1342,278 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, dma_async_issue_pending(spi->dma_tx); /* Enable Tx DMA request */ - stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN); + stm32_spi_set_bits(spi, spi->cfg->regs->dma_tx_en.reg, + spi->cfg->regs->dma_tx_en.mask); } - /* Enable the interrupts relative to the end of transfer */ - ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE; - writel_relaxed(ier, spi->base + STM32_SPI_IER); - - stm32_spi_enable(spi); - - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART); + spi->cfg->transfer_one_dma_start(spi); spin_unlock_irqrestore(&spi->lock, flags); return 1; dma_submit_error: - if (spi->rx_buf) + if (spi->dma_rx) dmaengine_terminate_all(spi->dma_rx); dma_desc_error: - stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN); + stm32_spi_clr_bits(spi, 
spi->cfg->regs->dma_rx_en.reg, + spi->cfg->regs->dma_rx_en.mask); spin_unlock_irqrestore(&spi->lock, flags); dev_info(spi->dev, "DMA issue: fall back to irq transfer\n"); - return stm32_spi_transfer_one_irq(spi); + spi->cur_usedma = false; + return spi->cfg->transfer_one_irq(spi); } /** - * stm32_spi_transfer_one_setup - common setup to transfer a single - * spi_transfer either using DMA or - * interrupts. + * stm32f4_spi_set_bpw - Configure bits per word + * @spi: pointer to the spi controller data structure */ -static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, - struct spi_device *spi_dev, - struct spi_transfer *transfer) +static void stm32f4_spi_set_bpw(struct stm32_spi *spi) { - unsigned long flags; - u32 cfg1_clrb = 0, cfg1_setb = 0, cfg2_clrb = 0, cfg2_setb = 0; - u32 mode, nb_words; - int ret = 0; - - spin_lock_irqsave(&spi->lock, flags); + if (spi->cur_bpw == 16) + stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF); + else + stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF); +} - if (spi->cur_bpw != transfer->bits_per_word) { - u32 bpw, fthlv; +/** + * stm32h7_spi_set_bpw - configure bits per word + * @spi: pointer to the spi controller data structure + */ +static void stm32h7_spi_set_bpw(struct stm32_spi *spi) +{ + u32 bpw, fthlv; + u32 cfg1_clrb = 0, cfg1_setb = 0; - spi->cur_bpw = transfer->bits_per_word; - bpw = spi->cur_bpw - 1; + bpw = spi->cur_bpw - 1; - cfg1_clrb |= SPI_CFG1_DSIZE; - cfg1_setb |= (bpw << SPI_CFG1_DSIZE_SHIFT) & SPI_CFG1_DSIZE; + cfg1_clrb |= STM32H7_SPI_CFG1_DSIZE; + cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) & + STM32H7_SPI_CFG1_DSIZE; - spi->cur_fthlv = stm32_spi_prepare_fthlv(spi); - fthlv = spi->cur_fthlv - 1; + spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi); + fthlv = spi->cur_fthlv - 1; - cfg1_clrb |= SPI_CFG1_FTHLV; - cfg1_setb |= (fthlv << SPI_CFG1_FTHLV_SHIFT) & SPI_CFG1_FTHLV; - } + cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV; + cfg1_setb |= (fthlv << STM32H7_SPI_CFG1_FTHLV_SHIFT) & + STM32H7_SPI_CFG1_FTHLV; - if (spi->cur_speed != transfer->speed_hz) { - int mbr; + writel_relaxed( + (readl_relaxed(spi->base + STM32H7_SPI_CFG1) & + ~cfg1_clrb) | cfg1_setb, + spi->base + STM32H7_SPI_CFG1); +} - /* Update spi->cur_speed with real clock speed */ - mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz); - if (mbr < 0) { - ret = mbr; - goto out; - } +/** + * stm32_spi_set_mbr - Configure baud rate divisor in master mode + * @spi: pointer to the spi controller data structure + * @mbrdiv: baud rate divisor value + */ +static void stm32_spi_set_mbr(struct stm32_spi *spi, u32 mbrdiv) +{ + u32 clrb = 0, setb = 0; - transfer->speed_hz = spi->cur_speed; + clrb |= spi->cfg->regs->br.mask; + setb |= ((u32)mbrdiv << spi->cfg->regs->br.shift) & + spi->cfg->regs->br.mask; - cfg1_clrb |= SPI_CFG1_MBR; - cfg1_setb |= ((u32)mbr << SPI_CFG1_MBR_SHIFT) & SPI_CFG1_MBR; - } + writel_relaxed((readl_relaxed(spi->base + spi->cfg->regs->br.reg) & + ~clrb) | setb, + spi->base + spi->cfg->regs->br.reg); +} - if (cfg1_clrb || cfg1_setb) - writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG1) & - ~cfg1_clrb) | cfg1_setb, - spi->base + STM32_SPI_CFG1); +/** + * stm32_spi_communication_type - return transfer communication type + * @spi_dev: pointer to the spi device + * transfer: pointer to spi transfer + */ +static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev, + struct spi_transfer *transfer) +{ + unsigned int type = SPI_FULL_DUPLEX; - mode = SPI_FULL_DUPLEX; if (spi_dev->mode & SPI_3WIRE) { /* MISO/MOSI signals 
shared */ /* * SPI_3WIRE and xfer->tx_buf != NULL and xfer->rx_buf != NULL - * is forbidden und unvalidated by SPI subsystem so depending + * is forbidden and unvalidated by SPI subsystem so depending * on the valid buffer, we can determine the direction of the * transfer. */ - mode = SPI_HALF_DUPLEX; if (!transfer->tx_buf) - stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR); - else if (!transfer->rx_buf) - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR); + type = SPI_3WIRE_RX; + else + type = SPI_3WIRE_TX; } else { if (!transfer->tx_buf) - mode = SPI_SIMPLEX_RX; + type = SPI_SIMPLEX_RX; else if (!transfer->rx_buf) - mode = SPI_SIMPLEX_TX; + type = SPI_SIMPLEX_TX; + } + + return type; +} + +/** + * stm32f4_spi_set_mode - configure communication mode + * @spi: pointer to the spi controller data structure + * @comm_type: type of communication to configure + */ +static int stm32f4_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type) +{ + if (comm_type == SPI_3WIRE_TX || comm_type == SPI_SIMPLEX_TX) { + stm32_spi_set_bits(spi, STM32F4_SPI_CR1, + STM32F4_SPI_CR1_BIDIMODE | + STM32F4_SPI_CR1_BIDIOE); + } else if (comm_type == SPI_FULL_DUPLEX) { + stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, + STM32F4_SPI_CR1_BIDIMODE | + STM32F4_SPI_CR1_BIDIOE); + } else { + return -EINVAL; } - if (spi->cur_comm != mode) { - spi->cur_comm = mode; - cfg2_clrb |= SPI_CFG2_COMM; - cfg2_setb |= (mode << SPI_CFG2_COMM_SHIFT) & SPI_CFG2_COMM; + return 0; +} + +/** + * stm32h7_spi_set_mode - configure communication mode + * @spi: pointer to the spi controller data structure + * @comm_type: type of communication to configure + */ +static int stm32h7_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type) +{ + u32 mode; + u32 cfg2_clrb = 0, cfg2_setb = 0; + + if (comm_type == SPI_3WIRE_RX) { + mode = STM32H7_SPI_HALF_DUPLEX; + stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR); + } else if (comm_type == SPI_3WIRE_TX) { + mode = STM32H7_SPI_HALF_DUPLEX; + stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR); + } else if (comm_type == SPI_SIMPLEX_RX) { + mode = STM32H7_SPI_SIMPLEX_RX; + } else if (comm_type == SPI_SIMPLEX_TX) { + mode = STM32H7_SPI_SIMPLEX_TX; + } else { + mode = STM32H7_SPI_FULL_DUPLEX; } - cfg2_clrb |= SPI_CFG2_MIDI; - if ((transfer->len > 1) && (spi->cur_midi > 0)) { + cfg2_clrb |= STM32H7_SPI_CFG2_COMM; + cfg2_setb |= (mode << STM32H7_SPI_CFG2_COMM_SHIFT) & + STM32H7_SPI_CFG2_COMM; + + writel_relaxed( + (readl_relaxed(spi->base + STM32H7_SPI_CFG2) & + ~cfg2_clrb) | cfg2_setb, + spi->base + STM32H7_SPI_CFG2); + + return 0; +} + +/** + * stm32h7_spi_data_idleness - configure minimum time delay inserted between two + * consecutive data frames in master mode + * @spi: pointer to the spi controller data structure + * @len: transfer len + */ +static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len) +{ + u32 cfg2_clrb = 0, cfg2_setb = 0; + + cfg2_clrb |= STM32H7_SPI_CFG2_MIDI; + if ((len > 1) && (spi->cur_midi > 0)) { u32 sck_period_ns = DIV_ROUND_UP(SPI_1HZ_NS, spi->cur_speed); u32 midi = min((u32)DIV_ROUND_UP(spi->cur_midi, sck_period_ns), - (u32)SPI_CFG2_MIDI >> SPI_CFG2_MIDI_SHIFT); + (u32)STM32H7_SPI_CFG2_MIDI >> + STM32H7_SPI_CFG2_MIDI_SHIFT); dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n", sck_period_ns, midi, midi * sck_period_ns); + cfg2_setb |= (midi << STM32H7_SPI_CFG2_MIDI_SHIFT) & + STM32H7_SPI_CFG2_MIDI; + } + + writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CFG2) & + ~cfg2_clrb) | cfg2_setb, + spi->base + STM32H7_SPI_CFG2); +} + 
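(Editor's aside, not part of the patch: a worked example of the MIDI arithmetic in stm32h7_spi_data_idleness() above; the 10 MHz SCK rate and the 250 ns inter-data idleness request are illustrative values only.)

/*
 * With spi->cur_speed = 10000000 (10 MHz) and spi->cur_midi = 250 ns:
 *
 *   sck_period_ns = DIV_ROUND_UP(SPI_1HZ_NS, 10000000) = 100 ns
 *   midi = min(DIV_ROUND_UP(250, 100),
 *              STM32H7_SPI_CFG2_MIDI >> STM32H7_SPI_CFG2_MIDI_SHIFT)
 *        = min(3, 15) = 3
 *
 * so three SCK periods (300 ns) of idle time are inserted between
 * consecutive data frames, and the CFG2.MIDI field is programmed with 3.
 */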
+/** + * stm32h7_spi_number_of_data - configure number of data at current transfer + * @spi: pointer to the spi controller data structure + * @len: transfer length + */ +static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words) +{ + u32 cr2_clrb = 0, cr2_setb = 0; + + if (nb_words <= (STM32H7_SPI_CR2_TSIZE >> + STM32H7_SPI_CR2_TSIZE_SHIFT)) { + cr2_clrb |= STM32H7_SPI_CR2_TSIZE; + cr2_setb = nb_words << STM32H7_SPI_CR2_TSIZE_SHIFT; + writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CR2) & + ~cr2_clrb) | cr2_setb, + spi->base + STM32H7_SPI_CR2); + } else { + return -EMSGSIZE; + } + + return 0; +} + +/** + * stm32_spi_transfer_one_setup - common setup to transfer a single + * spi_transfer either using DMA or + * interrupts. + */ +static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, + struct spi_device *spi_dev, + struct spi_transfer *transfer) +{ + unsigned long flags; + unsigned int comm_type; + int nb_words, ret = 0; + + spin_lock_irqsave(&spi->lock, flags); + + if (spi->cur_bpw != transfer->bits_per_word) { + spi->cur_bpw = transfer->bits_per_word; + spi->cfg->set_bpw(spi); + } - cfg2_setb |= (midi << SPI_CFG2_MIDI_SHIFT) & SPI_CFG2_MIDI; + if (spi->cur_speed != transfer->speed_hz) { + int mbr; + + /* Update spi->cur_speed with real clock speed */ + mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz, + spi->cfg->baud_rate_div_min, + spi->cfg->baud_rate_div_max); + if (mbr < 0) { + ret = mbr; + goto out; + } + + transfer->speed_hz = spi->cur_speed; + stm32_spi_set_mbr(spi, mbr); } - if (cfg2_clrb || cfg2_setb) - writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG2) & - ~cfg2_clrb) | cfg2_setb, - spi->base + STM32_SPI_CFG2); + comm_type = stm32_spi_communication_type(spi_dev, transfer); + if (spi->cur_comm != comm_type) { + ret = spi->cfg->set_mode(spi, comm_type); + + if (ret < 0) + goto out; + + spi->cur_comm = comm_type; + } + + if (spi->cfg->set_data_idleness) + spi->cfg->set_data_idleness(spi, transfer->len); if (spi->cur_bpw <= 8) nb_words = transfer->len; @@ -950,13 +1621,11 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, nb_words = DIV_ROUND_UP(transfer->len * 8, 16); else nb_words = DIV_ROUND_UP(transfer->len * 8, 32); - nb_words <<= SPI_CR2_TSIZE_SHIFT; - if (nb_words <= SPI_CR2_TSIZE) { - writel_relaxed(nb_words, spi->base + STM32_SPI_CR2); - } else { - ret = -EMSGSIZE; - goto out; + if (spi->cfg->set_number_of_data) { + ret = spi->cfg->set_number_of_data(spi, nb_words); + if (ret < 0) + goto out; } spi->cur_xferlen = transfer->len; @@ -997,7 +1666,7 @@ static int stm32_spi_transfer_one(struct spi_master *master, spi->rx_len = spi->rx_buf ? transfer->len : 0; spi->cur_usedma = (master->can_dma && - stm32_spi_can_dma(master, spi_dev, transfer)); + master->can_dma(master, spi_dev, transfer)); ret = stm32_spi_transfer_one_setup(spi, spi_dev, transfer); if (ret) { @@ -1008,47 +1677,73 @@ static int stm32_spi_transfer_one(struct spi_master *master, if (spi->cur_usedma) return stm32_spi_transfer_one_dma(spi, transfer); else - return stm32_spi_transfer_one_irq(spi); + return spi->cfg->transfer_one_irq(spi); } /** * stm32_spi_unprepare_msg - relax the hardware - * - * Normally, if TSIZE has been configured, we should relax the hardware at the - * reception of the EOT interrupt. But in case of error, EOT will not be - * raised. So the subsystem unprepare_message call allows us to properly - * complete the transfer from an hardware point of view. 
*/ static int stm32_spi_unprepare_msg(struct spi_master *master, struct spi_message *msg) { struct stm32_spi *spi = spi_master_get_devdata(master); - stm32_spi_disable(spi); + spi->cfg->disable(spi); + + return 0; +} + +/** + * stm32f4_spi_config - Configure SPI controller as SPI master + */ +static int stm32f4_spi_config(struct stm32_spi *spi) +{ + unsigned long flags; + + spin_lock_irqsave(&spi->lock, flags); + + /* Ensure I2SMOD bit is kept cleared */ + stm32_spi_clr_bits(spi, STM32F4_SPI_I2SCFGR, + STM32F4_SPI_I2SCFGR_I2SMOD); + + /* + * - SS input value high + * - transmitter half duplex direction + * - Set the master mode (default Motorola mode) + * - Consider 1 master/n slaves configuration and + * SS input value is determined by the SSI bit + */ + stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SSI | + STM32F4_SPI_CR1_BIDIOE | + STM32F4_SPI_CR1_MSTR | + STM32F4_SPI_CR1_SSM); + + spin_unlock_irqrestore(&spi->lock, flags); return 0; } /** - * stm32_spi_config - Configure SPI controller as SPI master + * stm32h7_spi_config - Configure SPI controller as SPI master */ -static int stm32_spi_config(struct stm32_spi *spi) +static int stm32h7_spi_config(struct stm32_spi *spi) { unsigned long flags; spin_lock_irqsave(&spi->lock, flags); /* Ensure I2SMOD bit is kept cleared */ - stm32_spi_clr_bits(spi, STM32_SPI_I2SCFGR, SPI_I2SCFGR_I2SMOD); + stm32_spi_clr_bits(spi, STM32H7_SPI_I2SCFGR, + STM32H7_SPI_I2SCFGR_I2SMOD); /* * - SS input value high * - transmitter half duplex direction * - automatic communication suspend when RX-Fifo is full */ - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SSI | - SPI_CR1_HDDIR | - SPI_CR1_MASRX); + stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SSI | + STM32H7_SPI_CR1_HDDIR | + STM32H7_SPI_CR1_MASRX); /* * - Set the master mode (default Motorola mode) @@ -1056,17 +1751,56 @@ static int stm32_spi_config(struct stm32_spi *spi) * SS input value is determined by the SSI bit * - keep control of all associated GPIOs */ - stm32_spi_set_bits(spi, STM32_SPI_CFG2, SPI_CFG2_MASTER | - SPI_CFG2_SSM | - SPI_CFG2_AFCNTR); + stm32_spi_set_bits(spi, STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_MASTER | + STM32H7_SPI_CFG2_SSM | + STM32H7_SPI_CFG2_AFCNTR); spin_unlock_irqrestore(&spi->lock, flags); return 0; } +static const struct stm32_spi_cfg stm32f4_spi_cfg = { + .regs = &stm32f4_spi_regspec, + .get_bpw_mask = stm32f4_spi_get_bpw_mask, + .disable = stm32f4_spi_disable, + .config = stm32f4_spi_config, + .set_bpw = stm32f4_spi_set_bpw, + .set_mode = stm32f4_spi_set_mode, + .transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start, + .dma_tx_cb = stm32f4_spi_dma_tx_cb, + .dma_rx_cb = stm32f4_spi_dma_rx_cb, + .transfer_one_irq = stm32f4_spi_transfer_one_irq, + .irq_handler_event = stm32f4_spi_irq_event, + .irq_handler_thread = stm32f4_spi_irq_thread, + .baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN, + .baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX, + .has_fifo = false, +}; + +static const struct stm32_spi_cfg stm32h7_spi_cfg = { + .regs = &stm32h7_spi_regspec, + .get_fifo_size = stm32h7_spi_get_fifo_size, + .get_bpw_mask = stm32h7_spi_get_bpw_mask, + .disable = stm32h7_spi_disable, + .config = stm32h7_spi_config, + .set_bpw = stm32h7_spi_set_bpw, + .set_mode = stm32h7_spi_set_mode, + .set_data_idleness = stm32h7_spi_data_idleness, + .set_number_of_data = stm32h7_spi_number_of_data, + .transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start, + .dma_rx_cb = stm32h7_spi_dma_cb, + .dma_tx_cb = stm32h7_spi_dma_cb, + .transfer_one_irq = stm32h7_spi_transfer_one_irq, + 
.irq_handler_thread = stm32h7_spi_irq_thread, + .baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN, + .baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX, + .has_fifo = true, +}; + static const struct of_device_id stm32_spi_of_match[] = { - { .compatible = "st,stm32h7-spi", }, + { .compatible = "st,stm32h7-spi", .data = (void *)&stm32h7_spi_cfg }, + { .compatible = "st,stm32f4-spi", .data = (void *)&stm32f4_spi_cfg }, {}, }; MODULE_DEVICE_TABLE(of, stm32_spi_of_match); @@ -1090,12 +1824,17 @@ static int stm32_spi_probe(struct platform_device *pdev) spi->master = master; spin_lock_init(&spi->lock); + spi->cfg = (const struct stm32_spi_cfg *) + of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev)->data; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); spi->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(spi->base)) { ret = PTR_ERR(spi->base); goto err_master_put; } + spi->phys_addr = (dma_addr_t)res->start; spi->irq = platform_get_irq(pdev, 0); @@ -1104,16 +1843,17 @@ static int stm32_spi_probe(struct platform_device *pdev) ret = -ENOENT; goto err_master_put; } - ret = devm_request_threaded_irq(&pdev->dev, spi->irq, NULL, - stm32_spi_irq, IRQF_ONESHOT, - pdev->name, master); + ret = devm_request_threaded_irq(&pdev->dev, spi->irq, + spi->cfg->irq_handler_event, + spi->cfg->irq_handler_thread, + IRQF_ONESHOT, pdev->name, master); if (ret) { dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq, ret); goto err_master_put; } - spi->clk = devm_clk_get(&pdev->dev, 0); + spi->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(spi->clk)) { ret = PTR_ERR(spi->clk); dev_err(&pdev->dev, "clk get failed: %d\n", ret); @@ -1139,9 +1879,10 @@ static int stm32_spi_probe(struct platform_device *pdev) reset_control_deassert(spi->rst); } - spi->fifo_size = stm32_spi_get_fifo_size(spi); + if (spi->cfg->has_fifo) + spi->fifo_size = spi->cfg->get_fifo_size(spi); - ret = stm32_spi_config(spi); + ret = spi->cfg->config(spi); if (ret) { dev_err(&pdev->dev, "controller configuration failed: %d\n", ret); @@ -1151,11 +1892,11 @@ static int stm32_spi_probe(struct platform_device *pdev) master->dev.of_node = pdev->dev.of_node; master->auto_runtime_pm = true; master->bus_num = pdev->id; - master->mode_bits = SPI_MODE_3 | SPI_CS_HIGH | SPI_LSB_FIRST | - SPI_3WIRE | SPI_LOOP; - master->bits_per_word_mask = stm32_spi_get_bpw_mask(spi); - master->max_speed_hz = spi->clk_rate / SPI_MBR_DIV_MIN; - master->min_speed_hz = spi->clk_rate / SPI_MBR_DIV_MAX; + master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST | + SPI_3WIRE; + master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi); + master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min; + master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max; master->setup = stm32_spi_setup; master->prepare_message = stm32_spi_prepare_msg; master->transfer_one = stm32_spi_transfer_one; @@ -1233,7 +1974,7 @@ static int stm32_spi_remove(struct platform_device *pdev) struct spi_master *master = platform_get_drvdata(pdev); struct stm32_spi *spi = spi_master_get_devdata(master); - stm32_spi_disable(spi); + spi->cfg->disable(spi); if (master->dma_tx) dma_release_channel(master->dma_tx); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 9a7def7c3237..93986f879b09 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -19,6 +19,7 @@ #include <linux/spi/spi.h> #include <linux/spi/spi-mem.h> #include <linux/of_gpio.h> +#include <linux/gpio/consumer.h> #include <linux/pm_runtime.h> #include <linux/pm_domain.h> #include 
<linux/property.h> @@ -578,7 +579,10 @@ int spi_add_device(struct spi_device *spi) goto done; } - if (ctlr->cs_gpios) + /* Descriptors take precedence */ + if (ctlr->cs_gpiods) + spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select]; + else if (ctlr->cs_gpios) spi->cs_gpio = ctlr->cs_gpios[spi->chip_select]; /* Drivers may modify this initial i/o setup, but will @@ -772,10 +776,21 @@ static void spi_set_cs(struct spi_device *spi, bool enable) if (spi->mode & SPI_CS_HIGH) enable = !enable; - if (gpio_is_valid(spi->cs_gpio)) { - /* Honour the SPI_NO_CS flag */ - if (!(spi->mode & SPI_NO_CS)) - gpio_set_value(spi->cs_gpio, !enable); + if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) { + /* + * Honour the SPI_NO_CS flag and invert the enable line, as + * active low is default for SPI. Execution paths that handle + * polarity inversion in gpiolib (such as device tree) will + * enforce active high using the SPI_CS_HIGH resulting in a + * double inversion through the code above. + */ + if (!(spi->mode & SPI_NO_CS)) { + if (spi->cs_gpiod) + gpiod_set_value_cansleep(spi->cs_gpiod, + !enable); + else + gpio_set_value_cansleep(spi->cs_gpio, !enable); + } /* Some SPI masters need both GPIO CS & slave_select */ if ((spi->controller->flags & SPI_MASTER_GPIO_SS) && spi->controller->set_cs) @@ -1615,13 +1630,21 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, spi->mode |= SPI_CPHA; if (of_property_read_bool(nc, "spi-cpol")) spi->mode |= SPI_CPOL; - if (of_property_read_bool(nc, "spi-cs-high")) - spi->mode |= SPI_CS_HIGH; if (of_property_read_bool(nc, "spi-3wire")) spi->mode |= SPI_3WIRE; if (of_property_read_bool(nc, "spi-lsb-first")) spi->mode |= SPI_LSB_FIRST; + /* + * For descriptors associated with the device, polarity inversion is + * handled in the gpiolib, so all chip selects are "active high" in + * the logical sense, the gpiolib will invert the line if need be. + */ + if (ctlr->use_gpio_descriptors) + spi->mode |= SPI_CS_HIGH; + else if (of_property_read_bool(nc, "spi-cs-high")) + spi->mode |= SPI_CS_HIGH; + /* Device DUAL/QUAD mode */ if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { switch (value) { @@ -2137,6 +2160,60 @@ static int of_spi_register_master(struct spi_controller *ctlr) } #endif +/** + * spi_get_gpio_descs() - grab chip select GPIOs for the master + * @ctlr: The SPI master to grab GPIO descriptors for + */ +static int spi_get_gpio_descs(struct spi_controller *ctlr) +{ + int nb, i; + struct gpio_desc **cs; + struct device *dev = &ctlr->dev; + + nb = gpiod_count(dev, "cs"); + ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); + + /* No GPIOs at all is fine, else return the error */ + if (nb == 0 || nb == -ENOENT) + return 0; + else if (nb < 0) + return nb; + + cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), + GFP_KERNEL); + if (!cs) + return -ENOMEM; + ctlr->cs_gpiods = cs; + + for (i = 0; i < nb; i++) { + /* + * Most chipselects are active low, the inverted + * semantics are handled by special quirks in gpiolib, + * so initializing them GPIOD_OUT_LOW here means + * "unasserted", in most cases this will drive the physical + * line high. + */ + cs[i] = devm_gpiod_get_index_optional(dev, "cs", i, + GPIOD_OUT_LOW); + + if (cs[i]) { + /* + * If we find a CS GPIO, name it after the device and + * chip select line. 
+ */ + char *gpioname; + + gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d", + dev_name(dev), i); + if (!gpioname) + return -ENOMEM; + gpiod_set_consumer_name(cs[i], gpioname); + } + } + + return 0; +} + static int spi_controller_check_ops(struct spi_controller *ctlr) { /* @@ -2199,9 +2276,21 @@ int spi_register_controller(struct spi_controller *ctlr) return status; if (!spi_controller_is_slave(ctlr)) { - status = of_spi_register_master(ctlr); - if (status) - return status; + if (ctlr->use_gpio_descriptors) { + status = spi_get_gpio_descs(ctlr); + if (status) + return status; + /* + * A controller using GPIO descriptors always + * supports SPI_CS_HIGH if need be. + */ + ctlr->mode_bits |= SPI_CS_HIGH; + } else { + /* Legacy code path for GPIOs from DT */ + status = of_spi_register_master(ctlr); + if (status) + return status; + } } /* even if it's just one always-selected device, there must @@ -2915,6 +3004,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) * cs_change is set for each transfer. */ if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || + spi->cs_gpiod || gpio_is_valid(spi->cs_gpio))) { size_t maxsize; int ret; @@ -2961,6 +3051,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) * it is not set for this transfer. * Set transfer tx_nbits and rx_nbits as single transfer default * (SPI_NBITS_SINGLE) if it is not set for this transfer. + * Ensure transfer word_delay is at least as long as that required by + * device itself. */ message->frame_length = 0; list_for_each_entry(xfer, &message->transfers, transfer_list) { @@ -3031,6 +3123,9 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) !(spi->mode & SPI_RX_QUAD)) return -EINVAL; } + + if (xfer->word_delay_usecs < spi->word_delay_usecs) + xfer->word_delay_usecs = spi->word_delay_usecs; } message->status = -EINPROGRESS; |