From 7ef119d3085222873f7b05e569e319664903052c Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Thu, 23 May 2013 21:27:57 +0200 Subject: ssi code --- arch/arm/mach-omap2/Makefile | 4 + arch/arm/mach-omap2/board-rx51-peripherals.c | 9 + arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 104 ++ arch/arm/mach-omap2/ssi.c | 78 ++ drivers/hsi/Kconfig | 1 + drivers/hsi/Makefile | 1 + drivers/hsi/clients/Kconfig | 21 + drivers/hsi/clients/Makefile | 4 +- drivers/hsi/clients/cmt_speech.c | 1440 ++++++++++++++++++++ drivers/hsi/clients/ssi_protocol.c | 1177 ++++++++++++++++ drivers/hsi/controllers/Kconfig | 23 + drivers/hsi/controllers/Makefile | 5 + drivers/hsi/controllers/omap_ssi.c | 1885 ++++++++++++++++++++++++++ include/linux/hsi/ssip_slave.h | 38 + include/linux/platform_data/hsi-omap-ssi.h | 202 +++ 15 files changed, 4991 insertions(+), 1 deletion(-) create mode 100644 arch/arm/mach-omap2/ssi.c create mode 100644 drivers/hsi/clients/cmt_speech.c create mode 100644 drivers/hsi/clients/ssi_protocol.c create mode 100644 drivers/hsi/controllers/Kconfig create mode 100644 drivers/hsi/controllers/Makefile create mode 100644 drivers/hsi/controllers/omap_ssi.c create mode 100644 include/linux/hsi/ssip_slave.h create mode 100644 include/linux/platform_data/hsi-omap-ssi.h diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index d4f671547c37..ace860ded36f 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -222,6 +222,10 @@ ifneq ($(CONFIG_DRM_OMAP),) obj-y += drm.o endif +# Synchronous Serial Interface (SSI) +omap-ssi-$(CONFIG_OMAP_SSI) := ssi.o +obj-y += $(omap-ssi-m) $(omap-ssi-y) + # Specific board support obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o obj-$(CONFIG_MACH_OMAP_H4) += board-h4.o diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index 9c2dd102fbbb..e870a4ea5233 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -73,6 +74,8 @@ #define LIS302_IRQ1_GPIO 181 #define LIS302_IRQ2_GPIO 180 /* Not yet in use */ +#define RX51_CAWAKE_GPIO 151 + /* List all SPI devices here. Note that the list/probe order seems to matter! 
*/ enum { RX51_SPI_WL1251, @@ -265,6 +268,11 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = { }, }; +static struct omap_ssi_board_config ssi_board_config = { + .num_ports = 1, + .cawake_gpio = { RX51_CAWAKE_GPIO }, +}; + static struct platform_device rx51_battery_device = { .name = "rx51-battery", .id = -1, @@ -1295,6 +1303,7 @@ void __init rx51_peripherals_init(void) if (partition) omap_hsmmc_init(mmc); + omap_ssi_config(&ssi_board_config); rx51_charger_init(); rx51_init_twl4030_hwmon(); } diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 0c3a427da544..d6d365a6e3da 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -3693,6 +3693,109 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__aes = { .user = OCP_USER_MPU | OCP_USER_SDMA, }; +/* + * 'ssi' class + * synchronous serial interface (multichannel and full-duplex serial if) + */ + +static struct omap_hwmod_class_sysconfig omap34xx_ssi_sysc = { + .rev_offs = 0x0000, + .sysc_offs = 0x0010, + .syss_offs = 0x0014, + .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_EMUFREE | + SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE | + SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | + SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | + MSTANDBY_SMART | MSTANDBY_SMART_WKUP), + .sysc_fields = &omap_hwmod_sysc_type1, +}; + +static struct omap_hwmod_class omap34xx_ssi_hwmod_class = { + .name = "ssi", + .sysc = &omap34xx_ssi_sysc, +}; + +static struct omap_hwmod_irq_info omap34xx_ssi_irqs[] = { + { .name = "ssi_p1_mpu_irq0", .irq = 67 }, + { .name = "ssi_p1_mpu_irq1", .irq = 68 }, + { .name = "ssi_p2_mpu_irq0", .irq = 69 }, + { .name = "ssi_p2_mpu_irq1", .irq = 70 }, + { .name = "ssi_gdd_mpu", .irq = 71 }, + { .irq = -1 }, +}; + +static struct omap_hwmod_addr_space omap34xx_ssi_addrs[] = { + { + .name = "sys", + .pa_start = 0x48058000, + .pa_end = 0x48058fff, + .flags = ADDR_TYPE_RT, + }, + { + /* generic distributed DMA */ + .name = "gdd", + .pa_start = 0x48059000, + .pa_end = 0x48059fff, + .flags = ADDR_TYPE_RT, + }, + { + /* port 1: synchronous serial transmitter */ + .name = "p1_sst", + .pa_start = 0x4805a000, + .pa_end = 0x4805a7ff, + .flags = ADDR_TYPE_RT, + }, + { + /* port 1: synchronous serial receiver */ + .name = "p1_ssr", + .pa_start = 0x4805a800, + .pa_end = 0x4805afff, + .flags = ADDR_TYPE_RT, + }, + { + /* port 2: synchronous serial transmitter */ + .name = "p2_sst", + .pa_start = 0x4805b000, + .pa_end = 0x4805b7ff, + .flags = ADDR_TYPE_RT, + }, + { + /* port 2: synchronous serial receiver */ + .name = "p2_ssr", + .pa_start = 0x4805b800, + .pa_end = 0x4805bfff, + .flags = ADDR_TYPE_RT, + }, + {} +}; + +static struct omap_hwmod omap34xx_ssi_hwmod = { + .name = "ssi", + .class = &omap34xx_ssi_hwmod_class, + .clkdm_name = "l3_init_clkdm", + .mpu_irqs = omap34xx_ssi_irqs, + .main_clk = "ssi_ssr_fck", + .prcm = { + .omap2 = { + .prcm_reg_id = 1, + .module_bit = OMAP3430_EN_SSI_SHIFT, + .module_offs = WKUP_MOD, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT, + }, + }, +}; + +/* SSI -> l3 */ +static struct omap_hwmod_ocp_if omap34xx_l3__ssi = { + .master = &omap34xx_ssi_hwmod, + .slave = &omap3xxx_l3_main_hwmod, + .clk = "ssi_ick", + .addr = omap34xx_ssi_addrs, + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + static struct omap_hwmod_ocp_if *omap3xxx_hwmod_ocp_ifs[] __initdata = { &omap3xxx_l3_main__l4_core, &omap3xxx_l3_main__l4_per, @@ 
-3818,6 +3921,7 @@ static struct omap_hwmod_ocp_if *omap34xx_hwmod_ocp_ifs[] __initdata = { #ifdef CONFIG_OMAP_IOMMU_IVA2 &omap3xxx_l3_main__mmu_iva, #endif + &omap34xx_l3__ssi, NULL }; diff --git a/arch/arm/mach-omap2/ssi.c b/arch/arm/mach-omap2/ssi.c new file mode 100644 index 000000000000..adf342dd20c7 --- /dev/null +++ b/arch/arm/mach-omap2/ssi.c @@ -0,0 +1,78 @@ +/* + * linux/arch/arm/mach-omap2/ssi.c + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include "omap-pm.h" + +static struct omap_ssi_platform_data ssi_pdata = { + .num_ports = SSI_NUM_PORTS, + .cawake_gpio = {0}, + .get_dev_context_loss_count = omap_pm_get_dev_context_loss_count, +}; + +static struct platform_device ssi_pdev = { + .name = "omap_ssi", + .id = 0, + .dev = { + .platform_data = &ssi_pdata, + }, +}; + +int __init omap_ssi_config(struct omap_ssi_board_config *ssi_config) +{ + unsigned int port, offset, cawake_gpio; + int err; + + ssi_pdata.num_ports = ssi_config->num_ports; + + for (port = 0, offset = 7; port < ssi_config->num_ports; port++, offset += 5) { + cawake_gpio = ssi_config->cawake_gpio[port]; + if (!cawake_gpio) + continue; /* Nothing to do */ + err = gpio_request(cawake_gpio, "cawake"); + if (err < 0) + goto rback; + gpio_direction_input(cawake_gpio); + + ssi_pdata.cawake_gpio[port] = ssi_config->cawake_gpio[port]; + } + + return 0; + +rback: + dev_err(&ssi_pdev.dev, "Request cawake (gpio%d) failed\n", cawake_gpio); + while (port > 0) + gpio_free(ssi_config->cawake_gpio[--port]); + + return err; +} + +static int __init ssi_init(void) +{ + return platform_device_register(&ssi_pdev); +} +subsys_initcall(ssi_init); diff --git a/drivers/hsi/Kconfig b/drivers/hsi/Kconfig index d94e38dd80c7..2c76de438eb1 100644 --- a/drivers/hsi/Kconfig +++ b/drivers/hsi/Kconfig @@ -14,6 +14,7 @@ config HSI_BOARDINFO bool default y +source "drivers/hsi/controllers/Kconfig" source "drivers/hsi/clients/Kconfig" endif # HSI diff --git a/drivers/hsi/Makefile b/drivers/hsi/Makefile index 9d5d33f90de2..360371e134f1 100644 --- a/drivers/hsi/Makefile +++ b/drivers/hsi/Makefile @@ -3,4 +3,5 @@ # obj-$(CONFIG_HSI_BOARDINFO) += hsi_boardinfo.o obj-$(CONFIG_HSI) += hsi.o +obj-y += controllers/ obj-y += clients/ diff --git a/drivers/hsi/clients/Kconfig b/drivers/hsi/clients/Kconfig index 3bacd275f479..06efcf65ef31 100644 --- a/drivers/hsi/clients/Kconfig +++ b/drivers/hsi/clients/Kconfig @@ -4,6 +4,17 @@ comment "HSI clients" +config SSI_PROTOCOL + tristate "SSI protocol" + default n + depends on HSI && OMAP_SSI && CMT && PHONET + ---help--- + If you say Y here, you will enable the SSI protocol aka McSAAB. + The SSI protocol is a link layer protocol for SSI, which is used + to transport Phonet messages between APE and CMT. + + If unsure, say N. 
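The SSI_PROTOCOL entry above is the McSAAB link layer that the client drivers added below build on. For orientation only (not part of the patch), here is a minimal sketch of how a slave client is expected to use the ssip_slave interface exported by ssi_protocol.c via include/linux/hsi/ssip_slave.h; the function and variable names of the example client are hypothetical, error handling is trimmed:

	#include <linux/err.h>
	#include <linux/hsi/hsi.h>
	#include <linux/hsi/ssip_slave.h>

	/* Hypothetical slave client; only the ssip_slave_* calls come from this patch. */
	static int example_slave_setup(struct hsi_client *cl)
	{
		struct hsi_client *master = ssip_slave_get_master(cl);

		if (IS_ERR(master))
			return PTR_ERR(master);	/* no McSAAB instance on this port */
		if (!ssip_slave_running(master))
			return -ENODEV;		/* Phonet netdev not up yet */

		ssip_slave_start_tx(master);	/* take a wake line reference */
		/* ...exchange data on the client's own HSI channel... */
		ssip_slave_stop_tx(master);	/* drop the wake line reference */

		return 0;
	}

This mirrors how cs_hsi_start() and cs_hsi_set_wakeline() in cmt_speech.c below drive the wake line through the McSAAB master instance.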
+ config HSI_CHAR tristate "HSI/SSI character driver" depends on HSI @@ -11,3 +22,13 @@ config HSI_CHAR If you say Y here, you will enable the HSI/SSI character driver. This driver provides a simple character device interface for serial communication with the cellular modem over HSI/SSI bus. + +config HSI_CMT_SPEECH + tristate "HSI/SSI CMT speech driver" + depends on HSI && SSI_PROTOCOL + ---help--- + If you say Y here, you will enable the HSI CMT speech driver. + This driver implements a character device interface for transferring + speech data frames over HSI. This driver is used in e.g. Nokia N900. + + If unsure, say Y, or else you will not be able to make voice calls. diff --git a/drivers/hsi/clients/Makefile b/drivers/hsi/clients/Makefile index 327c0e27c8b0..9938dc321024 100644 --- a/drivers/hsi/clients/Makefile +++ b/drivers/hsi/clients/Makefile @@ -2,4 +2,6 @@ # Makefile for HSI clients # -obj-$(CONFIG_HSI_CHAR) += hsi_char.o +obj-$(CONFIG_SSI_PROTOCOL) += ssi_protocol.o +obj-$(CONFIG_HSI_CHAR) += hsi_char.o +obj-$(CONFIG_HSI_CMT_SPEECH) += cmt_speech.o diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c new file mode 100644 index 000000000000..d6e7c70f4158 --- /dev/null +++ b/drivers/hsi/clients/cmt_speech.c @@ -0,0 +1,1440 @@ +/* + * cmt_speech.c - HSI CMT speech driver + * + * Copyright (C) 2008,2009,2010 Nokia Corporation. All rights reserved. + * + * Contact: Kai Vehmanen + * Original author: Peter Ujfalusi + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CS_MMAP_SIZE PAGE_SIZE +#define DRIVER_NAME "cmt_speech" + +struct char_queue { + struct list_head list; + u32 msg; +}; + +struct cs_char { + unsigned int opened; + struct hsi_client *cl; + struct cs_hsi_iface *hi; + struct list_head chardev_queue; + struct list_head dataind_queue; + int dataind_pending; + /* mmap things */ + unsigned long mmap_base; + unsigned long mmap_size; + spinlock_t lock; + struct fasync_struct *async_queue; + wait_queue_head_t wait; +}; + +#define SSI_CHANNEL_STATE_READING 1 +#define SSI_CHANNEL_STATE_WRITING (1 << 1) +#define SSI_CHANNEL_STATE_POLL (1 << 2) +#define SSI_CHANNEL_STATE_ERROR (1 << 3) + +#define CONTROL_HSI_CH 1 +#define DATA_HSI_CH 2 + +#define TARGET_MASK 0xf000000 +#define TARGET_REMOTE (1 << CS_DOMAIN_SHIFT) +#define TARGET_LOCAL 0 + +/* Number of pre-allocated commands buffers */ +#define CS_MAX_CMDS 4 + +/* + * During data transfers, transactions must be handled + * within 20ms (fixed value in cmtspeech HSI protocol) + */ +#define CS_QOS_LATENCY_FOR_DATA_USEC 20000 + +/* Timeout to wait for pending HSI transfers to complete */ +#define CS_HSI_TRANSFER_TIMEOUT_MS 500 + + +#define RX_PTR_BOUNDARY_SHIFT 8 +#define RX_PTR_MAX_SHIFT (RX_PTR_BOUNDARY_SHIFT + \ + CS_MAX_BUFFERS_SHIFT) +struct cs_hsi_iface { + struct hsi_client *cl; + struct hsi_client *master; + + unsigned int iface_state; + unsigned int wakeline_state; + unsigned int control_state; + unsigned int data_state; + + /* state exposed to application */ + struct cs_mmap_config_block *mmap_cfg; + + unsigned long mmap_base; + unsigned long mmap_size; + + unsigned int rx_slot; + unsigned int tx_slot; + + /* note: for security reasons, we do not trust the contents of + * mmap_cfg, but instead duplicate the variables here */ + unsigned int buf_size; + unsigned int rx_bufs; + unsigned int tx_bufs; + unsigned int rx_ptr_boundary; + unsigned int rx_offsets[CS_MAX_BUFFERS]; + unsigned int tx_offsets[CS_MAX_BUFFERS]; + /* size of aligned memory blocks */ + unsigned int slot_size; + unsigned int flags; + + struct list_head cmdqueue; + + struct hsi_msg *data_rx_msg; + struct hsi_msg *data_tx_msg; + wait_queue_head_t datawait; + + struct pm_qos_request pm_qos_req; + + spinlock_t lock; +}; + +static struct cs_char cs_char_data; + +static void cs_hsi_read_on_control(struct cs_hsi_iface *hi); +static void cs_hsi_read_on_data(struct cs_hsi_iface *hi); + +static inline void rx_ptr_shift_too_big(void) +{ + BUILD_BUG_ON((1LLU << RX_PTR_MAX_SHIFT) > UINT_MAX); +} + +static void cs_notify(u32 message, struct list_head *head) +{ + struct char_queue *entry; + + spin_lock(&cs_char_data.lock); + + if (!cs_char_data.opened) { + spin_unlock(&cs_char_data.lock); + goto out; + } + + entry = kmalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) { + dev_err(&cs_char_data.cl->device, + "Can't allocate new entry for the queue.\n"); + spin_unlock(&cs_char_data.lock); + goto out; + } + + entry->msg = message; + list_add_tail(&entry->list, head); + + spin_unlock(&cs_char_data.lock); + + wake_up_interruptible(&cs_char_data.wait); + kill_fasync(&cs_char_data.async_queue, SIGIO, POLL_IN); + +out: + return; +} + +static u32 
cs_pop_entry(struct list_head *head) +{ + struct char_queue *entry; + u32 data; + + entry = list_entry(head->next, struct char_queue, list); + data = entry->msg; + list_del(&entry->list); + kfree(entry); + + return data; +} + +static void cs_notify_control(u32 message) +{ + cs_notify(message, &cs_char_data.chardev_queue); +} + +static void cs_notify_data(u32 message, int maxlength) +{ + cs_notify(message, &cs_char_data.dataind_queue); + + spin_lock(&cs_char_data.lock); + ++cs_char_data.dataind_pending; + while (cs_char_data.dataind_pending > maxlength && + !list_empty(&cs_char_data.dataind_queue)) { + dev_dbg(&cs_char_data.cl->device, "data notification " + "queue overrun (%u entries)\n", cs_char_data.dataind_pending); + + cs_pop_entry(&cs_char_data.dataind_queue); + --cs_char_data.dataind_pending; + } + spin_unlock(&cs_char_data.lock); +} + +static inline void cs_set_cmd(struct hsi_msg *msg, u32 cmd) +{ + u32 *data; + + data = sg_virt(msg->sgt.sgl); + *data = cmd; +} + +static inline u32 cs_get_cmd(struct hsi_msg *msg) +{ + u32 *data; + + data = sg_virt(msg->sgt.sgl); + + return *data; +} + +static void cs_release_cmd(struct hsi_msg *msg) +{ + struct cs_hsi_iface *hi = msg->context; + + list_add_tail(&msg->link, &hi->cmdqueue); +} + +static void cs_cmd_destructor(struct hsi_msg *msg) +{ + struct cs_hsi_iface *hi = msg->context; + + spin_lock(&hi->lock); + + dev_dbg(&cs_char_data.cl->device, "control cmd destructor\n"); + + if (hi->iface_state != CS_STATE_CLOSED) + dev_err(&hi->cl->device, "Cmd flushed while driver active\n"); + + if (msg->ttype == HSI_MSG_READ) + hi->control_state &= + ~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING); + else if (msg->ttype == HSI_MSG_WRITE && + hi->control_state & SSI_CHANNEL_STATE_WRITING) + hi->control_state &= ~SSI_CHANNEL_STATE_WRITING; + + cs_release_cmd(msg); + + spin_unlock(&hi->lock); +} + +static struct hsi_msg *cs_claim_cmd(struct cs_hsi_iface* ssi) +{ + struct hsi_msg *msg; + + BUG_ON(list_empty(&ssi->cmdqueue)); + + msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link); + list_del(&msg->link); + msg->destructor = cs_cmd_destructor; + + return msg; +} + +static void cs_free_cmds(struct cs_hsi_iface *ssi) +{ + struct hsi_msg *msg, *tmp; + + list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) { + list_del(&msg->link); + msg->destructor = NULL; + kfree(sg_virt(msg->sgt.sgl)); + hsi_free_msg(msg); + } +} + +static int cs_alloc_cmds(struct cs_hsi_iface *hi) +{ + struct hsi_msg *msg; + u32 *buf; + unsigned int i; + + INIT_LIST_HEAD(&hi->cmdqueue); + + for (i = 0; i < CS_MAX_CMDS; i++) { + msg = hsi_alloc_msg(1, GFP_ATOMIC); + if (!msg) + goto out; + buf = kmalloc(sizeof(*buf), GFP_ATOMIC); + if (!buf) { + hsi_free_msg(msg); + goto out; + } + sg_init_one(msg->sgt.sgl, buf, sizeof(*buf)); + msg->channel = CONTROL_HSI_CH; + msg->context = hi; + list_add_tail(&msg->link, &hi->cmdqueue); + } + + return 0; + +out: + cs_free_cmds(hi); + return -ENOMEM; +} + +static void cs_hsi_data_destructor(struct hsi_msg *msg) +{ + struct cs_hsi_iface *hi = msg->context; + const char *dir = (msg->ttype == HSI_MSG_READ) ? 
"TX" : "RX"; + + dev_dbg(&cs_char_data.cl->device, "Freeing data %s message\n", dir); + + spin_lock(&hi->lock); + if (hi->iface_state != CS_STATE_CLOSED) + dev_err(&cs_char_data.cl->device, + "Data %s flush while device active\n", dir); + if (msg->ttype == HSI_MSG_READ) + hi->data_state &= + ~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING); + else + hi->data_state &= ~SSI_CHANNEL_STATE_WRITING; + + msg->status = HSI_STATUS_COMPLETED; + if (unlikely(waitqueue_active(&hi->datawait))) + wake_up_interruptible(&hi->datawait); + + spin_unlock(&hi->lock); +} + +static int cs_hsi_alloc_data(struct cs_hsi_iface *hi) +{ + struct hsi_msg *txmsg, *rxmsg; + int res = 0; + + rxmsg = hsi_alloc_msg(1, GFP_KERNEL); + if (!rxmsg) { + res = -ENOMEM; + goto out1; + } + rxmsg->channel = DATA_HSI_CH; + rxmsg->destructor = cs_hsi_data_destructor; + rxmsg->context = hi; + + txmsg = hsi_alloc_msg(1, GFP_KERNEL); + if (!txmsg) { + res = -ENOMEM; + goto out2; + } + txmsg->channel = DATA_HSI_CH; + txmsg->destructor = cs_hsi_data_destructor; + txmsg->context = hi; + + hi->data_rx_msg = rxmsg; + hi->data_tx_msg = txmsg; + + return 0; + +out2: + hsi_free_msg(rxmsg); +out1: + return res; +} + +static void cs_hsi_free_data_msg(struct hsi_msg *msg) +{ + WARN_ON(msg->status != HSI_STATUS_COMPLETED && + msg->status != HSI_STATUS_ERROR); + hsi_free_msg(msg); +} + +static void cs_hsi_free_data(struct cs_hsi_iface *hi) +{ + cs_hsi_free_data_msg(hi->data_rx_msg); + cs_hsi_free_data_msg(hi->data_tx_msg); +} + +static inline void __cs_hsi_error_pre(struct cs_hsi_iface *hi, + struct hsi_msg *msg, const char *info, + unsigned int *state) +{ + spin_lock(&hi->lock); + dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n", + info, msg->status, *state); +} + +static inline void __cs_hsi_error_post(struct cs_hsi_iface *hi) +{ + spin_unlock(&hi->lock); +} + +static inline void __cs_hsi_error_read_bits(unsigned int *state) +{ + *state |= SSI_CHANNEL_STATE_ERROR; + *state &= ~(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL); +} + +static inline void __cs_hsi_error_write_bits(unsigned int *state) +{ + *state |= SSI_CHANNEL_STATE_ERROR; + *state &= ~SSI_CHANNEL_STATE_WRITING; +} + +static void cs_hsi_control_read_error(struct cs_hsi_iface *hi, + struct hsi_msg *msg) +{ + __cs_hsi_error_pre(hi, msg, "control read", &hi->control_state); + cs_release_cmd(msg); + __cs_hsi_error_read_bits(&hi->control_state); + __cs_hsi_error_post(hi); +} + +static void cs_hsi_control_write_error(struct cs_hsi_iface *hi, + struct hsi_msg *msg) +{ + __cs_hsi_error_pre(hi, msg, "control write", &hi->control_state); + cs_release_cmd(msg); + __cs_hsi_error_write_bits(&hi->control_state); + __cs_hsi_error_post(hi); + +} + +static void cs_hsi_data_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg) +{ + __cs_hsi_error_pre(hi, msg, "data read", &hi->data_state); + __cs_hsi_error_read_bits(&hi->data_state); + __cs_hsi_error_post(hi); +} + +static void cs_hsi_data_write_error(struct cs_hsi_iface *hi, + struct hsi_msg *msg) +{ + __cs_hsi_error_pre(hi, msg, "data write", &hi->data_state); + __cs_hsi_error_write_bits(&hi->data_state); + __cs_hsi_error_post(hi); +} + +static void cs_hsi_read_on_control_complete(struct hsi_msg *msg) +{ + u32 cmd = cs_get_cmd(msg); + struct cs_hsi_iface *hi = msg->context; + + spin_lock(&hi->lock); + hi->control_state &= ~SSI_CHANNEL_STATE_READING; + if (msg->status == HSI_STATUS_ERROR) { + dev_err(&hi->cl->device, "Control RX error detected\n"); + cs_hsi_control_read_error(hi, msg); + spin_unlock(&hi->lock); + goto out; + } 
+ dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd); + cs_release_cmd(msg); + if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) { + struct timespec *tstamp = + &hi->mmap_cfg->tstamp_rx_ctrl; + do_posix_clock_monotonic_gettime(tstamp); + } + spin_unlock(&hi->lock); + + cs_notify_control(cmd); + +out: + cs_hsi_read_on_control(hi); +} + +static void cs_hsi_peek_on_control_complete(struct hsi_msg *msg) +{ + struct cs_hsi_iface *hi = msg->context; + int ret; + + if (msg->status == HSI_STATUS_ERROR) { + dev_err(&hi->cl->device, "Control peek RX error detected\n"); + cs_hsi_control_read_error(hi, msg); + return; + } + + WARN_ON(!(hi->control_state & SSI_CHANNEL_STATE_READING)); + + dev_dbg(&hi->cl->device, "Peek on control complete, reading\n"); + msg->sgt.nents = 1; + msg->complete = cs_hsi_read_on_control_complete; + ret = hsi_async_read(hi->cl, msg); + if (ret) + cs_hsi_control_read_error(hi, msg); +} + +static void cs_hsi_read_on_control(struct cs_hsi_iface *hi) +{ + struct hsi_msg *msg; + int ret; + + spin_lock(&hi->lock); + if (hi->control_state & SSI_CHANNEL_STATE_READING) { + dev_err(&hi->cl->device, "Control read already pending (%d)\n", + hi->control_state); + spin_unlock(&hi->lock); + return; + } + if (hi->control_state & SSI_CHANNEL_STATE_ERROR) { + dev_err(&hi->cl->device, "Control read error (%d)\n", + hi->control_state); + spin_unlock(&hi->lock); + return; + } + hi->control_state |= SSI_CHANNEL_STATE_READING; + dev_dbg(&hi->cl->device, "Issuing RX on control\n"); + msg = cs_claim_cmd(hi); + spin_unlock(&hi->lock); + + msg->sgt.nents = 0; + msg->complete = cs_hsi_peek_on_control_complete; + ret = hsi_async_read(hi->cl, msg); + if (ret) + cs_hsi_control_read_error(hi, msg); +} + +static void cs_hsi_write_on_control_complete(struct hsi_msg *msg) +{ + struct cs_hsi_iface *hi = msg->context; + if (msg->status == HSI_STATUS_COMPLETED) { + spin_lock(&hi->lock); + hi->control_state &= ~SSI_CHANNEL_STATE_WRITING; + cs_release_cmd(msg); + spin_unlock(&hi->lock); + } else if (msg->status == HSI_STATUS_ERROR) { + cs_hsi_control_write_error(hi, msg); + } else { + dev_err(&hi->cl->device, + "unexpected status in control write callback %d\n", + msg->status); + } +} + +static int cs_hsi_write_on_control(struct cs_hsi_iface *hi, u32 message) +{ + struct hsi_msg *msg; + int ret; + + spin_lock(&hi->lock); + if (hi->control_state & SSI_CHANNEL_STATE_ERROR) { + spin_unlock(&hi->lock); + return -EIO; + } + if (hi->control_state & SSI_CHANNEL_STATE_WRITING) { + dev_err(&hi->cl->device, + "Write still pending on control channel.\n"); + spin_unlock(&hi->lock); + return -EBUSY; + } + hi->control_state |= SSI_CHANNEL_STATE_WRITING; + msg = cs_claim_cmd(hi); + spin_unlock(&hi->lock); + + cs_set_cmd(msg, message); + msg->sgt.nents = 1; + msg->complete = cs_hsi_write_on_control_complete; + dev_dbg(&hi->cl->device, + "Sending control message %08X\n", message); + ret = hsi_async_write(hi->cl, msg); + if (ret) { + dev_err(&hi->cl->device, + "async_write failed with %d\n", ret); + cs_hsi_control_write_error(hi, msg); + } + + /* + * Make sure control read is always pending when issuing + * new control writes. This is needed as the controller + * may flush our messages if e.g. the peer device reboots + * unexpectedly (and we cannot directly resubmit a new read from + * the message destructor; see cs_cmd_destructor()). 
+ */ + if (!(hi->control_state & SSI_CHANNEL_STATE_READING)) { + dev_err(&hi->cl->device, "Restarting control reads\n"); + cs_hsi_read_on_control(hi); + } + + return 0; +} + +static void cs_hsi_read_on_data_complete(struct hsi_msg *msg) +{ + struct cs_hsi_iface *hi = msg->context; + u32 payload; + + if (unlikely(msg->status == HSI_STATUS_ERROR)) { + cs_hsi_data_read_error(hi, msg); + return; + } + + spin_lock(&hi->lock); + WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_READING)); + hi->data_state &= ~SSI_CHANNEL_STATE_READING; + payload = CS_RX_DATA_RECEIVED; + payload |= hi->rx_slot; + hi->rx_slot++; + hi->rx_slot %= hi->rx_ptr_boundary; + /* expose current rx ptr in mmap area */ + hi->mmap_cfg->rx_ptr = hi->rx_slot; + if (unlikely(waitqueue_active(&hi->datawait))) + wake_up_interruptible(&hi->datawait); + spin_unlock(&hi->lock); + + cs_notify_data(payload, hi->rx_bufs); + cs_hsi_read_on_data(hi); +} + +static void cs_hsi_peek_on_data_complete(struct hsi_msg *msg) +{ + struct cs_hsi_iface *hi = msg->context; + u32 *address; + int ret; + + if (unlikely(msg->status == HSI_STATUS_ERROR)) { + cs_hsi_data_read_error(hi, msg); + return; + } + if (unlikely(hi->iface_state != CS_STATE_CONFIGURED)) { + dev_err(&hi->cl->device, "Data received in invalid state\n"); + cs_hsi_data_read_error(hi, msg); + return; + } + + spin_lock(&hi->lock); + WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_POLL)); + hi->data_state &= ~SSI_CHANNEL_STATE_POLL; + hi->data_state |= SSI_CHANNEL_STATE_READING; + spin_unlock(&hi->lock); + + address = (u32 *)(hi->mmap_base + + hi->rx_offsets[hi->rx_slot % hi->rx_bufs]); + sg_init_one(msg->sgt.sgl, address, hi->buf_size); + msg->sgt.nents = 1; + msg->complete = cs_hsi_read_on_data_complete; + ret = hsi_async_read(hi->cl, msg); + if (ret) + cs_hsi_data_read_error(hi, msg); +} + +/** + * Read/write transaction is ongoing. Returns false if in + * SSI_CHANNEL_STATE_POLL state. 
+ */ +static inline int cs_state_xfer_active(unsigned int state) +{ + return (state & SSI_CHANNEL_STATE_WRITING) || + (state & SSI_CHANNEL_STATE_READING); +} + +/** + * No pending read/writes + */ +static inline int cs_state_idle(unsigned int state) +{ + return !(state & ~SSI_CHANNEL_STATE_ERROR); +} + +static void cs_hsi_read_on_data(struct cs_hsi_iface *hi) +{ + struct hsi_msg *rxmsg; + int ret; + + spin_lock(&hi->lock); + if (hi->data_state & + (SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL)) { + dev_dbg(&hi->cl->device, "Data read already pending (%u)\n", + hi->data_state); + spin_unlock(&hi->lock); + return; + } + hi->data_state |= SSI_CHANNEL_STATE_POLL; + spin_unlock(&hi->lock); + + rxmsg = hi->data_rx_msg; + sg_init_one(rxmsg->sgt.sgl, (void *)hi->mmap_base, 0); + rxmsg->sgt.nents = 0; + rxmsg->complete = cs_hsi_peek_on_data_complete; + + ret = hsi_async_read(hi->cl, rxmsg); + if (ret) + cs_hsi_data_read_error(hi, rxmsg); +} + +static void cs_hsi_write_on_data_complete(struct hsi_msg *msg) +{ + struct cs_hsi_iface *hi = msg->context; + + if (msg->status == HSI_STATUS_COMPLETED) { + spin_lock(&hi->lock); + hi->data_state &= ~SSI_CHANNEL_STATE_WRITING; + if (unlikely(waitqueue_active(&hi->datawait))) + wake_up_interruptible(&hi->datawait); + spin_unlock(&hi->lock); + } else { + cs_hsi_data_write_error(hi, msg); + } +} + +static int cs_hsi_write_on_data(struct cs_hsi_iface *hi, unsigned int slot) +{ + u32 *address; + struct hsi_msg *txmsg; + int ret; + + spin_lock(&hi->lock); + if (hi->iface_state != CS_STATE_CONFIGURED) { + dev_err(&hi->cl->device, "Not configured, aborting\n"); + ret = -EINVAL; + goto error; + } + if (hi->data_state & SSI_CHANNEL_STATE_ERROR) { + dev_err(&hi->cl->device, "HSI error, aborting\n"); + ret = -EIO; + goto error; + } + if (hi->data_state & SSI_CHANNEL_STATE_WRITING) { + dev_err(&hi->cl->device, "Write pending on data channel.\n"); + ret = -EBUSY; + goto error; + } + hi->data_state |= SSI_CHANNEL_STATE_WRITING; + spin_unlock(&hi->lock); + + hi->tx_slot = slot; + address = (u32 *)(hi->mmap_base + hi->tx_offsets[hi->tx_slot]); + txmsg = hi->data_tx_msg; + sg_init_one(txmsg->sgt.sgl, address, hi->buf_size); + txmsg->complete = cs_hsi_write_on_data_complete; + ret = hsi_async_write(hi->cl, txmsg); + if (ret) + cs_hsi_data_write_error(hi, txmsg); + + return ret; + +error: + spin_unlock(&hi->lock); + if (ret == -EIO) + cs_hsi_data_write_error(hi, hi->data_tx_msg); + + return ret; +} + +static unsigned int cs_hsi_get_state(struct cs_hsi_iface *hi) +{ + return hi->iface_state; +} + +static int cs_hsi_command(struct cs_hsi_iface *hi, u32 cmd) +{ + int ret = 0; + + local_bh_disable(); + switch (cmd & TARGET_MASK) { + case TARGET_REMOTE: + ret = cs_hsi_write_on_control(hi, cmd); + break; + case TARGET_LOCAL: + if ((cmd & CS_CMD_MASK) == CS_TX_DATA_READY) + ret = cs_hsi_write_on_data(hi, cmd & CS_PARAM_MASK); + else + ret = -EINVAL; + break; + default: + ret = -EINVAL; + break; + } + local_bh_enable(); + + return ret; +} + +static void cs_hsi_set_wakeline(struct cs_hsi_iface *hi, + unsigned int new_state) +{ + int change = 0; + + spin_lock_bh(&hi->lock); + if (hi->wakeline_state != new_state) { + hi->wakeline_state = new_state; + change = 1; + dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n", + new_state, hi->cl); + } + spin_unlock_bh(&hi->lock); + + if (change) { + if (new_state) + ssip_slave_start_tx(hi->master); + else + ssip_slave_stop_tx(hi->master); + } + + dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n", + new_state, hi->cl); +} + +static 
void set_buffer_sizes(struct cs_hsi_iface *hi, int rx_bufs, int tx_bufs) +{ + hi->rx_bufs = rx_bufs; + hi->tx_bufs = tx_bufs; + hi->mmap_cfg->rx_bufs = rx_bufs; + hi->mmap_cfg->tx_bufs = tx_bufs; + + if (hi->flags & CS_FEAT_ROLLING_RX_COUNTER) { + /* + * For more robust overrun detection, let the rx + * pointer run in range 0..'boundary-1'. Boundary + * is a multiple of rx_bufs, and limited in max size + * by RX_PTR_MAX_SHIFT to allow for fast ptr-diff + * calculation. + */ + hi->rx_ptr_boundary = (rx_bufs << RX_PTR_BOUNDARY_SHIFT); + hi->mmap_cfg->rx_ptr_boundary = hi->rx_ptr_boundary; + } else { + hi->rx_ptr_boundary = hi->rx_bufs; + } +} + +static int check_buf_params(struct cs_hsi_iface *hi, + const struct cs_buffer_config *buf_cfg) +{ + size_t buf_size_aligned = L1_CACHE_ALIGN(buf_cfg->buf_size) * + (buf_cfg->rx_bufs + buf_cfg->tx_bufs); + size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg)); + int r = 0; + + if (buf_cfg->rx_bufs > CS_MAX_BUFFERS || + buf_cfg->tx_bufs > CS_MAX_BUFFERS) { + r = -EINVAL; + } else if ((buf_size_aligned + ctrl_size_aligned) >= hi->mmap_size) { + dev_err(&hi->cl->device, "No space for the requested buffer " + "configuration\n"); + r = -ENOBUFS; + } + + return r; +} + +/** + * Block until pending data transfers have completed. + */ +static int cs_hsi_data_sync(struct cs_hsi_iface *hi) +{ + int r = 0; + + spin_lock_bh(&hi->lock); + + if (!cs_state_xfer_active(hi->data_state)) { + dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n"); + goto out; + } + + for (;;) { + int s; + DEFINE_WAIT(wait); + if (!cs_state_xfer_active(hi->data_state)) + goto out; + if (signal_pending(current)) { + r = -ERESTARTSYS; + goto out; + } + /** + * prepare_to_wait must be called with hi->lock held + * so that callbacks can check for waitqueue_active() + */ + prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE); + spin_unlock_bh(&hi->lock); + s = schedule_timeout( + msecs_to_jiffies(CS_HSI_TRANSFER_TIMEOUT_MS)); + spin_lock_bh(&hi->lock); + finish_wait(&hi->datawait, &wait); + if (!s) { + dev_dbg(&hi->cl->device, + "hsi_data_sync timeout after %d ms\n", + CS_HSI_TRANSFER_TIMEOUT_MS); + r = -EIO; + goto out; + } + } + +out: + spin_unlock_bh(&hi->lock); + dev_dbg(&hi->cl->device, "hsi_data_sync done with res %d\n", r); + + return r; +} + +static void cs_hsi_data_enable(struct cs_hsi_iface *hi, + struct cs_buffer_config *buf_cfg) +{ + unsigned int data_start, i; + + BUG_ON(hi->buf_size == 0); + + set_buffer_sizes(hi, buf_cfg->rx_bufs, buf_cfg->tx_bufs); + + hi->slot_size = L1_CACHE_ALIGN(hi->buf_size); + dev_dbg(&hi->cl->device, + "setting slot size to %u, buf size %u, align %u\n", + hi->slot_size, hi->buf_size, L1_CACHE_BYTES); + + data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg)); + dev_dbg(&hi->cl->device, + "setting data start at %u, cfg block %u, align %u\n", + data_start, sizeof(*hi->mmap_cfg), L1_CACHE_BYTES); + + for (i = 0; i < hi->mmap_cfg->rx_bufs; i++) { + hi->rx_offsets[i] = data_start + i * hi->slot_size; + hi->mmap_cfg->rx_offsets[i] = hi->rx_offsets[i]; + dev_dbg(&hi->cl->device, "DL buf #%u at %u\n", + i, hi->rx_offsets[i]); + } + for (i = 0; i < hi->mmap_cfg->tx_bufs; i++) { + hi->tx_offsets[i] = data_start + + (i + hi->mmap_cfg->rx_bufs) * hi->slot_size; + hi->mmap_cfg->tx_offsets[i] = hi->tx_offsets[i]; + dev_dbg(&hi->cl->device, "UL buf #%u at %u\n", + i, hi->rx_offsets[i]); + } + + hi->iface_state = CS_STATE_CONFIGURED; +} + +static void cs_hsi_data_disable(struct cs_hsi_iface *hi, int old_state) +{ + if (old_state == CS_STATE_CONFIGURED) { + 
dev_dbg(&hi->cl->device, + "closing data channel with slot size 0\n"); + hi->iface_state = CS_STATE_OPENED; + } +} + +static int cs_hsi_buf_config(struct cs_hsi_iface *hi, + struct cs_buffer_config *buf_cfg) +{ + int r = 0; + unsigned int old_state = hi->iface_state; + + spin_lock_bh(&hi->lock); + /* Prevent new transactions during buffer reconfig */ + if (old_state == CS_STATE_CONFIGURED) + hi->iface_state = CS_STATE_OPENED; + spin_unlock_bh(&hi->lock); + + /* + * make sure that no non-zero data reads are ongoing before + * proceeding to change the buffer layout + */ + r = cs_hsi_data_sync(hi); + if (r < 0) + return r; + + WARN_ON(cs_state_xfer_active(hi->data_state)); + + spin_lock_bh(&hi->lock); + r = check_buf_params(hi, buf_cfg); + if (r < 0) + goto error; + + hi->buf_size = buf_cfg->buf_size; + hi->mmap_cfg->buf_size = hi->buf_size; + hi->flags = buf_cfg->flags; + + hi->rx_slot = 0; + hi->tx_slot = 0; + hi->slot_size = 0; + + if (hi->buf_size) + cs_hsi_data_enable(hi, buf_cfg); + else + cs_hsi_data_disable(hi, old_state); + + spin_unlock_bh(&hi->lock); + + if (old_state != hi->iface_state) { + if (hi->iface_state == CS_STATE_CONFIGURED) { + pm_qos_add_request(&hi->pm_qos_req, + PM_QOS_CPU_DMA_LATENCY, + CS_QOS_LATENCY_FOR_DATA_USEC); + local_bh_disable(); + cs_hsi_read_on_data(hi); + local_bh_enable(); + } else if (old_state == CS_STATE_CONFIGURED) { + pm_qos_remove_request(&hi->pm_qos_req); + } + } + return r; + +error: + spin_unlock_bh(&hi->lock); + return r; +} + +static int cs_hsi_start(struct cs_hsi_iface **hi, struct hsi_client *cl, + unsigned long mmap_base, unsigned long mmap_size) +{ + int err = 0; + struct cs_hsi_iface *hsi_if = kzalloc(sizeof(*hsi_if), GFP_KERNEL); + + dev_dbg(&cl->device, "cs_hsi_start\n"); + + if (!hsi_if) { + err = -ENOMEM; + goto leave0; + } + spin_lock_init(&hsi_if->lock); + hsi_if->cl = cl; + hsi_if->iface_state = CS_STATE_CLOSED; + hsi_if->mmap_cfg = (struct cs_mmap_config_block *)mmap_base; + hsi_if->mmap_base = mmap_base; + hsi_if->mmap_size = mmap_size; + memset(hsi_if->mmap_cfg, 0, sizeof(*hsi_if->mmap_cfg)); + init_waitqueue_head(&hsi_if->datawait); + err = cs_alloc_cmds(hsi_if); + if (err < 0) { + dev_err(&cl->device, "Unable to alloc HSI messages\n"); + goto leave1; + } + err = cs_hsi_alloc_data(hsi_if); + if (err < 0) { + dev_err(&cl->device, "Unable to alloc HSI messages for data\n"); + goto leave2; + } + err = hsi_claim_port(cl, 1); + if (err < 0) { + dev_err(&cl->device, + "Could not open, HSI port already claimed\n"); + goto leave3; + } + hsi_if->master = ssip_slave_get_master(cl); + if (IS_ERR(hsi_if->master)) { + dev_err(&cl->device, "Could not get HSI master client\n"); + goto leave4; + } + if (!ssip_slave_running(hsi_if->master)) { + err = -ENODEV; + dev_err(&cl->device, + "HSI port not initialized\n"); + goto leave4; + } + + hsi_if->iface_state = CS_STATE_OPENED; + local_bh_disable(); + cs_hsi_read_on_control(hsi_if); + local_bh_enable(); + + dev_dbg(&cl->device, "cs_hsi_start...done\n"); + + BUG_ON(!hi); + *hi = hsi_if; + + return 0; + +leave4: + hsi_release_port(cl); +leave3: + cs_hsi_free_data(hsi_if); +leave2: + cs_free_cmds(hsi_if); +leave1: + kfree(hsi_if); +leave0: + dev_dbg(&cl->device, "cs_hsi_start...done/error\n\n"); + + return err; +} + +static void cs_hsi_stop(struct cs_hsi_iface *hi) +{ + dev_dbg(&hi->cl->device, "cs_hsi_stop\n"); + cs_hsi_set_wakeline(hi, 0); + ssip_slave_put_master(hi->master); + + /* hsi_release_port() needs to be called with CS_STATE_CLOSED */ + hi->iface_state = CS_STATE_CLOSED; + 
hsi_release_port(hi->cl); + + /* + * hsi_release_port() should flush out all the pending + * messages, so cs_state_idle() should be true for both + * control and data channels. + */ + WARN_ON(!cs_state_idle(hi->control_state)); + WARN_ON(!cs_state_idle(hi->data_state)); + + if (pm_qos_request_active(&hi->pm_qos_req)) + pm_qos_remove_request(&hi->pm_qos_req); + + spin_lock_bh(&hi->lock); + cs_hsi_free_data(hi); + cs_free_cmds(hi); + spin_unlock_bh(&hi->lock); + kfree(hi); +} + +static int cs_char_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct cs_char *csdata = vma->vm_private_data; + struct page *page; + + page = virt_to_page(csdata->mmap_base); + get_page(page); + vmf->page = page; + + return 0; +} + +static struct vm_operations_struct cs_char_vm_ops = { + .fault = cs_char_vma_fault, +}; + +static int cs_char_fasync(int fd, struct file *file, int on) +{ + struct cs_char *csdata = file->private_data; + + if (fasync_helper(fd, file, on, &csdata->async_queue) >= 0) + return 0; + else + return -EIO; +} + +static unsigned int cs_char_poll(struct file *file, poll_table *wait) +{ + struct cs_char *csdata = file->private_data; + unsigned int ret = 0; + + poll_wait(file, &cs_char_data.wait, wait); + spin_lock_bh(&csdata->lock); + if (!list_empty(&csdata->chardev_queue)) + ret = POLLIN | POLLRDNORM; + else if (!list_empty(&csdata->dataind_queue)) + ret = POLLIN | POLLRDNORM; + spin_unlock_bh(&csdata->lock); + + return ret; +} + +static ssize_t cs_char_read(struct file *file, char __user *buf, size_t count, + loff_t *unused) +{ + struct cs_char *csdata = file->private_data; + u32 data; + ssize_t retval; + + if (count < sizeof(data)) + return -EINVAL; + + for ( ; ; ) { + DEFINE_WAIT(wait); + + spin_lock_bh(&csdata->lock); + if (!list_empty(&csdata->chardev_queue)) { + data = cs_pop_entry(&csdata->chardev_queue); + } else if (!list_empty(&csdata->dataind_queue)) { + data = cs_pop_entry(&csdata->dataind_queue); + --csdata->dataind_pending; + + } else { + data = 0; + } + spin_unlock_bh(&csdata->lock); + + if (data) + break; + if (file->f_flags & O_NONBLOCK) { + retval = -EAGAIN; + goto out; + } else if (signal_pending(current)) { + retval = -ERESTARTSYS; + goto out; + } + prepare_to_wait_exclusive(&csdata->wait, &wait, + TASK_INTERRUPTIBLE); + schedule(); + finish_wait(&csdata->wait, &wait); + } + + retval = put_user(data, (u32 __user *)buf); + if (!retval) + retval = sizeof(data); + +out: + return retval; +} + +static ssize_t cs_char_write(struct file *file, const char __user *buf, + size_t count, loff_t *unused) +{ + struct cs_char *csdata = file->private_data; + u32 data; + int err; + ssize_t retval; + + if (count < sizeof(data)) + return -EINVAL; + + if (get_user(data, (u32 __user *)buf)) + retval = -EFAULT; + else + retval = count; + + err = cs_hsi_command(csdata->hi, data); + if (err < 0) + retval = err; + + return retval; +} + +static long cs_char_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct cs_char *csdata = file->private_data; + int r = 0; + + switch (cmd) { + case CS_GET_STATE: { + unsigned int state; + + state = cs_hsi_get_state(csdata->hi); + if (copy_to_user((void __user *)arg, &state, sizeof(state))) + r = -EFAULT; + } + break; + case CS_SET_WAKELINE: { + unsigned int state; + + if (copy_from_user(&state, (void __user *)arg, sizeof(state))) + r = -EFAULT; + else + cs_hsi_set_wakeline(csdata->hi, state); + } + break; + case CS_GET_IF_VERSION: { + unsigned int ifver = CS_IF_VERSION; + + if (copy_to_user((void __user *)arg, &ifver, 
sizeof(ifver))) + r = -EFAULT; + break; + } + case CS_CONFIG_BUFS: { + struct cs_buffer_config buf_cfg; + + if (copy_from_user(&buf_cfg, (void __user *)arg, + sizeof(buf_cfg))) + r = -EFAULT; + else + r = cs_hsi_buf_config(csdata->hi, &buf_cfg); + break; + } + default: + r = -ENOTTY; + break; + } + + return r; +} + +static int cs_char_mmap(struct file *file, struct vm_area_struct *vma) +{ + if (vma->vm_end < vma->vm_start) + return -EINVAL; + + if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) != 1) + return -EINVAL; + + vma->vm_flags |= VM_RESERVED; + vma->vm_ops = &cs_char_vm_ops; + vma->vm_private_data = file->private_data; + + return 0; +} + +static int cs_char_open(struct inode *unused, struct file *file) +{ + int ret = 0; + unsigned long p; + + spin_lock_bh(&cs_char_data.lock); + if (cs_char_data.opened) { + ret = -EBUSY; + spin_unlock_bh(&cs_char_data.lock); + goto out1; + } + cs_char_data.opened = 1; + cs_char_data.dataind_pending = 0; + spin_unlock_bh(&cs_char_data.lock); + + p = get_zeroed_page(GFP_KERNEL); + if (!p) { + ret = -ENOMEM; + goto out2; + } + + ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl, p, CS_MMAP_SIZE); + if (ret) { + dev_err(&cs_char_data.cl->device, "Unable to initialize HSI\n"); + goto out3; + } + + /* these are only used in release so lock not needed */ + cs_char_data.mmap_base = p; + cs_char_data.mmap_size = CS_MMAP_SIZE; + + file->private_data = &cs_char_data; + + return 0; + +out3: + free_page(p); +out2: + spin_lock_bh(&cs_char_data.lock); + cs_char_data.opened = 0; + spin_unlock_bh(&cs_char_data.lock); +out1: + return ret; +} + +static void cs_free_char_queue(struct list_head *head) +{ + struct char_queue *entry; + struct list_head *cursor, *next; + + if (!list_empty(head)) { + list_for_each_safe(cursor, next, head) { + entry = list_entry(cursor, struct char_queue, list); + list_del(&entry->list); + kfree(entry); + } + } + +} + +static int cs_char_release(struct inode *unused, struct file *file) +{ + struct cs_char *csdata = file->private_data; + + cs_hsi_stop(csdata->hi); + spin_lock_bh(&csdata->lock); + csdata->hi = NULL; + free_page(csdata->mmap_base); + cs_free_char_queue(&csdata->chardev_queue); + cs_free_char_queue(&csdata->dataind_queue); + csdata->opened = 0; + spin_unlock_bh(&csdata->lock); + + return 0; +} + +static const struct file_operations cs_char_fops = { + .owner = THIS_MODULE, + .read = cs_char_read, + .write = cs_char_write, + .poll = cs_char_poll, + .unlocked_ioctl = cs_char_ioctl, + .mmap = cs_char_mmap, + .open = cs_char_open, + .release = cs_char_release, + .fasync = cs_char_fasync, +}; + +static struct miscdevice cs_char_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = DRIVER_NAME, + .fops = &cs_char_fops +}; + +static int __init cs_hsi_client_probe(struct device *dev) +{ + int err = 0; + struct hsi_client *cl = to_hsi_client(dev); + + dev_dbg(dev, "hsi_client_probe\n"); + init_waitqueue_head(&cs_char_data.wait); + spin_lock_init(&cs_char_data.lock); + cs_char_data.opened = 0; + cs_char_data.cl = cl; + cs_char_data.hi = NULL; + INIT_LIST_HEAD(&cs_char_data.chardev_queue); + INIT_LIST_HEAD(&cs_char_data.dataind_queue); + + err = misc_register(&cs_char_miscdev); + if (err) + dev_err(dev, "Failed to register\n"); + + return err; +} + +static int __exit cs_hsi_client_remove(struct device *dev) +{ + struct cs_hsi_iface *hi; + + dev_dbg(dev, "hsi_client_remove\n"); + misc_deregister(&cs_char_miscdev); + spin_lock_bh(&cs_char_data.lock); + hi = cs_char_data.hi; + cs_char_data.hi = NULL; + spin_unlock_bh(&cs_char_data.lock); + 
if (hi) + cs_hsi_stop(hi); + + return 0; +} + +static struct hsi_client_driver cs_hsi_driver = { + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .probe = cs_hsi_client_probe, + .remove = cs_hsi_client_remove, + }, +}; + +static int __init cs_char_init(void) +{ + int err = 0; + + err = hsi_register_client_driver(&cs_hsi_driver); + if (err) + pr_err(DRIVER_NAME ": Error when registering driver %d\n", err); + + return err; +} +module_init(cs_char_init); + +static void __exit cs_char_exit(void) +{ + hsi_unregister_client_driver(&cs_hsi_driver); +} +module_exit(cs_char_exit); + +MODULE_ALIAS("hsi:cmt_speech"); +MODULE_AUTHOR("Kai Vehmanen "); +MODULE_AUTHOR("Peter Ujfalusi "); +MODULE_DESCRIPTION("CMT speech driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c new file mode 100644 index 000000000000..3f9aeb7009d8 --- /dev/null +++ b/drivers/hsi/clients/ssi_protocol.c @@ -0,0 +1,1177 @@ +/* + * ssi_protocol.c + * + * Implementation of the SSI McSAAB improved protocol. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +void ssi_waketest(struct hsi_client *cl, unsigned int enable); + +#define SSIP_TXQUEUE_LEN 100 +#define SSIP_MAX_MTU 65535 +#define SSIP_DEFAULT_MTU 4000 +#define PN_MEDIA_SOS 21 +#define SSIP_MIN_PN_HDR 6 /* FIXME: Revisit */ +#define SSIP_WDTOUT 2000 /* FIXME: has to be 500 msecs */ +#define SSIP_KATOUT 15 /* 15 msecs */ +#define SSIP_MAX_CMDS 5 /* Number of pre-allocated commands buffers */ +#define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1) +#define SSIP_CMT_LOADER_SYNC 0x11223344 +/* + * SSI protocol command definitions + */ +#define SSIP_COMMAND(data) ((data) >> 28) +#define SSIP_PAYLOAD(data) ((data) & 0xfffffff) +/* Commands */ +#define SSIP_SW_BREAK 0 +#define SSIP_BOOTINFO_REQ 1 +#define SSIP_BOOTINFO_RESP 2 +#define SSIP_WAKETEST_RESULT 3 +#define SSIP_START_TRANS 4 +#define SSIP_READY 5 +/* Payloads */ +#define SSIP_DATA_VERSION(data) ((data) & 0xff) +#define SSIP_LOCAL_VERID 1 +#define SSIP_WAKETEST_OK 0 +#define SSIP_WAKETEST_FAILED 1 +#define SSIP_PDU_LENGTH(data) (((data) >> 8) & 0xffff) +#define SSIP_MSG_ID(data) ((data) & 0xff) +/* Generic Command */ +#define SSIP_CMD(cmd, payload) (((cmd) << 28) | ((payload) & 0xfffffff)) +/* Commands for the control channel */ +#define SSIP_BOOTINFO_REQ_CMD(ver) \ + SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver)) +#define SSIP_BOOTINFO_RESP_CMD(ver) \ + SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver)) +#define SSIP_START_TRANS_CMD(pdulen, id) \ + SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id))) 
+#define SSIP_READY_CMD SSIP_CMD(SSIP_READY, 0) +#define SSIP_SWBREAK_CMD SSIP_CMD(SSIP_SW_BREAK, 0) + +/* Main state machine states */ +enum { + INIT, + HANDSHAKE, + ACTIVE, +}; + +/* Send state machine states */ +enum { + SEND_IDLE, + WAIT4READY, + SEND_READY, + SENDING, + SENDING_SWBREAK, +}; + +/* Receive state machine states */ +enum { + RECV_IDLE, + RECV_READY, + RECEIVING, +}; + +/** + * struct ssi_protocol - SSI protocol (McSAAB) data + * @main_state: Main state machine + * @send_state: TX state machine + * @recv_state: RX state machine + * @waketest: Flag to follow wake line test + * @rxid: RX data id + * @txid: TX data id + * @txqueue_len: TX queue length + * @tx_wd: TX watchdog + * @rx_wd: RX watchdog + * @keep_alive: Workaround for SSI HW bug + * @lock: To serialize access to this struct + * @netdev: Phonet network device + * @nb: CMT reset notification block + * @cmt: Reference to the CMT device + * @txqueue: TX data queue + * @cmdqueue: Queue of free commands + * @cl: HSI client own reference + * @link: Link for ssip_list + * @tx_usecount: Refcount to keep track the slaves that use the wake line + */ +struct ssi_protocol { + unsigned int main_state; + unsigned int send_state; + unsigned int recv_state; + unsigned int waketest:1; + u8 rxid; + u8 txid; + unsigned int txqueue_len; + struct timer_list tx_wd; + struct timer_list rx_wd; + struct timer_list keep_alive; /* wake-up workaround */ + spinlock_t lock; + struct net_device *netdev; + struct notifier_block nb; + struct cmt_device *cmt; + struct list_head txqueue; + struct list_head cmdqueue; + struct hsi_client *cl; + struct list_head link; + atomic_t tx_usecnt; +}; + +/* List of ssi protocol instances */ +static LIST_HEAD(ssip_list); + +static void ssip_rxcmd_complete(struct hsi_msg *msg); + +static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd) +{ + u32 *data; + + data = sg_virt(msg->sgt.sgl); + *data = cmd; +} + +static inline u32 ssip_get_cmd(struct hsi_msg *msg) +{ + u32 *data; + + data = sg_virt(msg->sgt.sgl); + + return *data; +} + +static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg) +{ + skb_frag_t *frag; + struct scatterlist *sg; + int i; + + BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1)); + + sg = msg->sgt.sgl; + sg_set_buf(sg, skb->data, skb_headlen(skb)); + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + sg = sg_next(sg); + BUG_ON(!sg); + frag = &skb_shinfo(skb)->frags[i]; + sg_set_page(sg, frag->page.p, frag->size, frag->page_offset); + } +} + +static void ssip_free_data(struct hsi_msg *msg) +{ + struct sk_buff *skb; + + skb = msg->context; + pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context, + skb); + msg->destructor = NULL; + dev_kfree_skb(skb); + hsi_free_msg(msg); +} + +static struct hsi_msg *ssip_alloc_data(struct sk_buff *skb, gfp_t flags) +{ + struct hsi_msg *msg; + + msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags); + if (!msg) + return NULL; + ssip_skb_to_msg(skb, msg); + msg->destructor = ssip_free_data; + msg->channel = 3; + msg->context = skb; + + return msg; +} + +static inline void ssip_release_cmd(struct hsi_msg *msg) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl); + + dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg)); + spin_lock_bh(&ssi->lock); + list_add_tail(&msg->link, &ssi->cmdqueue); + spin_unlock_bh(&ssi->lock); +} + +static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi) +{ + struct hsi_msg *msg; + + BUG_ON(list_empty(&ssi->cmdqueue)); + + spin_lock_bh(&ssi->lock); + 
msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link); + list_del(&msg->link); + spin_unlock_bh(&ssi->lock); + msg->destructor = ssip_release_cmd; + + return msg; +} + +static void ssip_free_cmds(struct ssi_protocol *ssi) +{ + struct hsi_msg *msg, *tmp; + + list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) { + list_del(&msg->link); + msg->destructor = NULL; + kfree(sg_virt(msg->sgt.sgl)); + hsi_free_msg(msg); + } +} + +static int ssip_alloc_cmds(struct ssi_protocol *ssi) +{ + struct hsi_msg *msg; + u32 *buf; + unsigned int i; + + for (i = 0; i < SSIP_MAX_CMDS; i++) { + msg = hsi_alloc_msg(1, GFP_KERNEL); + if (!msg) + goto out; + buf = kmalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) { + hsi_free_msg(msg); + goto out; + } + sg_init_one(msg->sgt.sgl, buf, sizeof(*buf)); + msg->channel = 0; + list_add_tail(&msg->link, &ssi->cmdqueue); + } + + return 0; +out: + ssip_free_cmds(ssi); + + return -ENOMEM; +} + +static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state) +{ + ssi->recv_state = state; + switch (state) { + case RECV_IDLE: + del_timer(&ssi->rx_wd); + if (ssi->send_state == SEND_IDLE) + del_timer(&ssi->keep_alive); + break; + case RECV_READY: + /* CMT speech workaround */ + if (atomic_read(&ssi->tx_usecnt)) + break; + /* Otherwise fall through */ + case RECEIVING: + mod_timer(&ssi->keep_alive, jiffies + + msecs_to_jiffies(SSIP_KATOUT)); + mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); + break; + default: + break; + } +} + +static void ssip_set_txstate(struct ssi_protocol *ssi, unsigned int state) +{ + ssi->send_state = state; + switch (state) { + case SEND_IDLE: + case SEND_READY: + del_timer(&ssi->tx_wd); + if (ssi->recv_state == RECV_IDLE) + del_timer(&ssi->keep_alive); + break; + case WAIT4READY: + case SENDING: + case SENDING_SWBREAK: + mod_timer(&ssi->keep_alive, + jiffies + msecs_to_jiffies(SSIP_KATOUT)); + mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); + break; + default: + break; + } +} + +struct hsi_client *ssip_slave_get_master(struct hsi_client *slave) +{ + struct hsi_client *master = ERR_PTR(-ENODEV); + struct ssi_protocol *ssi; + + list_for_each_entry(ssi, &ssip_list, link) + if (slave->device.parent == ssi->cl->device.parent) { + master = ssi->cl; + break; + } + + return master; +} +EXPORT_SYMBOL_GPL(ssip_slave_get_master); + +int ssip_slave_start_tx(struct hsi_client *master) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(master); + + dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt)); + spin_lock_bh(&ssi->lock); + if (ssi->send_state == SEND_IDLE) { + ssip_set_txstate(ssi, WAIT4READY); + hsi_start_tx(master); + } + spin_unlock_bh(&ssi->lock); + atomic_inc(&ssi->tx_usecnt); + + return 0; +} +EXPORT_SYMBOL_GPL(ssip_slave_start_tx); + +int ssip_slave_stop_tx(struct hsi_client *master) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(master); + + WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0); + + if (atomic_dec_and_test(&ssi->tx_usecnt)) { + spin_lock_bh(&ssi->lock); + if ((ssi->send_state == SEND_READY) || + (ssi->send_state == WAIT4READY)) { + ssip_set_txstate(ssi, SEND_IDLE); + hsi_stop_tx(master); + } + spin_unlock_bh(&ssi->lock); + } + dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt)); + + return 0; +} +EXPORT_SYMBOL_GPL(ssip_slave_stop_tx); + +int ssip_slave_running(struct hsi_client *master) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(master); + return netif_running(ssi->netdev); +} +EXPORT_SYMBOL_GPL(ssip_slave_running); + +static void 
ssip_reset(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct list_head *head, *tmp; + struct hsi_msg *msg; + + if (netif_running(ssi->netdev)) + netif_carrier_off(ssi->netdev); + hsi_flush(cl); + spin_lock_bh(&ssi->lock); + if (ssi->send_state != SEND_IDLE) + hsi_stop_tx(cl); + if (ssi->waketest) + ssi_waketest(cl, 0); + del_timer(&ssi->rx_wd); + del_timer(&ssi->tx_wd); + del_timer(&ssi->keep_alive); + ssi->main_state = 0; + ssi->send_state = 0; + ssi->recv_state = 0; + ssi->waketest = 0; + ssi->rxid = 0; + ssi->txid = 0; + list_for_each_safe(head, tmp, &ssi->txqueue) { + msg = list_entry(head, struct hsi_msg, link); + dev_dbg(&cl->device, "Pending TX data\n"); + list_del(head); + ssip_free_data(msg); + } + ssi->txqueue_len = 0; + spin_unlock_bh(&ssi->lock); +} + +static void ssip_dump_state(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + spin_lock_bh(&ssi->lock); + dev_err(&cl->device, "Main state: %d\n", ssi->main_state); + dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state); + dev_err(&cl->device, "Send state: %d\n", ssi->send_state); + dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ? + "Online" : "Offline"); + dev_err(&cl->device, "Wake test %d\n", ssi->waketest); + dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid); + dev_err(&cl->device, "Data TX id: %d\n", ssi->txid); + + list_for_each_entry(msg, &ssi->txqueue, link) + dev_err(&cl->device, "pending TX data (%p)\n", msg); + spin_unlock_bh(&ssi->lock); +} + +static void ssip_error(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + ssip_dump_state(cl); + ssip_reset(cl); + msg = ssip_claim_cmd(ssi); + msg->complete = ssip_rxcmd_complete; + hsi_async_read(cl, msg); +} + +static void ssip_keep_alive(unsigned long data) +{ + struct hsi_client *cl = (struct hsi_client *)data; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n", + ssi->main_state, ssi->recv_state, ssi->send_state); + + spin_lock(&ssi->lock); + if (ssi->recv_state == RECV_IDLE) + switch (ssi->send_state) { + case SEND_READY: + if (atomic_read(&ssi->tx_usecnt) == 0) + break; + /* + * Fall through. Workaround for cmt-speech: + * in that case we rely on audio timers. + */ + case SEND_IDLE: + spin_unlock(&ssi->lock); + return; + } + mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT)); + spin_unlock(&ssi->lock); +} + +static void ssip_wd(unsigned long data) +{ + struct hsi_client *cl = (struct hsi_client *)data; + + dev_err(&cl->device, "Watchdog triggered\n"); + ssip_error(cl); +} + +static void ssip_start_rx(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state, + ssi->recv_state); + spin_lock(&ssi->lock); + /* + * We can have two UP events in a row due to a short low-to-high + * transition. Therefore we need to ignore the second UP event.
+ */ + if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) { + spin_unlock(&ssi->lock); + return; + } + ssip_set_rxstate(ssi, RECV_READY); + spin_unlock(&ssi->lock); + + msg = ssip_claim_cmd(ssi); + ssip_set_cmd(msg, SSIP_READY_CMD); + msg->complete = ssip_release_cmd; + dev_dbg(&cl->device, "Send READY\n"); + hsi_async_write(cl, msg); +} + +static void ssip_stop_rx(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state); + spin_lock(&ssi->lock); + if (likely(ssi->main_state == ACTIVE)) + ssip_set_rxstate(ssi, RECV_IDLE); + spin_unlock(&ssi->lock); +} + +static void ssip_free_strans(struct hsi_msg *msg) +{ + ssip_free_data(msg->context); + ssip_release_cmd(msg); +} + +static void ssip_strans_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *data; + + data = msg->context; + ssip_release_cmd(msg); + spin_lock(&ssi->lock); + ssip_set_txstate(ssi, SENDING); + spin_unlock(&ssi->lock); + hsi_async_write(cl, data); +} + +static int ssip_xmit(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg, *dmsg; + struct sk_buff *skb; + + spin_lock_bh(&ssi->lock); + if (list_empty(&ssi->txqueue)) { + spin_unlock_bh(&ssi->lock); + return 0; + } + dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link); + list_del(&dmsg->link); + ssi->txqueue_len--; + spin_unlock_bh(&ssi->lock); + + msg = ssip_claim_cmd(ssi); + skb = dmsg->context; + msg->context = dmsg; + msg->complete = ssip_strans_complete; + msg->destructor = ssip_free_strans; + + spin_lock_bh(&ssi->lock); + ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len), + ssi->txid)); + ssi->txid++; + ssip_set_txstate(ssi, SENDING); + spin_unlock_bh(&ssi->lock); + + dev_dbg(&cl->device, "Send STRANS (%d frames)\n", + SSIP_BYTES_TO_FRAMES(skb->len)); + + return hsi_async_write(cl, msg); +} + +/* In soft IRQ context */ +static void ssip_pn_rx(struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + + if (unlikely(!netif_running(dev))) { + dev_dbg(&dev->dev, "Drop RX packet\n"); + dev->stats.rx_dropped++; + dev_kfree_skb(skb); + return; + } + if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) { + dev_dbg(&dev->dev, "Error drop RX packet\n"); + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + dev_kfree_skb(skb); + return; + } + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; +#ifdef __LITTLE_ENDIAN + ((u16 *)skb->data)[2] = swab16(((u16 *)skb->data)[2]); + dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n", + ((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2])); +#endif + skb->protocol = htons(ETH_P_PHONET); + skb_reset_mac_header(skb); + __skb_pull(skb, 1); + netif_rx(skb); +} + +static void ssip_rx_data_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct sk_buff *skb; + + if (msg->status == HSI_STATUS_ERROR) { + dev_err(&cl->device, "RX data error\n"); + ssip_free_data(msg); + ssip_error(cl); + return; + } + del_timer(&ssi->rx_wd); /* FIXME: Revisit */ + skb = msg->context; + ssip_pn_rx(skb); + hsi_free_msg(msg); +} + +static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + /* Workaroud: Ignore CMT Loader message leftover */ + if (cmd == SSIP_CMT_LOADER_SYNC) + return; + + switch (ssi->main_state) { + case ACTIVE: + 
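/*
 * Editorial note, summarising the handshake as implemented below (not
 * part of this patch): main_state walks INIT -> HANDSHAKE -> ACTIVE.
 * The host issues BOOTINFO_REQ(version) when the Phonet interface is
 * opened, a received BOOTINFO_REQ is answered with BOOTINFO_RESP while
 * the boot watchdog is armed, and the modem's WAKETEST_RESULT finally
 * moves main_state to ACTIVE. A BOOTINFO_REQ arriving while already
 * ACTIVE is treated as a modem restart: the link is reset and the
 * request is then handled as in INIT (fall-through just below).
 */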
dev_err(&cl->device, "Boot info req on active state\n"); + ssip_error(cl); + /* Fall through */ + case INIT: + spin_lock(&ssi->lock); + ssi->main_state = HANDSHAKE; + if (!ssi->waketest) { + ssi->waketest = 1; + ssi_waketest(cl, 1); /* FIXME: To be removed */ + } + /* Start boot handshake watchdog */ + mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); + spin_unlock(&ssi->lock); + dev_dbg(&cl->device, "Send BOOTINFO_RESP\n"); + if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID) + dev_warn(&cl->device, "boot info req verid mismatch\n"); + msg = ssip_claim_cmd(ssi); + ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID)); + msg->complete = ssip_release_cmd; + hsi_async_write(cl, msg); + break; + case HANDSHAKE: + /* Ignore */ + break; + default: + dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state); + break; + } +} + +static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID) + dev_warn(&cl->device, "boot info resp verid mismatch\n"); + + spin_lock(&ssi->lock); + if (ssi->main_state != ACTIVE) + /* Use tx_wd as a boot watchdog in non ACTIVE state */ + mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); + else + dev_dbg(&cl->device, "boot info resp ignored M(%d)\n", + ssi->main_state); + spin_unlock(&ssi->lock); +} + +static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + unsigned int wkres = SSIP_PAYLOAD(cmd); + + spin_lock(&ssi->lock); + if (ssi->main_state != HANDSHAKE) { + dev_dbg(&cl->device, "wake lines test ignored M(%d)\n", + ssi->main_state); + spin_unlock(&ssi->lock); + return; + } + if (ssi->waketest) { + ssi->waketest = 0; + ssi_waketest(cl, 0); /* FIXME: To be removed */ + } + ssi->main_state = ACTIVE; + del_timer(&ssi->tx_wd); /* Stop boot handshake timer */ + spin_unlock(&ssi->lock); + + dev_notice(&cl->device, "WAKELINES TEST %s\n", + wkres & SSIP_WAKETEST_FAILED ? 
"FAILED" : "OK"); + if (wkres & SSIP_WAKETEST_FAILED) { + ssip_error(cl); + return; + } + dev_dbg(&cl->device, "CMT is ONLINE\n"); + netif_wake_queue(ssi->netdev); + netif_carrier_on(ssi->netdev); +} + +static void ssip_rx_ready(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + spin_lock(&ssi->lock); + if (unlikely(ssi->main_state != ACTIVE)) { + dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n", + ssi->send_state, ssi->main_state); + spin_unlock(&ssi->lock); + return; + } + if (ssi->send_state != WAIT4READY) { + dev_dbg(&cl->device, "Ignore spurious READY command\n"); + spin_unlock(&ssi->lock); + return; + } + ssip_set_txstate(ssi, SEND_READY); + spin_unlock(&ssi->lock); + ssip_xmit(cl); +} + +static void ssip_rx_strans(struct hsi_client *cl, u32 cmd) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct sk_buff *skb; + struct hsi_msg *msg; + int len = SSIP_PDU_LENGTH(cmd); + + dev_dbg(&cl->device, "RX strans: %d frames\n", len); + spin_lock(&ssi->lock); + if (unlikely(ssi->main_state != ACTIVE)) { + dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n", + ssi->send_state, ssi->main_state); + spin_unlock(&ssi->lock); + return; + } + ssip_set_rxstate(ssi, RECEIVING); + if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) { + dev_err(&cl->device, "START TRANS id %d expeceted %d\n", + SSIP_MSG_ID(cmd), ssi->rxid); + spin_unlock(&ssi->lock); + goto out1; + } + ssi->rxid++; + spin_unlock(&ssi->lock); + skb = netdev_alloc_skb(ssi->netdev, len * 4); + if (unlikely(!skb)) { + dev_err(&cl->device, "No memory for rx skb\n"); + goto out1; + } + skb->dev = ssi->netdev; + skb_put(skb, len * 4); + msg = ssip_alloc_data(skb, GFP_ATOMIC); + if (unlikely(!msg)) { + dev_err(&cl->device, "No memory for RX data msg\n"); + goto out2; + } + msg->complete = ssip_rx_data_complete; + hsi_async_read(cl, msg); + + return; +out2: + dev_kfree_skb(skb); +out1: + ssip_error(cl); +} + +static void ssip_rxcmd_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + u32 cmd = ssip_get_cmd(msg); + unsigned int cmdid = SSIP_COMMAND(cmd); + + if (msg->status == HSI_STATUS_ERROR) { + dev_err(&cl->device, "RX error detected\n"); + ssip_release_cmd(msg); + ssip_error(cl); + return; + } + hsi_async_read(cl, msg); + dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd); + switch (cmdid) { + case SSIP_SW_BREAK: + /* Ignored */ + break; + case SSIP_BOOTINFO_REQ: + ssip_rx_bootinforeq(cl, cmd); + break; + case SSIP_BOOTINFO_RESP: + ssip_rx_bootinforesp(cl, cmd); + break; + case SSIP_WAKETEST_RESULT: + ssip_rx_waketest(cl, cmd); + break; + case SSIP_START_TRANS: + ssip_rx_strans(cl, cmd); + break; + case SSIP_READY: + ssip_rx_ready(cl); + break; + default: + dev_warn(&cl->device, "command 0x%08x not supported\n", cmd); + break; + } +} + +static void ssip_swbreak_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + ssip_release_cmd(msg); + spin_lock(&ssi->lock); + if (list_empty(&ssi->txqueue)) { + if (atomic_read(&ssi->tx_usecnt)) { + ssip_set_txstate(ssi, SEND_READY); + } else { + ssip_set_txstate(ssi, SEND_IDLE); + hsi_stop_tx(cl); + } + spin_unlock(&ssi->lock); + } else { + spin_unlock(&ssi->lock); + ssip_xmit(cl); + } + netif_wake_queue(ssi->netdev); +} + +static void ssip_tx_data_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *cmsg; + + if (msg->status == HSI_STATUS_ERROR) { + dev_err(&cl->device, "TX data 
error\n"); + ssip_error(cl); + goto out; + } + spin_lock(&ssi->lock); + if (list_empty(&ssi->txqueue)) { + ssip_set_txstate(ssi, SENDING_SWBREAK); + spin_unlock(&ssi->lock); + cmsg = ssip_claim_cmd(ssi); + ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD); + cmsg->complete = ssip_swbreak_complete; + dev_dbg(&cl->device, "Send SWBREAK\n"); + hsi_async_write(cl, cmsg); + } else { + spin_unlock(&ssi->lock); + ssip_xmit(cl); + } +out: + ssip_free_data(msg); +} + +void ssip_port_event(struct hsi_client *cl, unsigned long event) +{ + switch (event) { + case HSI_EVENT_START_RX: + ssip_start_rx(cl); + break; + case HSI_EVENT_STOP_RX: + ssip_stop_rx(cl); + break; + default: + return; + } +} + +static int ssip_pn_open(struct net_device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev->dev.parent); + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + int err; + + err = hsi_claim_port(cl, 1); + if (err < 0) { + dev_err(&cl->device, "SSI port already claimed\n"); + return err; + } + err = hsi_register_port_event(cl, ssip_port_event); + if (err < 0) { + dev_err(&cl->device, "Register HSI port event failed (%d)\n", err); + return err; + } + dev_dbg(&cl->device, "Configuring SSI port\n"); + hsi_setup(cl); + spin_lock_bh(&ssi->lock); + if (!ssi->waketest) { + ssi->waketest = 1; + ssi_waketest(cl, 1); /* FIXME: To be removed */ + } + ssi->main_state = HANDSHAKE; + spin_unlock_bh(&ssi->lock); + dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n"); + msg = ssip_claim_cmd(ssi); + ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID)); + msg->complete = ssip_release_cmd; + hsi_async_write(cl, msg); + dev_dbg(&cl->device, "Issuing RX command\n"); + msg = ssip_claim_cmd(ssi); + msg->complete = ssip_rxcmd_complete; + hsi_async_read(cl, msg); + + return 0; +} + +static int ssip_pn_stop(struct net_device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev->dev.parent); + + ssip_reset(cl); + hsi_unregister_port_event(cl); + hsi_release_port(cl); + + return 0; +} + +static int ssip_pn_set_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu > SSIP_MAX_MTU || new_mtu < PHONET_MIN_MTU) + return -EINVAL; + dev->mtu = new_mtu; + + return 0; +} + +static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev->dev.parent); + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + if ((skb->protocol != htons(ETH_P_PHONET)) || + (skb->len < SSIP_MIN_PN_HDR)) + goto drop; + /* Pad to 32-bits - FIXME: Revisit*/ + if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3))) + goto drop; + + /* + * Modem sends Phonet messages over SSI with its own endianess... + * Assume that modem has the same endianess as we do. 
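	 * (Editor's illustration, assuming the conventional Phonet header in
	 * which bytes 4-5 carry the 16-bit payload length: on little-endian
	 * hosts that word is byte-swapped in place before TX, i.e.
	 *   ((u16 *)skb->data)[2] = swab16(((u16 *)skb->data)[2]);
	 * and swapped back in ssip_pn_rx() after reception, so the length
	 * travels on the wire in the modem's byte order.)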
+ */ + if (skb_cow_head(skb, 0)) + goto drop; +#ifdef __LITTLE_ENDIAN + ((u16 *)skb->data)[2] = swab16(((u16 *)skb->data)[2]); +#endif + msg = ssip_alloc_data(skb, GFP_ATOMIC); + if (!msg) { + dev_dbg(&cl->device, "Dropping tx data: No memory\n"); + goto drop; + } + msg->complete = ssip_tx_data_complete; + + spin_lock_bh(&ssi->lock); + if (unlikely(ssi->main_state != ACTIVE)) { + spin_unlock_bh(&ssi->lock); + dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n"); + goto drop2; + } + list_add_tail(&msg->link, &ssi->txqueue); + ssi->txqueue_len++; + if (dev->tx_queue_len < ssi->txqueue_len) { + dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len); + netif_stop_queue(dev); + } + if (ssi->send_state == SEND_IDLE) { + ssip_set_txstate(ssi, WAIT4READY); + spin_unlock_bh(&ssi->lock); + dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len); + hsi_start_tx(cl); + } else if (ssi->send_state == SEND_READY) { + /* Needed for cmt-speech workaround */ + dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n", + ssi->txqueue_len); + spin_unlock_bh(&ssi->lock); + ssip_xmit(cl); + } else { + spin_unlock_bh(&ssi->lock); + } + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + return 0; +drop2: + hsi_free_msg(msg); +drop: + dev->stats.tx_dropped++; + dev_kfree_skb(skb); + + return 0; +} + +/* CMT reset support */ +static int ssip_cmt_event(struct notifier_block *nb, unsigned long event, + void *data) +{ + struct ssi_protocol *ssi = container_of(nb, struct ssi_protocol, nb); + + if (event != CMT_RESET) + return NOTIFY_DONE; + + dev_err(&ssi->cl->device, "CMT reset detected !\n"); + ssip_error(ssi->cl); + + return NOTIFY_DONE; +} + +static const struct net_device_ops ssip_pn_ops = { + .ndo_open = ssip_pn_open, + .ndo_stop = ssip_pn_stop, + .ndo_start_xmit = ssip_pn_xmit, + .ndo_change_mtu = ssip_pn_set_mtu, +}; + +static void ssip_pn_setup(struct net_device *dev) +{ + dev->features = 0; + dev->netdev_ops = &ssip_pn_ops; + dev->type = ARPHRD_PHONET; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->mtu = SSIP_DEFAULT_MTU; + dev->hard_header_len = 1; + dev->dev_addr[0] = PN_MEDIA_SOS; + dev->addr_len = 1; + dev->tx_queue_len = SSIP_TXQUEUE_LEN; + + dev->destructor = free_netdev; + dev->header_ops = &phonet_header_ops; +} + +static int __init ssi_protocol_probe(struct device *dev) +{ + static const char ifname[] = "phonet%d"; + struct hsi_client *cl = to_hsi_client(dev); + struct ssi_protocol *ssi; + int err; + + ssi = kzalloc(sizeof(*ssi), GFP_KERNEL); + if (!ssi) { + dev_err(dev, "No memory for ssi protocol\n"); + return -ENOMEM; + } + spin_lock_init(&ssi->lock); + init_timer_deferrable(&ssi->rx_wd); + init_timer_deferrable(&ssi->tx_wd); + init_timer(&ssi->keep_alive); + ssi->rx_wd.data = (unsigned long)cl; + ssi->rx_wd.function = ssip_wd; + ssi->tx_wd.data = (unsigned long)cl; + ssi->tx_wd.function = ssip_wd; + ssi->keep_alive.data = (unsigned long)cl; + ssi->keep_alive.function = ssip_keep_alive; + INIT_LIST_HEAD(&ssi->txqueue); + INIT_LIST_HEAD(&ssi->cmdqueue); + ssi->nb.notifier_call = ssip_cmt_event; + ssi->nb.priority = INT_MAX; + atomic_set(&ssi->tx_usecnt, 0); + hsi_client_set_drvdata(cl, ssi); + ssi->cl = cl; + err = ssip_alloc_cmds(ssi); + if (err < 0) { + dev_err(dev, "No memory for commands\n"); + goto out; + } + ssi->netdev = alloc_netdev(0, ifname, ssip_pn_setup); + if (!ssi->netdev) { + dev_err(dev, "No memory for netdev\n"); + err = -ENOMEM; + goto out1; + } + SET_NETDEV_DEV(ssi->netdev, dev); + netif_carrier_off(ssi->netdev); + err = 
register_netdev(ssi->netdev); + if (err < 0) { + dev_err(dev, "Register netdev failed (%d)\n", err); + free_netdev(ssi->netdev); + goto out1; + } + ssi->cmt = cmt_get("cmt"); + if (IS_ERR(ssi->cmt)) { + err = PTR_ERR(ssi->cmt); + dev_err(dev, "Could not get CMT (%d)\n", err); + goto out2; + } + err = cmt_notifier_register(ssi->cmt, &ssi->nb); + if (err < 0) { + dev_err(dev, "Register CMT notifier failed (%d)\n", err); + goto out3; + } + list_add(&ssi->link, &ssip_list); + + return 0; +out3: + cmt_put(ssi->cmt); +out2: + unregister_netdev(ssi->netdev); +out1: + ssip_free_cmds(ssi); +out: + kfree(ssi); + + return err; +} + +static int __exit ssi_protocol_remove(struct device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev); + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + list_del(&ssi->link); + cmt_notifier_unregister(ssi->cmt, &ssi->nb); + cmt_put(ssi->cmt); + unregister_netdev(ssi->netdev); + ssip_free_cmds(ssi); + hsi_client_set_drvdata(cl, NULL); + kfree(ssi); + + return 0; +} + +static struct hsi_client_driver ssip_driver = { + .driver = { + .name = "ssi_protocol", + .owner = THIS_MODULE, + .probe = ssi_protocol_probe, + .remove = ssi_protocol_remove, + }, +}; + +static int __init ssip_init(void) +{ + pr_info("SSI protocol aka McSAAB added\n"); + + return hsi_register_client_driver(&ssip_driver); +} +module_init(ssip_init); + +static void __exit ssip_exit(void) +{ + hsi_unregister_client_driver(&ssip_driver); + pr_info("SSI protocol driver removed\n"); +} +module_exit(ssip_exit); + +MODULE_ALIAS("hsi:ssi_protocol"); +MODULE_AUTHOR("Carlos Chinea ," + "Remi Denis-Courmont "); +MODULE_DESCRIPTION("SSI protocol improved aka McSAAB"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hsi/controllers/Kconfig b/drivers/hsi/controllers/Kconfig new file mode 100644 index 000000000000..3efe0f027c7a --- /dev/null +++ b/drivers/hsi/controllers/Kconfig @@ -0,0 +1,23 @@ +# +# HSI controllers configuration +# +comment "HSI controllers" + +config OMAP_SSI + tristate "OMAP SSI hardware driver" + depends on ARCH_OMAP && HSI + default n + ---help--- + SSI is a legacy version of HSI. It is usually used to connect + an application engine with a cellular modem. + If you say Y here, you will enable the OMAP SSI hardware driver. + + If unsure, say N. + +if OMAP_SSI + +config OMAP_SSI_CONFIG + boolean + default y + +endif # OMAP_SSI diff --git a/drivers/hsi/controllers/Makefile b/drivers/hsi/controllers/Makefile new file mode 100644 index 000000000000..c4ba2c2c2bda --- /dev/null +++ b/drivers/hsi/controllers/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for HSI controllers drivers +# + +obj-$(CONFIG_OMAP_SSI) += omap_ssi.o diff --git a/drivers/hsi/controllers/omap_ssi.c b/drivers/hsi/controllers/omap_ssi.c new file mode 100644 index 000000000000..45192b480ab7 --- /dev/null +++ b/drivers/hsi/controllers/omap_ssi.c @@ -0,0 +1,1885 @@ +/* + * omap_ssi.c + * + * Implements the OMAP SSI driver. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#define SSI_MAX_CHANNELS 8 +#define SSI_MAX_GDD_LCH 8 +#define SSI_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1) + +/** + * struct ssi_clk_res - Device resource data for the SSI clocks + * @clk: Pointer to the clock + * @nb: Pointer to the clock notifier for clk, if any + */ +struct ssi_clk_res { + struct clk *clk; + struct notifier_block *nb; +}; + +/** + * struct gdd_trn - GDD transaction data + * @msg: Pointer to the HSI message being served + * @sg: Pointer to the current sg entry being served + */ +struct gdd_trn { + struct hsi_msg *msg; + struct scatterlist *sg; +}; + +/** + * struct omap_ssm_ctx - OMAP synchronous serial module (TX/RX) context + * @mode: Bit transmission mode + * @channels: Number of channels + * @framesize: Frame size in bits + * @timeout: RX frame timeout + * @divisor: TX divider + * @arb_mode: Arbitration mode for TX frame (Round robin, priority) + */ +struct omap_ssm_ctx { + u32 mode; + u32 channels; + u32 frame_size; + union { + u32 timeout; /* Rx Only */ + struct { + u32 arb_mode; + u32 divisor; + }; /* Tx only */ + }; +}; + +/** + * struct omap_ssi_port - OMAP SSI port data + * @dev: device associated to the port (HSI port) + * @sst_dma: SSI transmitter physical base address + * @ssr_dma: SSI receiver physical base address + * @sst_base: SSI transmitter base address + * @ssr_base: SSI receiver base address + * @wk_lock: spin lock to serialize access to the wake lines + * @lock: Spin lock to serialize access to the SSI port + * @channels: Current number of channels configured (1,2,4 or 8) + * @txqueue: TX message queues + * @rxqueue: RX message queues + * @brkqueue: Queue of incoming HWBREAK requests (FRAME mode) + * @irq: IRQ number + * @wake_irq: IRQ number for incoming wake line (-1 if none) + * @wake_gpio: GPIO number for incoming wake line (-1 if none) + * @pio_tasklet: Bottom half for PIO transfers and events + * @wake_tasklet: Bottom half for incoming wake events + * @wkin_cken: Keep track of clock references due to the incoming wake line + * @wk_refcount: Reference count for output wake line + * @sys_mpu_enable: Context for the interrupt enable register for irq 0 + * @sst: Context for the synchronous serial transmitter + * @ssr: Context for the synchronous serial receiver + */ +struct omap_ssi_port { + struct device *dev; + dma_addr_t sst_dma; + dma_addr_t ssr_dma; + void __iomem *sst_base; + void __iomem *ssr_base; + spinlock_t wk_lock; + spinlock_t lock; + unsigned int channels; + struct list_head txqueue[SSI_MAX_CHANNELS]; + struct list_head rxqueue[SSI_MAX_CHANNELS]; + struct list_head brkqueue; + unsigned int irq; + int wake_irq; + int wake_gpio; + struct tasklet_struct pio_tasklet; + struct tasklet_struct wake_tasklet; + unsigned int wktest:1; /* FIXME: HACK to be removed */ + unsigned int wkin_cken:1; /* Workaround */ + int wk_refcount; + /* OMAP SSI port context */ + u32 sys_mpu_enable; /* We use only one irq */ + struct omap_ssm_ctx sst; + struct omap_ssm_ctx ssr; +}; + +/** + * struct omap_ssi_controller - OMAP SSI controller data + * @dev: device associated to the controller (HSI controller) + * @sys: SSI I/O base address 
+ * @gdd: GDD I/O base address + * @ick: SSI interconnect clock + * @fck: SSI functional clock + * @ck_refcount: References count for clocks + * @gdd_irq: IRQ line for GDD + * @gdd_tasklet: bottom half for DMA transfers + * @gdd_trn: Array of GDD transaction data for ongoing GDD transfers + * @lock: lock to serialize access to GDD + * @ck_lock: lock to serialize access to the clocks + * @loss_count: To follow if we need to restore context or not + * @max_speed: Maximum TX speed (Kb/s) set by the clients. + * @sysconfig: SSI controller saved context + * @gdd_gcr: SSI GDD saved context + * @get_loss: Pointer to omap_pm_get_dev_context_loss_count, if any + * @port: Array of pointers of the ports of the controller + * @dir: Debugfs SSI root directory + */ +struct omap_ssi_controller { + struct device *dev; + void __iomem *sys; + void __iomem *gdd; + struct clk *ick; + struct clk *fck; + int ck_refcount; + unsigned int gdd_irq; + struct tasklet_struct gdd_tasklet; + struct gdd_trn gdd_trn[SSI_MAX_GDD_LCH]; + spinlock_t lock; + spinlock_t ck_lock; + unsigned long fck_rate; + u32 loss_count; + u32 max_speed; + /* OMAP SSI Controller context */ + u32 sysconfig; + u32 gdd_gcr; + u32 (*get_loss)(struct device *dev); + struct omap_ssi_port **port; +#ifdef CONFIG_DEBUG_FS + struct dentry *dir; +#endif +}; + +static inline unsigned int ssi_wakein(struct hsi_port *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + return gpio_get_value(omap_port->wake_gpio); +} + +static int ssi_for_each_port(struct hsi_controller *ssi, void *data, + int (*fn)(struct omap_ssi_port *p, void *data)) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + unsigned int i = 0; + int err = 0; + + for (i = 0; ((i < ssi->num_ports) && !err); i++) + err = (*fn)(omap_ssi->port[i], data); + + return err; +} + +static int ssi_set_port_mode(struct omap_ssi_port *omap_port, void *data) +{ + u32 *mode = data; + + __raw_writel(*mode, omap_port->sst_base + SSI_SST_MODE_REG); + __raw_writel(*mode, omap_port->ssr_base + SSI_SSR_MODE_REG); + /* OCP barrier */ + *mode = __raw_readl(omap_port->ssr_base + SSI_SSR_MODE_REG); + + return 0; +} + +static inline void ssi_set_mode(struct hsi_controller *ssi, u32 mode) +{ + ssi_for_each_port(ssi, &mode, ssi_set_port_mode); +} + +static int ssi_restore_port_mode(struct omap_ssi_port *omap_port, + void *data __maybe_unused) +{ + u32 mode; + + __raw_writel(omap_port->sst.mode, + omap_port->sst_base + SSI_SST_MODE_REG); + __raw_writel(omap_port->ssr.mode, + omap_port->ssr_base + SSI_SSR_MODE_REG); + /* OCP barrier */ + mode = __raw_readl(omap_port->ssr_base + SSI_SSR_MODE_REG); + + return 0; +} + +static int ssi_restore_divisor(struct omap_ssi_port *omap_port, + void *data __maybe_unused) +{ + __raw_writel(omap_port->sst.divisor, + omap_port->sst_base + SSI_SST_DIVISOR_REG); + + return 0; +} + +static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port, + void *data __maybe_unused) +{ + struct hsi_port *port = to_hsi_port(omap_port->dev); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *base = omap_port->sst_base; + + __raw_writel(omap_port->sys_mpu_enable, + omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + /* SST context */ + __raw_writel(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG); + __raw_writel(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG); + __raw_writel(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG); + /* SSR context 
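	 * (editor's note: ssi_clk_enable() runs this restore path whenever the
	 * context-loss counter shows the module lost its state while the
	 * clocks were off; the mode and TX divisor are additionally rewritten
	 * on every clock enable)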
*/ + base = omap_port->ssr_base; + __raw_writel(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG); + __raw_writel(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG); + __raw_writel(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG); + + return 0; +} + +static int ssi_save_port_ctx(struct omap_ssi_port *omap_port, + void *data __maybe_unused) +{ + struct hsi_port *port = to_hsi_port(omap_port->dev); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + omap_port->sys_mpu_enable = __raw_readl(omap_ssi->sys + + SSI_MPU_ENABLE_REG(port->num, 0)); + + return 0; +} + +static int ssi_clk_enable(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct device *pdev = ssi->device.parent; + int err = 0; + + spin_lock_bh(&omap_ssi->ck_lock); + + if (omap_ssi->ck_refcount++) + goto out; + + err = pm_runtime_get_sync(pdev); + if (unlikely(err < 0)) + goto out; + + if ((omap_ssi->get_loss) && (omap_ssi->loss_count == + (*omap_ssi->get_loss)(ssi->device.parent))) + goto mode; /* We always need to restore the mode & TX divisor */ + + __raw_writel(omap_ssi->sysconfig, omap_ssi->sys + SSI_SYSCONFIG_REG); + __raw_writel(omap_ssi->gdd_gcr, omap_ssi->gdd + SSI_GDD_GCR_REG); + + ssi_for_each_port(ssi, NULL, ssi_restore_port_ctx); +mode: + ssi_for_each_port(ssi, NULL, ssi_restore_divisor); + ssi_for_each_port(ssi, NULL, ssi_restore_port_mode); +out: + spin_unlock_bh(&omap_ssi->ck_lock); + + return err; +} + +static void ssi_clk_disable(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct device *pdev = ssi->device.parent; + + spin_lock_bh(&omap_ssi->ck_lock); + WARN_ON(omap_ssi->ck_refcount <= 0); + if (--omap_ssi->ck_refcount) + goto out; + + ssi_set_mode(ssi, SSI_MODE_SLEEP); + + if (omap_ssi->get_loss) + omap_ssi->loss_count = + (*omap_ssi->get_loss)(ssi->device.parent); + + ssi_for_each_port(ssi, NULL, ssi_save_port_ctx); + + pm_runtime_put_sync(pdev); +out: + spin_unlock_bh(&omap_ssi->ck_lock); +} + +#ifdef CONFIG_DEBUG_FS +static int ssi_debug_show(struct seq_file *m, void *p __maybe_unused) +{ + struct hsi_controller *ssi = m->private; + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *sys = omap_ssi->sys; + + ssi_clk_enable(ssi); + seq_printf(m, "REVISION\t: 0x%08x\n", + __raw_readl(sys + SSI_REVISION_REG)); + seq_printf(m, "SYSCONFIG\t: 0x%08x\n", + __raw_readl(sys + SSI_SYSCONFIG_REG)); + seq_printf(m, "SYSSTATUS\t: 0x%08x\n", + __raw_readl(sys + SSI_SYSSTATUS_REG)); + ssi_clk_disable(ssi); + + return 0; +} + +static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused) +{ + struct hsi_port *port = m->private; + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *base = omap_ssi->sys; + unsigned int ch; + + ssi_clk_enable(ssi); + if (omap_port->wake_irq > 0) + seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port)); + seq_printf(m, "WAKE\t\t: 0x%08x\n", + __raw_readl(base + SSI_WAKE_REG(port->num))); + seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0, + __raw_readl(base + SSI_MPU_ENABLE_REG(port->num, 0))); + seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0, + __raw_readl(base + SSI_MPU_STATUS_REG(port->num, 0))); + /* SST */ + base = omap_port->sst_base; + seq_printf(m, "\nSST\n===\n"); + 
seq_printf(m, "ID SST\t\t: 0x%08x\n", + __raw_readl(base + SSI_SST_ID_REG)); + seq_printf(m, "MODE\t\t: 0x%08x\n", + __raw_readl(base + SSI_SST_MODE_REG)); + seq_printf(m, "FRAMESIZE\t: 0x%08x\n", + __raw_readl(base + SSI_SST_FRAMESIZE_REG)); + seq_printf(m, "DIVISOR\t\t: 0x%08x\n", + __raw_readl(base + SSI_SST_DIVISOR_REG)); + seq_printf(m, "CHANNELS\t: 0x%08x\n", + __raw_readl(base + SSI_SST_CHANNELS_REG)); + seq_printf(m, "ARBMODE\t\t: 0x%08x\n", + __raw_readl(base + SSI_SST_ARBMODE_REG)); + seq_printf(m, "TXSTATE\t\t: 0x%08x\n", + __raw_readl(base + SSI_SST_TXSTATE_REG)); + seq_printf(m, "BUFSTATE\t: 0x%08x\n", + __raw_readl(base + SSI_SST_BUFSTATE_REG)); + seq_printf(m, "BREAK\t\t: 0x%08x\n", + __raw_readl(base + SSI_SST_BREAK_REG)); + for (ch = 0; ch < omap_port->channels; ch++) { + seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, + __raw_readl(base + SSI_SST_BUFFER_CH_REG(ch))); + } + /* SSR */ + base = omap_port->ssr_base; + seq_printf(m, "\nSSR\n===\n"); + seq_printf(m, "ID SSR\t\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_ID_REG)); + seq_printf(m, "MODE\t\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_MODE_REG)); + seq_printf(m, "FRAMESIZE\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_FRAMESIZE_REG)); + seq_printf(m, "CHANNELS\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_CHANNELS_REG)); + seq_printf(m, "TIMEOUT\t\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_TIMEOUT_REG)); + seq_printf(m, "RXSTATE\t\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_RXSTATE_REG)); + seq_printf(m, "BUFSTATE\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_BUFSTATE_REG)); + seq_printf(m, "BREAK\t\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_BREAK_REG)); + seq_printf(m, "ERROR\t\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_ERROR_REG)); + seq_printf(m, "ERRORACK\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_ERRORACK_REG)); + for (ch = 0; ch < omap_port->channels; ch++) { + seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, + __raw_readl(base + SSI_SSR_BUFFER_CH_REG(ch))); + } + ssi_clk_disable(ssi); + + return 0; +} + +static int ssi_debug_gdd_show(struct seq_file *m, void *p __maybe_unused) +{ + struct hsi_controller *ssi = m->private; + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *gdd = omap_ssi->gdd; + int lch; + + ssi_clk_enable(ssi); + seq_printf(m, "GDD_MPU_STATUS\t: 0x%08x\n", + __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG)); + seq_printf(m, "GDD_MPU_ENABLE\t: 0x%08x\n\n", + __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG)); + seq_printf(m, "HW_ID\t\t: 0x%08x\n", + __raw_readl(gdd + SSI_GDD_HW_ID_REG)); + seq_printf(m, "PPORT_ID\t: 0x%08x\n", + __raw_readl(gdd + SSI_GDD_PPORT_ID_REG)); + seq_printf(m, "MPORT_ID\t: 0x%08x\n", + __raw_readl(gdd + SSI_GDD_MPORT_ID_REG)); + seq_printf(m, "TEST\t\t: 0x%08x\n", + __raw_readl(gdd + SSI_GDD_TEST_REG)); + seq_printf(m, "GCR\t\t: 0x%08x\n", + __raw_readl(gdd + SSI_GDD_GCR_REG)); + + for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) { + seq_printf(m, "\nGDD LCH %d\n=========\n", lch); + seq_printf(m, "CSDP\t\t: 0x%04x\n", + __raw_readw(gdd + SSI_GDD_CSDP_REG(lch))); + seq_printf(m, "CCR\t\t: 0x%04x\n", + __raw_readw(gdd + SSI_GDD_CCR_REG(lch))); + seq_printf(m, "CICR\t\t: 0x%04x\n", + __raw_readw(gdd + SSI_GDD_CICR_REG(lch))); + seq_printf(m, "CSR\t\t: 0x%04x\n", + __raw_readw(gdd + SSI_GDD_CSR_REG(lch))); + seq_printf(m, "CSSA\t\t: 0x%08x\n", + __raw_readl(gdd + SSI_GDD_CSSA_REG(lch))); + seq_printf(m, "CDSA\t\t: 0x%08x\n", + __raw_readl(gdd + SSI_GDD_CDSA_REG(lch))); + seq_printf(m, "CEN\t\t: 0x%04x\n", + __raw_readw(gdd + SSI_GDD_CEN_REG(lch))); 
+ seq_printf(m, "CSAC\t\t: 0x%04x\n", + __raw_readw(gdd + SSI_GDD_CSAC_REG(lch))); + seq_printf(m, "CDAC\t\t: 0x%04x\n", + __raw_readw(gdd + SSI_GDD_CDAC_REG(lch))); + seq_printf(m, "CLNK_CTRL\t: 0x%04x\n", + __raw_readw(gdd + SSI_GDD_CLNK_CTRL_REG(lch))); + } + ssi_clk_disable(ssi); + + return 0; +} + +static int ssi_div_get(void *data, u64 *val) +{ + struct hsi_port *port = data; + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + + ssi_clk_enable(ssi); + *val = __raw_readl(omap_port->sst_base + SSI_SST_DIVISOR_REG); + ssi_clk_disable(ssi); + + return 0; +} + +static int ssi_div_set(void *data, u64 val) +{ + struct hsi_port *port = data; + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + + if (val > 127) + return -EINVAL; + + ssi_clk_enable(ssi); + __raw_writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG); + omap_port->sst.divisor = val; + ssi_clk_disable(ssi); + + return 0; +} + +static int ssi_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, ssi_debug_show, inode->i_private); +} + +static int ssi_port_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, ssi_debug_port_show, inode->i_private); +} + +static int ssi_gdd_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, ssi_debug_gdd_show, inode->i_private); +} + +static const struct file_operations ssi_regs_fops = { + .open = ssi_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations ssi_port_regs_fops = { + .open = ssi_port_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations ssi_gdd_regs_fops = { + .open = ssi_gdd_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n"); + +static int __init ssi_debug_add_port(struct omap_ssi_port *omap_port, + void *data) +{ + struct hsi_port *port = to_hsi_port(omap_port->dev); + struct dentry *dir = data; + + dir = debugfs_create_dir(dev_name(omap_port->dev), dir); + if (IS_ERR(dir)) + return PTR_ERR(dir); + debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops); + dir = debugfs_create_dir("sst", dir); + if (IS_ERR(dir)) + return PTR_ERR(dir); + debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port, + &ssi_sst_div_fops); + + return 0; +} + +static int __init ssi_debug_add_ctrl(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct dentry *dir; + int err; + + /* SSI controller */ + omap_ssi->dir = debugfs_create_dir(dev_name(&ssi->device), NULL); + if (IS_ERR(omap_ssi->dir)) + return PTR_ERR(omap_ssi->dir); + + debugfs_create_file("regs", S_IRUGO, omap_ssi->dir, ssi, + &ssi_regs_fops); + /* SSI GDD (DMA) */ + dir = debugfs_create_dir("gdd", omap_ssi->dir); + if (IS_ERR(dir)) + goto rback; + debugfs_create_file("regs", S_IRUGO, dir, ssi, &ssi_gdd_regs_fops); + /* SSI ports */ + err = ssi_for_each_port(ssi, omap_ssi->dir, ssi_debug_add_port); + if (err < 0) + goto rback; + + return 0; +rback: + debugfs_remove_recursive(omap_ssi->dir); + + return PTR_ERR(dir); +} + +static void ssi_debug_remove_ctrl(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + 
debugfs_remove_recursive(omap_ssi->dir); +} +#endif /* CONFIG_DEBUG_FS */ + +static int ssi_claim_lch(struct hsi_msg *msg) +{ + + struct hsi_port *port = hsi_get_port(msg->cl); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + int lch; + + for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) + if (!omap_ssi->gdd_trn[lch].msg) { + omap_ssi->gdd_trn[lch].msg = msg; + omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl; + return lch; + } + + return -EBUSY; +} + +static int ssi_start_pio(struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + u32 val; + + ssi_clk_enable(ssi); + if (msg->ttype == HSI_MSG_WRITE) { + val = SSI_DATAACCEPT(msg->channel); + ssi_clk_enable(ssi); /* Hold clocks for pio writes */ + } else { + val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED; + } + dev_dbg(&port->device, "Single %s transfer\n", + msg->ttype ? "write" : "read"); + val |= __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + ssi_clk_disable(ssi); + msg->actual_len = 0; + msg->status = HSI_STATUS_PROCEEDING; + + return 0; +} + +static int ssi_start_dma(struct hsi_msg *msg, int lch) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *gdd = omap_ssi->gdd; + int err; + u16 csdp; + u16 ccr; + u32 s_addr; + u32 d_addr; + u32 tmp; + + if (msg->ttype == HSI_MSG_READ) { + err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, + DMA_FROM_DEVICE); + if (err < 0) { + dev_dbg(&ssi->device, "DMA map SG failed !\n"); + return err; + } + csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT | + SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT | + SSI_DATA_TYPE_S32; + ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */ + ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST | + SSI_CCR_ENABLE; + s_addr = omap_port->ssr_dma + + SSI_SSR_BUFFER_CH_REG(msg->channel); + d_addr = sg_dma_address(msg->sgt.sgl); + } else { + err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, + DMA_TO_DEVICE); + if (err < 0) { + dev_dbg(&ssi->device, "DMA map SG failed !\n"); + return err; + } + csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT | + SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT | + SSI_DATA_TYPE_S32; + ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */ + ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST | + SSI_CCR_ENABLE; + s_addr = sg_dma_address(msg->sgt.sgl); + d_addr = omap_port->sst_dma + + SSI_SST_BUFFER_CH_REG(msg->channel); + } + dev_dbg(&ssi->device, "lch %d cdsp %08x ccr %04x s_addr %08x" + " d_addr %08x\n", lch, csdp, ccr, s_addr, d_addr); + ssi_clk_enable(ssi); /* Hold clocks during the transfer */ + __raw_writew(csdp, gdd + SSI_GDD_CSDP_REG(lch)); + __raw_writew(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch)); + __raw_writel(d_addr, gdd + SSI_GDD_CDSA_REG(lch)); + __raw_writel(s_addr, gdd + SSI_GDD_CSSA_REG(lch)); + __raw_writew(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length), + gdd + SSI_GDD_CEN_REG(lch)); + + spin_lock_bh(&omap_ssi->lock); + tmp = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + tmp |= SSI_GDD_LCH(lch); + __raw_writel(tmp, 
omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + spin_unlock_bh(&omap_ssi->lock); + __raw_writew(ccr, gdd + SSI_GDD_CCR_REG(lch)); + msg->status = HSI_STATUS_PROCEEDING; + + return 0; +} + +static int ssi_start_transfer(struct list_head *queue) +{ + struct hsi_msg *msg; + int lch = -1; + + if (list_empty(queue)) + return 0; + msg = list_first_entry(queue, struct hsi_msg, link); + if (msg->status != HSI_STATUS_QUEUED) + return 0; + if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32))) + lch = ssi_claim_lch(msg); + if (lch >= 0) + return ssi_start_dma(msg, lch); + else + return ssi_start_pio(msg); +} + +static void ssi_transfer(struct omap_ssi_port *omap_port, + struct list_head *queue) +{ + struct hsi_msg *msg; + int err = -1; + + spin_lock_bh(&omap_port->lock); + while (err < 0) { + err = ssi_start_transfer(queue); + if (err < 0) { + msg = list_first_entry(queue, struct hsi_msg, link); + msg->status = HSI_STATUS_ERROR; + msg->actual_len = 0; + list_del(&msg->link); + spin_unlock_bh(&omap_port->lock); + msg->complete(msg); + spin_lock_bh(&omap_port->lock); + } + } + spin_unlock_bh(&omap_port->lock); +} + +static u32 ssi_calculate_div(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + u32 tx_fckrate = (u32) omap_ssi->fck_rate; + + /* / 2 : SSI TX clock is always half of the SSI functional clock */ + tx_fckrate >>= 1; + /* Round down when tx_fckrate % omap_ssi->max_speed == 0 */ + tx_fckrate--; + dev_dbg(&ssi->device, "TX div %d for fck_rate %lu Khz speed %d Kb/s\n", + tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate, + omap_ssi->max_speed); + + return tx_fckrate / omap_ssi->max_speed; +} + +/* + * FIXME: Horrible HACK needed until we remove the useless wakeline test + * in the CMT. To be removed !!!! 
+ */ +void ssi_waketest(struct hsi_client *cl, unsigned int enable) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + omap_port->wktest = !!enable; + if (omap_port->wktest) { + ssi_clk_enable(ssi); + __raw_writel(SSI_WAKE(0), + omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); + } else { + __raw_writel(SSI_WAKE(0), + omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); + ssi_clk_disable(ssi); + } +} +EXPORT_SYMBOL_GPL(ssi_waketest); + +static void ssi_error(struct hsi_port *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + unsigned int i; + u32 err; + u32 val; + u32 tmp; + + /* ACK error */ + err = __raw_readl(omap_port->ssr_base + SSI_SSR_ERROR_REG); + dev_err(&port->device, "SSI error: 0x%02x\n", err); + if (!err) { + dev_dbg(&port->device, "spurious SSI error ignored!\n"); + return; + } + spin_lock(&omap_ssi->lock); + /* Cancel all GDD read transfers */ + for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) { + msg = omap_ssi->gdd_trn[i].msg; + if ((msg) && (msg->ttype == HSI_MSG_READ)) { + __raw_writew(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); + val |= (1 << i); + omap_ssi->gdd_trn[i].msg = NULL; + } + } + tmp = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + tmp &= ~val; + __raw_writel(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + spin_unlock(&omap_ssi->lock); + /* Cancel all PIO read transfers */ + spin_lock(&omap_port->lock); + tmp = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */ + __raw_writel(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + /* ACK error */ + __raw_writel(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG); + __raw_writel(SSI_ERROROCCURED, + omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); + /* Signal the error all current pending read requests */ + for (i = 0; i < omap_port->channels; i++) { + if (list_empty(&omap_port->rxqueue[i])) + continue; + msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, + link); + list_del(&msg->link); + msg->status = HSI_STATUS_ERROR; + spin_unlock(&omap_port->lock); + msg->complete(msg); + /* Now restart queued reads if any */ + ssi_transfer(omap_port, &omap_port->rxqueue[i]); + spin_lock(&omap_port->lock); + } + spin_unlock(&omap_port->lock); +} + +static void ssi_break_complete(struct hsi_port *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + struct hsi_msg *tmp; + u32 val; + + dev_dbg(&port->device, "HWBREAK received\n"); + + spin_lock(&omap_port->lock); + val = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + val &= ~SSI_BREAKDETECTED; + __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + __raw_writel(0, omap_port->ssr_base + SSI_SSR_BREAK_REG); + __raw_writel(SSI_BREAKDETECTED, + omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); + spin_unlock(&omap_port->lock); + + list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) { + msg->status = HSI_STATUS_COMPLETED; + spin_lock(&omap_port->lock); + list_del(&msg->link); + 
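		/*
		 * (editor's note: as in the other completion paths in this
		 * file, the port lock is dropped before msg->complete() is
		 * invoked, so the callback may queue follow-up transfers
		 * without re-taking the lock recursively)
		 */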
spin_unlock(&omap_port->lock); + msg->complete(msg); + } + +} + +static int ssi_async_break(struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + int err = 0; + u32 tmp; + + ssi_clk_enable(ssi); + if (msg->ttype == HSI_MSG_WRITE) { + if (omap_port->sst.mode != SSI_MODE_FRAME) { + err = -EINVAL; + goto out; + } + __raw_writel(1, omap_port->sst_base + SSI_SST_BREAK_REG); + msg->status = HSI_STATUS_COMPLETED; + msg->complete(msg); + } else { + if (omap_port->ssr.mode != SSI_MODE_FRAME) { + err = -EINVAL; + goto out; + } + spin_lock_bh(&omap_port->lock); + tmp = __raw_readl(omap_ssi->sys + + SSI_MPU_ENABLE_REG(port->num, 0)); + __raw_writel(tmp | SSI_BREAKDETECTED, + omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + msg->status = HSI_STATUS_PROCEEDING; + list_add_tail(&msg->link, &omap_port->brkqueue); + spin_unlock_bh(&omap_port->lock); + } +out: + ssi_clk_disable(ssi); + + return err; +} + +static int ssi_async(struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct list_head *queue; + int err = 0; + + BUG_ON(!msg); + + if (msg->sgt.nents > 1) + return -ENOSYS; /* TODO: Add sg support */ + + if (msg->break_frame) + return ssi_async_break(msg); + + if (msg->ttype) { + BUG_ON(msg->channel >= omap_port->sst.channels); + queue = &omap_port->txqueue[msg->channel]; + } else { + BUG_ON(msg->channel >= omap_port->ssr.channels); + queue = &omap_port->rxqueue[msg->channel]; + } + msg->status = HSI_STATUS_QUEUED; + spin_lock_bh(&omap_port->lock); + list_add_tail(&msg->link, queue); + err = ssi_start_transfer(queue); + if (err < 0) { + list_del(&msg->link); + msg->status = HSI_STATUS_ERROR; + } + spin_unlock_bh(&omap_port->lock); + dev_dbg(&port->device, "msg status %d ttype %d ch %d\n", + msg->status, msg->ttype, msg->channel); + + return err; +} + +static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl) +{ + struct list_head *node, *tmp; + struct hsi_msg *msg; + + list_for_each_safe(node, tmp, queue) { + msg = list_entry(node, struct hsi_msg, link); + if ((cl) && (cl != msg->cl)) + continue; + list_del(node); + pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n", + msg->channel, msg, msg->sgt.sgl->length, + msg->ttype, msg->context); + if (msg->destructor) + msg->destructor(msg); + else + hsi_free_msg(msg); + } +} + +static int ssi_setup(struct hsi_client *cl) +{ + struct hsi_port *port = to_hsi_port(cl->device.parent); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *sst = omap_port->sst_base; + void __iomem *ssr = omap_port->ssr_base; + u32 div; + u32 val; + int err = 0; + + ssi_clk_enable(ssi); + spin_lock_bh(&omap_port->lock); + if (cl->tx_cfg.speed) + omap_ssi->max_speed = cl->tx_cfg.speed; + div = ssi_calculate_div(ssi); + if (div > SSI_MAX_DIVISOR) { + dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n", + cl->tx_cfg.speed, div); + err = -EINVAL; + goto out; + } + /* Set TX/RX module to sleep to stop TX/RX during cfg update */ + __raw_writel(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG); + __raw_writel(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG); + /* Flush posted write */ + val = __raw_readl(ssr 
+ SSI_SSR_MODE_REG); + /* TX */ + __raw_writel(31, sst + SSI_SST_FRAMESIZE_REG); + __raw_writel(div, sst + SSI_SST_DIVISOR_REG); + __raw_writel(cl->tx_cfg.channels, sst + SSI_SST_CHANNELS_REG); + __raw_writel(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG); + __raw_writel(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG); + /* RX */ + __raw_writel(31, ssr + SSI_SSR_FRAMESIZE_REG); + __raw_writel(cl->rx_cfg.channels, ssr + SSI_SSR_CHANNELS_REG); + __raw_writel(0, ssr + SSI_SSR_TIMEOUT_REG); + /* Cleanup the break queue if we leave FRAME mode */ + if ((omap_port->ssr.mode == SSI_MODE_FRAME) && + (cl->rx_cfg.mode != SSI_MODE_FRAME)) + ssi_flush_queue(&omap_port->brkqueue, cl); + __raw_writel(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG); + omap_port->channels = max(cl->rx_cfg.channels, cl->tx_cfg.channels); + /* Shadow registering for OFF mode */ + /* SST */ + omap_port->sst.divisor = div; + omap_port->sst.frame_size = 31; + omap_port->sst.channels = cl->tx_cfg.channels; + omap_port->sst.arb_mode = cl->tx_cfg.arb_mode; + omap_port->sst.mode = cl->tx_cfg.mode; + /* SSR */ + omap_port->ssr.frame_size = 31; + omap_port->ssr.timeout = 0; + omap_port->ssr.channels = cl->rx_cfg.channels; + omap_port->ssr.mode = cl->rx_cfg.mode; +out: + spin_unlock_bh(&omap_port->lock); + ssi_clk_disable(ssi); + + return err; +} + +static void ssi_cleanup_queues(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + unsigned int i; + u32 rxbufstate = 0; + u32 txbufstate = 0; + u32 status = SSI_ERROROCCURED; + u32 tmp; + + ssi_flush_queue(&omap_port->brkqueue, cl); + if (list_empty(&omap_port->brkqueue)) + status |= SSI_BREAKDETECTED; + + for (i = 0; i < omap_port->channels; i++) { + if (list_empty(&omap_port->txqueue[i])) + continue; + msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg, + link); + if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) { + txbufstate |= (1 << i); + status |= SSI_DATAACCEPT(i); + /* Release the clocks writes, also GDD ones */ + ssi_clk_disable(ssi); + } + ssi_flush_queue(&omap_port->txqueue[i], cl); + } + for (i = 0; i < omap_port->channels; i++) { + if (list_empty(&omap_port->rxqueue[i])) + continue; + msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, + link); + if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) { + rxbufstate |= (1 << i); + status |= SSI_DATAAVAILABLE(i); + } + ssi_flush_queue(&omap_port->rxqueue[i], cl); + /* Check if we keep the error detection interrupt armed */ + if (!list_empty(&omap_port->rxqueue[i])) + status &= ~SSI_ERROROCCURED; + } + /* Cleanup write buffers */ + tmp = __raw_readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG); + tmp &= ~txbufstate; + __raw_writel(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG); + /* Cleanup read buffers */ + tmp = __raw_readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG); + tmp &= ~rxbufstate; + __raw_writel(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG); + /* Disarm and ack pending interrupts */ + tmp = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + tmp &= ~status; + __raw_writel(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + __raw_writel(status, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); +} + +static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl) +{ + struct omap_ssi_controller *omap_ssi = 
hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + unsigned int i; + u32 val = 0; + u32 tmp; + + for (i = 0; i < SSI_MAX_GDD_LCH; i++) { + msg = omap_ssi->gdd_trn[i].msg; + if ((!msg) || (msg->cl != cl)) + continue; + __raw_writew(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); + val |= (1 << i); + /* + * Clock references for write will be handled in + * ssi_cleanup_queues + */ + if (msg->ttype == HSI_MSG_READ) + ssi_clk_disable(ssi); + omap_ssi->gdd_trn[i].msg = NULL; + } + tmp = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + tmp &= ~val; + __raw_writel(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + __raw_writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG); +} + +static int ssi_release(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + spin_lock_bh(&omap_port->lock); + ssi_clk_enable(ssi); + /* Stop all the pending DMA requests for that client */ + ssi_cleanup_gdd(ssi, cl); + /* Now cleanup all the queues */ + ssi_cleanup_queues(cl); + ssi_clk_disable(ssi); + /* If it is the last client of the port, do extra checks and cleanup */ + if (port->claimed <= 1) { + /* + * Drop the clock reference for the incoming wake line + * if it is still kept high by the other side. + */ + if (omap_port->wkin_cken) { + ssi_clk_disable(ssi); + omap_port->wkin_cken = 0; + } + ssi_clk_enable(ssi); + /* Stop any SSI TX/RX without a client */ + ssi_set_mode(ssi, SSI_MODE_SLEEP); + omap_port->sst.mode = SSI_MODE_SLEEP; + omap_port->ssr.mode = SSI_MODE_SLEEP; + ssi_clk_disable(ssi); + WARN_ON(omap_port->wk_refcount != 0); + WARN_ON(omap_ssi->ck_refcount != 0); + } + spin_unlock_bh(&omap_port->lock); + + return 0; +} + +static int ssi_flush(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + void __iomem *sst = omap_port->sst_base; + void __iomem *ssr = omap_port->ssr_base; + unsigned int i; + u32 err; + + ssi_clk_enable(ssi); + spin_lock_bh(&omap_port->lock); + /* Stop all DMA transfers */ + for (i = 0; i < SSI_MAX_GDD_LCH; i++) { + msg = omap_ssi->gdd_trn[i].msg; + if (!msg || (port != hsi_get_port(msg->cl))) + continue; + __raw_writew(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); + if (msg->ttype == HSI_MSG_READ) + ssi_clk_disable(ssi); + omap_ssi->gdd_trn[i].msg = NULL; + } + /* Flush all SST buffers */ + __raw_writel(0, sst + SSI_SST_BUFSTATE_REG); + __raw_writel(0, sst + SSI_SST_TXSTATE_REG); + /* Flush all SSR buffers */ + __raw_writel(0, ssr + SSI_SSR_RXSTATE_REG); + __raw_writel(0, ssr + SSI_SSR_BUFSTATE_REG); + /* Flush all errors */ + err = __raw_readl(ssr + SSI_SSR_ERROR_REG); + __raw_writel(err, ssr + SSI_SSR_ERRORACK_REG); + /* Flush break */ + __raw_writel(0, ssr + SSI_SSR_BREAK_REG); + /* Clear interrupts */ + __raw_writel(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + __raw_writel(0xffffff00, + omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); + __raw_writel(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + __raw_writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG); + /* Dequeue all pending requests */ + for (i = 0; i < omap_port->channels; i++) { + /* Release write clocks */ + if 
(!list_empty(&omap_port->txqueue[i])) + ssi_clk_disable(ssi); + ssi_flush_queue(&omap_port->txqueue[i], NULL); + ssi_flush_queue(&omap_port->rxqueue[i], NULL); + } + ssi_flush_queue(&omap_port->brkqueue, NULL); + spin_unlock_bh(&omap_port->lock); + ssi_clk_disable(ssi); + + return 0; +} + +static int ssi_start_tx(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount); + + spin_lock_bh(&omap_port->wk_lock); + if (omap_port->wk_refcount++) { + spin_unlock_bh(&omap_port->wk_lock); + return 0; + } + ssi_clk_enable(ssi); /* Grab clocks */ + __raw_writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); + spin_unlock_bh(&omap_port->wk_lock); + + return 0; +} + +static int ssi_stop_tx(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount); + + spin_lock_bh(&omap_port->wk_lock); + BUG_ON(!omap_port->wk_refcount); + if (--omap_port->wk_refcount) { + spin_unlock_bh(&omap_port->wk_lock); + return 0; + } + __raw_writel(SSI_WAKE(0), + omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); + ssi_clk_disable(ssi); /* Release clocks */ + spin_unlock_bh(&omap_port->wk_lock); + + return 0; +} + +static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue) +{ + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_msg *msg; + u32 *buf; + u32 reg; + u32 val; + + spin_lock(&omap_port->lock); + msg = list_first_entry(queue, struct hsi_msg, link); + if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) { + msg->actual_len = 0; + msg->status = HSI_STATUS_PENDING; + } + if (msg->ttype == HSI_MSG_WRITE) + val = SSI_DATAACCEPT(msg->channel); + else + val = SSI_DATAAVAILABLE(msg->channel); + if (msg->status == HSI_STATUS_PROCEEDING) { + buf = sg_virt(msg->sgt.sgl) + msg->actual_len; + if (msg->ttype == HSI_MSG_WRITE) + __raw_writel(*buf, omap_port->sst_base + + SSI_SST_BUFFER_CH_REG(msg->channel)); + else + *buf = __raw_readl(omap_port->ssr_base + + SSI_SSR_BUFFER_CH_REG(msg->channel)); + dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel, + msg->ttype, *buf); + msg->actual_len += sizeof(*buf); + if (msg->actual_len >= msg->sgt.sgl->length) + msg->status = HSI_STATUS_COMPLETED; + /* + * Wait for the last written frame to be really sent before + * we call the complete callback + */ + if ((msg->status == HSI_STATUS_PROCEEDING) || + ((msg->status == HSI_STATUS_COMPLETED) && + (msg->ttype == HSI_MSG_WRITE))) { + __raw_writel(val, omap_ssi->sys + + SSI_MPU_STATUS_REG(port->num, 0)); + spin_unlock(&omap_port->lock); + + return; + } + + } + /* Transfer completed at this point */ + reg = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + if (msg->ttype == HSI_MSG_WRITE) + ssi_clk_disable(ssi); /* Release clocks for write transfer */ + reg &= ~val; + __raw_writel(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + __raw_writel(val, omap_ssi->sys + 
SSI_MPU_STATUS_REG(port->num, 0)); + list_del(&msg->link); + spin_unlock(&omap_port->lock); + msg->complete(msg); + ssi_transfer(omap_port, queue); +} + +static void ssi_gdd_complete(struct hsi_controller *ssi, unsigned int lch) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg = omap_ssi->gdd_trn[lch].msg; + struct hsi_port *port = to_hsi_port(msg->cl->device.parent); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + unsigned int dir; + u32 csr; + u32 val; + + spin_lock(&omap_ssi->lock); + + val = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + val &= ~SSI_GDD_LCH(lch); + __raw_writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + + if (msg->ttype == HSI_MSG_READ) { + dir = DMA_FROM_DEVICE; + val = SSI_DATAAVAILABLE(msg->channel); + ssi_clk_disable(ssi); + } else { + dir = DMA_TO_DEVICE; + val = SSI_DATAACCEPT(msg->channel); + /* Keep clocks reference for write pio event */ + } + dma_unmap_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, dir); + csr = __raw_readw(omap_ssi->gdd + SSI_GDD_CSR_REG(lch)); + omap_ssi->gdd_trn[lch].msg = NULL; /* release GDD lch */ + dev_dbg(&port->device, "DMA completed ch %d ttype %d\n", + msg->channel, msg->ttype); + spin_unlock(&omap_ssi->lock); + if (csr & SSI_CSR_TOUR) { /* Timeout error */ + msg->status = HSI_STATUS_ERROR; + msg->actual_len = 0; + spin_lock(&omap_port->lock); + list_del(&msg->link); /* Dequeue msg */ + spin_unlock(&omap_port->lock); + msg->complete(msg); + return; + } + spin_lock(&omap_port->lock); + val |= __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + spin_unlock(&omap_port->lock); + + msg->status = HSI_STATUS_COMPLETED; + msg->actual_len = sg_dma_len(msg->sgt.sgl); +} + +static void ssi_gdd_tasklet(unsigned long dev) +{ + struct hsi_controller *ssi = (struct hsi_controller *)dev; + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *sys = omap_ssi->sys; + unsigned int lch; + u32 status_reg; + + ssi_clk_enable(ssi); + + status_reg = __raw_readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG); + for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) { + if (status_reg & SSI_GDD_LCH(lch)) + ssi_gdd_complete(ssi, lch); + } + __raw_writel(status_reg, sys + SSI_GDD_MPU_IRQ_STATUS_REG); + status_reg = __raw_readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG); + ssi_clk_disable(ssi); + if (status_reg) + tasklet_hi_schedule(&omap_ssi->gdd_tasklet); + else + enable_irq(omap_ssi->gdd_irq); + +} + +static irqreturn_t ssi_gdd_isr(int irq, void *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + tasklet_hi_schedule(&omap_ssi->gdd_tasklet); + disable_irq_nosync(irq); + + return IRQ_HANDLED; +} + +static void ssi_pio_tasklet(unsigned long ssi_port) +{ + struct hsi_port *port = (struct hsi_port *)ssi_port; + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *sys = omap_ssi->sys; + unsigned int ch; + u32 status_reg; + + ssi_clk_enable(ssi); + status_reg = __raw_readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); + status_reg &= __raw_readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); + + for (ch = 0; ch < omap_port->channels; ch++) { + if (status_reg & SSI_DATAACCEPT(ch)) + ssi_pio_complete(port, &omap_port->txqueue[ch]); + if (status_reg & SSI_DATAAVAILABLE(ch)) + ssi_pio_complete(port, &omap_port->rxqueue[ch]); + } 
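+	/* Break and error conditions share the port's MPU status register */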
+	if (status_reg & SSI_BREAKDETECTED)
+		ssi_break_complete(port);
+	if (status_reg & SSI_ERROROCCURED)
+		ssi_error(port);
+
+	status_reg = __raw_readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
+	status_reg &= __raw_readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
+	ssi_clk_disable(ssi);
+
+	if (status_reg)
+		tasklet_hi_schedule(&omap_port->pio_tasklet);
+	else
+		enable_irq(omap_port->irq);
+}
+
+static irqreturn_t ssi_pio_isr(int irq, void *port)
+{
+	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+
+	tasklet_hi_schedule(&omap_port->pio_tasklet);
+	disable_irq_nosync(irq);
+
+	return IRQ_HANDLED;
+}
+
+static void ssi_wake_tasklet(unsigned long ssi_port)
+{
+	struct hsi_port *port = (struct hsi_port *)ssi_port;
+	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+	if (ssi_wakein(port)) {
+		/*
+		 * We can have a quick high-low-high transition on the line.
+		 * In such a case, if interrupt latencies are long, we can
+		 * miss the low event or see the high event twice. This
+		 * workaround avoids breaking the clock reference count
+		 * when such a situation occurs.
+		 */
+		spin_lock(&omap_port->lock);
+		if (!omap_port->wkin_cken) {
+			omap_port->wkin_cken = 1;
+			ssi_clk_enable(ssi);
+		}
+		spin_unlock(&omap_port->lock);
+		dev_dbg(&ssi->device, "Wake in high\n");
+		if (omap_port->wktest) { /* FIXME: HACK! To be removed */
+			__raw_writel(SSI_WAKE(0),
+				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
+		}
+		hsi_event(port, HSI_EVENT_START_RX);
+	} else {
+		dev_dbg(&ssi->device, "Wake in low\n");
+		if (omap_port->wktest) { /* FIXME: HACK! To be removed */
+			__raw_writel(SSI_WAKE(0),
+				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
+		}
+		hsi_event(port, HSI_EVENT_STOP_RX);
+		spin_lock(&omap_port->lock);
+		if (omap_port->wkin_cken) {
+			ssi_clk_disable(ssi);
+			omap_port->wkin_cken = 0;
+		}
+		spin_unlock(&omap_port->lock);
+	}
+}
+
+static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port)
+{
+	struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port);
+
+	tasklet_hi_schedule(&omap_port->wake_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+static int __init ssi_port_irq(struct hsi_port *port,
+						struct platform_device *pd)
+{
+	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+	struct resource *irq;
+	int err;
+	char irq_name[25];
+
+	sprintf(irq_name, "ssi_p%d_mpu_irq0", port->num + 1);
+	irq = platform_get_resource_byname(pd, IORESOURCE_IRQ, irq_name);
+	if (!irq) {
+		dev_err(&port->device, "Port IRQ resource missing\n");
+		return -ENXIO;
+	}
+	omap_port->irq = irq->start;
+	tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet,
+							(unsigned long)port);
+	err = devm_request_irq(&pd->dev, omap_port->irq, ssi_pio_isr,
+						IRQF_DISABLED, irq->name, port);
+	if (err < 0)
+		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
+							omap_port->irq, err);
+	return err;
+}
+
+static int __init ssi_wake_irq(struct hsi_port *port,
+						struct platform_device *pd)
+{
+	struct omap_ssi_platform_data *omap_ssi_pdata = pd->dev.platform_data;
+	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+	char irq_name[23];
+	int cawake_gpio;
+	int cawake_irq;
+	int err;
+
+	if (port->num >= omap_ssi_pdata->num_ports) {
+		dev_err(&port->device, "Wake in IRQ resource missing\n");
+		return -ENXIO;
+	}
+
+	sprintf(irq_name, "ssi_p%d_cawake", port->num + 1);
+
+	cawake_gpio = omap_ssi_pdata->cawake_gpio[port->num];
+
+	if (cawake_gpio == -1) {
+
omap_port->wake_gpio = -1; + omap_port->wake_irq = -1; + return 0; + } + + cawake_irq = gpio_to_irq(cawake_gpio); + + omap_port->wake_gpio = cawake_gpio; + omap_port->wake_irq = cawake_irq; + tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet, + (unsigned long)port); + err = devm_request_irq(&pd->dev, cawake_irq, ssi_wake_isr, + IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + irq_name, port); + if (err < 0) + dev_err(&port->device, "Request Wake in IRQ %d failed %d\n", + cawake_irq, err); + err = enable_irq_wake(cawake_irq); + if (err < 0) + dev_err(&port->device, "Enable wake on the wakeline in irq %d" + " failed %d\n", cawake_irq, err); + + return err; +} + +static void __init ssi_queues_init(struct omap_ssi_port *omap_port) +{ + unsigned int ch; + + for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) { + INIT_LIST_HEAD(&omap_port->txqueue[ch]); + INIT_LIST_HEAD(&omap_port->rxqueue[ch]); + } + INIT_LIST_HEAD(&omap_port->brkqueue); +} + +static int __init ssi_get_iomem(struct platform_device *pd, + const char *name, void __iomem **pbase, dma_addr_t *phy) +{ + struct resource *mem; + struct resource *ioarea; + void __iomem *base; + + mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name); + if (!mem) { + dev_err(&pd->dev, "IO memory region missing (%s)\n", name); + return -ENXIO; + } + ioarea = devm_request_mem_region(&pd->dev, mem->start, + resource_size(mem), dev_name(&pd->dev)); + if (!ioarea) { + dev_err(&pd->dev, "%s IO memory region request failed\n", + mem->name); + return -ENXIO; + } + base = devm_ioremap(&pd->dev, mem->start, resource_size(mem)); + if (!base) { + dev_err(&pd->dev, "%s IO remap failed\n", mem->name); + return -ENXIO; + } + *pbase = base; + + if (phy) + *phy = mem->start; + + return 0; +} + +static int __init ssi_ports_init(struct hsi_controller *ssi, + struct platform_device *pd) +{ + struct hsi_port *port; + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct omap_ssi_port *omap_port; + unsigned int i; + int err; + char mem_sst_name[16]; + char mem_ssr_name[16]; + + omap_ssi->port = devm_kzalloc(&pd->dev, + sizeof(omap_port) * ssi->num_ports, GFP_KERNEL); + if (!omap_ssi->port) + return -ENOMEM; + + for (i = 0; i < ssi->num_ports; i++) { + port = ssi->port[i]; + omap_port = devm_kzalloc(&pd->dev, sizeof(*omap_port), + GFP_KERNEL); + if (!omap_port) + return -ENOMEM; + port->async = ssi_async; + port->setup = ssi_setup; + port->flush = ssi_flush; + port->start_tx = ssi_start_tx; + port->stop_tx = ssi_stop_tx; + port->release = ssi_release; + hsi_port_set_drvdata(port, omap_port); + + sprintf(mem_sst_name, "p%d_sst", i+1); + sprintf(mem_ssr_name, "p%d_ssr", i+1); + + /* Get SST base addresses*/ + err = ssi_get_iomem(pd, mem_sst_name, &omap_port->sst_base, + &omap_port->sst_dma); + if (err < 0) + return err; + /* Get SSR base addresses */ + err = ssi_get_iomem(pd, mem_ssr_name, &omap_port->ssr_base, + &omap_port->ssr_dma); + if (err < 0) + return err; + err = ssi_port_irq(port, pd); + if (err < 0) + return err; + err = ssi_wake_irq(port, pd); + if (err < 0) + return err; + ssi_queues_init(omap_port); + spin_lock_init(&omap_port->lock); + spin_lock_init(&omap_port->wk_lock); + omap_port->dev = &port->device; + omap_ssi->port[i] = omap_port; + } + + return 0; +} + +static void ssi_ports_exit(struct hsi_controller *ssi) +{ + struct omap_ssi_port *omap_port; + unsigned int i; + + for (i = 0; i < ssi->num_ports; i++) { + omap_port = hsi_port_drvdata(ssi->port[i]); + tasklet_kill(&omap_port->wake_tasklet); + 
tasklet_kill(&omap_port->pio_tasklet);
+	}
+}
+
+static unsigned long ssi_get_clk_rate(struct hsi_controller *ssi)
+{
+	struct device *pdev = ssi->device.parent;
+	struct clk *clk;
+	unsigned long rate;
+
+	clk = clk_get(pdev, "ssi_ssr_fck");
+	if (IS_ERR(clk)) {
+		dev_err(pdev, "clock get ssi_ssr_fck failed %li\n", PTR_ERR(clk));
+		return 0;
+	}
+
+	rate = clk_get_rate(clk);
+
+	clk_put(clk);
+
+	return rate;
+}
+
+static int __init ssi_add_controller(struct hsi_controller *ssi,
+						struct platform_device *pd)
+{
+	struct omap_ssi_platform_data *omap_ssi_pdata = pd->dev.platform_data;
+	struct omap_ssi_controller *omap_ssi;
+	struct resource *irq;
+	int err;
+
+	omap_ssi = devm_kzalloc(&pd->dev, sizeof(*omap_ssi), GFP_KERNEL);
+	if (!omap_ssi) {
+		dev_err(&pd->dev, "not enough memory for omap ssi\n");
+		return -ENOMEM;
+	}
+	ssi->id = pd->id;
+	ssi->owner = THIS_MODULE;
+	ssi->device.parent = &pd->dev;
+	dev_set_name(&ssi->device, "ssi%d", ssi->id);
+	hsi_controller_set_drvdata(ssi, omap_ssi);
+	omap_ssi->dev = &ssi->device;
+	err = ssi_get_iomem(pd, "sys", &omap_ssi->sys, NULL);
+	if (err < 0)
+		return err;
+	err = ssi_get_iomem(pd, "gdd", &omap_ssi->gdd, NULL);
+	if (err < 0)
+		return err;
+	irq = platform_get_resource_byname(pd, IORESOURCE_IRQ, "ssi_gdd_mpu");
+	if (!irq) {
+		dev_err(&pd->dev, "GDD IRQ resource missing\n");
+		return -ENXIO;
+	}
+	omap_ssi->gdd_irq = irq->start;
+	tasklet_init(&omap_ssi->gdd_tasklet, ssi_gdd_tasklet,
+							(unsigned long)ssi);
+	err = devm_request_irq(&pd->dev, omap_ssi->gdd_irq, ssi_gdd_isr,
+						IRQF_DISABLED, irq->name, ssi);
+	if (err < 0) {
+		dev_err(&ssi->device, "Request GDD IRQ %d failed (%d)\n",
+							omap_ssi->gdd_irq, err);
+		return err;
+	}
+	err = ssi_ports_init(ssi, pd);
+	if (err < 0)
+		return err;
+	omap_ssi->get_loss = omap_ssi_pdata->get_dev_context_loss_count;
+	omap_ssi->max_speed = UINT_MAX;
+	spin_lock_init(&omap_ssi->lock);
+	spin_lock_init(&omap_ssi->ck_lock);
+	err = hsi_register_controller(ssi);
+
+	return err;
+}
+
+static int __init ssi_hw_init(struct hsi_controller *ssi)
+{
+	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+	unsigned int i;
+	u32 val;
+	int err;
+
+	err = ssi_clk_enable(ssi);
+	if (err < 0) {
+		dev_err(&ssi->device, "Failed to enable the clocks %d\n", err);
+		return err;
+	}
+	/* Resetting SSI controller */
+	__raw_writel(SSI_SOFTRESET, omap_ssi->sys + SSI_SYSCONFIG_REG);
+	val = __raw_readl(omap_ssi->sys + SSI_SYSSTATUS_REG);
+	for (i = 0; ((i < 20) && !(val & SSI_RESETDONE)); i++) {
+		msleep(20);
+		val = __raw_readl(omap_ssi->sys + SSI_SYSSTATUS_REG);
+	}
+	if (!(val & SSI_RESETDONE)) {
+		dev_err(&ssi->device, "SSI HW reset failed\n");
+		ssi_clk_disable(ssi);
+		return -EIO;
+	}
+	/* Resetting GDD */
+	__raw_writel(SSI_SWRESET, omap_ssi->gdd + SSI_GDD_GRST_REG);
+	/* Get FCK rate */
+	omap_ssi->fck_rate = ssi_get_clk_rate(ssi) / 1000; /* kHz */
+	dev_dbg(&ssi->device, "SSI fck rate %lu kHz\n", omap_ssi->fck_rate);
+	/* Set default PM settings */
+	val = SSI_AUTOIDLE | SSI_SIDLEMODE_SMART | SSI_MIDLEMODE_SMART;
+	__raw_writel(val, omap_ssi->sys + SSI_SYSCONFIG_REG);
+	omap_ssi->sysconfig = val;
+	__raw_writel(SSI_CLK_AUTOGATING_ON, omap_ssi->sys + SSI_GDD_GCR_REG);
+	omap_ssi->gdd_gcr = SSI_CLK_AUTOGATING_ON;
+	ssi_clk_disable(ssi);
+
+	return 0;
+}
+
+static void ssi_remove_controller(struct hsi_controller *ssi)
+{
+	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+	ssi_ports_exit(ssi);
+	tasklet_kill(&omap_ssi->gdd_tasklet);
+	hsi_unregister_controller(ssi);
+}
+
+static int __init
ssi_probe(struct platform_device *pd) +{ + struct omap_ssi_platform_data *omap_ssi_pdata = pd->dev.platform_data; + struct hsi_controller *ssi; + int err; + + if (!omap_ssi_pdata) { + dev_err(&pd->dev, "No OMAP SSI platform data\n"); + return -EINVAL; + } + ssi = hsi_alloc_controller(omap_ssi_pdata->num_ports, GFP_KERNEL); + if (!ssi) { + dev_err(&pd->dev, "No memory for controller\n"); + return -ENOMEM; + } + platform_set_drvdata(pd, ssi); + pm_runtime_enable(&pd->dev); + err = ssi_add_controller(ssi, pd); + if (err < 0) + goto out1; + err = ssi_hw_init(ssi); + if (err < 0) + goto out2; +#ifdef CONFIG_DEBUG_FS + err = ssi_debug_add_ctrl(ssi); + if (err < 0) + goto out2; +#endif + return err; +out2: + ssi_remove_controller(ssi); +out1: + platform_set_drvdata(pd, NULL); + + pm_runtime_disable(&pd->dev); + + return err; +} + +static int __exit ssi_remove(struct platform_device *pd) +{ + struct hsi_controller *ssi = platform_get_drvdata(pd); + +#ifdef CONFIG_DEBUG_FS + ssi_debug_remove_ctrl(ssi); +#endif + ssi_remove_controller(ssi); + platform_set_drvdata(pd, NULL); + + pm_runtime_disable(&pd->dev); + + return 0; +} + +static struct platform_driver ssi_pdriver = { + .remove = __exit_p(ssi_remove), + .driver = { + .name = "omap_ssi", + .owner = THIS_MODULE, + }, +}; + +module_platform_driver_probe(ssi_pdriver, ssi_probe); + +MODULE_ALIAS("platform:omap_ssi"); +MODULE_AUTHOR("Carlos Chinea "); +MODULE_DESCRIPTION("Synchronous Serial Interface Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/hsi/ssip_slave.h b/include/linux/hsi/ssip_slave.h new file mode 100644 index 000000000000..30e61bfb4cda --- /dev/null +++ b/include/linux/hsi/ssip_slave.h @@ -0,0 +1,38 @@ +/* + * ssip_slave.h + * + * SSIP slave support header file + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_SSIP_SLAVE_H__ +#define __LINUX_SSIP_SLAVE_H__ + +#include + +static inline void ssip_slave_put_master(struct hsi_client *master) +{ +} + +struct hsi_client *ssip_slave_get_master(struct hsi_client *slave); +int ssip_slave_start_tx(struct hsi_client *master); +int ssip_slave_stop_tx(struct hsi_client *master); + +#endif /* __LINUX_SSIP_SLAVE_H__ */ diff --git a/include/linux/platform_data/hsi-omap-ssi.h b/include/linux/platform_data/hsi-omap-ssi.h new file mode 100644 index 000000000000..7c3801af45b7 --- /dev/null +++ b/include/linux/platform_data/hsi-omap-ssi.h @@ -0,0 +1,202 @@ +/* Hardware definitions for SSI. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __OMAP_SSI_REGS_H__ +#define __OMAP_SSI_REGS_H__ + +#define SSI_NUM_PORTS 1 +/* + * SSI SYS registers + */ +#define SSI_REVISION_REG 0 +# define SSI_REV_MAJOR 0xf0 +# define SSI_REV_MINOR 0xf +#define SSI_SYSCONFIG_REG 0x10 +# define SSI_AUTOIDLE (1 << 0) +# define SSI_SOFTRESET (1 << 1) +# define SSI_SIDLEMODE_FORCE 0 +# define SSI_SIDLEMODE_NO (1 << 3) +# define SSI_SIDLEMODE_SMART (1 << 4) +# define SSI_SIDLEMODE_MASK 0x18 +# define SSI_MIDLEMODE_FORCE 0 +# define SSI_MIDLEMODE_NO (1 << 12) +# define SSI_MIDLEMODE_SMART (1 << 13) +# define SSI_MIDLEMODE_MASK 0x3000 +#define SSI_SYSSTATUS_REG 0x14 +# define SSI_RESETDONE 1 +#define SSI_MPU_STATUS_REG(port, irq) (0x808 + ((port) * 0x10) + ((irq) * 2)) +#define SSI_MPU_ENABLE_REG(port, irq) (0x80c + ((port) * 0x10) + ((irq) * 8)) +# define SSI_DATAACCEPT(channel) (1 << (channel)) +# define SSI_DATAAVAILABLE(channel) (1 << ((channel) + 8)) +# define SSI_DATAOVERRUN(channel) (1 << ((channel) + 16)) +# define SSI_ERROROCCURED (1 << 24) +# define SSI_BREAKDETECTED (1 << 25) +#define SSI_GDD_MPU_IRQ_STATUS_REG 0x0800 +#define SSI_GDD_MPU_IRQ_ENABLE_REG 0x0804 +# define SSI_GDD_LCH(channel) (1 << (channel)) +#define SSI_WAKE_REG(port) (0xc00 + ((port) * 0x10)) +#define SSI_CLEAR_WAKE_REG(port) (0xc04 + ((port) * 0x10)) +#define SSI_SET_WAKE_REG(port) (0xc08 + ((port) * 0x10)) +# define SSI_WAKE(channel) (1 << (channel)) +# define SSI_WAKE_MASK 0xff + +/* + * SSI SST registers + */ +#define SSI_SST_ID_REG 0 +#define SSI_SST_MODE_REG 4 +# define SSI_MODE_VAL_MASK 3 +# define SSI_MODE_SLEEP 0 +# define SSI_MODE_STREAM 1 +# define SSI_MODE_FRAME 2 +# define SSI_MODE_MULTIPOINTS 3 +#define SSI_SST_FRAMESIZE_REG 8 +# define SSI_FRAMESIZE_DEFAULT 31 +#define SSI_SST_TXSTATE_REG 0xc +# define SSI_TXSTATE_IDLE 0 +#define SSI_SST_BUFSTATE_REG 0x10 +# define SSI_FULL(channel) (1 << (channel)) +#define SSI_SST_DIVISOR_REG 0x18 +# define SSI_MAX_DIVISOR 127 +#define SSI_SST_BREAK_REG 0x20 +#define SSI_SST_CHANNELS_REG 0x24 +# define SSI_CHANNELS_DEFAULT 4 +#define SSI_SST_ARBMODE_REG 0x28 +# define SSI_ARBMODE_ROUNDROBIN 0 +# define SSI_ARBMODE_PRIORITY 1 +#define SSI_SST_BUFFER_CH_REG(channel) (0x80 + ((channel) * 4)) +#define SSI_SST_SWAPBUF_CH_REG(channel) (0xc0 + ((channel) * 4)) + +/* + * SSI SSR registers + */ +#define SSI_SSR_ID_REG 0 +#define SSI_SSR_MODE_REG 4 +#define SSI_SSR_FRAMESIZE_REG 8 +#define SSI_SSR_RXSTATE_REG 0xc +#define SSI_SSR_BUFSTATE_REG 0x10 +# define SSI_NOTEMPTY(channel) (1 << (channel)) +#define SSI_SSR_BREAK_REG 0x1c +#define SSI_SSR_ERROR_REG 0x20 +#define SSI_SSR_ERRORACK_REG 0x24 +#define SSI_SSR_OVERRUN_REG 0x2c +#define SSI_SSR_OVERRUNACK_REG 0x30 +#define SSI_SSR_TIMEOUT_REG 0x34 +# define SSI_TIMEOUT_DEFAULT 0 +#define SSI_SSR_CHANNELS_REG 0x28 +#define SSI_SSR_BUFFER_CH_REG(channel) (0x80 + ((channel) * 4)) +#define SSI_SSR_SWAPBUF_CH_REG(channel) (0xc0 + ((channel) * 4)) + +/* + * SSI GDD registers + */ +#define SSI_GDD_HW_ID_REG 0 +#define SSI_GDD_PPORT_ID_REG 0x10 +#define SSI_GDD_MPORT_ID_REG 0x14 +#define SSI_GDD_PPORT_SR_REG 0x20 
+#define SSI_GDD_MPORT_SR_REG 0x24
+# define SSI_ACTIVE_LCH_NUM_MASK 0xff
+#define SSI_GDD_TEST_REG 0x40
+# define SSI_TEST 1
+#define SSI_GDD_GCR_REG 0x100
+# define SSI_CLK_AUTOGATING_ON (1 << 3)
+# define SSI_FREE (1 << 2)
+# define SSI_SWITCH_OFF (1 << 0)
+#define SSI_GDD_GRST_REG 0x200
+# define SSI_SWRESET 1
+#define SSI_GDD_CSDP_REG(channel) (0x800 + ((channel) * 0x40))
+# define SSI_DST_BURST_EN_MASK 0xc000
+# define SSI_DST_SINGLE_ACCESS0 0
+# define SSI_DST_SINGLE_ACCESS (1 << 14)
+# define SSI_DST_BURST_4x32_BIT (2 << 14)
+# define SSI_DST_BURST_8x32_BIT (3 << 14)
+# define SSI_DST_MASK 0x1e00
+# define SSI_DST_MEMORY_PORT (8 << 9)
+# define SSI_DST_PERIPHERAL_PORT (9 << 9)
+# define SSI_SRC_BURST_EN_MASK 0x180
+# define SSI_SRC_SINGLE_ACCESS0 0
+# define SSI_SRC_SINGLE_ACCESS (1 << 7)
+# define SSI_SRC_BURST_4x32_BIT (2 << 7)
+# define SSI_SRC_BURST_8x32_BIT (3 << 7)
+# define SSI_SRC_MASK 0x3c
+# define SSI_SRC_MEMORY_PORT (8 << 2)
+# define SSI_SRC_PERIPHERAL_PORT (9 << 2)
+# define SSI_DATA_TYPE_MASK 3
+# define SSI_DATA_TYPE_S32 2
+#define SSI_GDD_CCR_REG(channel) (0x802 + ((channel) * 0x40))
+# define SSI_DST_AMODE_MASK (3 << 14)
+# define SSI_DST_AMODE_CONST 0
+# define SSI_DST_AMODE_POSTINC (1 << 12)
+# define SSI_SRC_AMODE_MASK (3 << 12)
+# define SSI_SRC_AMODE_CONST 0
+# define SSI_SRC_AMODE_POSTINC (1 << 12)
+# define SSI_CCR_ENABLE (1 << 7)
+# define SSI_CCR_SYNC_MASK 0x1f
+#define SSI_GDD_CICR_REG(channel) (0x804 + ((channel) * 0x40))
+# define SSI_BLOCK_IE (1 << 5)
+# define SSI_HALF_IE (1 << 2)
+# define SSI_TOUT_IE (1 << 0)
+#define SSI_GDD_CSR_REG(channel) (0x806 + ((channel) * 0x40))
+# define SSI_CSR_SYNC (1 << 6)
+# define SSI_CSR_BLOCK (1 << 5)
+# define SSI_CSR_HALF (1 << 2)
+# define SSI_CSR_TOUR (1 << 0)
+#define SSI_GDD_CSSA_REG(channel) (0x808 + ((channel) * 0x40))
+#define SSI_GDD_CDSA_REG(channel) (0x80c + ((channel) * 0x40))
+#define SSI_GDD_CEN_REG(channel) (0x810 + ((channel) * 0x40))
+#define SSI_GDD_CSAC_REG(channel) (0x818 + ((channel) * 0x40))
+#define SSI_GDD_CDAC_REG(channel) (0x81a + ((channel) * 0x40))
+#define SSI_GDD_CLNK_CTRL_REG(channel) (0x828 + ((channel) * 0x40))
+# define SSI_ENABLE_LNK (1 << 15)
+# define SSI_STOP_LNK (1 << 14)
+# define SSI_NEXT_CH_ID_MASK 0xf
+
+/**
+ * struct omap_ssi_platform_data - OMAP SSI platform data
+ * @num_ports: Number of ports on the controller
+ * @cawake_gpio: CAWAKE GPIO line for each port (-1 if unused)
+ * @get_dev_context_loss_count: Pointer to omap_pm_get_dev_context_loss_count
+ */
+struct omap_ssi_platform_data {
+	unsigned int num_ports;
+	int cawake_gpio[SSI_NUM_PORTS];
+	int (*get_dev_context_loss_count)(struct device *dev);
+};
+
+/**
+ * struct omap_ssi_board_config - SSI board configuration
+ * @num_ports: Number of ports in use
+ * @cawake_gpio: Array of CAWAKE GPIO lines, one per port
+ */
+struct omap_ssi_board_config {
+	unsigned int num_ports;
+	int cawake_gpio[SSI_NUM_PORTS];
+};
+
+#ifdef CONFIG_OMAP_SSI_CONFIG
+extern int omap_ssi_config(struct omap_ssi_board_config *ssi_config);
+#else
+static inline int omap_ssi_config(struct omap_ssi_board_config *ssi_config)
+{
+	return 0;
+}
+#endif /* CONFIG_OMAP_SSI_CONFIG */
+
+#endif /* __OMAP_SSI_REGS_H__ */
-- 
cgit v1.2.3