From fcf2d8978cd538a5d614076fccfe9a4af23b9cc9 Mon Sep 17 00:00:00 2001
From: Linus Walleij
Date: Sun, 10 Feb 2019 14:45:47 +0100
Subject: ARM: ixp4xx: Move NPE and QMGR to drivers/soc

The Network Processing Engine and Queue Manager are versatile firmware
components used by several IXP4xx drivers. Drivers rely on getting access
to these components using headers, which does not work with multiplatform
builds. We need to find a better place for the drivers to live.

Let's first move them to drivers/soc and then start to refactor a bit by
passing resources and moving headers. This patch introduces static IRQ
assignments, but that will be fixed by later patches in this series.

Signed-off-by: Linus Walleij
---
 MAINTAINERS                        |   4 +-
 arch/arm/mach-ixp4xx/Kconfig       |  13 -
 arch/arm/mach-ixp4xx/Makefile      |   2 -
 arch/arm/mach-ixp4xx/ixp4xx_npe.c  | 747 ------------------------------------
 arch/arm/mach-ixp4xx/ixp4xx_qmgr.c | 379 -------------------
 drivers/soc/Kconfig                |   1 +
 drivers/soc/Makefile               |   1 +
 drivers/soc/ixp4xx/Kconfig         |  16 +
 drivers/soc/ixp4xx/Makefile        |   2 +
 drivers/soc/ixp4xx/ixp4xx-npe.c    | 747 +++++++++++++++++++++++++++++++++++++
 drivers/soc/ixp4xx/ixp4xx-qmgr.c   | 382 +++++++++++++++++++
 11 files changed, 1151 insertions(+), 1143 deletions(-)
 delete mode 100644 arch/arm/mach-ixp4xx/ixp4xx_npe.c
 delete mode 100644 arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
 create mode 100644 drivers/soc/ixp4xx/Kconfig
 create mode 100644 drivers/soc/ixp4xx/Makefile
 create mode 100644 drivers/soc/ixp4xx/ixp4xx-npe.c
 create mode 100644 drivers/soc/ixp4xx/ixp4xx-qmgr.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 49052de0567b..dbbd7594a9b8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7888,8 +7888,8 @@ M: Krzysztof Halasa
 S: Maintained
 F: arch/arm/mach-ixp4xx/include/mach/qmgr.h
 F: arch/arm/mach-ixp4xx/include/mach/npe.h
-F: arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
-F: arch/arm/mach-ixp4xx/ixp4xx_npe.c
+F: drivers/soc/ixp4xx/ixp4xx-qmgr.c
+F: drivers/soc/ixp4xx/ixp4xx-npe.c
 F: drivers/net/ethernet/xscale/ixp4xx_eth.c
 F: drivers/net/wan/ixp4xx_hss.c
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 0973270f4863..83afb80d38a8 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -236,19 +236,6 @@ config IXP4XX_INDIRECT_PCI
 	  need to use the indirect method instead. If you don't know what you
 	  need, leave this option unselected.
 
-config IXP4XX_QMGR
-	tristate "IXP4xx Queue Manager support"
-	help
-	  This driver supports IXP4xx built-in hardware queue manager
-	  and is automatically selected by Ethernet and HSS drivers.
-
-config IXP4XX_NPE
-	tristate "IXP4xx Network Processor Engine support"
-	select FW_LOADER
-	help
-	  This driver supports IXP4xx built-in network coprocessors
-	  and is automatically selected by Ethernet and HSS drivers.
- endmenu endif diff --git a/arch/arm/mach-ixp4xx/Makefile b/arch/arm/mach-ixp4xx/Makefile index 5f63b3012826..1fa4e6605782 100644 --- a/arch/arm/mach-ixp4xx/Makefile +++ b/arch/arm/mach-ixp4xx/Makefile @@ -43,5 +43,3 @@ obj-$(CONFIG_MACH_GORAMO_MLR) += goramo_mlr.o obj-$(CONFIG_MACH_ARCOM_VULCAN) += vulcan-setup.o obj-$(CONFIG_PCI) += $(obj-pci-$(CONFIG_PCI)) common-pci.o -obj-$(CONFIG_IXP4XX_QMGR) += ixp4xx_qmgr.o -obj-$(CONFIG_IXP4XX_NPE) += ixp4xx_npe.o diff --git a/arch/arm/mach-ixp4xx/ixp4xx_npe.c b/arch/arm/mach-ixp4xx/ixp4xx_npe.c deleted file mode 100644 index e0ce22cd9bfc..000000000000 --- a/arch/arm/mach-ixp4xx/ixp4xx_npe.c +++ /dev/null @@ -1,747 +0,0 @@ -/* - * Intel IXP4xx Network Processor Engine driver for Linux - * - * Copyright (C) 2007 Krzysztof Halasa - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License - * as published by the Free Software Foundation. - * - * The code is based on publicly available information: - * - Intel IXP4xx Developer's Manual and other e-papers - * - Intel IXP400 Access Library Software (BSD license) - * - previous works by Christian Hohnstaedt - * Thanks, Christian. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#define DEBUG_MSG 0 -#define DEBUG_FW 0 - -#define NPE_COUNT 3 -#define MAX_RETRIES 1000 /* microseconds */ -#define NPE_42X_DATA_SIZE 0x800 /* in dwords */ -#define NPE_46X_DATA_SIZE 0x1000 -#define NPE_A_42X_INSTR_SIZE 0x1000 -#define NPE_B_AND_C_42X_INSTR_SIZE 0x800 -#define NPE_46X_INSTR_SIZE 0x1000 -#define REGS_SIZE 0x1000 - -#define NPE_PHYS_REG 32 - -#define FW_MAGIC 0xFEEDF00D -#define FW_BLOCK_TYPE_INSTR 0x0 -#define FW_BLOCK_TYPE_DATA 0x1 -#define FW_BLOCK_TYPE_EOF 0xF - -/* NPE exec status (read) and command (write) */ -#define CMD_NPE_STEP 0x01 -#define CMD_NPE_START 0x02 -#define CMD_NPE_STOP 0x03 -#define CMD_NPE_CLR_PIPE 0x04 -#define CMD_CLR_PROFILE_CNT 0x0C -#define CMD_RD_INS_MEM 0x10 /* instruction memory */ -#define CMD_WR_INS_MEM 0x11 -#define CMD_RD_DATA_MEM 0x12 /* data memory */ -#define CMD_WR_DATA_MEM 0x13 -#define CMD_RD_ECS_REG 0x14 /* exec access register */ -#define CMD_WR_ECS_REG 0x15 - -#define STAT_RUN 0x80000000 -#define STAT_STOP 0x40000000 -#define STAT_CLEAR 0x20000000 -#define STAT_ECS_K 0x00800000 /* pipeline clean */ - -#define NPE_STEVT 0x1B -#define NPE_STARTPC 0x1C -#define NPE_REGMAP 0x1E -#define NPE_CINDEX 0x1F - -#define INSTR_WR_REG_SHORT 0x0000C000 -#define INSTR_WR_REG_BYTE 0x00004000 -#define INSTR_RD_FIFO 0x0F888220 -#define INSTR_RESET_MBOX 0x0FAC8210 - -#define ECS_BG_CTXT_REG_0 0x00 /* Background Executing Context */ -#define ECS_BG_CTXT_REG_1 0x01 /* Stack level */ -#define ECS_BG_CTXT_REG_2 0x02 -#define ECS_PRI_1_CTXT_REG_0 0x04 /* Priority 1 Executing Context */ -#define ECS_PRI_1_CTXT_REG_1 0x05 /* Stack level */ -#define ECS_PRI_1_CTXT_REG_2 0x06 -#define ECS_PRI_2_CTXT_REG_0 0x08 /* Priority 2 Executing Context */ -#define ECS_PRI_2_CTXT_REG_1 0x09 /* Stack level */ -#define ECS_PRI_2_CTXT_REG_2 0x0A -#define ECS_DBG_CTXT_REG_0 0x0C /* Debug Executing Context */ -#define ECS_DBG_CTXT_REG_1 0x0D /* Stack level */ -#define ECS_DBG_CTXT_REG_2 0x0E -#define ECS_INSTRUCT_REG 0x11 /* NPE Instruction Register */ - -#define ECS_REG_0_ACTIVE 0x80000000 /* all levels */ -#define ECS_REG_0_NEXTPC_MASK 0x1FFF0000 /* BG/PRI1/PRI2 levels */ -#define ECS_REG_0_LDUR_BITS 8 -#define ECS_REG_0_LDUR_MASK 0x00000700 /* all levels */ -#define 
ECS_REG_1_CCTXT_BITS 16 -#define ECS_REG_1_CCTXT_MASK 0x000F0000 /* all levels */ -#define ECS_REG_1_SELCTXT_BITS 0 -#define ECS_REG_1_SELCTXT_MASK 0x0000000F /* all levels */ -#define ECS_DBG_REG_2_IF 0x00100000 /* debug level */ -#define ECS_DBG_REG_2_IE 0x00080000 /* debug level */ - -/* NPE watchpoint_fifo register bit */ -#define WFIFO_VALID 0x80000000 - -/* NPE messaging_status register bit definitions */ -#define MSGSTAT_OFNE 0x00010000 /* OutFifoNotEmpty */ -#define MSGSTAT_IFNF 0x00020000 /* InFifoNotFull */ -#define MSGSTAT_OFNF 0x00040000 /* OutFifoNotFull */ -#define MSGSTAT_IFNE 0x00080000 /* InFifoNotEmpty */ -#define MSGSTAT_MBINT 0x00100000 /* Mailbox interrupt */ -#define MSGSTAT_IFINT 0x00200000 /* InFifo interrupt */ -#define MSGSTAT_OFINT 0x00400000 /* OutFifo interrupt */ -#define MSGSTAT_WFINT 0x00800000 /* WatchFifo interrupt */ - -/* NPE messaging_control register bit definitions */ -#define MSGCTL_OUT_FIFO 0x00010000 /* enable output FIFO */ -#define MSGCTL_IN_FIFO 0x00020000 /* enable input FIFO */ -#define MSGCTL_OUT_FIFO_WRITE 0x01000000 /* enable FIFO + WRITE */ -#define MSGCTL_IN_FIFO_WRITE 0x02000000 - -/* NPE mailbox_status value for reset */ -#define RESET_MBOX_STAT 0x0000F0F0 - -#define NPE_A_FIRMWARE "NPE-A" -#define NPE_B_FIRMWARE "NPE-B" -#define NPE_C_FIRMWARE "NPE-C" - -const char *npe_names[] = { NPE_A_FIRMWARE, NPE_B_FIRMWARE, NPE_C_FIRMWARE }; - -#define print_npe(pri, npe, fmt, ...) \ - printk(pri "%s: " fmt, npe_name(npe), ## __VA_ARGS__) - -#if DEBUG_MSG -#define debug_msg(npe, fmt, ...) \ - print_npe(KERN_DEBUG, npe, fmt, ## __VA_ARGS__) -#else -#define debug_msg(npe, fmt, ...) -#endif - -static struct { - u32 reg, val; -} ecs_reset[] = { - { ECS_BG_CTXT_REG_0, 0xA0000000 }, - { ECS_BG_CTXT_REG_1, 0x01000000 }, - { ECS_BG_CTXT_REG_2, 0x00008000 }, - { ECS_PRI_1_CTXT_REG_0, 0x20000080 }, - { ECS_PRI_1_CTXT_REG_1, 0x01000000 }, - { ECS_PRI_1_CTXT_REG_2, 0x00008000 }, - { ECS_PRI_2_CTXT_REG_0, 0x20000080 }, - { ECS_PRI_2_CTXT_REG_1, 0x01000000 }, - { ECS_PRI_2_CTXT_REG_2, 0x00008000 }, - { ECS_DBG_CTXT_REG_0, 0x20000000 }, - { ECS_DBG_CTXT_REG_1, 0x00000000 }, - { ECS_DBG_CTXT_REG_2, 0x001E0000 }, - { ECS_INSTRUCT_REG, 0x1003C00F }, -}; - -static struct npe npe_tab[NPE_COUNT] = { - { - .id = 0, - .regs = (struct npe_regs __iomem *)IXP4XX_NPEA_BASE_VIRT, - .regs_phys = IXP4XX_NPEA_BASE_PHYS, - }, { - .id = 1, - .regs = (struct npe_regs __iomem *)IXP4XX_NPEB_BASE_VIRT, - .regs_phys = IXP4XX_NPEB_BASE_PHYS, - }, { - .id = 2, - .regs = (struct npe_regs __iomem *)IXP4XX_NPEC_BASE_VIRT, - .regs_phys = IXP4XX_NPEC_BASE_PHYS, - } -}; - -int npe_running(struct npe *npe) -{ - return (__raw_readl(&npe->regs->exec_status_cmd) & STAT_RUN) != 0; -} - -static void npe_cmd_write(struct npe *npe, u32 addr, int cmd, u32 data) -{ - __raw_writel(data, &npe->regs->exec_data); - __raw_writel(addr, &npe->regs->exec_addr); - __raw_writel(cmd, &npe->regs->exec_status_cmd); -} - -static u32 npe_cmd_read(struct npe *npe, u32 addr, int cmd) -{ - __raw_writel(addr, &npe->regs->exec_addr); - __raw_writel(cmd, &npe->regs->exec_status_cmd); - /* Iintroduce extra read cycles after issuing read command to NPE - so that we read the register after the NPE has updated it. 
- This is to overcome race condition between XScale and NPE */ - __raw_readl(&npe->regs->exec_data); - __raw_readl(&npe->regs->exec_data); - return __raw_readl(&npe->regs->exec_data); -} - -static void npe_clear_active(struct npe *npe, u32 reg) -{ - u32 val = npe_cmd_read(npe, reg, CMD_RD_ECS_REG); - npe_cmd_write(npe, reg, CMD_WR_ECS_REG, val & ~ECS_REG_0_ACTIVE); -} - -static void npe_start(struct npe *npe) -{ - /* ensure only Background Context Stack Level is active */ - npe_clear_active(npe, ECS_PRI_1_CTXT_REG_0); - npe_clear_active(npe, ECS_PRI_2_CTXT_REG_0); - npe_clear_active(npe, ECS_DBG_CTXT_REG_0); - - __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); - __raw_writel(CMD_NPE_START, &npe->regs->exec_status_cmd); -} - -static void npe_stop(struct npe *npe) -{ - __raw_writel(CMD_NPE_STOP, &npe->regs->exec_status_cmd); - __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); /*FIXME?*/ -} - -static int __must_check npe_debug_instr(struct npe *npe, u32 instr, u32 ctx, - u32 ldur) -{ - u32 wc; - int i; - - /* set the Active bit, and the LDUR, in the debug level */ - npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG, - ECS_REG_0_ACTIVE | (ldur << ECS_REG_0_LDUR_BITS)); - - /* set CCTXT at ECS DEBUG L3 to specify in which context to execute - the instruction, and set SELCTXT at ECS DEBUG Level to specify - which context store to access. - Debug ECS Level Reg 1 has form 0x000n000n, where n = context number - */ - npe_cmd_write(npe, ECS_DBG_CTXT_REG_1, CMD_WR_ECS_REG, - (ctx << ECS_REG_1_CCTXT_BITS) | - (ctx << ECS_REG_1_SELCTXT_BITS)); - - /* clear the pipeline */ - __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); - - /* load NPE instruction into the instruction register */ - npe_cmd_write(npe, ECS_INSTRUCT_REG, CMD_WR_ECS_REG, instr); - - /* we need this value later to wait for completion of NPE execution - step */ - wc = __raw_readl(&npe->regs->watch_count); - - /* issue a Step One command via the Execution Control register */ - __raw_writel(CMD_NPE_STEP, &npe->regs->exec_status_cmd); - - /* Watch Count register increments when NPE completes an instruction */ - for (i = 0; i < MAX_RETRIES; i++) { - if (wc != __raw_readl(&npe->regs->watch_count)) - return 0; - udelay(1); - } - - print_npe(KERN_ERR, npe, "reset: npe_debug_instr(): timeout\n"); - return -ETIMEDOUT; -} - -static int __must_check npe_logical_reg_write8(struct npe *npe, u32 addr, - u8 val, u32 ctx) -{ - /* here we build the NPE assembler instruction: mov8 d0, #0 */ - u32 instr = INSTR_WR_REG_BYTE | /* OpCode */ - addr << 9 | /* base Operand */ - (val & 0x1F) << 4 | /* lower 5 bits to immediate data */ - (val & ~0x1F) << (18 - 5);/* higher 3 bits to CoProc instr. */ - return npe_debug_instr(npe, instr, ctx, 1); /* execute it */ -} - -static int __must_check npe_logical_reg_write16(struct npe *npe, u32 addr, - u16 val, u32 ctx) -{ - /* here we build the NPE assembler instruction: mov16 d0, #0 */ - u32 instr = INSTR_WR_REG_SHORT | /* OpCode */ - addr << 9 | /* base Operand */ - (val & 0x1F) << 4 | /* lower 5 bits to immediate data */ - (val & ~0x1F) << (18 - 5);/* higher 11 bits to CoProc instr. 
*/ - return npe_debug_instr(npe, instr, ctx, 1); /* execute it */ -} - -static int __must_check npe_logical_reg_write32(struct npe *npe, u32 addr, - u32 val, u32 ctx) -{ - /* write in 16 bit steps first the high and then the low value */ - if (npe_logical_reg_write16(npe, addr, val >> 16, ctx)) - return -ETIMEDOUT; - return npe_logical_reg_write16(npe, addr + 2, val & 0xFFFF, ctx); -} - -static int npe_reset(struct npe *npe) -{ - u32 val, ctl, exec_count, ctx_reg2; - int i; - - ctl = (__raw_readl(&npe->regs->messaging_control) | 0x3F000000) & - 0x3F3FFFFF; - - /* disable parity interrupt */ - __raw_writel(ctl & 0x3F00FFFF, &npe->regs->messaging_control); - - /* pre exec - debug instruction */ - /* turn off the halt bit by clearing Execution Count register. */ - exec_count = __raw_readl(&npe->regs->exec_count); - __raw_writel(0, &npe->regs->exec_count); - /* ensure that IF and IE are on (temporarily), so that we don't end up - stepping forever */ - ctx_reg2 = npe_cmd_read(npe, ECS_DBG_CTXT_REG_2, CMD_RD_ECS_REG); - npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2 | - ECS_DBG_REG_2_IF | ECS_DBG_REG_2_IE); - - /* clear the FIFOs */ - while (__raw_readl(&npe->regs->watchpoint_fifo) & WFIFO_VALID) - ; - while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE) - /* read from the outFIFO until empty */ - print_npe(KERN_DEBUG, npe, "npe_reset: read FIFO = 0x%X\n", - __raw_readl(&npe->regs->in_out_fifo)); - - while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE) - /* step execution of the NPE intruction to read inFIFO using - the Debug Executing Context stack */ - if (npe_debug_instr(npe, INSTR_RD_FIFO, 0, 0)) - return -ETIMEDOUT; - - /* reset the mailbox reg from the XScale side */ - __raw_writel(RESET_MBOX_STAT, &npe->regs->mailbox_status); - /* from NPE side */ - if (npe_debug_instr(npe, INSTR_RESET_MBOX, 0, 0)) - return -ETIMEDOUT; - - /* Reset the physical registers in the NPE register file */ - for (val = 0; val < NPE_PHYS_REG; val++) { - if (npe_logical_reg_write16(npe, NPE_REGMAP, val >> 1, 0)) - return -ETIMEDOUT; - /* address is either 0 or 4 */ - if (npe_logical_reg_write32(npe, (val & 1) * 4, 0, 0)) - return -ETIMEDOUT; - } - - /* Reset the context store = each context's Context Store registers */ - - /* Context 0 has no STARTPC. 
Instead, this value is used to set NextPC - for Background ECS, to set where NPE starts executing code */ - val = npe_cmd_read(npe, ECS_BG_CTXT_REG_0, CMD_RD_ECS_REG); - val &= ~ECS_REG_0_NEXTPC_MASK; - val |= (0 /* NextPC */ << 16) & ECS_REG_0_NEXTPC_MASK; - npe_cmd_write(npe, ECS_BG_CTXT_REG_0, CMD_WR_ECS_REG, val); - - for (i = 0; i < 16; i++) { - if (i) { /* Context 0 has no STEVT nor STARTPC */ - /* STEVT = off, 0x80 */ - if (npe_logical_reg_write8(npe, NPE_STEVT, 0x80, i)) - return -ETIMEDOUT; - if (npe_logical_reg_write16(npe, NPE_STARTPC, 0, i)) - return -ETIMEDOUT; - } - /* REGMAP = d0->p0, d8->p2, d16->p4 */ - if (npe_logical_reg_write16(npe, NPE_REGMAP, 0x820, i)) - return -ETIMEDOUT; - if (npe_logical_reg_write8(npe, NPE_CINDEX, 0, i)) - return -ETIMEDOUT; - } - - /* post exec */ - /* clear active bit in debug level */ - npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG, 0); - /* clear the pipeline */ - __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); - /* restore previous values */ - __raw_writel(exec_count, &npe->regs->exec_count); - npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2); - - /* write reset values to Execution Context Stack registers */ - for (val = 0; val < ARRAY_SIZE(ecs_reset); val++) - npe_cmd_write(npe, ecs_reset[val].reg, CMD_WR_ECS_REG, - ecs_reset[val].val); - - /* clear the profile counter */ - __raw_writel(CMD_CLR_PROFILE_CNT, &npe->regs->exec_status_cmd); - - __raw_writel(0, &npe->regs->exec_count); - __raw_writel(0, &npe->regs->action_points[0]); - __raw_writel(0, &npe->regs->action_points[1]); - __raw_writel(0, &npe->regs->action_points[2]); - __raw_writel(0, &npe->regs->action_points[3]); - __raw_writel(0, &npe->regs->watch_count); - - val = ixp4xx_read_feature_bits(); - /* reset the NPE */ - ixp4xx_write_feature_bits(val & - ~(IXP4XX_FEATURE_RESET_NPEA << npe->id)); - /* deassert reset */ - ixp4xx_write_feature_bits(val | - (IXP4XX_FEATURE_RESET_NPEA << npe->id)); - for (i = 0; i < MAX_RETRIES; i++) { - if (ixp4xx_read_feature_bits() & - (IXP4XX_FEATURE_RESET_NPEA << npe->id)) - break; /* NPE is back alive */ - udelay(1); - } - if (i == MAX_RETRIES) - return -ETIMEDOUT; - - npe_stop(npe); - - /* restore NPE configuration bus Control Register - parity settings */ - __raw_writel(ctl, &npe->regs->messaging_control); - return 0; -} - - -int npe_send_message(struct npe *npe, const void *msg, const char *what) -{ - const u32 *send = msg; - int cycles = 0; - - debug_msg(npe, "Trying to send message %s [%08X:%08X]\n", - what, send[0], send[1]); - - if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE) { - debug_msg(npe, "NPE input FIFO not empty\n"); - return -EIO; - } - - __raw_writel(send[0], &npe->regs->in_out_fifo); - - if (!(__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNF)) { - debug_msg(npe, "NPE input FIFO full\n"); - return -EIO; - } - - __raw_writel(send[1], &npe->regs->in_out_fifo); - - while ((cycles < MAX_RETRIES) && - (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE)) { - udelay(1); - cycles++; - } - - if (cycles == MAX_RETRIES) { - debug_msg(npe, "Timeout sending message\n"); - return -ETIMEDOUT; - } - -#if DEBUG_MSG > 1 - debug_msg(npe, "Sending a message took %i cycles\n", cycles); -#endif - return 0; -} - -int npe_recv_message(struct npe *npe, void *msg, const char *what) -{ - u32 *recv = msg; - int cycles = 0, cnt = 0; - - debug_msg(npe, "Trying to receive message %s\n", what); - - while (cycles < MAX_RETRIES) { - if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE) { - 
recv[cnt++] = __raw_readl(&npe->regs->in_out_fifo); - if (cnt == 2) - break; - } else { - udelay(1); - cycles++; - } - } - - switch(cnt) { - case 1: - debug_msg(npe, "Received [%08X]\n", recv[0]); - break; - case 2: - debug_msg(npe, "Received [%08X:%08X]\n", recv[0], recv[1]); - break; - } - - if (cycles == MAX_RETRIES) { - debug_msg(npe, "Timeout waiting for message\n"); - return -ETIMEDOUT; - } - -#if DEBUG_MSG > 1 - debug_msg(npe, "Receiving a message took %i cycles\n", cycles); -#endif - return 0; -} - -int npe_send_recv_message(struct npe *npe, void *msg, const char *what) -{ - int result; - u32 *send = msg, recv[2]; - - if ((result = npe_send_message(npe, msg, what)) != 0) - return result; - if ((result = npe_recv_message(npe, recv, what)) != 0) - return result; - - if ((recv[0] != send[0]) || (recv[1] != send[1])) { - debug_msg(npe, "Message %s: unexpected message received\n", - what); - return -EIO; - } - return 0; -} - - -int npe_load_firmware(struct npe *npe, const char *name, struct device *dev) -{ - const struct firmware *fw_entry; - - struct dl_block { - u32 type; - u32 offset; - } *blk; - - struct dl_image { - u32 magic; - u32 id; - u32 size; - union { - u32 data[0]; - struct dl_block blocks[0]; - }; - } *image; - - struct dl_codeblock { - u32 npe_addr; - u32 size; - u32 data[0]; - } *cb; - - int i, j, err, data_size, instr_size, blocks, table_end; - u32 cmd; - - if ((err = request_firmware(&fw_entry, name, dev)) != 0) - return err; - - err = -EINVAL; - if (fw_entry->size < sizeof(struct dl_image)) { - print_npe(KERN_ERR, npe, "incomplete firmware file\n"); - goto err; - } - image = (struct dl_image*)fw_entry->data; - -#if DEBUG_FW - print_npe(KERN_DEBUG, npe, "firmware: %08X %08X %08X (0x%X bytes)\n", - image->magic, image->id, image->size, image->size * 4); -#endif - - if (image->magic == swab32(FW_MAGIC)) { /* swapped file */ - image->id = swab32(image->id); - image->size = swab32(image->size); - } else if (image->magic != FW_MAGIC) { - print_npe(KERN_ERR, npe, "bad firmware file magic: 0x%X\n", - image->magic); - goto err; - } - if ((image->size * 4 + sizeof(struct dl_image)) != fw_entry->size) { - print_npe(KERN_ERR, npe, - "inconsistent size of firmware file\n"); - goto err; - } - if (((image->id >> 24) & 0xF /* NPE ID */) != npe->id) { - print_npe(KERN_ERR, npe, "firmware file NPE ID mismatch\n"); - goto err; - } - if (image->magic == swab32(FW_MAGIC)) - for (i = 0; i < image->size; i++) - image->data[i] = swab32(image->data[i]); - - if (cpu_is_ixp42x() && ((image->id >> 28) & 0xF /* device ID */)) { - print_npe(KERN_INFO, npe, "IXP43x/IXP46x firmware ignored on " - "IXP42x\n"); - goto err; - } - - if (npe_running(npe)) { - print_npe(KERN_INFO, npe, "unable to load firmware, NPE is " - "already running\n"); - err = -EBUSY; - goto err; - } -#if 0 - npe_stop(npe); - npe_reset(npe); -#endif - - print_npe(KERN_INFO, npe, "firmware functionality 0x%X, " - "revision 0x%X:%X\n", (image->id >> 16) & 0xFF, - (image->id >> 8) & 0xFF, image->id & 0xFF); - - if (cpu_is_ixp42x()) { - if (!npe->id) - instr_size = NPE_A_42X_INSTR_SIZE; - else - instr_size = NPE_B_AND_C_42X_INSTR_SIZE; - data_size = NPE_42X_DATA_SIZE; - } else { - instr_size = NPE_46X_INSTR_SIZE; - data_size = NPE_46X_DATA_SIZE; - } - - for (blocks = 0; blocks * sizeof(struct dl_block) / 4 < image->size; - blocks++) - if (image->blocks[blocks].type == FW_BLOCK_TYPE_EOF) - break; - if (blocks * sizeof(struct dl_block) / 4 >= image->size) { - print_npe(KERN_INFO, npe, "firmware EOF block marker not " - "found\n"); - 
goto err; - } - -#if DEBUG_FW - print_npe(KERN_DEBUG, npe, "%i firmware blocks found\n", blocks); -#endif - - table_end = blocks * sizeof(struct dl_block) / 4 + 1 /* EOF marker */; - for (i = 0, blk = image->blocks; i < blocks; i++, blk++) { - if (blk->offset > image->size - sizeof(struct dl_codeblock) / 4 - || blk->offset < table_end) { - print_npe(KERN_INFO, npe, "invalid offset 0x%X of " - "firmware block #%i\n", blk->offset, i); - goto err; - } - - cb = (struct dl_codeblock*)&image->data[blk->offset]; - if (blk->type == FW_BLOCK_TYPE_INSTR) { - if (cb->npe_addr + cb->size > instr_size) - goto too_big; - cmd = CMD_WR_INS_MEM; - } else if (blk->type == FW_BLOCK_TYPE_DATA) { - if (cb->npe_addr + cb->size > data_size) - goto too_big; - cmd = CMD_WR_DATA_MEM; - } else { - print_npe(KERN_INFO, npe, "invalid firmware block #%i " - "type 0x%X\n", i, blk->type); - goto err; - } - if (blk->offset + sizeof(*cb) / 4 + cb->size > image->size) { - print_npe(KERN_INFO, npe, "firmware block #%i doesn't " - "fit in firmware image: type %c, start 0x%X," - " length 0x%X\n", i, - blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D', - cb->npe_addr, cb->size); - goto err; - } - - for (j = 0; j < cb->size; j++) - npe_cmd_write(npe, cb->npe_addr + j, cmd, cb->data[j]); - } - - npe_start(npe); - if (!npe_running(npe)) - print_npe(KERN_ERR, npe, "unable to start\n"); - release_firmware(fw_entry); - return 0; - -too_big: - print_npe(KERN_INFO, npe, "firmware block #%i doesn't fit in NPE " - "memory: type %c, start 0x%X, length 0x%X\n", i, - blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D', - cb->npe_addr, cb->size); -err: - release_firmware(fw_entry); - return err; -} - - -struct npe *npe_request(unsigned id) -{ - if (id < NPE_COUNT) - if (npe_tab[id].valid) - if (try_module_get(THIS_MODULE)) - return &npe_tab[id]; - return NULL; -} - -void npe_release(struct npe *npe) -{ - module_put(THIS_MODULE); -} - - -static int __init npe_init_module(void) -{ - - int i, found = 0; - - /* This driver does not work with device tree */ - if (of_have_populated_dt()) - return -ENODEV; - - for (i = 0; i < NPE_COUNT; i++) { - struct npe *npe = &npe_tab[i]; - if (!(ixp4xx_read_feature_bits() & - (IXP4XX_FEATURE_RESET_NPEA << i))) - continue; /* NPE already disabled or not present */ - if (!(npe->mem_res = request_mem_region(npe->regs_phys, - REGS_SIZE, - npe_name(npe)))) { - print_npe(KERN_ERR, npe, - "failed to request memory region\n"); - continue; - } - - if (npe_reset(npe)) - continue; - npe->valid = 1; - found++; - } - - if (!found) - return -ENODEV; - return 0; -} - -static void __exit npe_cleanup_module(void) -{ - int i; - - for (i = 0; i < NPE_COUNT; i++) - if (npe_tab[i].mem_res) { - npe_reset(&npe_tab[i]); - release_resource(npe_tab[i].mem_res); - } -} - -module_init(npe_init_module); -module_exit(npe_cleanup_module); - -MODULE_AUTHOR("Krzysztof Halasa"); -MODULE_LICENSE("GPL v2"); -MODULE_FIRMWARE(NPE_A_FIRMWARE); -MODULE_FIRMWARE(NPE_B_FIRMWARE); -MODULE_FIRMWARE(NPE_C_FIRMWARE); - -EXPORT_SYMBOL(npe_names); -EXPORT_SYMBOL(npe_running); -EXPORT_SYMBOL(npe_request); -EXPORT_SYMBOL(npe_release); -EXPORT_SYMBOL(npe_load_firmware); -EXPORT_SYMBOL(npe_send_message); -EXPORT_SYMBOL(npe_recv_message); -EXPORT_SYMBOL(npe_send_recv_message); diff --git a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c deleted file mode 100644 index 2665347a2c6f..000000000000 --- a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c +++ /dev/null @@ -1,379 +0,0 @@ -/* - * Intel IXP4xx Queue Manager driver for Linux - * - * Copyright (C) 2007 
Krzysztof Halasa - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License - * as published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include - -#include "irqs.h" - -static struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT; -static struct resource *mem_res; -static spinlock_t qmgr_lock; -static u32 used_sram_bitmap[4]; /* 128 16-dword pages */ -static void (*irq_handlers[QUEUES])(void *pdev); -static void *irq_pdevs[QUEUES]; - -#if DEBUG_QMGR -char qmgr_queue_descs[QUEUES][32]; -#endif - -void qmgr_set_irq(unsigned int queue, int src, - void (*handler)(void *pdev), void *pdev) -{ - unsigned long flags; - - spin_lock_irqsave(&qmgr_lock, flags); - if (queue < HALF_QUEUES) { - u32 __iomem *reg; - int bit; - BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL); - reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */ - bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */ - __raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit), - reg); - } else - /* IRQ source for queues 32-63 is fixed */ - BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY); - - irq_handlers[queue] = handler; - irq_pdevs[queue] = pdev; - spin_unlock_irqrestore(&qmgr_lock, flags); -} - - -static irqreturn_t qmgr_irq1_a0(int irq, void *pdev) -{ - int i, ret = 0; - u32 en_bitmap, src, stat; - - /* ACK - it may clear any bits so don't rely on it */ - __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]); - - en_bitmap = qmgr_regs->irqen[0]; - while (en_bitmap) { - i = __fls(en_bitmap); /* number of the last "low" queue */ - en_bitmap &= ~BIT(i); - src = qmgr_regs->irqsrc[i >> 3]; - stat = qmgr_regs->stat1[i >> 3]; - if (src & 4) /* the IRQ condition is inverted */ - stat = ~stat; - if (stat & BIT(src & 3)) { - irq_handlers[i](irq_pdevs[i]); - ret = IRQ_HANDLED; - } - } - return ret; -} - - -static irqreturn_t qmgr_irq2_a0(int irq, void *pdev) -{ - int i, ret = 0; - u32 req_bitmap; - - /* ACK - it may clear any bits so don't rely on it */ - __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]); - - req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h; - while (req_bitmap) { - i = __fls(req_bitmap); /* number of the last "high" queue */ - req_bitmap &= ~BIT(i); - irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]); - ret = IRQ_HANDLED; - } - return ret; -} - - -static irqreturn_t qmgr_irq(int irq, void *pdev) -{ - int i, half = (irq == IRQ_IXP4XX_QM1 ? 
0 : 1); - u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]); - - if (!req_bitmap) - return 0; - __raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */ - - while (req_bitmap) { - i = __fls(req_bitmap); /* number of the last queue */ - req_bitmap &= ~BIT(i); - i += half * HALF_QUEUES; - irq_handlers[i](irq_pdevs[i]); - } - return IRQ_HANDLED; -} - - -void qmgr_enable_irq(unsigned int queue) -{ - unsigned long flags; - int half = queue / 32; - u32 mask = 1 << (queue & (HALF_QUEUES - 1)); - - spin_lock_irqsave(&qmgr_lock, flags); - __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask, - &qmgr_regs->irqen[half]); - spin_unlock_irqrestore(&qmgr_lock, flags); -} - -void qmgr_disable_irq(unsigned int queue) -{ - unsigned long flags; - int half = queue / 32; - u32 mask = 1 << (queue & (HALF_QUEUES - 1)); - - spin_lock_irqsave(&qmgr_lock, flags); - __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask, - &qmgr_regs->irqen[half]); - __raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */ - spin_unlock_irqrestore(&qmgr_lock, flags); -} - -static inline void shift_mask(u32 *mask) -{ - mask[3] = mask[3] << 1 | mask[2] >> 31; - mask[2] = mask[2] << 1 | mask[1] >> 31; - mask[1] = mask[1] << 1 | mask[0] >> 31; - mask[0] <<= 1; -} - -#if DEBUG_QMGR -int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, - unsigned int nearly_empty_watermark, - unsigned int nearly_full_watermark, - const char *desc_format, const char* name) -#else -int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, - unsigned int nearly_empty_watermark, - unsigned int nearly_full_watermark) -#endif -{ - u32 cfg, addr = 0, mask[4]; /* in 16-dwords */ - int err; - - BUG_ON(queue >= QUEUES); - - if ((nearly_empty_watermark | nearly_full_watermark) & ~7) - return -EINVAL; - - switch (len) { - case 16: - cfg = 0 << 24; - mask[0] = 0x1; - break; - case 32: - cfg = 1 << 24; - mask[0] = 0x3; - break; - case 64: - cfg = 2 << 24; - mask[0] = 0xF; - break; - case 128: - cfg = 3 << 24; - mask[0] = 0xFF; - break; - default: - return -EINVAL; - } - - cfg |= nearly_empty_watermark << 26; - cfg |= nearly_full_watermark << 29; - len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */ - mask[1] = mask[2] = mask[3] = 0; - - if (!try_module_get(THIS_MODULE)) - return -ENODEV; - - spin_lock_irq(&qmgr_lock); - if (__raw_readl(&qmgr_regs->sram[queue])) { - err = -EBUSY; - goto err; - } - - while (1) { - if (!(used_sram_bitmap[0] & mask[0]) && - !(used_sram_bitmap[1] & mask[1]) && - !(used_sram_bitmap[2] & mask[2]) && - !(used_sram_bitmap[3] & mask[3])) - break; /* found free space */ - - addr++; - shift_mask(mask); - if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) { - printk(KERN_ERR "qmgr: no free SRAM space for" - " queue %i\n", queue); - err = -ENOMEM; - goto err; - } - } - - used_sram_bitmap[0] |= mask[0]; - used_sram_bitmap[1] |= mask[1]; - used_sram_bitmap[2] |= mask[2]; - used_sram_bitmap[3] |= mask[3]; - __raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]); -#if DEBUG_QMGR - snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]), - desc_format, name); - printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n", - qmgr_queue_descs[queue], queue, addr); -#endif - spin_unlock_irq(&qmgr_lock); - return 0; - -err: - spin_unlock_irq(&qmgr_lock); - module_put(THIS_MODULE); - return err; -} - -void qmgr_release_queue(unsigned int queue) -{ - u32 cfg, addr, mask[4]; - - BUG_ON(queue >= QUEUES); /* not in valid range */ - - spin_lock_irq(&qmgr_lock); - cfg = 
__raw_readl(&qmgr_regs->sram[queue]); - addr = (cfg >> 14) & 0xFF; - - BUG_ON(!addr); /* not requested */ - - switch ((cfg >> 24) & 3) { - case 0: mask[0] = 0x1; break; - case 1: mask[0] = 0x3; break; - case 2: mask[0] = 0xF; break; - case 3: mask[0] = 0xFF; break; - } - - mask[1] = mask[2] = mask[3] = 0; - - while (addr--) - shift_mask(mask); - -#if DEBUG_QMGR - printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n", - qmgr_queue_descs[queue], queue); - qmgr_queue_descs[queue][0] = '\x0'; -#endif - - while ((addr = qmgr_get_entry(queue))) - printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n", - queue, addr); - - __raw_writel(0, &qmgr_regs->sram[queue]); - - used_sram_bitmap[0] &= ~mask[0]; - used_sram_bitmap[1] &= ~mask[1]; - used_sram_bitmap[2] &= ~mask[2]; - used_sram_bitmap[3] &= ~mask[3]; - irq_handlers[queue] = NULL; /* catch IRQ bugs */ - spin_unlock_irq(&qmgr_lock); - - module_put(THIS_MODULE); -} - -static int qmgr_init(void) -{ - int i, err; - irq_handler_t handler1, handler2; - - /* This driver does not work with device tree */ - if (of_have_populated_dt()) - return -ENODEV; - - mem_res = request_mem_region(IXP4XX_QMGR_BASE_PHYS, - IXP4XX_QMGR_REGION_SIZE, - "IXP4xx Queue Manager"); - if (mem_res == NULL) - return -EBUSY; - - /* reset qmgr registers */ - for (i = 0; i < 4; i++) { - __raw_writel(0x33333333, &qmgr_regs->stat1[i]); - __raw_writel(0, &qmgr_regs->irqsrc[i]); - } - for (i = 0; i < 2; i++) { - __raw_writel(0, &qmgr_regs->stat2[i]); - __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */ - __raw_writel(0, &qmgr_regs->irqen[i]); - } - - __raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h); - __raw_writel(0, &qmgr_regs->statf_h); - - for (i = 0; i < QUEUES; i++) - __raw_writel(0, &qmgr_regs->sram[i]); - - if (cpu_is_ixp42x_rev_a0()) { - handler1 = qmgr_irq1_a0; - handler2 = qmgr_irq2_a0; - } else - handler1 = handler2 = qmgr_irq; - - err = request_irq(IRQ_IXP4XX_QM1, handler1, 0, "IXP4xx Queue Manager", - NULL); - if (err) { - printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n", - IRQ_IXP4XX_QM1, err); - goto error_irq; - } - - err = request_irq(IRQ_IXP4XX_QM2, handler2, 0, "IXP4xx Queue Manager", - NULL); - if (err) { - printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n", - IRQ_IXP4XX_QM2, err); - goto error_irq2; - } - - used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */ - spin_lock_init(&qmgr_lock); - - printk(KERN_INFO "IXP4xx Queue Manager initialized.\n"); - return 0; - -error_irq2: - free_irq(IRQ_IXP4XX_QM1, NULL); -error_irq: - release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE); - return err; -} - -static void qmgr_remove(void) -{ - free_irq(IRQ_IXP4XX_QM1, NULL); - free_irq(IRQ_IXP4XX_QM2, NULL); - synchronize_irq(IRQ_IXP4XX_QM1); - synchronize_irq(IRQ_IXP4XX_QM2); - release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE); -} - -module_init(qmgr_init); -module_exit(qmgr_remove); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Krzysztof Halasa"); - -EXPORT_SYMBOL(qmgr_set_irq); -EXPORT_SYMBOL(qmgr_enable_irq); -EXPORT_SYMBOL(qmgr_disable_irq); -#if DEBUG_QMGR -EXPORT_SYMBOL(qmgr_queue_descs); -EXPORT_SYMBOL(qmgr_request_queue); -#else -EXPORT_SYMBOL(__qmgr_request_queue); -#endif -EXPORT_SYMBOL(qmgr_release_queue); diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index c07b4a85253f..ae9bf20b26fa 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -6,6 +6,7 @@ source "drivers/soc/atmel/Kconfig" source "drivers/soc/bcm/Kconfig" source "drivers/soc/fsl/Kconfig" source "drivers/soc/imx/Kconfig" 
+source "drivers/soc/ixp4xx/Kconfig" source "drivers/soc/mediatek/Kconfig" source "drivers/soc/qcom/Kconfig" source "drivers/soc/renesas/Kconfig" diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 90b686e586c6..c7c1a139ad8d 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_MACH_DOVE) += dove/ obj-y += fsl/ obj-$(CONFIG_ARCH_GEMINI) += gemini/ obj-$(CONFIG_ARCH_MXC) += imx/ +obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/ obj-$(CONFIG_SOC_XWAY) += lantiq/ obj-y += mediatek/ obj-y += amlogic/ diff --git a/drivers/soc/ixp4xx/Kconfig b/drivers/soc/ixp4xx/Kconfig new file mode 100644 index 000000000000..de6becdc78a2 --- /dev/null +++ b/drivers/soc/ixp4xx/Kconfig @@ -0,0 +1,16 @@ +menu "IXP4xx SoC drivers" + +config IXP4XX_QMGR + tristate "IXP4xx Queue Manager support" + help + This driver supports IXP4xx built-in hardware queue manager + and is automatically selected by Ethernet and HSS drivers. + +config IXP4XX_NPE + tristate "IXP4xx Network Processor Engine support" + select FW_LOADER + help + This driver supports IXP4xx built-in network coprocessors + and is automatically selected by Ethernet and HSS drivers. + +endmenu diff --git a/drivers/soc/ixp4xx/Makefile b/drivers/soc/ixp4xx/Makefile new file mode 100644 index 000000000000..d20d99e6df65 --- /dev/null +++ b/drivers/soc/ixp4xx/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_IXP4XX_QMGR) += ixp4xx-qmgr.o +obj-$(CONFIG_IXP4XX_NPE) += ixp4xx-npe.o diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c new file mode 100644 index 000000000000..e0ce22cd9bfc --- /dev/null +++ b/drivers/soc/ixp4xx/ixp4xx-npe.c @@ -0,0 +1,747 @@ +/* + * Intel IXP4xx Network Processor Engine driver for Linux + * + * Copyright (C) 2007 Krzysztof Halasa + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * The code is based on publicly available information: + * - Intel IXP4xx Developer's Manual and other e-papers + * - Intel IXP400 Access Library Software (BSD license) + * - previous works by Christian Hohnstaedt + * Thanks, Christian. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG_MSG 0 +#define DEBUG_FW 0 + +#define NPE_COUNT 3 +#define MAX_RETRIES 1000 /* microseconds */ +#define NPE_42X_DATA_SIZE 0x800 /* in dwords */ +#define NPE_46X_DATA_SIZE 0x1000 +#define NPE_A_42X_INSTR_SIZE 0x1000 +#define NPE_B_AND_C_42X_INSTR_SIZE 0x800 +#define NPE_46X_INSTR_SIZE 0x1000 +#define REGS_SIZE 0x1000 + +#define NPE_PHYS_REG 32 + +#define FW_MAGIC 0xFEEDF00D +#define FW_BLOCK_TYPE_INSTR 0x0 +#define FW_BLOCK_TYPE_DATA 0x1 +#define FW_BLOCK_TYPE_EOF 0xF + +/* NPE exec status (read) and command (write) */ +#define CMD_NPE_STEP 0x01 +#define CMD_NPE_START 0x02 +#define CMD_NPE_STOP 0x03 +#define CMD_NPE_CLR_PIPE 0x04 +#define CMD_CLR_PROFILE_CNT 0x0C +#define CMD_RD_INS_MEM 0x10 /* instruction memory */ +#define CMD_WR_INS_MEM 0x11 +#define CMD_RD_DATA_MEM 0x12 /* data memory */ +#define CMD_WR_DATA_MEM 0x13 +#define CMD_RD_ECS_REG 0x14 /* exec access register */ +#define CMD_WR_ECS_REG 0x15 + +#define STAT_RUN 0x80000000 +#define STAT_STOP 0x40000000 +#define STAT_CLEAR 0x20000000 +#define STAT_ECS_K 0x00800000 /* pipeline clean */ + +#define NPE_STEVT 0x1B +#define NPE_STARTPC 0x1C +#define NPE_REGMAP 0x1E +#define NPE_CINDEX 0x1F + +#define INSTR_WR_REG_SHORT 0x0000C000 +#define INSTR_WR_REG_BYTE 0x00004000 +#define INSTR_RD_FIFO 0x0F888220 +#define INSTR_RESET_MBOX 0x0FAC8210 + +#define ECS_BG_CTXT_REG_0 0x00 /* Background Executing Context */ +#define ECS_BG_CTXT_REG_1 0x01 /* Stack level */ +#define ECS_BG_CTXT_REG_2 0x02 +#define ECS_PRI_1_CTXT_REG_0 0x04 /* Priority 1 Executing Context */ +#define ECS_PRI_1_CTXT_REG_1 0x05 /* Stack level */ +#define ECS_PRI_1_CTXT_REG_2 0x06 +#define ECS_PRI_2_CTXT_REG_0 0x08 /* Priority 2 Executing Context */ +#define ECS_PRI_2_CTXT_REG_1 0x09 /* Stack level */ +#define ECS_PRI_2_CTXT_REG_2 0x0A +#define ECS_DBG_CTXT_REG_0 0x0C /* Debug Executing Context */ +#define ECS_DBG_CTXT_REG_1 0x0D /* Stack level */ +#define ECS_DBG_CTXT_REG_2 0x0E +#define ECS_INSTRUCT_REG 0x11 /* NPE Instruction Register */ + +#define ECS_REG_0_ACTIVE 0x80000000 /* all levels */ +#define ECS_REG_0_NEXTPC_MASK 0x1FFF0000 /* BG/PRI1/PRI2 levels */ +#define ECS_REG_0_LDUR_BITS 8 +#define ECS_REG_0_LDUR_MASK 0x00000700 /* all levels */ +#define ECS_REG_1_CCTXT_BITS 16 +#define ECS_REG_1_CCTXT_MASK 0x000F0000 /* all levels */ +#define ECS_REG_1_SELCTXT_BITS 0 +#define ECS_REG_1_SELCTXT_MASK 0x0000000F /* all levels */ +#define ECS_DBG_REG_2_IF 0x00100000 /* debug level */ +#define ECS_DBG_REG_2_IE 0x00080000 /* debug level */ + +/* NPE watchpoint_fifo register bit */ +#define WFIFO_VALID 0x80000000 + +/* NPE messaging_status register bit definitions */ +#define MSGSTAT_OFNE 0x00010000 /* OutFifoNotEmpty */ +#define MSGSTAT_IFNF 0x00020000 /* InFifoNotFull */ +#define MSGSTAT_OFNF 0x00040000 /* OutFifoNotFull */ +#define MSGSTAT_IFNE 0x00080000 /* InFifoNotEmpty */ +#define MSGSTAT_MBINT 0x00100000 /* Mailbox interrupt */ +#define MSGSTAT_IFINT 0x00200000 /* InFifo interrupt */ +#define MSGSTAT_OFINT 0x00400000 /* OutFifo interrupt */ +#define MSGSTAT_WFINT 0x00800000 /* WatchFifo interrupt */ + +/* NPE messaging_control register bit definitions */ +#define MSGCTL_OUT_FIFO 0x00010000 /* enable output FIFO */ +#define MSGCTL_IN_FIFO 0x00020000 /* enable input FIFO */ +#define MSGCTL_OUT_FIFO_WRITE 0x01000000 /* enable FIFO + WRITE */ +#define MSGCTL_IN_FIFO_WRITE 0x02000000 + +/* NPE mailbox_status value for reset */ +#define RESET_MBOX_STAT 0x0000F0F0 + +#define 
NPE_A_FIRMWARE "NPE-A" +#define NPE_B_FIRMWARE "NPE-B" +#define NPE_C_FIRMWARE "NPE-C" + +const char *npe_names[] = { NPE_A_FIRMWARE, NPE_B_FIRMWARE, NPE_C_FIRMWARE }; + +#define print_npe(pri, npe, fmt, ...) \ + printk(pri "%s: " fmt, npe_name(npe), ## __VA_ARGS__) + +#if DEBUG_MSG +#define debug_msg(npe, fmt, ...) \ + print_npe(KERN_DEBUG, npe, fmt, ## __VA_ARGS__) +#else +#define debug_msg(npe, fmt, ...) +#endif + +static struct { + u32 reg, val; +} ecs_reset[] = { + { ECS_BG_CTXT_REG_0, 0xA0000000 }, + { ECS_BG_CTXT_REG_1, 0x01000000 }, + { ECS_BG_CTXT_REG_2, 0x00008000 }, + { ECS_PRI_1_CTXT_REG_0, 0x20000080 }, + { ECS_PRI_1_CTXT_REG_1, 0x01000000 }, + { ECS_PRI_1_CTXT_REG_2, 0x00008000 }, + { ECS_PRI_2_CTXT_REG_0, 0x20000080 }, + { ECS_PRI_2_CTXT_REG_1, 0x01000000 }, + { ECS_PRI_2_CTXT_REG_2, 0x00008000 }, + { ECS_DBG_CTXT_REG_0, 0x20000000 }, + { ECS_DBG_CTXT_REG_1, 0x00000000 }, + { ECS_DBG_CTXT_REG_2, 0x001E0000 }, + { ECS_INSTRUCT_REG, 0x1003C00F }, +}; + +static struct npe npe_tab[NPE_COUNT] = { + { + .id = 0, + .regs = (struct npe_regs __iomem *)IXP4XX_NPEA_BASE_VIRT, + .regs_phys = IXP4XX_NPEA_BASE_PHYS, + }, { + .id = 1, + .regs = (struct npe_regs __iomem *)IXP4XX_NPEB_BASE_VIRT, + .regs_phys = IXP4XX_NPEB_BASE_PHYS, + }, { + .id = 2, + .regs = (struct npe_regs __iomem *)IXP4XX_NPEC_BASE_VIRT, + .regs_phys = IXP4XX_NPEC_BASE_PHYS, + } +}; + +int npe_running(struct npe *npe) +{ + return (__raw_readl(&npe->regs->exec_status_cmd) & STAT_RUN) != 0; +} + +static void npe_cmd_write(struct npe *npe, u32 addr, int cmd, u32 data) +{ + __raw_writel(data, &npe->regs->exec_data); + __raw_writel(addr, &npe->regs->exec_addr); + __raw_writel(cmd, &npe->regs->exec_status_cmd); +} + +static u32 npe_cmd_read(struct npe *npe, u32 addr, int cmd) +{ + __raw_writel(addr, &npe->regs->exec_addr); + __raw_writel(cmd, &npe->regs->exec_status_cmd); + /* Iintroduce extra read cycles after issuing read command to NPE + so that we read the register after the NPE has updated it. + This is to overcome race condition between XScale and NPE */ + __raw_readl(&npe->regs->exec_data); + __raw_readl(&npe->regs->exec_data); + return __raw_readl(&npe->regs->exec_data); +} + +static void npe_clear_active(struct npe *npe, u32 reg) +{ + u32 val = npe_cmd_read(npe, reg, CMD_RD_ECS_REG); + npe_cmd_write(npe, reg, CMD_WR_ECS_REG, val & ~ECS_REG_0_ACTIVE); +} + +static void npe_start(struct npe *npe) +{ + /* ensure only Background Context Stack Level is active */ + npe_clear_active(npe, ECS_PRI_1_CTXT_REG_0); + npe_clear_active(npe, ECS_PRI_2_CTXT_REG_0); + npe_clear_active(npe, ECS_DBG_CTXT_REG_0); + + __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); + __raw_writel(CMD_NPE_START, &npe->regs->exec_status_cmd); +} + +static void npe_stop(struct npe *npe) +{ + __raw_writel(CMD_NPE_STOP, &npe->regs->exec_status_cmd); + __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); /*FIXME?*/ +} + +static int __must_check npe_debug_instr(struct npe *npe, u32 instr, u32 ctx, + u32 ldur) +{ + u32 wc; + int i; + + /* set the Active bit, and the LDUR, in the debug level */ + npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG, + ECS_REG_0_ACTIVE | (ldur << ECS_REG_0_LDUR_BITS)); + + /* set CCTXT at ECS DEBUG L3 to specify in which context to execute + the instruction, and set SELCTXT at ECS DEBUG Level to specify + which context store to access. 
+ Debug ECS Level Reg 1 has form 0x000n000n, where n = context number + */ + npe_cmd_write(npe, ECS_DBG_CTXT_REG_1, CMD_WR_ECS_REG, + (ctx << ECS_REG_1_CCTXT_BITS) | + (ctx << ECS_REG_1_SELCTXT_BITS)); + + /* clear the pipeline */ + __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); + + /* load NPE instruction into the instruction register */ + npe_cmd_write(npe, ECS_INSTRUCT_REG, CMD_WR_ECS_REG, instr); + + /* we need this value later to wait for completion of NPE execution + step */ + wc = __raw_readl(&npe->regs->watch_count); + + /* issue a Step One command via the Execution Control register */ + __raw_writel(CMD_NPE_STEP, &npe->regs->exec_status_cmd); + + /* Watch Count register increments when NPE completes an instruction */ + for (i = 0; i < MAX_RETRIES; i++) { + if (wc != __raw_readl(&npe->regs->watch_count)) + return 0; + udelay(1); + } + + print_npe(KERN_ERR, npe, "reset: npe_debug_instr(): timeout\n"); + return -ETIMEDOUT; +} + +static int __must_check npe_logical_reg_write8(struct npe *npe, u32 addr, + u8 val, u32 ctx) +{ + /* here we build the NPE assembler instruction: mov8 d0, #0 */ + u32 instr = INSTR_WR_REG_BYTE | /* OpCode */ + addr << 9 | /* base Operand */ + (val & 0x1F) << 4 | /* lower 5 bits to immediate data */ + (val & ~0x1F) << (18 - 5);/* higher 3 bits to CoProc instr. */ + return npe_debug_instr(npe, instr, ctx, 1); /* execute it */ +} + +static int __must_check npe_logical_reg_write16(struct npe *npe, u32 addr, + u16 val, u32 ctx) +{ + /* here we build the NPE assembler instruction: mov16 d0, #0 */ + u32 instr = INSTR_WR_REG_SHORT | /* OpCode */ + addr << 9 | /* base Operand */ + (val & 0x1F) << 4 | /* lower 5 bits to immediate data */ + (val & ~0x1F) << (18 - 5);/* higher 11 bits to CoProc instr. */ + return npe_debug_instr(npe, instr, ctx, 1); /* execute it */ +} + +static int __must_check npe_logical_reg_write32(struct npe *npe, u32 addr, + u32 val, u32 ctx) +{ + /* write in 16 bit steps first the high and then the low value */ + if (npe_logical_reg_write16(npe, addr, val >> 16, ctx)) + return -ETIMEDOUT; + return npe_logical_reg_write16(npe, addr + 2, val & 0xFFFF, ctx); +} + +static int npe_reset(struct npe *npe) +{ + u32 val, ctl, exec_count, ctx_reg2; + int i; + + ctl = (__raw_readl(&npe->regs->messaging_control) | 0x3F000000) & + 0x3F3FFFFF; + + /* disable parity interrupt */ + __raw_writel(ctl & 0x3F00FFFF, &npe->regs->messaging_control); + + /* pre exec - debug instruction */ + /* turn off the halt bit by clearing Execution Count register. 
*/ + exec_count = __raw_readl(&npe->regs->exec_count); + __raw_writel(0, &npe->regs->exec_count); + /* ensure that IF and IE are on (temporarily), so that we don't end up + stepping forever */ + ctx_reg2 = npe_cmd_read(npe, ECS_DBG_CTXT_REG_2, CMD_RD_ECS_REG); + npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2 | + ECS_DBG_REG_2_IF | ECS_DBG_REG_2_IE); + + /* clear the FIFOs */ + while (__raw_readl(&npe->regs->watchpoint_fifo) & WFIFO_VALID) + ; + while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE) + /* read from the outFIFO until empty */ + print_npe(KERN_DEBUG, npe, "npe_reset: read FIFO = 0x%X\n", + __raw_readl(&npe->regs->in_out_fifo)); + + while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE) + /* step execution of the NPE intruction to read inFIFO using + the Debug Executing Context stack */ + if (npe_debug_instr(npe, INSTR_RD_FIFO, 0, 0)) + return -ETIMEDOUT; + + /* reset the mailbox reg from the XScale side */ + __raw_writel(RESET_MBOX_STAT, &npe->regs->mailbox_status); + /* from NPE side */ + if (npe_debug_instr(npe, INSTR_RESET_MBOX, 0, 0)) + return -ETIMEDOUT; + + /* Reset the physical registers in the NPE register file */ + for (val = 0; val < NPE_PHYS_REG; val++) { + if (npe_logical_reg_write16(npe, NPE_REGMAP, val >> 1, 0)) + return -ETIMEDOUT; + /* address is either 0 or 4 */ + if (npe_logical_reg_write32(npe, (val & 1) * 4, 0, 0)) + return -ETIMEDOUT; + } + + /* Reset the context store = each context's Context Store registers */ + + /* Context 0 has no STARTPC. Instead, this value is used to set NextPC + for Background ECS, to set where NPE starts executing code */ + val = npe_cmd_read(npe, ECS_BG_CTXT_REG_0, CMD_RD_ECS_REG); + val &= ~ECS_REG_0_NEXTPC_MASK; + val |= (0 /* NextPC */ << 16) & ECS_REG_0_NEXTPC_MASK; + npe_cmd_write(npe, ECS_BG_CTXT_REG_0, CMD_WR_ECS_REG, val); + + for (i = 0; i < 16; i++) { + if (i) { /* Context 0 has no STEVT nor STARTPC */ + /* STEVT = off, 0x80 */ + if (npe_logical_reg_write8(npe, NPE_STEVT, 0x80, i)) + return -ETIMEDOUT; + if (npe_logical_reg_write16(npe, NPE_STARTPC, 0, i)) + return -ETIMEDOUT; + } + /* REGMAP = d0->p0, d8->p2, d16->p4 */ + if (npe_logical_reg_write16(npe, NPE_REGMAP, 0x820, i)) + return -ETIMEDOUT; + if (npe_logical_reg_write8(npe, NPE_CINDEX, 0, i)) + return -ETIMEDOUT; + } + + /* post exec */ + /* clear active bit in debug level */ + npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG, 0); + /* clear the pipeline */ + __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); + /* restore previous values */ + __raw_writel(exec_count, &npe->regs->exec_count); + npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2); + + /* write reset values to Execution Context Stack registers */ + for (val = 0; val < ARRAY_SIZE(ecs_reset); val++) + npe_cmd_write(npe, ecs_reset[val].reg, CMD_WR_ECS_REG, + ecs_reset[val].val); + + /* clear the profile counter */ + __raw_writel(CMD_CLR_PROFILE_CNT, &npe->regs->exec_status_cmd); + + __raw_writel(0, &npe->regs->exec_count); + __raw_writel(0, &npe->regs->action_points[0]); + __raw_writel(0, &npe->regs->action_points[1]); + __raw_writel(0, &npe->regs->action_points[2]); + __raw_writel(0, &npe->regs->action_points[3]); + __raw_writel(0, &npe->regs->watch_count); + + val = ixp4xx_read_feature_bits(); + /* reset the NPE */ + ixp4xx_write_feature_bits(val & + ~(IXP4XX_FEATURE_RESET_NPEA << npe->id)); + /* deassert reset */ + ixp4xx_write_feature_bits(val | + (IXP4XX_FEATURE_RESET_NPEA << npe->id)); + for (i = 0; i < MAX_RETRIES; i++) { + if 
(ixp4xx_read_feature_bits() &
+		    (IXP4XX_FEATURE_RESET_NPEA << npe->id))
+			break;	/* NPE is back alive */
+		udelay(1);
+	}
+	if (i == MAX_RETRIES)
+		return -ETIMEDOUT;
+
+	npe_stop(npe);
+
+	/* restore NPE configuration bus Control Register - parity settings */
+	__raw_writel(ctl, &npe->regs->messaging_control);
+	return 0;
+}
+
+
+int npe_send_message(struct npe *npe, const void *msg, const char *what)
+{
+	const u32 *send = msg;
+	int cycles = 0;
+
+	debug_msg(npe, "Trying to send message %s [%08X:%08X]\n",
+		  what, send[0], send[1]);
+
+	if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE) {
+		debug_msg(npe, "NPE input FIFO not empty\n");
+		return -EIO;
+	}
+
+	__raw_writel(send[0], &npe->regs->in_out_fifo);
+
+	if (!(__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNF)) {
+		debug_msg(npe, "NPE input FIFO full\n");
+		return -EIO;
+	}
+
+	__raw_writel(send[1], &npe->regs->in_out_fifo);
+
+	while ((cycles < MAX_RETRIES) &&
+	       (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE)) {
+		udelay(1);
+		cycles++;
+	}
+
+	if (cycles == MAX_RETRIES) {
+		debug_msg(npe, "Timeout sending message\n");
+		return -ETIMEDOUT;
+	}
+
+#if DEBUG_MSG > 1
+	debug_msg(npe, "Sending a message took %i cycles\n", cycles);
+#endif
+	return 0;
+}
+
+int npe_recv_message(struct npe *npe, void *msg, const char *what)
+{
+	u32 *recv = msg;
+	int cycles = 0, cnt = 0;
+
+	debug_msg(npe, "Trying to receive message %s\n", what);
+
+	while (cycles < MAX_RETRIES) {
+		if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE) {
+			recv[cnt++] = __raw_readl(&npe->regs->in_out_fifo);
+			if (cnt == 2)
+				break;
+		} else {
+			udelay(1);
+			cycles++;
+		}
+	}
+
+	switch(cnt) {
+	case 1:
+		debug_msg(npe, "Received [%08X]\n", recv[0]);
+		break;
+	case 2:
+		debug_msg(npe, "Received [%08X:%08X]\n", recv[0], recv[1]);
+		break;
+	}
+
+	if (cycles == MAX_RETRIES) {
+		debug_msg(npe, "Timeout waiting for message\n");
+		return -ETIMEDOUT;
+	}
+
+#if DEBUG_MSG > 1
+	debug_msg(npe, "Receiving a message took %i cycles\n", cycles);
+#endif
+	return 0;
+}
+
+int npe_send_recv_message(struct npe *npe, void *msg, const char *what)
+{
+	int result;
+	u32 *send = msg, recv[2];
+
+	if ((result = npe_send_message(npe, msg, what)) != 0)
+		return result;
+	if ((result = npe_recv_message(npe, recv, what)) != 0)
+		return result;
+
+	if ((recv[0] != send[0]) || (recv[1] != send[1])) {
+		debug_msg(npe, "Message %s: unexpected message received\n",
+			  what);
+		return -EIO;
+	}
+	return 0;
+}
+
+
+int npe_load_firmware(struct npe *npe, const char *name, struct device *dev)
+{
+	const struct firmware *fw_entry;
+
+	struct dl_block {
+		u32 type;
+		u32 offset;
+	} *blk;
+
+	struct dl_image {
+		u32 magic;
+		u32 id;
+		u32 size;
+		union {
+			u32 data[0];
+			struct dl_block blocks[0];
+		};
+	} *image;
+
+	struct dl_codeblock {
+		u32 npe_addr;
+		u32 size;
+		u32 data[0];
+	} *cb;
+
+	int i, j, err, data_size, instr_size, blocks, table_end;
+	u32 cmd;
+
+	if ((err = request_firmware(&fw_entry, name, dev)) != 0)
+		return err;
+
+	err = -EINVAL;
+	if (fw_entry->size < sizeof(struct dl_image)) {
+		print_npe(KERN_ERR, npe, "incomplete firmware file\n");
+		goto err;
+	}
+	image = (struct dl_image*)fw_entry->data;
+
+#if DEBUG_FW
+	print_npe(KERN_DEBUG, npe, "firmware: %08X %08X %08X (0x%X bytes)\n",
+		  image->magic, image->id, image->size, image->size * 4);
+#endif
+
+	if (image->magic == swab32(FW_MAGIC)) { /* swapped file */
+		image->id = swab32(image->id);
+		image->size = swab32(image->size);
+	} else if (image->magic != FW_MAGIC) {
+		print_npe(KERN_ERR, npe, "bad firmware file magic: 0x%X\n",
+			  image->magic);
+		goto err;
+	}
+	if ((image->size * 4 + sizeof(struct dl_image)) != fw_entry->size) {
+		print_npe(KERN_ERR, npe,
+			  "inconsistent size of firmware file\n");
+		goto err;
+	}
+	if (((image->id >> 24) & 0xF /* NPE ID */) != npe->id) {
+		print_npe(KERN_ERR, npe, "firmware file NPE ID mismatch\n");
+		goto err;
+	}
+	if (image->magic == swab32(FW_MAGIC))
+		for (i = 0; i < image->size; i++)
+			image->data[i] = swab32(image->data[i]);
+
+	if (cpu_is_ixp42x() && ((image->id >> 28) & 0xF /* device ID */)) {
+		print_npe(KERN_INFO, npe, "IXP43x/IXP46x firmware ignored on "
+			  "IXP42x\n");
+		goto err;
+	}
+
+	if (npe_running(npe)) {
+		print_npe(KERN_INFO, npe, "unable to load firmware, NPE is "
+			  "already running\n");
+		err = -EBUSY;
+		goto err;
+	}
+#if 0
+	npe_stop(npe);
+	npe_reset(npe);
+#endif
+
+	print_npe(KERN_INFO, npe, "firmware functionality 0x%X, "
+		  "revision 0x%X:%X\n", (image->id >> 16) & 0xFF,
+		  (image->id >> 8) & 0xFF, image->id & 0xFF);
+
+	if (cpu_is_ixp42x()) {
+		if (!npe->id)
+			instr_size = NPE_A_42X_INSTR_SIZE;
+		else
+			instr_size = NPE_B_AND_C_42X_INSTR_SIZE;
+		data_size = NPE_42X_DATA_SIZE;
+	} else {
+		instr_size = NPE_46X_INSTR_SIZE;
+		data_size = NPE_46X_DATA_SIZE;
+	}
+
+	for (blocks = 0; blocks * sizeof(struct dl_block) / 4 < image->size;
+	     blocks++)
+		if (image->blocks[blocks].type == FW_BLOCK_TYPE_EOF)
+			break;
+	if (blocks * sizeof(struct dl_block) / 4 >= image->size) {
+		print_npe(KERN_INFO, npe, "firmware EOF block marker not "
+			  "found\n");
+		goto err;
+	}
+
+#if DEBUG_FW
+	print_npe(KERN_DEBUG, npe, "%i firmware blocks found\n", blocks);
+#endif
+
+	table_end = blocks * sizeof(struct dl_block) / 4 + 1 /* EOF marker */;
+	for (i = 0, blk = image->blocks; i < blocks; i++, blk++) {
+		if (blk->offset > image->size - sizeof(struct dl_codeblock) / 4
+		    || blk->offset < table_end) {
+			print_npe(KERN_INFO, npe, "invalid offset 0x%X of "
+				  "firmware block #%i\n", blk->offset, i);
+			goto err;
+		}
+
+		cb = (struct dl_codeblock*)&image->data[blk->offset];
+		if (blk->type == FW_BLOCK_TYPE_INSTR) {
+			if (cb->npe_addr + cb->size > instr_size)
+				goto too_big;
+			cmd = CMD_WR_INS_MEM;
+		} else if (blk->type == FW_BLOCK_TYPE_DATA) {
+			if (cb->npe_addr + cb->size > data_size)
+				goto too_big;
+			cmd = CMD_WR_DATA_MEM;
+		} else {
+			print_npe(KERN_INFO, npe, "invalid firmware block #%i "
+				  "type 0x%X\n", i, blk->type);
+			goto err;
+		}
+		if (blk->offset + sizeof(*cb) / 4 + cb->size > image->size) {
+			print_npe(KERN_INFO, npe, "firmware block #%i doesn't "
+				  "fit in firmware image: type %c, start 0x%X,"
+				  " length 0x%X\n", i,
+				  blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D',
+				  cb->npe_addr, cb->size);
+			goto err;
+		}
+
+		for (j = 0; j < cb->size; j++)
+			npe_cmd_write(npe, cb->npe_addr + j, cmd, cb->data[j]);
+	}
+
+	npe_start(npe);
+	if (!npe_running(npe))
+		print_npe(KERN_ERR, npe, "unable to start\n");
+	release_firmware(fw_entry);
+	return 0;
+
+too_big:
+	print_npe(KERN_INFO, npe, "firmware block #%i doesn't fit in NPE "
+		  "memory: type %c, start 0x%X, length 0x%X\n", i,
+		  blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D',
+		  cb->npe_addr, cb->size);
+err:
+	release_firmware(fw_entry);
+	return err;
+}
+
+
+struct npe *npe_request(unsigned id)
+{
+	if (id < NPE_COUNT)
+		if (npe_tab[id].valid)
+			if (try_module_get(THIS_MODULE))
+				return &npe_tab[id];
+	return NULL;
+}
+
+void npe_release(struct npe *npe)
+{
+	module_put(THIS_MODULE);
+}
+
+
+static int __init npe_init_module(void)
+{
+
+	int i, found = 0;
+
+	/* This driver does not work with device tree */
+	if (of_have_populated_dt())
+		return -ENODEV;
+
+	for (i = 0; i < NPE_COUNT; i++) {
+		struct npe *npe = &npe_tab[i];
+		if (!(ixp4xx_read_feature_bits() &
+		      (IXP4XX_FEATURE_RESET_NPEA << i)))
+			continue; /* NPE already disabled or not present */
+		if (!(npe->mem_res = request_mem_region(npe->regs_phys,
+							REGS_SIZE,
+							npe_name(npe)))) {
+			print_npe(KERN_ERR, npe,
+				  "failed to request memory region\n");
+			continue;
+		}
+
+		if (npe_reset(npe))
+			continue;
+		npe->valid = 1;
+		found++;
+	}
+
+	if (!found)
+		return -ENODEV;
+	return 0;
+}
+
+static void __exit npe_cleanup_module(void)
+{
+	int i;
+
+	for (i = 0; i < NPE_COUNT; i++)
+		if (npe_tab[i].mem_res) {
+			npe_reset(&npe_tab[i]);
+			release_resource(npe_tab[i].mem_res);
+		}
+}
+
+module_init(npe_init_module);
+module_exit(npe_cleanup_module);
+
+MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(NPE_A_FIRMWARE);
+MODULE_FIRMWARE(NPE_B_FIRMWARE);
+MODULE_FIRMWARE(NPE_C_FIRMWARE);
+
+EXPORT_SYMBOL(npe_names);
+EXPORT_SYMBOL(npe_running);
+EXPORT_SYMBOL(npe_request);
+EXPORT_SYMBOL(npe_release);
+EXPORT_SYMBOL(npe_load_firmware);
+EXPORT_SYMBOL(npe_send_message);
+EXPORT_SYMBOL(npe_recv_message);
+EXPORT_SYMBOL(npe_send_recv_message);
diff --git a/drivers/soc/ixp4xx/ixp4xx-qmgr.c b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
new file mode 100644
index 000000000000..2e6d33534afe
--- /dev/null
+++ b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
@@ -0,0 +1,382 @@
+/*
+ * Intel IXP4xx Queue Manager driver for Linux
+ *
+ * Copyright (C) 2007 Krzysztof Halasa
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* FIXME: get rid of these static assignments */
+#define IRQ_IXP4XX_BASE		16
+#define IRQ_IXP4XX_QM1		(IRQ_IXP4XX_BASE + 3)
+#define IRQ_IXP4XX_QM2		(IRQ_IXP4XX_BASE + 4)
+
+static struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
+static struct resource *mem_res;
+static spinlock_t qmgr_lock;
+static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
+static void (*irq_handlers[QUEUES])(void *pdev);
+static void *irq_pdevs[QUEUES];
+
+#if DEBUG_QMGR
+char qmgr_queue_descs[QUEUES][32];
+#endif
+
+void qmgr_set_irq(unsigned int queue, int src,
+		  void (*handler)(void *pdev), void *pdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&qmgr_lock, flags);
+	if (queue < HALF_QUEUES) {
+		u32 __iomem *reg;
+		int bit;
+		BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
+		reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
+		bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
+		__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
+			     reg);
+	} else
+		/* IRQ source for queues 32-63 is fixed */
+		BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);
+
+	irq_handlers[queue] = handler;
+	irq_pdevs[queue] = pdev;
+	spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+
+static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
+{
+	int i, ret = 0;
+	u32 en_bitmap, src, stat;
+
+	/* ACK - it may clear any bits so don't rely on it */
+	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);
+
+	en_bitmap = qmgr_regs->irqen[0];
+	while (en_bitmap) {
+		i = __fls(en_bitmap); /* number of the last "low" queue */
+		en_bitmap &= ~BIT(i);
+		src = qmgr_regs->irqsrc[i >> 3];
+		stat = qmgr_regs->stat1[i >> 3];
+		if (src & 4) /* the IRQ condition is inverted */
+			stat = ~stat;
+		if (stat & BIT(src & 3)) {
+			irq_handlers[i](irq_pdevs[i]);
+			ret = IRQ_HANDLED;
+		}
+	}
+	return ret;
+}
+
+
+static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
+{
+	int i, ret = 0;
+	u32 req_bitmap;
+
+	/* ACK - it may clear any bits so don't rely on it */
+	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);
+
+	req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h;
+	while (req_bitmap) {
+		i = __fls(req_bitmap); /* number of the last "high" queue */
+		req_bitmap &= ~BIT(i);
+		irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
+		ret = IRQ_HANDLED;
+	}
+	return ret;
+}
+
+
+static irqreturn_t qmgr_irq(int irq, void *pdev)
+{
+	int i, half = (irq == IRQ_IXP4XX_QM1 ? 0 : 1);
+	u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);
+
+	if (!req_bitmap)
+		return 0;
+	__raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */
+
+	while (req_bitmap) {
+		i = __fls(req_bitmap); /* number of the last queue */
+		req_bitmap &= ~BIT(i);
+		i += half * HALF_QUEUES;
+		irq_handlers[i](irq_pdevs[i]);
+	}
+	return IRQ_HANDLED;
+}
+
+
+void qmgr_enable_irq(unsigned int queue)
+{
+	unsigned long flags;
+	int half = queue / 32;
+	u32 mask = 1 << (queue & (HALF_QUEUES - 1));
+
+	spin_lock_irqsave(&qmgr_lock, flags);
+	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
+		     &qmgr_regs->irqen[half]);
+	spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+void qmgr_disable_irq(unsigned int queue)
+{
+	unsigned long flags;
+	int half = queue / 32;
+	u32 mask = 1 << (queue & (HALF_QUEUES - 1));
+
+	spin_lock_irqsave(&qmgr_lock, flags);
+	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
+		     &qmgr_regs->irqen[half]);
+	__raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
+	spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+static inline void shift_mask(u32 *mask)
+{
+	mask[3] = mask[3] << 1 | mask[2] >> 31;
+	mask[2] = mask[2] << 1 | mask[1] >> 31;
+	mask[1] = mask[1] << 1 | mask[0] >> 31;
+	mask[0] <<= 1;
+}
+
+#if DEBUG_QMGR
+int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
+		       unsigned int nearly_empty_watermark,
+		       unsigned int nearly_full_watermark,
+		       const char *desc_format, const char* name)
+#else
+int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
+			 unsigned int nearly_empty_watermark,
+			 unsigned int nearly_full_watermark)
+#endif
+{
+	u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
+	int err;
+
+	BUG_ON(queue >= QUEUES);
+
+	if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
+		return -EINVAL;
+
+	switch (len) {
+	case 16:
+		cfg = 0 << 24;
+		mask[0] = 0x1;
+		break;
+	case 32:
+		cfg = 1 << 24;
+		mask[0] = 0x3;
+		break;
+	case 64:
+		cfg = 2 << 24;
+		mask[0] = 0xF;
+		break;
+	case 128:
+		cfg = 3 << 24;
+		mask[0] = 0xFF;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	cfg |= nearly_empty_watermark << 26;
+	cfg |= nearly_full_watermark << 29;
+	len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */
+	mask[1] = mask[2] = mask[3] = 0;
+
+	if (!try_module_get(THIS_MODULE))
+		return -ENODEV;
+
+	spin_lock_irq(&qmgr_lock);
+	if (__raw_readl(&qmgr_regs->sram[queue])) {
+		err = -EBUSY;
+		goto err;
+	}
+
+	while (1) {
+		if (!(used_sram_bitmap[0] & mask[0]) &&
+		    !(used_sram_bitmap[1] & mask[1]) &&
+		    !(used_sram_bitmap[2] & mask[2]) &&
+		    !(used_sram_bitmap[3] & mask[3]))
+			break; /* found free space */
+
+		addr++;
+		shift_mask(mask);
+		if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
+			printk(KERN_ERR "qmgr: no free SRAM space for"
+			       " queue %i\n", queue);
+			err = -ENOMEM;
+			goto err;
+		}
+	}
+
+	used_sram_bitmap[0] |= mask[0];
+	used_sram_bitmap[1] |= mask[1];
+	used_sram_bitmap[2] |= mask[2];
+	used_sram_bitmap[3] |= mask[3];
+	__raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
+#if DEBUG_QMGR
+	snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
+		 desc_format, name);
+	printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
+	       qmgr_queue_descs[queue], queue, addr);
+#endif
+	spin_unlock_irq(&qmgr_lock);
+	return 0;
+
+err:
+	spin_unlock_irq(&qmgr_lock);
+	module_put(THIS_MODULE);
+	return err;
+}
+
+void qmgr_release_queue(unsigned int queue)
+{
+	u32 cfg, addr, mask[4];
+
+	BUG_ON(queue >= QUEUES); /* not in valid range */
+
+	spin_lock_irq(&qmgr_lock);
+	cfg = __raw_readl(&qmgr_regs->sram[queue]);
+	addr = (cfg >> 14) & 0xFF;
+
+	BUG_ON(!addr); /* not requested */
+
+	switch ((cfg >> 24) & 3) {
+	case 0: mask[0] = 0x1; break;
+	case 1: mask[0] = 0x3; break;
+	case 2: mask[0] = 0xF; break;
+	case 3: mask[0] = 0xFF; break;
+	}
+
+	mask[1] = mask[2] = mask[3] = 0;
+
+	while (addr--)
+		shift_mask(mask);
+
+#if DEBUG_QMGR
+	printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
+	       qmgr_queue_descs[queue], queue);
+	qmgr_queue_descs[queue][0] = '\x0';
+#endif
+
+	while ((addr = qmgr_get_entry(queue)))
+		printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
+		       queue, addr);
+
+	__raw_writel(0, &qmgr_regs->sram[queue]);
+
+	used_sram_bitmap[0] &= ~mask[0];
+	used_sram_bitmap[1] &= ~mask[1];
+	used_sram_bitmap[2] &= ~mask[2];
+	used_sram_bitmap[3] &= ~mask[3];
+	irq_handlers[queue] = NULL; /* catch IRQ bugs */
+	spin_unlock_irq(&qmgr_lock);
+
+	module_put(THIS_MODULE);
+}
+
+static int qmgr_init(void)
+{
+	int i, err;
+	irq_handler_t handler1, handler2;
+
+	/* This driver does not work with device tree */
+	if (of_have_populated_dt())
+		return -ENODEV;
+
+	mem_res = request_mem_region(IXP4XX_QMGR_BASE_PHYS,
+				     IXP4XX_QMGR_REGION_SIZE,
+				     "IXP4xx Queue Manager");
+	if (mem_res == NULL)
+		return -EBUSY;
+
+	/* reset qmgr registers */
+	for (i = 0; i < 4; i++) {
+		__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
+		__raw_writel(0, &qmgr_regs->irqsrc[i]);
+	}
+	for (i = 0; i < 2; i++) {
+		__raw_writel(0, &qmgr_regs->stat2[i]);
+		__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
+		__raw_writel(0, &qmgr_regs->irqen[i]);
+	}
+
+	__raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
+	__raw_writel(0, &qmgr_regs->statf_h);
+
+	for (i = 0; i < QUEUES; i++)
+		__raw_writel(0, &qmgr_regs->sram[i]);
+
+	if (cpu_is_ixp42x_rev_a0()) {
+		handler1 = qmgr_irq1_a0;
+		handler2 = qmgr_irq2_a0;
+	} else
+		handler1 = handler2 = qmgr_irq;
+
+	err = request_irq(IRQ_IXP4XX_QM1, handler1, 0, "IXP4xx Queue Manager",
+			  NULL);
+	if (err) {
+		printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n",
+		       IRQ_IXP4XX_QM1, err);
+		goto error_irq;
+	}
+
+	err = request_irq(IRQ_IXP4XX_QM2, handler2, 0, "IXP4xx Queue Manager",
+			  NULL);
+	if (err) {
+		printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n",
+		       IRQ_IXP4XX_QM2, err);
+		goto error_irq2;
+	}
+
+	used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
+	spin_lock_init(&qmgr_lock);
+
+	printk(KERN_INFO "IXP4xx Queue Manager initialized.\n");
+	return 0;
+
+error_irq2:
+	free_irq(IRQ_IXP4XX_QM1, NULL);
+error_irq:
+	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
+	return err;
+}
+
+static void qmgr_remove(void)
+{
+	free_irq(IRQ_IXP4XX_QM1, NULL);
+	free_irq(IRQ_IXP4XX_QM2, NULL);
+	synchronize_irq(IRQ_IXP4XX_QM1);
+	synchronize_irq(IRQ_IXP4XX_QM2);
+	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
+}
+
+module_init(qmgr_init);
+module_exit(qmgr_remove);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Krzysztof Halasa");
+
+EXPORT_SYMBOL(qmgr_set_irq);
+EXPORT_SYMBOL(qmgr_enable_irq);
+EXPORT_SYMBOL(qmgr_disable_irq);
+#if DEBUG_QMGR
+EXPORT_SYMBOL(qmgr_queue_descs);
+EXPORT_SYMBOL(qmgr_request_queue);
+#else
+EXPORT_SYMBOL(__qmgr_request_queue);
+#endif
EXPORT_SYMBOL(qmgr_release_queue);
--
cgit v1.2.3
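
For context, the symbols exported above are consumed by drivers such as drivers/net/ethernet/xscale/ixp4xx_eth.c and drivers/net/wan/ixp4xx_hss.c, which pair an NPE with one or more queue-manager queues. The sketch below is an illustrative consumer-side outline only, not part of this patch: the queue number, queue length, watermarks and handler are hypothetical placeholders, and it assumes the existing <mach/npe.h> and <mach/qmgr.h> headers that this series has not yet relocated (qmgr_request_queue() is the wrapper from that header, which in non-DEBUG_QMGR builds drops the description and calls the exported __qmgr_request_queue()).

/*
 * Illustrative consumer sketch -- not part of this patch.
 * EXAMPLE_RXQ and all parameters are hypothetical.
 */
#include <linux/device.h>
#include <mach/npe.h>	/* npe_request(), npe_load_firmware(), ... */
#include <mach/qmgr.h>	/* qmgr_request_queue(), qmgr_set_irq(), ... */

#define EXAMPLE_RXQ	3	/* hypothetical queue number */

static void example_rx_ready(void *pdev)
{
	/* typically: disable the queue IRQ and schedule NAPI polling */
}

static int example_attach(struct device *dev, void *pdev)
{
	struct npe *npe;
	int err;

	npe = npe_request(1);		/* e.g. NPE-B */
	if (!npe)
		return -ENODEV;

	if (!npe_running(npe)) {	/* load microcode only if needed */
		err = npe_load_firmware(npe, npe_name(npe), dev);
		if (err)
			goto release_npe;
	}

	/* claim 128 dwords of queue manager SRAM for the RX queue */
	err = qmgr_request_queue(EXAMPLE_RXQ, 128, 0, 0,
				 "example %s RX", dev_name(dev));
	if (err)
		goto release_npe;

	qmgr_set_irq(EXAMPLE_RXQ, QUEUE_IRQ_SRC_NOT_EMPTY,
		     example_rx_ready, pdev);
	qmgr_enable_irq(EXAMPLE_RXQ);
	return 0;

release_npe:
	npe_release(npe);
	return err;
}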