| author | Olof Johansson <olof@lixom.net> | 2020-01-16 12:47:12 -0800 |
|---|---|---|
| committer | Olof Johansson <olof@lixom.net> | 2020-01-16 12:47:13 -0800 |
| commit | a9e3e12f3f58083b4c0fda7bcb95d05841e982bb (patch) | |
| tree | afe97a74424467b52b054b23279c5625929ae731 | /drivers/soc |
| parent | 0f827273178f5656edf1210dc94b8653df7580bf (diff) | |
| parent | 6e62bd36e9ad85a22d92b1adce6a0336ea549733 (diff) | |
| download | linux-a9e3e12f3f58083b4c0fda7bcb95d05841e982bb.tar.bz2 | |
Merge tag 'soc-fsl-next-v5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux into arm/drivers
NXP/FSL SoC driver updates for v5.6
QUICC Engine drivers
- Improve the QE drivers to be compatible with ARM/ARM64/PPC64 architectures (see the accessor sketch below the list)
- Various cleanups to the QE drivers
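The common thread in this series is replacing the PowerPC-only register accessors (in_be32()/out_be32(), setbits/clrsetbits helpers) with QE-specific wrappers that also build on ARM and ARM64. Below is a minimal sketch of what such wrappers can look like, assuming they simply defer to the generic ioread32be()/iowrite32be() helpers (the real definitions added by this series live in the QE headers, presumably include/soc/fsl/qe/qe.h); qe_clrsetbits_be32() here is a plain read-modify-write:

```c
/*
 * Hedged sketch, not the authoritative definitions: portable big-endian
 * MMIO accessors of the kind the QE code switches to in this series.
 * Assumes they wrap the generic ioread32be()/iowrite32be() helpers, which
 * exist on PPC, ARM and ARM64, unlike the PPC-only in_be32()/out_be32().
 */
#include <linux/io.h>

static inline u32 qe_ioread32be(const void __iomem *addr)
{
	return ioread32be(addr);
}

static inline void qe_iowrite32be(u32 val, void __iomem *addr)
{
	iowrite32be(val, addr);
}

/* Read-modify-write helper used in place of the PPC-only clrsetbits_be32(). */
static inline void qe_clrsetbits_be32(void __iomem *addr, u32 clr, u32 set)
{
	qe_iowrite32be((qe_ioread32be(addr) & ~clr) | set, addr);
}
```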
* tag 'soc-fsl-next-v5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux: (49 commits)
soc: fsl: qe: remove set but not used variable 'mm_gc'
soc: fsl: qe: remove PPC32 dependency from CONFIG_QUICC_ENGINE
soc: fsl: qe: remove unused #include of asm/irq.h from ucc.c
net: ethernet: freescale: make UCC_GETH explicitly depend on PPC32
net/wan/fsl_ucc_hdlc: reject muram offsets above 64K
net/wan/fsl_ucc_hdlc: fix reading of __be16 registers
net/wan/fsl_ucc_hdlc: avoid use of IS_ERR_VALUE()
soc: fsl: qe: avoid IS_ERR_VALUE in ucc_fast.c
soc: fsl: qe: drop pointless check in qe_sdma_init()
soc: fsl: qe: drop use of IS_ERR_VALUE in qe_sdma_init()
soc: fsl: qe: avoid IS_ERR_VALUE in ucc_slow.c
soc: fsl: qe: refactor cpm_muram_alloc_common to prevent BUG on error path
soc: fsl: qe: drop broken lazy call of cpm_muram_init()
soc: fsl: qe: make cpm_muram_free() ignore a negative offset
soc: fsl: qe: make cpm_muram_free() return void
soc: fsl: qe: change return type of cpm_muram_alloc() to s32
serial: ucc_uart: access __be32 field using be32_to_cpu
serial: ucc_uart: limit brg-frequency workaround to PPC32
serial: ucc_uart: use of_property_read_u32() in ucc_uart_probe()
serial: ucc_uart: stub out soft_uart_init for !CONFIG_PPC32
...
Link: https://lore.kernel.org/r/1578608351-23289-1-git-send-email-leoyang.li@nxp.com
Signed-off-by: Olof Johansson <olof@lixom.net>
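Several shortlog entries above (s32 return for cpm_muram_alloc(), cpm_muram_free() ignoring a negative offset and returning void) change the muram allocator's error-handling contract. The following is a hedged sketch of the caller pattern the diff converges on in ucc_fast.c and ucc_slow.c; the struct and function names here are illustrative, not taken from the source:

```c
/*
 * Hedged sketch of the new error-handling pattern: qe_muram_alloc() (an
 * alias for cpm_muram_alloc()) returns a signed offset that is negative on
 * failure, and qe_muram_free() silently ignores a negative offset, so
 * callers need neither IS_ERR_VALUE() nor "was this ever allocated?"
 * bookkeeping.  example_priv/example_setup/example_teardown are made up.
 */
#include <linux/errno.h>

struct example_priv {
	s32 fifo_offset;
};

static int example_setup(struct example_priv *priv, unsigned long size)
{
	priv->fifo_offset = -1;			/* "not allocated" marker */

	priv->fifo_offset = qe_muram_alloc(size, 4096);
	if (priv->fifo_offset < 0)
		return -ENOMEM;

	return 0;
}

static void example_teardown(struct example_priv *priv)
{
	/* Safe even if the allocation never happened (offset is still -1). */
	qe_muram_free(priv->fifo_offset);
}
```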
Diffstat (limited to 'drivers/soc')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | drivers/soc/fsl/qe/Kconfig | 3 |
| -rw-r--r-- | drivers/soc/fsl/qe/gpio.c | 36 |
| -rw-r--r-- | drivers/soc/fsl/qe/qe.c | 104 |
| -rw-r--r-- | drivers/soc/fsl/qe/qe_common.c | 50 |
| -rw-r--r-- | drivers/soc/fsl/qe/qe_ic.c | 285 |
| -rw-r--r-- | drivers/soc/fsl/qe/qe_ic.h | 99 |
| -rw-r--r-- | drivers/soc/fsl/qe/qe_io.c | 70 |
| -rw-r--r-- | drivers/soc/fsl/qe/qe_tdm.c | 8 |
| -rw-r--r-- | drivers/soc/fsl/qe/ucc.c | 27 |
| -rw-r--r-- | drivers/soc/fsl/qe/ucc_fast.c | 86 |
| -rw-r--r-- | drivers/soc/fsl/qe/ucc_slow.c | 60 |
| -rw-r--r-- | drivers/soc/fsl/qe/usb.c | 2 |

12 files changed, 339 insertions(+), 491 deletions(-)
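Another recurring conversion in the diff below is replacing raw of_get_property() lookups (which return a pointer into the big-endian device tree and need manual size checks) with of_property_read_u32(), which is endian-safe on any architecture. A hedged sketch of that pattern, modeled on the "brg-frequency" lookup in the qe.c hunk (the function name here is illustrative):

```c
/*
 * Hedged sketch: of_property_read_u32() replaces of_get_property() plus a
 * manual size check, and converts from the device tree's big-endian
 * representation regardless of host endianness.  It returns 0 on success
 * and a negative errno (e.g. -EINVAL, -ENODATA) otherwise.
 */
#include <linux/of.h>

static unsigned int example_get_brg_clk(struct device_node *qe)
{
	u32 brg = 0;

	/* 'brg' is left untouched when the property is missing or malformed. */
	if (of_property_read_u32(qe, "brg-frequency", &brg))
		return 0;

	return brg;
}
```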
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig index cfa4b2939992..357c5800b112 100644 --- a/drivers/soc/fsl/qe/Kconfig +++ b/drivers/soc/fsl/qe/Kconfig @@ -5,7 +5,8 @@ config QUICC_ENGINE bool "QUICC Engine (QE) framework support" - depends on FSL_SOC && PPC32 + depends on OF && HAS_IOMEM + depends on PPC || ARM || ARM64 || COMPILE_TEST select GENERIC_ALLOCATOR select CRC32 help diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c index f0c29ed8f0ff..ed75198ed254 100644 --- a/drivers/soc/fsl/qe/gpio.c +++ b/drivers/soc/fsl/qe/gpio.c @@ -41,13 +41,13 @@ static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) container_of(mm_gc, struct qe_gpio_chip, mm_gc); struct qe_pio_regs __iomem *regs = mm_gc->regs; - qe_gc->cpdata = in_be32(®s->cpdata); + qe_gc->cpdata = qe_ioread32be(®s->cpdata); qe_gc->saved_regs.cpdata = qe_gc->cpdata; - qe_gc->saved_regs.cpdir1 = in_be32(®s->cpdir1); - qe_gc->saved_regs.cpdir2 = in_be32(®s->cpdir2); - qe_gc->saved_regs.cppar1 = in_be32(®s->cppar1); - qe_gc->saved_regs.cppar2 = in_be32(®s->cppar2); - qe_gc->saved_regs.cpodr = in_be32(®s->cpodr); + qe_gc->saved_regs.cpdir1 = qe_ioread32be(®s->cpdir1); + qe_gc->saved_regs.cpdir2 = qe_ioread32be(®s->cpdir2); + qe_gc->saved_regs.cppar1 = qe_ioread32be(®s->cppar1); + qe_gc->saved_regs.cppar2 = qe_ioread32be(®s->cppar2); + qe_gc->saved_regs.cpodr = qe_ioread32be(®s->cpodr); } static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio) @@ -56,7 +56,7 @@ static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio) struct qe_pio_regs __iomem *regs = mm_gc->regs; u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio); - return !!(in_be32(®s->cpdata) & pin_mask); + return !!(qe_ioread32be(®s->cpdata) & pin_mask); } static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) @@ -74,7 +74,7 @@ static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) else qe_gc->cpdata &= ~pin_mask; - out_be32(®s->cpdata, qe_gc->cpdata); + qe_iowrite32be(qe_gc->cpdata, ®s->cpdata); spin_unlock_irqrestore(&qe_gc->lock, flags); } @@ -101,7 +101,7 @@ static void qe_gpio_set_multiple(struct gpio_chip *gc, } } - out_be32(®s->cpdata, qe_gc->cpdata); + qe_iowrite32be(qe_gc->cpdata, ®s->cpdata); spin_unlock_irqrestore(&qe_gc->lock, flags); } @@ -160,7 +160,6 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index) { struct qe_pin *qe_pin; struct gpio_chip *gc; - struct of_mm_gpio_chip *mm_gc; struct qe_gpio_chip *qe_gc; int err; unsigned long flags; @@ -186,7 +185,6 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index) goto err0; } - mm_gc = to_of_mm_gpio_chip(gc); qe_gc = gpiochip_get_data(gc); spin_lock_irqsave(&qe_gc->lock, flags); @@ -255,11 +253,15 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin) spin_lock_irqsave(&qe_gc->lock, flags); if (second_reg) { - clrsetbits_be32(®s->cpdir2, mask2, sregs->cpdir2 & mask2); - clrsetbits_be32(®s->cppar2, mask2, sregs->cppar2 & mask2); + qe_clrsetbits_be32(®s->cpdir2, mask2, + sregs->cpdir2 & mask2); + qe_clrsetbits_be32(®s->cppar2, mask2, + sregs->cppar2 & mask2); } else { - clrsetbits_be32(®s->cpdir1, mask2, sregs->cpdir1 & mask2); - clrsetbits_be32(®s->cppar1, mask2, sregs->cppar1 & mask2); + qe_clrsetbits_be32(®s->cpdir1, mask2, + sregs->cpdir1 & mask2); + qe_clrsetbits_be32(®s->cppar1, mask2, + sregs->cppar1 & mask2); } if (sregs->cpdata & mask1) @@ -267,8 +269,8 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin) else qe_gc->cpdata &= ~mask1; - out_be32(®s->cpdata, qe_gc->cpdata); - clrsetbits_be32(®s->cpodr, 
mask1, sregs->cpodr & mask1); + qe_iowrite32be(qe_gc->cpdata, ®s->cpdata); + qe_clrsetbits_be32(®s->cpodr, mask1, sregs->cpodr & mask1); spin_unlock_irqrestore(&qe_gc->lock, flags); } diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c index 417df7e19281..96c2057d8d8e 100644 --- a/drivers/soc/fsl/qe/qe.c +++ b/drivers/soc/fsl/qe/qe.c @@ -22,16 +22,12 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/ioport.h> +#include <linux/iopoll.h> #include <linux/crc32.h> #include <linux/mod_devicetable.h> #include <linux/of_platform.h> -#include <asm/irq.h> -#include <asm/page.h> -#include <asm/pgtable.h> #include <soc/fsl/qe/immap_qe.h> #include <soc/fsl/qe/qe.h> -#include <asm/prom.h> -#include <asm/rheap.h> static void qe_snums_init(void); static int qe_sdma_init(void); @@ -108,11 +104,12 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input) { unsigned long flags; u8 mcn_shift = 0, dev_shift = 0; - u32 ret; + u32 val; + int ret; spin_lock_irqsave(&qe_lock, flags); if (cmd == QE_RESET) { - out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG)); + qe_iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr); } else { if (cmd == QE_ASSIGN_PAGE) { /* Here device is the SNUM, not sub-block */ @@ -129,20 +126,18 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input) mcn_shift = QE_CR_MCN_NORMAL_SHIFT; } - out_be32(&qe_immr->cp.cecdr, cmd_input); - out_be32(&qe_immr->cp.cecr, - (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32) - mcn_protocol << mcn_shift)); + qe_iowrite32be(cmd_input, &qe_immr->cp.cecdr); + qe_iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift), + &qe_immr->cp.cecr); } /* wait for the QE_CR_FLG to clear */ - ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0, - 100, 0); - /* On timeout (e.g. failure), the expression will be false (ret == 0), - otherwise it will be true (ret == 1). */ + ret = readx_poll_timeout_atomic(qe_ioread32be, &qe_immr->cp.cecr, val, + (val & QE_CR_FLG) == 0, 0, 100); + /* On timeout, ret is -ETIMEDOUT, otherwise it will be 0. */ spin_unlock_irqrestore(&qe_lock, flags); - return ret == 1; + return ret == 0; } EXPORT_SYMBOL(qe_issue_cmd); @@ -164,8 +159,7 @@ static unsigned int brg_clk = 0; unsigned int qe_get_brg_clk(void) { struct device_node *qe; - int size; - const u32 *prop; + u32 brg; unsigned int mod; if (brg_clk) @@ -175,9 +169,8 @@ unsigned int qe_get_brg_clk(void) if (!qe) return brg_clk; - prop = of_get_property(qe, "brg-frequency", &size); - if (prop && size == sizeof(*prop)) - brg_clk = *prop; + if (!of_property_read_u32(qe, "brg-frequency", &brg)) + brg_clk = brg; of_node_put(qe); @@ -197,6 +190,14 @@ EXPORT_SYMBOL(qe_get_brg_clk); #define PVR_VER_836x 0x8083 #define PVR_VER_832x 0x8084 +static bool qe_general4_errata(void) +{ +#ifdef CONFIG_PPC32 + return pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x); +#endif + return false; +} + /* Program the BRG to the given sampling rate and multiplier * * @brg: the BRG, QE_BRG1 - QE_BRG16 @@ -223,14 +224,14 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier) /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says that the BRG divisor must be even if you're not using divide-by-16 mode. 
*/ - if (pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x)) + if (qe_general4_errata()) if (!div16 && (divisor & 1) && (divisor > 3)) divisor++; tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | QE_BRGC_ENABLE | div16; - out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval); + qe_iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]); return 0; } @@ -364,22 +365,20 @@ EXPORT_SYMBOL(qe_put_snum); static int qe_sdma_init(void) { struct sdma __iomem *sdma = &qe_immr->sdma; - static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM; - - if (!sdma) - return -ENODEV; + static s32 sdma_buf_offset = -ENOMEM; /* allocate 2 internal temporary buffers (512 bytes size each) for * the SDMA */ - if (IS_ERR_VALUE(sdma_buf_offset)) { + if (sdma_buf_offset < 0) { sdma_buf_offset = qe_muram_alloc(512 * 2, 4096); - if (IS_ERR_VALUE(sdma_buf_offset)) + if (sdma_buf_offset < 0) return -ENOMEM; } - out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK); - out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK | - (0x1 << QE_SDMR_CEN_SHIFT))); + qe_iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK, + &sdma->sdebcr); + qe_iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)), + &sdma->sdmr); return 0; } @@ -417,14 +416,14 @@ static void qe_upload_microcode(const void *base, "uploading microcode '%s'\n", ucode->id); /* Use auto-increment */ - out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) | - QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR); + qe_iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR, + &qe_immr->iram.iadd); for (i = 0; i < be32_to_cpu(ucode->count); i++) - out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i])); + qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata); /* Set I-RAM Ready Register */ - out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY)); + qe_iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready); } /* @@ -509,7 +508,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware) * If the microcode calls for it, split the I-RAM. 
*/ if (!firmware->split) - setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR); + qe_setbits_be16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR); if (firmware->soc.model) printk(KERN_INFO @@ -543,11 +542,13 @@ int qe_upload_firmware(const struct qe_firmware *firmware) u32 trap = be32_to_cpu(ucode->traps[j]); if (trap) - out_be32(&qe_immr->rsp[i].tibcr[j], trap); + qe_iowrite32be(trap, + &qe_immr->rsp[i].tibcr[j]); } /* Enable traps */ - out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr)); + qe_iowrite32be(be32_to_cpu(ucode->eccr), + &qe_immr->rsp[i].eccr); } qe_firmware_uploaded = 1; @@ -565,11 +566,9 @@ EXPORT_SYMBOL(qe_upload_firmware); struct qe_firmware_info *qe_get_firmware_info(void) { static int initialized; - struct property *prop; struct device_node *qe; struct device_node *fw = NULL; const char *sprop; - unsigned int i; /* * If we haven't checked yet, and a driver hasn't uploaded a firmware @@ -603,20 +602,11 @@ struct qe_firmware_info *qe_get_firmware_info(void) strlcpy(qe_firmware_info.id, sprop, sizeof(qe_firmware_info.id)); - prop = of_find_property(fw, "extended-modes", NULL); - if (prop && (prop->length == sizeof(u64))) { - const u64 *iprop = prop->value; - - qe_firmware_info.extended_modes = *iprop; - } + of_property_read_u64(fw, "extended-modes", + &qe_firmware_info.extended_modes); - prop = of_find_property(fw, "virtual-traps", NULL); - if (prop && (prop->length == 32)) { - const u32 *iprop = prop->value; - - for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++) - qe_firmware_info.vtraps[i] = iprop[i]; - } + of_property_read_u32_array(fw, "virtual-traps", qe_firmware_info.vtraps, + ARRAY_SIZE(qe_firmware_info.vtraps)); of_node_put(fw); @@ -627,17 +617,13 @@ EXPORT_SYMBOL(qe_get_firmware_info); unsigned int qe_get_num_of_risc(void) { struct device_node *qe; - int size; unsigned int num_of_risc = 0; - const u32 *prop; qe = qe_get_device_node(); if (!qe) return num_of_risc; - prop = of_get_property(qe, "fsl,qe-num-riscs", &size); - if (prop && size == sizeof(*prop)) - num_of_risc = *prop; + of_property_read_u32(qe, "fsl,qe-num-riscs", &num_of_risc); of_node_put(qe); diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c index 83e85e61669f..a81a1a79f1ca 100644 --- a/drivers/soc/fsl/qe/qe_common.c +++ b/drivers/soc/fsl/qe/qe_common.c @@ -32,7 +32,7 @@ static phys_addr_t muram_pbase; struct muram_block { struct list_head head; - unsigned long start; + s32 start; int size; }; @@ -110,34 +110,30 @@ out_muram: * @algo: algorithm for alloc. * @data: data for genalloc's algorithm. * - * This function returns an offset into the muram area. + * This function returns a non-negative offset into the muram area, or + * a negative errno on failure. 
*/ -static unsigned long cpm_muram_alloc_common(unsigned long size, - genpool_algo_t algo, void *data) +static s32 cpm_muram_alloc_common(unsigned long size, + genpool_algo_t algo, void *data) { struct muram_block *entry; - unsigned long start; - - if (!muram_pool && cpm_muram_init()) - goto out2; + s32 start; + entry = kmalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return -ENOMEM; start = gen_pool_alloc_algo(muram_pool, size, algo, data); - if (!start) - goto out2; + if (!start) { + kfree(entry); + return -ENOMEM; + } start = start - GENPOOL_OFFSET; memset_io(cpm_muram_addr(start), 0, size); - entry = kmalloc(sizeof(*entry), GFP_ATOMIC); - if (!entry) - goto out1; entry->start = start; entry->size = size; list_add(&entry->head, &muram_block_list); return start; -out1: - gen_pool_free(muram_pool, start, size); -out2: - return (unsigned long)-ENOMEM; } /* @@ -145,13 +141,14 @@ out2: * @size: number of bytes to allocate * @align: requested alignment, in bytes * - * This function returns an offset into the muram area. + * This function returns a non-negative offset into the muram area, or + * a negative errno on failure. * Use cpm_dpram_addr() to get the virtual address of the area. * Use cpm_muram_free() to free the allocation. */ -unsigned long cpm_muram_alloc(unsigned long size, unsigned long align) +s32 cpm_muram_alloc(unsigned long size, unsigned long align) { - unsigned long start; + s32 start; unsigned long flags; struct genpool_data_align muram_pool_data; @@ -168,12 +165,15 @@ EXPORT_SYMBOL(cpm_muram_alloc); * cpm_muram_free - free a chunk of multi-user ram * @offset: The beginning of the chunk as returned by cpm_muram_alloc(). */ -int cpm_muram_free(unsigned long offset) +void cpm_muram_free(s32 offset) { unsigned long flags; int size; struct muram_block *tmp; + if (offset < 0) + return; + size = 0; spin_lock_irqsave(&cpm_muram_lock, flags); list_for_each_entry(tmp, &muram_block_list, head) { @@ -186,7 +186,6 @@ int cpm_muram_free(unsigned long offset) } gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size); spin_unlock_irqrestore(&cpm_muram_lock, flags); - return size; } EXPORT_SYMBOL(cpm_muram_free); @@ -194,13 +193,14 @@ EXPORT_SYMBOL(cpm_muram_free); * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram * @offset: offset of allocation start address * @size: number of bytes to allocate - * This function returns an offset into the muram area + * This function returns @offset if the area was available, a negative + * errno otherwise. * Use cpm_dpram_addr() to get the virtual address of the area. * Use cpm_muram_free() to free the allocation. 
*/ -unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size) +s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size) { - unsigned long start; + s32 start; unsigned long flags; struct genpool_data_fixed muram_pool_data_fixed; diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c index 9bac546998d3..0dd5bdb04a14 100644 --- a/drivers/soc/fsl/qe/qe_ic.c +++ b/drivers/soc/fsl/qe/qe_ic.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> +#include <linux/irq.h> #include <linux/reboot.h> #include <linux/slab.h> #include <linux/stddef.h> @@ -24,9 +25,57 @@ #include <linux/spinlock.h> #include <asm/irq.h> #include <asm/io.h> -#include <soc/fsl/qe/qe_ic.h> +#include <soc/fsl/qe/qe.h> + +#define NR_QE_IC_INTS 64 + +/* QE IC registers offset */ +#define QEIC_CICR 0x00 +#define QEIC_CIVEC 0x04 +#define QEIC_CIPXCC 0x10 +#define QEIC_CIPYCC 0x14 +#define QEIC_CIPWCC 0x18 +#define QEIC_CIPZCC 0x1c +#define QEIC_CIMR 0x20 +#define QEIC_CRIMR 0x24 +#define QEIC_CIPRTA 0x30 +#define QEIC_CIPRTB 0x34 +#define QEIC_CHIVEC 0x60 + +struct qe_ic { + /* Control registers offset */ + u32 __iomem *regs; + + /* The remapper for this QEIC */ + struct irq_domain *irqhost; + + /* The "linux" controller struct */ + struct irq_chip hc_irq; + + /* VIRQ numbers of QE high/low irqs */ + unsigned int virq_high; + unsigned int virq_low; +}; -#include "qe_ic.h" +/* + * QE interrupt controller internal structure + */ +struct qe_ic_info { + /* Location of this source at the QIMR register */ + u32 mask; + + /* Mask register offset */ + u32 mask_reg; + + /* + * For grouped interrupts sources - the interrupt code as + * appears at the group priority register + */ + u8 pri_code; + + /* Group priority register offset */ + u32 pri_reg; +}; static DEFINE_RAW_SPINLOCK(qe_ic_lock); @@ -171,15 +220,15 @@ static struct qe_ic_info qe_ic_info[] = { }, }; -static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg) +static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg) { - return in_be32(base + (reg >> 2)); + return qe_ioread32be(base + (reg >> 2)); } -static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg, +static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg, u32 value) { - out_be32(base + (reg >> 2), value); + qe_iowrite32be(value, base + (reg >> 2)); } static inline struct qe_ic *qe_ic_from_irq(unsigned int virq) @@ -281,8 +330,8 @@ static const struct irq_domain_ops qe_ic_host_ops = { .xlate = irq_domain_xlate_onetwocell, }; -/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ -unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) +/* Return an interrupt vector or 0 if no interrupt is pending. */ +static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) { int irq; @@ -292,13 +341,13 @@ unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26; if (irq == 0) - return NO_IRQ; + return 0; return irq_linear_revmap(qe_ic->irqhost, irq); } -/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ -unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) +/* Return an interrupt vector or 0 if no interrupt is pending. 
*/ +static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) { int irq; @@ -308,18 +357,60 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26; if (irq == 0) - return NO_IRQ; + return 0; return irq_linear_revmap(qe_ic->irqhost, irq); } -void __init qe_ic_init(struct device_node *node, unsigned int flags, - void (*low_handler)(struct irq_desc *desc), - void (*high_handler)(struct irq_desc *desc)) +static void qe_ic_cascade_low(struct irq_desc *desc) +{ + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); + unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); + struct irq_chip *chip = irq_desc_get_chip(desc); + + if (cascade_irq != 0) + generic_handle_irq(cascade_irq); + + if (chip->irq_eoi) + chip->irq_eoi(&desc->irq_data); +} + +static void qe_ic_cascade_high(struct irq_desc *desc) +{ + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); + unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); + struct irq_chip *chip = irq_desc_get_chip(desc); + + if (cascade_irq != 0) + generic_handle_irq(cascade_irq); + + if (chip->irq_eoi) + chip->irq_eoi(&desc->irq_data); +} + +static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc) { + struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); + unsigned int cascade_irq; + struct irq_chip *chip = irq_desc_get_chip(desc); + + cascade_irq = qe_ic_get_high_irq(qe_ic); + if (cascade_irq == 0) + cascade_irq = qe_ic_get_low_irq(qe_ic); + + if (cascade_irq != 0) + generic_handle_irq(cascade_irq); + + chip->irq_eoi(&desc->irq_data); +} + +static void __init qe_ic_init(struct device_node *node) +{ + void (*low_handler)(struct irq_desc *desc); + void (*high_handler)(struct irq_desc *desc); struct qe_ic *qe_ic; struct resource res; - u32 temp = 0, ret, high_active = 0; + u32 ret; ret = of_address_to_resource(node, 0, &res); if (ret) @@ -343,166 +434,42 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags, qe_ic->virq_high = irq_of_parse_and_map(node, 0); qe_ic->virq_low = irq_of_parse_and_map(node, 1); - if (qe_ic->virq_low == NO_IRQ) { + if (!qe_ic->virq_low) { printk(KERN_ERR "Failed to map QE_IC low IRQ\n"); kfree(qe_ic); return; } - - /* default priority scheme is grouped. If spread mode is */ - /* required, configure cicr accordingly. 
*/ - if (flags & QE_IC_SPREADMODE_GRP_W) - temp |= CICR_GWCC; - if (flags & QE_IC_SPREADMODE_GRP_X) - temp |= CICR_GXCC; - if (flags & QE_IC_SPREADMODE_GRP_Y) - temp |= CICR_GYCC; - if (flags & QE_IC_SPREADMODE_GRP_Z) - temp |= CICR_GZCC; - if (flags & QE_IC_SPREADMODE_GRP_RISCA) - temp |= CICR_GRTA; - if (flags & QE_IC_SPREADMODE_GRP_RISCB) - temp |= CICR_GRTB; - - /* choose destination signal for highest priority interrupt */ - if (flags & QE_IC_HIGH_SIGNAL) { - temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT); - high_active = 1; + if (qe_ic->virq_high != qe_ic->virq_low) { + low_handler = qe_ic_cascade_low; + high_handler = qe_ic_cascade_high; + } else { + low_handler = qe_ic_cascade_muxed_mpic; + high_handler = NULL; } - qe_ic_write(qe_ic->regs, QEIC_CICR, temp); + qe_ic_write(qe_ic->regs, QEIC_CICR, 0); irq_set_handler_data(qe_ic->virq_low, qe_ic); irq_set_chained_handler(qe_ic->virq_low, low_handler); - if (qe_ic->virq_high != NO_IRQ && - qe_ic->virq_high != qe_ic->virq_low) { + if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) { irq_set_handler_data(qe_ic->virq_high, qe_ic); irq_set_chained_handler(qe_ic->virq_high, high_handler); } } -void qe_ic_set_highest_priority(unsigned int virq, int high) +static int __init qe_ic_of_init(void) { - struct qe_ic *qe_ic = qe_ic_from_irq(virq); - unsigned int src = virq_to_hw(virq); - u32 temp = 0; - - temp = qe_ic_read(qe_ic->regs, QEIC_CICR); - - temp &= ~CICR_HP_MASK; - temp |= src << CICR_HP_SHIFT; - - temp &= ~CICR_HPIT_MASK; - temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT; - - qe_ic_write(qe_ic->regs, QEIC_CICR, temp); -} - -/* Set Priority level within its group, from 1 to 8 */ -int qe_ic_set_priority(unsigned int virq, unsigned int priority) -{ - struct qe_ic *qe_ic = qe_ic_from_irq(virq); - unsigned int src = virq_to_hw(virq); - u32 temp; + struct device_node *np; - if (priority > 8 || priority == 0) - return -EINVAL; - if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info), - "%s: Invalid hw irq number for QEIC\n", __func__)) - return -EINVAL; - if (qe_ic_info[src].pri_reg == 0) - return -EINVAL; - - temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg); - - if (priority < 4) { - temp &= ~(0x7 << (32 - priority * 3)); - temp |= qe_ic_info[src].pri_code << (32 - priority * 3); - } else { - temp &= ~(0x7 << (24 - priority * 3)); - temp |= qe_ic_info[src].pri_code << (24 - priority * 3); + np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); + if (!np) { + np = of_find_node_by_type(NULL, "qeic"); + if (!np) + return -ENODEV; } - - qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp); - + qe_ic_init(np); + of_node_put(np); return 0; } - -/* Set a QE priority to use high irq, only priority 1~2 can use high irq */ -int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high) -{ - struct qe_ic *qe_ic = qe_ic_from_irq(virq); - unsigned int src = virq_to_hw(virq); - u32 temp, control_reg = QEIC_CICNR, shift = 0; - - if (priority > 2 || priority == 0) - return -EINVAL; - if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info), - "%s: Invalid hw irq number for QEIC\n", __func__)) - return -EINVAL; - - switch (qe_ic_info[src].pri_reg) { - case QEIC_CIPZCC: - shift = CICNR_ZCC1T_SHIFT; - break; - case QEIC_CIPWCC: - shift = CICNR_WCC1T_SHIFT; - break; - case QEIC_CIPYCC: - shift = CICNR_YCC1T_SHIFT; - break; - case QEIC_CIPXCC: - shift = CICNR_XCC1T_SHIFT; - break; - case QEIC_CIPRTA: - shift = CRICR_RTA1T_SHIFT; - control_reg = QEIC_CRICR; - break; - case QEIC_CIPRTB: - shift = CRICR_RTB1T_SHIFT; - control_reg = QEIC_CRICR; - 
break; - default: - return -EINVAL; - } - - shift += (2 - priority) * 2; - temp = qe_ic_read(qe_ic->regs, control_reg); - temp &= ~(SIGNAL_MASK << shift); - temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift; - qe_ic_write(qe_ic->regs, control_reg, temp); - - return 0; -} - -static struct bus_type qe_ic_subsys = { - .name = "qe_ic", - .dev_name = "qe_ic", -}; - -static struct device device_qe_ic = { - .id = 0, - .bus = &qe_ic_subsys, -}; - -static int __init init_qe_ic_sysfs(void) -{ - int rc; - - printk(KERN_DEBUG "Registering qe_ic with sysfs...\n"); - - rc = subsys_system_register(&qe_ic_subsys, NULL); - if (rc) { - printk(KERN_ERR "Failed registering qe_ic sys class\n"); - return -ENODEV; - } - rc = device_register(&device_qe_ic); - if (rc) { - printk(KERN_ERR "Failed registering qe_ic sys device\n"); - return -ENODEV; - } - return 0; -} - -subsys_initcall(init_qe_ic_sysfs); +subsys_initcall(qe_ic_of_init); diff --git a/drivers/soc/fsl/qe/qe_ic.h b/drivers/soc/fsl/qe/qe_ic.h deleted file mode 100644 index 08c695672a03..000000000000 --- a/drivers/soc/fsl/qe/qe_ic.h +++ /dev/null @@ -1,99 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * drivers/soc/fsl/qe/qe_ic.h - * - * QUICC ENGINE Interrupt Controller Header - * - * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. - * - * Author: Li Yang <leoli@freescale.com> - * Based on code from Shlomi Gridish <gridish@freescale.com> - */ -#ifndef _POWERPC_SYSDEV_QE_IC_H -#define _POWERPC_SYSDEV_QE_IC_H - -#include <soc/fsl/qe/qe_ic.h> - -#define NR_QE_IC_INTS 64 - -/* QE IC registers offset */ -#define QEIC_CICR 0x00 -#define QEIC_CIVEC 0x04 -#define QEIC_CRIPNR 0x08 -#define QEIC_CIPNR 0x0c -#define QEIC_CIPXCC 0x10 -#define QEIC_CIPYCC 0x14 -#define QEIC_CIPWCC 0x18 -#define QEIC_CIPZCC 0x1c -#define QEIC_CIMR 0x20 -#define QEIC_CRIMR 0x24 -#define QEIC_CICNR 0x28 -#define QEIC_CIPRTA 0x30 -#define QEIC_CIPRTB 0x34 -#define QEIC_CRICR 0x3c -#define QEIC_CHIVEC 0x60 - -/* Interrupt priority registers */ -#define CIPCC_SHIFT_PRI0 29 -#define CIPCC_SHIFT_PRI1 26 -#define CIPCC_SHIFT_PRI2 23 -#define CIPCC_SHIFT_PRI3 20 -#define CIPCC_SHIFT_PRI4 13 -#define CIPCC_SHIFT_PRI5 10 -#define CIPCC_SHIFT_PRI6 7 -#define CIPCC_SHIFT_PRI7 4 - -/* CICR priority modes */ -#define CICR_GWCC 0x00040000 -#define CICR_GXCC 0x00020000 -#define CICR_GYCC 0x00010000 -#define CICR_GZCC 0x00080000 -#define CICR_GRTA 0x00200000 -#define CICR_GRTB 0x00400000 -#define CICR_HPIT_SHIFT 8 -#define CICR_HPIT_MASK 0x00000300 -#define CICR_HP_SHIFT 24 -#define CICR_HP_MASK 0x3f000000 - -/* CICNR */ -#define CICNR_WCC1T_SHIFT 20 -#define CICNR_ZCC1T_SHIFT 28 -#define CICNR_YCC1T_SHIFT 12 -#define CICNR_XCC1T_SHIFT 4 - -/* CRICR */ -#define CRICR_RTA1T_SHIFT 20 -#define CRICR_RTB1T_SHIFT 28 - -/* Signal indicator */ -#define SIGNAL_MASK 3 -#define SIGNAL_HIGH 2 -#define SIGNAL_LOW 0 - -struct qe_ic { - /* Control registers offset */ - volatile u32 __iomem *regs; - - /* The remapper for this QEIC */ - struct irq_domain *irqhost; - - /* The "linux" controller struct */ - struct irq_chip hc_irq; - - /* VIRQ numbers of QE high/low irqs */ - unsigned int virq_high; - unsigned int virq_low; -}; - -/* - * QE interrupt controller internal structure - */ -struct qe_ic_info { - u32 mask; /* location of this source at the QIMR register. 
*/ - u32 mask_reg; /* Mask register offset */ - u8 pri_code; /* for grouped interrupts sources - the interrupt - code as appears at the group priority register */ - u32 pri_reg; /* Group priority register offset */ -}; - -#endif /* _POWERPC_SYSDEV_QE_IC_H */ diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c index 3657e296a8a2..11ea08e97db7 100644 --- a/drivers/soc/fsl/qe/qe_io.c +++ b/drivers/soc/fsl/qe/qe_io.c @@ -18,8 +18,6 @@ #include <asm/io.h> #include <soc/fsl/qe/qe.h> -#include <asm/prom.h> -#include <sysdev/fsl_soc.h> #undef DEBUG @@ -30,7 +28,7 @@ int par_io_init(struct device_node *np) { struct resource res; int ret; - const u32 *num_ports; + u32 num_ports; /* Map Parallel I/O ports registers */ ret = of_address_to_resource(np, 0, &res); @@ -38,9 +36,8 @@ int par_io_init(struct device_node *np) return ret; par_io = ioremap(res.start, resource_size(&res)); - num_ports = of_get_property(np, "num-ports", NULL); - if (num_ports) - num_par_io_ports = *num_ports; + if (!of_property_read_u32(np, "num-ports", &num_ports)) + num_par_io_ports = num_ports; return 0; } @@ -57,16 +54,16 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir, pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1))); /* Set open drain, if required */ - tmp_val = in_be32(&par_io->cpodr); + tmp_val = qe_ioread32be(&par_io->cpodr); if (open_drain) - out_be32(&par_io->cpodr, pin_mask1bit | tmp_val); + qe_iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr); else - out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val); + qe_iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr); /* define direction */ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ? - in_be32(&par_io->cpdir2) : - in_be32(&par_io->cpdir1); + qe_ioread32be(&par_io->cpdir2) : + qe_ioread32be(&par_io->cpdir1); /* get all bits mask for 2 bit per port */ pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS - @@ -78,34 +75,30 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir, /* clear and set 2 bits mask */ if (pin > (QE_PIO_PINS / 2) - 1) { - out_be32(&par_io->cpdir2, - ~pin_mask2bits & tmp_val); + qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2); tmp_val &= ~pin_mask2bits; - out_be32(&par_io->cpdir2, new_mask2bits | tmp_val); + qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2); } else { - out_be32(&par_io->cpdir1, - ~pin_mask2bits & tmp_val); + qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1); tmp_val &= ~pin_mask2bits; - out_be32(&par_io->cpdir1, new_mask2bits | tmp_val); + qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1); } /* define pin assignment */ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ? 
- in_be32(&par_io->cppar2) : - in_be32(&par_io->cppar1); + qe_ioread32be(&par_io->cppar2) : + qe_ioread32be(&par_io->cppar1); new_mask2bits = (u32) (assignment << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2)); /* clear and set 2 bits mask */ if (pin > (QE_PIO_PINS / 2) - 1) { - out_be32(&par_io->cppar2, - ~pin_mask2bits & tmp_val); + qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2); tmp_val &= ~pin_mask2bits; - out_be32(&par_io->cppar2, new_mask2bits | tmp_val); + qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2); } else { - out_be32(&par_io->cppar1, - ~pin_mask2bits & tmp_val); + qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1); tmp_val &= ~pin_mask2bits; - out_be32(&par_io->cppar1, new_mask2bits | tmp_val); + qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1); } } EXPORT_SYMBOL(__par_io_config_pin); @@ -133,12 +126,12 @@ int par_io_data_set(u8 port, u8 pin, u8 val) /* calculate pin location */ pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin)); - tmp_val = in_be32(&par_io[port].cpdata); + tmp_val = qe_ioread32be(&par_io[port].cpdata); if (val == 0) /* clear */ - out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val); + qe_iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata); else /* set */ - out_be32(&par_io[port].cpdata, pin_mask | tmp_val); + qe_iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata); return 0; } @@ -147,23 +140,20 @@ EXPORT_SYMBOL(par_io_data_set); int par_io_of_config(struct device_node *np) { struct device_node *pio; - const phandle *ph; int pio_map_len; - const unsigned int *pio_map; + const __be32 *pio_map; if (par_io == NULL) { printk(KERN_ERR "par_io not initialized\n"); return -1; } - ph = of_get_property(np, "pio-handle", NULL); - if (ph == NULL) { + pio = of_parse_phandle(np, "pio-handle", 0); + if (pio == NULL) { printk(KERN_ERR "pio-handle not available\n"); return -1; } - pio = of_find_node_by_phandle(*ph); - pio_map = of_get_property(pio, "pio-map", &pio_map_len); if (pio_map == NULL) { printk(KERN_ERR "pio-map is not set!\n"); @@ -176,9 +166,15 @@ int par_io_of_config(struct device_node *np) } while (pio_map_len > 0) { - par_io_config_pin((u8) pio_map[0], (u8) pio_map[1], - (int) pio_map[2], (int) pio_map[3], - (int) pio_map[4], (int) pio_map[5]); + u8 port = be32_to_cpu(pio_map[0]); + u8 pin = be32_to_cpu(pio_map[1]); + int dir = be32_to_cpu(pio_map[2]); + int open_drain = be32_to_cpu(pio_map[3]); + int assignment = be32_to_cpu(pio_map[4]); + int has_irq = be32_to_cpu(pio_map[5]); + + par_io_config_pin(port, pin, dir, open_drain, + assignment, has_irq); pio_map += 6; pio_map_len -= 6; } diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c index e37ebc3be661..7d7d78d3ee50 100644 --- a/drivers/soc/fsl/qe/qe_tdm.c +++ b/drivers/soc/fsl/qe/qe_tdm.c @@ -169,10 +169,10 @@ void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info) &siram[siram_entry_id * 32 + 0x200 + i]); } - setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)], - SIR_LAST); - setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)], - SIR_LAST); + qe_setbits_be16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)], + SIR_LAST); + qe_setbits_be16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)], + SIR_LAST); /* Set SIxMR register */ sixmr = SIMR_SAD(siram_entry_id); diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c index 024d239ac1e1..90157acc5ba6 100644 --- a/drivers/soc/fsl/qe/ucc.c +++ b/drivers/soc/fsl/qe/ucc.c @@ -15,7 +15,6 @@ #include <linux/spinlock.h> #include 
<linux/export.h> -#include <asm/irq.h> #include <asm/io.h> #include <soc/fsl/qe/immap_qe.h> #include <soc/fsl/qe/qe.h> @@ -35,8 +34,8 @@ int ucc_set_qe_mux_mii_mng(unsigned int ucc_num) return -EINVAL; spin_lock_irqsave(&cmxgcr_lock, flags); - clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG, - ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT); + qe_clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG, + ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT); spin_unlock_irqrestore(&cmxgcr_lock, flags); return 0; @@ -80,8 +79,8 @@ int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed) return -EINVAL; } - clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK, - UCC_GUEMR_SET_RESERVED3 | speed); + qe_clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK, + UCC_GUEMR_SET_RESERVED3 | speed); return 0; } @@ -109,9 +108,9 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask) get_cmxucr_reg(ucc_num, &cmxucr, ®_num, &shift); if (set) - setbits32(cmxucr, mask << shift); + qe_setbits_be32(cmxucr, mask << shift); else - clrbits32(cmxucr, mask << shift); + qe_clrbits_be32(cmxucr, mask << shift); return 0; } @@ -207,8 +206,8 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock, if (mode == COMM_DIR_RX) shift += 4; - clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift, - clock_bits << shift); + qe_clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift, + clock_bits << shift); return 0; } @@ -540,8 +539,8 @@ int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock, cmxs1cr = (tdm_num < 4) ? &qe_mux_reg->cmxsi1cr_l : &qe_mux_reg->cmxsi1cr_h; - qe_clrsetbits32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift, - clock_bits << shift); + qe_clrsetbits_be32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift, + clock_bits << shift); return 0; } @@ -650,9 +649,9 @@ int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock, shift = ucc_get_tdm_sync_shift(mode, tdm_num); - qe_clrsetbits32(&qe_mux_reg->cmxsi1syr, - QE_CMXUCR_TX_CLK_SRC_MASK << shift, - source << shift); + qe_clrsetbits_be32(&qe_mux_reg->cmxsi1syr, + QE_CMXUCR_TX_CLK_SRC_MASK << shift, + source << shift); return 0; } diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c index af4d80e38521..ad6193ea4597 100644 --- a/drivers/soc/fsl/qe/ucc_fast.c +++ b/drivers/soc/fsl/qe/ucc_fast.c @@ -29,41 +29,42 @@ void ucc_fast_dump_regs(struct ucc_fast_private * uccf) printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs); printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n", - &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr)); + &uccf->uf_regs->gumr, qe_ioread32be(&uccf->uf_regs->gumr)); printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n", - &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr)); + &uccf->uf_regs->upsmr, qe_ioread32be(&uccf->uf_regs->upsmr)); printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr)); + &uccf->uf_regs->utodr, qe_ioread16be(&uccf->uf_regs->utodr)); printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr)); + &uccf->uf_regs->udsr, qe_ioread16be(&uccf->uf_regs->udsr)); printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n", - &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce)); + &uccf->uf_regs->ucce, qe_ioread32be(&uccf->uf_regs->ucce)); printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n", - &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm)); + &uccf->uf_regs->uccm, qe_ioread32be(&uccf->uf_regs->uccm)); printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n", - &uccf->uf_regs->uccs, 
in_8(&uccf->uf_regs->uccs)); + &uccf->uf_regs->uccs, qe_ioread8(&uccf->uf_regs->uccs)); printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n", - &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb)); + &uccf->uf_regs->urfb, qe_ioread32be(&uccf->uf_regs->urfb)); printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs)); + &uccf->uf_regs->urfs, qe_ioread16be(&uccf->uf_regs->urfs)); printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet)); + &uccf->uf_regs->urfet, qe_ioread16be(&uccf->uf_regs->urfet)); printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset)); + &uccf->uf_regs->urfset, + qe_ioread16be(&uccf->uf_regs->urfset)); printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n", - &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb)); + &uccf->uf_regs->utfb, qe_ioread32be(&uccf->uf_regs->utfb)); printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs)); + &uccf->uf_regs->utfs, qe_ioread16be(&uccf->uf_regs->utfs)); printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet)); + &uccf->uf_regs->utfet, qe_ioread16be(&uccf->uf_regs->utfet)); printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt)); + &uccf->uf_regs->utftt, qe_ioread16be(&uccf->uf_regs->utftt)); printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n", - &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt)); + &uccf->uf_regs->utpt, qe_ioread16be(&uccf->uf_regs->utpt)); printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n", - &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry)); + &uccf->uf_regs->urtry, qe_ioread32be(&uccf->uf_regs->urtry)); printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n", - &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr)); + &uccf->uf_regs->guemr, qe_ioread8(&uccf->uf_regs->guemr)); } EXPORT_SYMBOL(ucc_fast_dump_regs); @@ -85,7 +86,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock); void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf) { - out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD); + qe_iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr); } EXPORT_SYMBOL(ucc_fast_transmit_on_demand); @@ -97,7 +98,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode) uf_regs = uccf->uf_regs; /* Enable reception and/or transmission on this UCC. */ - gumr = in_be32(&uf_regs->gumr); + gumr = qe_ioread32be(&uf_regs->gumr); if (mode & COMM_DIR_TX) { gumr |= UCC_FAST_GUMR_ENT; uccf->enabled_tx = 1; @@ -106,7 +107,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode) gumr |= UCC_FAST_GUMR_ENR; uccf->enabled_rx = 1; } - out_be32(&uf_regs->gumr, gumr); + qe_iowrite32be(gumr, &uf_regs->gumr); } EXPORT_SYMBOL(ucc_fast_enable); @@ -118,7 +119,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode) uf_regs = uccf->uf_regs; /* Disable reception and/or transmission on this UCC. 
*/ - gumr = in_be32(&uf_regs->gumr); + gumr = qe_ioread32be(&uf_regs->gumr); if (mode & COMM_DIR_TX) { gumr &= ~UCC_FAST_GUMR_ENT; uccf->enabled_tx = 0; @@ -127,7 +128,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode) gumr &= ~UCC_FAST_GUMR_ENR; uccf->enabled_rx = 0; } - out_be32(&uf_regs->gumr, gumr); + qe_iowrite32be(gumr, &uf_regs->gumr); } EXPORT_SYMBOL(ucc_fast_disable); @@ -196,6 +197,8 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc __func__); return -ENOMEM; } + uccf->ucc_fast_tx_virtual_fifo_base_offset = -1; + uccf->ucc_fast_rx_virtual_fifo_base_offset = -1; /* Fill fast UCC structure */ uccf->uf_info = uf_info; @@ -259,15 +262,14 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc gumr |= uf_info->tenc; gumr |= uf_info->tcrc; gumr |= uf_info->mode; - out_be32(&uf_regs->gumr, gumr); + qe_iowrite32be(gumr, &uf_regs->gumr); /* Allocate memory for Tx Virtual Fifo */ uccf->ucc_fast_tx_virtual_fifo_base_offset = qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT); - if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) { + if (uccf->ucc_fast_tx_virtual_fifo_base_offset < 0) { printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n", __func__); - uccf->ucc_fast_tx_virtual_fifo_base_offset = 0; ucc_fast_free(uccf); return -ENOMEM; } @@ -277,24 +279,25 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc qe_muram_alloc(uf_info->urfs + UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT); - if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) { + if (uccf->ucc_fast_rx_virtual_fifo_base_offset < 0) { printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n", __func__); - uccf->ucc_fast_rx_virtual_fifo_base_offset = 0; ucc_fast_free(uccf); return -ENOMEM; } /* Set Virtual Fifo registers */ - out_be16(&uf_regs->urfs, uf_info->urfs); - out_be16(&uf_regs->urfet, uf_info->urfet); - out_be16(&uf_regs->urfset, uf_info->urfset); - out_be16(&uf_regs->utfs, uf_info->utfs); - out_be16(&uf_regs->utfet, uf_info->utfet); - out_be16(&uf_regs->utftt, uf_info->utftt); + qe_iowrite16be(uf_info->urfs, &uf_regs->urfs); + qe_iowrite16be(uf_info->urfet, &uf_regs->urfet); + qe_iowrite16be(uf_info->urfset, &uf_regs->urfset); + qe_iowrite16be(uf_info->utfs, &uf_regs->utfs); + qe_iowrite16be(uf_info->utfet, &uf_regs->utfet); + qe_iowrite16be(uf_info->utftt, &uf_regs->utftt); /* utfb, urfb are offsets from MURAM base */ - out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset); - out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset); + qe_iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, + &uf_regs->utfb); + qe_iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, + &uf_regs->urfb); /* Mux clocking */ /* Grant Support */ @@ -362,14 +365,14 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc } /* Set interrupt mask register at UCC level. */ - out_be32(&uf_regs->uccm, uf_info->uccm_mask); + qe_iowrite32be(uf_info->uccm_mask, &uf_regs->uccm); /* First, clear anything pending at UCC level, * otherwise, old garbage may come through * as soon as the dam is opened. 
*/ /* Writing '1' clears */ - out_be32(&uf_regs->ucce, 0xffffffff); + qe_iowrite32be(0xffffffff, &uf_regs->ucce); *uccf_ret = uccf; return 0; @@ -381,11 +384,8 @@ void ucc_fast_free(struct ucc_fast_private * uccf) if (!uccf) return; - if (uccf->ucc_fast_tx_virtual_fifo_base_offset) - qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset); - - if (uccf->ucc_fast_rx_virtual_fifo_base_offset) - qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset); + qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset); + qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset); if (uccf->uf_regs) iounmap(uccf->uf_regs); diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c index 34f0ec3a63b5..274d34449846 100644 --- a/drivers/soc/fsl/qe/ucc_slow.c +++ b/drivers/soc/fsl/qe/ucc_slow.c @@ -78,7 +78,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode) us_regs = uccs->us_regs; /* Enable reception and/or transmission on this UCC. */ - gumr_l = in_be32(&us_regs->gumr_l); + gumr_l = qe_ioread32be(&us_regs->gumr_l); if (mode & COMM_DIR_TX) { gumr_l |= UCC_SLOW_GUMR_L_ENT; uccs->enabled_tx = 1; @@ -87,7 +87,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode) gumr_l |= UCC_SLOW_GUMR_L_ENR; uccs->enabled_rx = 1; } - out_be32(&us_regs->gumr_l, gumr_l); + qe_iowrite32be(gumr_l, &us_regs->gumr_l); } EXPORT_SYMBOL(ucc_slow_enable); @@ -99,7 +99,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode) us_regs = uccs->us_regs; /* Disable reception and/or transmission on this UCC. */ - gumr_l = in_be32(&us_regs->gumr_l); + gumr_l = qe_ioread32be(&us_regs->gumr_l); if (mode & COMM_DIR_TX) { gumr_l &= ~UCC_SLOW_GUMR_L_ENT; uccs->enabled_tx = 0; @@ -108,7 +108,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode) gumr_l &= ~UCC_SLOW_GUMR_L_ENR; uccs->enabled_rx = 0; } - out_be32(&us_regs->gumr_l, gumr_l); + qe_iowrite32be(gumr_l, &us_regs->gumr_l); } EXPORT_SYMBOL(ucc_slow_disable); @@ -154,6 +154,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc __func__); return -ENOMEM; } + uccs->rx_base_offset = -1; + uccs->tx_base_offset = -1; + uccs->us_pram_offset = -1; /* Fill slow UCC structure */ uccs->us_info = us_info; @@ -179,7 +182,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc /* Get PRAM base */ uccs->us_pram_offset = qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM); - if (IS_ERR_VALUE(uccs->us_pram_offset)) { + if (uccs->us_pram_offset < 0) { printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__); ucc_slow_free(uccs); return -ENOMEM; @@ -198,7 +201,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc return ret; } - out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length); + qe_iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr); INIT_LIST_HEAD(&uccs->confQ); @@ -206,10 +209,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc uccs->rx_base_offset = qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd), QE_ALIGNMENT_OF_BD); - if (IS_ERR_VALUE(uccs->rx_base_offset)) { + if (uccs->rx_base_offset < 0) { printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__, us_info->rx_bd_ring_len); - uccs->rx_base_offset = 0; ucc_slow_free(uccs); return -ENOMEM; } @@ -217,9 +219,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc uccs->tx_base_offset = qe_muram_alloc(us_info->tx_bd_ring_len * 
sizeof(struct qe_bd), QE_ALIGNMENT_OF_BD); - if (IS_ERR_VALUE(uccs->tx_base_offset)) { + if (uccs->tx_base_offset < 0) { printk(KERN_ERR "%s: cannot allocate TX BDs", __func__); - uccs->tx_base_offset = 0; ucc_slow_free(uccs); return -ENOMEM; } @@ -228,27 +229,27 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset); for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) { /* clear bd buffer */ - out_be32(&bd->buf, 0); + qe_iowrite32be(0, &bd->buf); /* set bd status and length */ - out_be32((u32 *) bd, 0); + qe_iowrite32be(0, (u32 *)bd); bd++; } /* for last BD set Wrap bit */ - out_be32(&bd->buf, 0); - out_be32((u32 *) bd, cpu_to_be32(T_W)); + qe_iowrite32be(0, &bd->buf); + qe_iowrite32be(cpu_to_be32(T_W), (u32 *)bd); /* Init Rx bds */ bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset); for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) { /* set bd status and length */ - out_be32((u32*)bd, 0); + qe_iowrite32be(0, (u32 *)bd); /* clear bd buffer */ - out_be32(&bd->buf, 0); + qe_iowrite32be(0, &bd->buf); bd++; } /* for last BD set Wrap bit */ - out_be32((u32*)bd, cpu_to_be32(R_W)); - out_be32(&bd->buf, 0); + qe_iowrite32be(cpu_to_be32(R_W), (u32 *)bd); + qe_iowrite32be(0, &bd->buf); /* Set GUMR (For more details see the hardware spec.). */ /* gumr_h */ @@ -269,7 +270,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc gumr |= UCC_SLOW_GUMR_H_TXSY; if (us_info->rtsm) gumr |= UCC_SLOW_GUMR_H_RTSM; - out_be32(&us_regs->gumr_h, gumr); + qe_iowrite32be(gumr, &us_regs->gumr_h); /* gumr_l */ gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc | @@ -282,7 +283,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc gumr |= UCC_SLOW_GUMR_L_TINV; if (us_info->tend) gumr |= UCC_SLOW_GUMR_L_TEND; - out_be32(&us_regs->gumr_l, gumr); + qe_iowrite32be(gumr, &us_regs->gumr_l); /* Function code registers */ @@ -292,8 +293,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc uccs->us_pram->rbmr = UCC_BMR_BO_BE; /* rbase, tbase are offsets from MURAM base */ - out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset); - out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset); + qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase); + qe_iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase); /* Mux clocking */ /* Grant Support */ @@ -323,14 +324,14 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc } /* Set interrupt mask register at UCC level. */ - out_be16(&us_regs->uccm, us_info->uccm_mask); + qe_iowrite16be(us_info->uccm_mask, &us_regs->uccm); /* First, clear anything pending at UCC level, * otherwise, old garbage may come through * as soon as the dam is opened. 
*/ /* Writing '1' clears */ - out_be16(&us_regs->ucce, 0xffff); + qe_iowrite16be(0xffff, &us_regs->ucce); /* Issue QE Init command */ if (us_info->init_tx && us_info->init_rx) @@ -352,14 +353,9 @@ void ucc_slow_free(struct ucc_slow_private * uccs) if (!uccs) return; - if (uccs->rx_base_offset) - qe_muram_free(uccs->rx_base_offset); - - if (uccs->tx_base_offset) - qe_muram_free(uccs->tx_base_offset); - - if (uccs->us_pram) - qe_muram_free(uccs->us_pram_offset); + qe_muram_free(uccs->rx_base_offset); + qe_muram_free(uccs->tx_base_offset); + qe_muram_free(uccs->us_pram_offset); if (uccs->us_regs) iounmap(uccs->us_regs); diff --git a/drivers/soc/fsl/qe/usb.c b/drivers/soc/fsl/qe/usb.c index 32d8269fa692..890f236ea697 100644 --- a/drivers/soc/fsl/qe/usb.c +++ b/drivers/soc/fsl/qe/usb.c @@ -43,7 +43,7 @@ int qe_usb_clock_set(enum qe_clock clk, int rate) spin_lock_irqsave(&cmxgcr_lock, flags); - clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val); + qe_clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val); spin_unlock_irqrestore(&cmxgcr_lock, flags); |