Diffstat (limited to 'drivers')
-rw-r--r--	drivers/clk/mmp/clk-of-mmp2.c	  6
-rw-r--r--	drivers/dma/ti/omap-dma.c	288
2 files changed, 276 insertions, 18 deletions
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index b4a95cbbda98..6e71591e63a0 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -53,6 +53,8 @@
 #define APMU_DISP1 0x110
 #define APMU_CCIC0 0x50
 #define APMU_CCIC1 0xf4
+#define APMU_USBHSIC0 0xf8
+#define APMU_USBHSIC1 0xfc
 #define MPMU_UART_PLL 0x14
 
 struct mmp2_clk_unit {
@@ -194,6 +196,8 @@ static struct mmp_clk_mix_config sdh_mix_config = {
 };
 
 static DEFINE_SPINLOCK(usb_lock);
+static DEFINE_SPINLOCK(usbhsic0_lock);
+static DEFINE_SPINLOCK(usbhsic1_lock);
 static DEFINE_SPINLOCK(disp0_lock);
 static DEFINE_SPINLOCK(disp1_lock);
@@ -224,6 +228,8 @@ static struct mmp_param_div_clk apmu_div_clks[] = {
 
 static struct mmp_param_gate_clk apmu_gate_clks[] = {
 	{MMP2_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
+	{MMP2_CLK_USBHSIC0, "usbhsic0_clk", "usb_pll", 0, APMU_USBHSIC0, 0x1b, 0x1b, 0x0, 0, &usbhsic0_lock},
+	{MMP2_CLK_USBHSIC1, "usbhsic1_clk", "usb_pll", 0, APMU_USBHSIC1, 0x1b, 0x1b, 0x0, 0, &usbhsic1_lock},
 	/* The gate clocks has mux parent. */
 	{MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
 	{MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 6b6ba238b81a..a014ab96e673 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -2,6 +2,7 @@
 /*
  * OMAP DMAengine support
  */
+#include <linux/cpu_pm.h>
 #include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
@@ -23,12 +24,33 @@
 #define OMAP_SDMA_REQUESTS	127
 #define OMAP_SDMA_CHANNELS	32
 
+struct omap_dma_config {
+	int lch_end;
+	unsigned int rw_priority:1;
+	unsigned int needs_busy_check:1;
+	unsigned int may_lose_context:1;
+	unsigned int needs_lch_clear:1;
+};
+
+struct omap_dma_context {
+	u32 irqenable_l0;
+	u32 irqenable_l1;
+	u32 ocp_sysconfig;
+	u32 gcr;
+};
+
 struct omap_dmadev {
 	struct dma_device ddev;
 	spinlock_t lock;
 	void __iomem *base;
 	const struct omap_dma_reg *reg_map;
 	struct omap_system_dma_plat_info *plat;
+	const struct omap_dma_config *cfg;
+	struct notifier_block nb;
+	struct omap_dma_context context;
+	int lch_count;
+	DECLARE_BITMAP(lch_bitmap, OMAP_SDMA_CHANNELS);
+	struct mutex lch_lock;		/* for assigning logical channels */
 	bool legacy;
 	bool ll123_supported;
 	struct dma_pool *desc_pool;
@@ -376,6 +398,19 @@ static unsigned omap_dma_get_csr(struct omap_chan *c)
 	return val;
 }
 
+static void omap_dma_clear_lch(struct omap_dmadev *od, int lch)
+{
+	struct omap_chan *c;
+	int i;
+
+	c = od->lch_map[lch];
+	if (!c)
+		return;
+
+	for (i = CSDP; i <= od->cfg->lch_end; i++)
+		omap_dma_chan_write(c, i, 0);
+}
+
 static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
 			    unsigned lch)
 {
@@ -633,6 +668,37 @@ static irqreturn_t omap_dma_irq(int irq, void *devid)
 	return IRQ_HANDLED;
 }
 
+static int omap_dma_get_lch(struct omap_dmadev *od, int *lch)
+{
+	int channel;
+
+	mutex_lock(&od->lch_lock);
+	channel = find_first_zero_bit(od->lch_bitmap, od->lch_count);
+	if (channel >= od->lch_count)
+		goto out_busy;
+	set_bit(channel, od->lch_bitmap);
+	mutex_unlock(&od->lch_lock);
+
+	omap_dma_clear_lch(od, channel);
+	*lch = channel;
+
+	return 0;
+
+out_busy:
+	mutex_unlock(&od->lch_lock);
+	*lch = -EINVAL;
+
+	return -EBUSY;
+}
+
+static void omap_dma_put_lch(struct omap_dmadev *od, int lch)
+{
+	omap_dma_clear_lch(od, lch);
+	mutex_lock(&od->lch_lock);
+	clear_bit(lch, od->lch_bitmap);
+	mutex_unlock(&od->lch_lock);
+}
+
 static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
@@ -644,8 +710,7 @@ static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
 		ret = omap_request_dma(c->dma_sig, "DMA engine",
 				       omap_dma_callback, c, &c->dma_ch);
 	} else {
-		ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
-				       &c->dma_ch);
+		ret = omap_dma_get_lch(od, &c->dma_ch);
 	}
 
 	dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);
@@ -702,7 +767,11 @@ static void omap_dma_free_chan_resources(struct dma_chan *chan)
 	c->channel_base = NULL;
 	od->lch_map[c->dma_ch] = NULL;
 	vchan_free_chan_resources(&c->vc);
-	omap_free_dma(c->dma_ch);
+
+	if (od->legacy)
+		omap_free_dma(c->dma_ch);
+	else
+		omap_dma_put_lch(od, c->dma_ch);
 
 	dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
 		c->dma_sig);
@@ -1453,16 +1522,128 @@ static void omap_dma_free(struct omap_dmadev *od)
 	}
 }
 
+/* Currently only used for omap2. For omap1, also a check for lcd_dma is needed */
+static int omap_dma_busy_notifier(struct notifier_block *nb,
+				  unsigned long cmd, void *v)
+{
+	struct omap_dmadev *od;
+	struct omap_chan *c;
+	int lch = -1;
+
+	od = container_of(nb, struct omap_dmadev, nb);
+
+	switch (cmd) {
+	case CPU_CLUSTER_PM_ENTER:
+		while (1) {
+			lch = find_next_bit(od->lch_bitmap, od->lch_count,
+					    lch + 1);
+			if (lch >= od->lch_count)
+				break;
+			c = od->lch_map[lch];
+			if (!c)
+				continue;
+			if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
+				return NOTIFY_BAD;
+		}
+		break;
+	case CPU_CLUSTER_PM_ENTER_FAILED:
+	case CPU_CLUSTER_PM_EXIT:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+/*
+ * We are using IRQENABLE_L1, and legacy DMA code was using IRQENABLE_L0.
+ * As the DSP may be using IRQENABLE_L2 and L3, let's not touch those for
+ * now. Context save seems to be only currently needed on omap3.
+ */
+static void omap_dma_context_save(struct omap_dmadev *od)
+{
+	od->context.irqenable_l0 = omap_dma_glbl_read(od, IRQENABLE_L0);
+	od->context.irqenable_l1 = omap_dma_glbl_read(od, IRQENABLE_L1);
+	od->context.ocp_sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
+	od->context.gcr = omap_dma_glbl_read(od, GCR);
+}
+
+static void omap_dma_context_restore(struct omap_dmadev *od)
+{
+	int i;
+
+	omap_dma_glbl_write(od, GCR, od->context.gcr);
+	omap_dma_glbl_write(od, OCP_SYSCONFIG, od->context.ocp_sysconfig);
+	omap_dma_glbl_write(od, IRQENABLE_L0, od->context.irqenable_l0);
+	omap_dma_glbl_write(od, IRQENABLE_L1, od->context.irqenable_l1);
+
+	/* Clear IRQSTATUS_L0 as legacy DMA code is no longer doing it */
+	if (od->plat->errata & DMA_ROMCODE_BUG)
+		omap_dma_glbl_write(od, IRQSTATUS_L0, 0);
+
+	/* Clear dma channels */
+	for (i = 0; i < od->lch_count; i++)
+		omap_dma_clear_lch(od, i);
+}
+
+/* Currently only used for omap3 */
+static int omap_dma_context_notifier(struct notifier_block *nb,
+				     unsigned long cmd, void *v)
+{
+	struct omap_dmadev *od;
+
+	od = container_of(nb, struct omap_dmadev, nb);
+
+	switch (cmd) {
+	case CPU_CLUSTER_PM_ENTER:
+		omap_dma_context_save(od);
+		break;
+	case CPU_CLUSTER_PM_ENTER_FAILED:
+	case CPU_CLUSTER_PM_EXIT:
+		omap_dma_context_restore(od);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static void omap_dma_init_gcr(struct omap_dmadev *od, int arb_rate,
+			      int max_fifo_depth, int tparams)
+{
+	u32 val;
+
+	/* Set only for omap2430 and later */
+	if (!od->cfg->rw_priority)
+		return;
+
+	if (max_fifo_depth == 0)
+		max_fifo_depth = 1;
+	if (arb_rate == 0)
+		arb_rate = 1;
+
+	val = 0xff & max_fifo_depth;
+	val |= (0x3 & tparams) << 12;
+	val |= (arb_rate & 0xff) << 16;
+
+	omap_dma_glbl_write(od, GCR, val);
+}
+
 #define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
 				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
 				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 
+/*
+ * No flags currently set for default configuration as omap1 is still
+ * using platform data.
+ */
+static const struct omap_dma_config default_cfg;
+
 static int omap_dma_probe(struct platform_device *pdev)
 {
+	const struct omap_dma_config *conf;
 	struct omap_dmadev *od;
 	struct resource *res;
 	int rc, i, irq;
-	u32 lch_count;
+	u32 val;
 
 	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
 	if (!od)
 		return -ENOMEM;
@@ -1473,9 +1654,21 @@ static int omap_dma_probe(struct platform_device *pdev)
 	if (IS_ERR(od->base))
 		return PTR_ERR(od->base);
 
-	od->plat = omap_get_plat_info();
-	if (!od->plat)
-		return -EPROBE_DEFER;
+	conf = of_device_get_match_data(&pdev->dev);
+	if (conf) {
+		od->cfg = conf;
+		od->plat = dev_get_platdata(&pdev->dev);
+		if (!od->plat) {
+			dev_err(&pdev->dev, "omap_system_dma_plat_info is missing");
+			return -ENODEV;
+		}
+	} else {
+		od->cfg = &default_cfg;
+
+		od->plat = omap_get_plat_info();
+		if (!od->plat)
+			return -EPROBE_DEFER;
+	}
 
 	od->reg_map = od->plat->reg_map;
@@ -1507,6 +1700,7 @@ static int omap_dma_probe(struct platform_device *pdev)
 	od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
 	od->ddev.dev = &pdev->dev;
 	INIT_LIST_HEAD(&od->ddev.channels);
+	mutex_init(&od->lch_lock);
 	spin_lock_init(&od->lock);
 	spin_lock_init(&od->irq_lock);
@@ -1522,18 +1716,30 @@ static int omap_dma_probe(struct platform_device *pdev)
 
 	/* Number of available logical channels */
 	if (!pdev->dev.of_node) {
-		lch_count = od->plat->dma_attr->lch_count;
-		if (unlikely(!lch_count))
-			lch_count = OMAP_SDMA_CHANNELS;
+		od->lch_count = od->plat->dma_attr->lch_count;
+		if (unlikely(!od->lch_count))
+			od->lch_count = OMAP_SDMA_CHANNELS;
 	} else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
-					&lch_count)) {
+					&od->lch_count)) {
 		dev_info(&pdev->dev,
 			 "Missing dma-channels property, using %u.\n",
 			 OMAP_SDMA_CHANNELS);
-		lch_count = OMAP_SDMA_CHANNELS;
+		od->lch_count = OMAP_SDMA_CHANNELS;
+	}
+
+	/* Mask of allowed logical channels */
+	if (pdev->dev.of_node && !of_property_read_u32(pdev->dev.of_node,
+						       "dma-channel-mask",
+						       &val)) {
+		/* Tag channels not in mask as reserved */
+		val = ~val;
+		bitmap_from_arr32(od->lch_bitmap, &val, od->lch_count);
 	}
+	if (od->plat->dma_attr->dev_caps & HS_CHANNELS_RESERVED)
+		bitmap_set(od->lch_bitmap, 0, 2);
 
-	od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
+	od->lch_map = devm_kcalloc(&pdev->dev, od->lch_count,
+				   sizeof(*od->lch_map),
 				   GFP_KERNEL);
 	if (!od->lch_map)
 		return -ENOMEM;
@@ -1605,6 +1811,16 @@ static int omap_dma_probe(struct platform_device *pdev)
 		}
 	}
 
+	omap_dma_init_gcr(od, DMA_DEFAULT_ARB_RATE, DMA_DEFAULT_FIFO_DEPTH, 0);
+
+	if (od->cfg->needs_busy_check) {
+		od->nb.notifier_call = omap_dma_busy_notifier;
+		cpu_pm_register_notifier(&od->nb);
+	} else if (od->cfg->may_lose_context) {
+		od->nb.notifier_call = omap_dma_context_notifier;
+		cpu_pm_register_notifier(&od->nb);
+	}
+
 	dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
 		 od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");
@@ -1616,6 +1832,9 @@ static int omap_dma_remove(struct platform_device *pdev)
 	struct omap_dmadev *od = platform_get_drvdata(pdev);
 	int irq;
 
+	if (od->cfg->may_lose_context)
+		cpu_pm_unregister_notifier(&od->nb);
+
 	if (pdev->dev.of_node)
 		of_dma_controller_free(pdev->dev.of_node);
@@ -1637,12 +1856,45 @@ static int omap_dma_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct omap_dma_config omap2420_data = {
+	.lch_end = CCFN,
+	.rw_priority = true,
+	.needs_lch_clear = true,
+	.needs_busy_check = true,
+};
+
+static const struct omap_dma_config omap2430_data = {
+	.lch_end = CCFN,
+	.rw_priority = true,
+	.needs_lch_clear = true,
+};
+
+static const struct omap_dma_config omap3430_data = {
+	.lch_end = CCFN,
+	.rw_priority = true,
+	.needs_lch_clear = true,
+	.may_lose_context = true,
+};
+
+static const struct omap_dma_config omap3630_data = {
+	.lch_end = CCDN,
+	.rw_priority = true,
+	.needs_lch_clear = true,
+	.may_lose_context = true,
+};
+
+static const struct omap_dma_config omap4_data = {
+	.lch_end = CCDN,
+	.rw_priority = true,
+	.needs_lch_clear = true,
+};
+
 static const struct of_device_id omap_dma_match[] = {
-	{ .compatible = "ti,omap2420-sdma", },
-	{ .compatible = "ti,omap2430-sdma", },
-	{ .compatible = "ti,omap3430-sdma", },
-	{ .compatible = "ti,omap3630-sdma", },
-	{ .compatible = "ti,omap4430-sdma", },
+	{ .compatible = "ti,omap2420-sdma", .data = &omap2420_data, },
+	{ .compatible = "ti,omap2430-sdma", .data = &omap2430_data, },
+	{ .compatible = "ti,omap3430-sdma", .data = &omap3430_data, },
+	{ .compatible = "ti,omap3630-sdma", .data = &omap3630_data, },
+	{ .compatible = "ti,omap4430-sdma", .data = &omap4_data, },
 	{},
 };
 MODULE_DEVICE_TABLE(of, omap_dma_match);