Diffstat (limited to 'drivers')
303 files changed, 13113 insertions, 4652 deletions
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index d58fbf7f04e6..7dd70927991e 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -122,7 +122,7 @@ static int acpi_apd_create_device(struct acpi_device *adev, int ret; if (!dev_desc) { - pdev = acpi_create_platform_device(adev); + pdev = acpi_create_platform_device(adev, NULL); return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1; } @@ -139,14 +139,8 @@ static int acpi_apd_create_device(struct acpi_device *adev, goto err_out; } - if (dev_desc->properties) { - ret = device_add_properties(&adev->dev, dev_desc->properties); - if (ret) - goto err_out; - } - adev->driver_data = pdata; - pdev = acpi_create_platform_device(adev); + pdev = acpi_create_platform_device(adev, dev_desc->properties); if (!IS_ERR_OR_NULL(pdev)) return 1; diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 552010288135..373657f7e35a 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -395,7 +395,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev, dev_desc = (const struct lpss_device_desc *)id->driver_data; if (!dev_desc) { - pdev = acpi_create_platform_device(adev); + pdev = acpi_create_platform_device(adev, NULL); return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1; } pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); @@ -451,14 +451,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev, goto err_out; } - if (dev_desc->properties) { - ret = device_add_properties(&adev->dev, dev_desc->properties); - if (ret) - goto err_out; - } - adev->driver_data = pdata; - pdev = acpi_create_platform_device(adev); + pdev = acpi_create_platform_device(adev, dev_desc->properties); if (!IS_ERR_OR_NULL(pdev)) { return 1; } diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index b200ae1f3c6f..b4c1a6a51da4 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -50,6 +50,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev, /** * acpi_create_platform_device - Create platform device for ACPI device node * @adev: ACPI device node to create a platform device for. + * @properties: Optional collection of built-in properties. * * Check if the given @adev can be represented as a platform device and, if * that's the case, create and register a platform device, populate its common @@ -57,7 +58,8 @@ static void acpi_platform_fill_resource(struct acpi_device *adev, * * Name of the platform device will be the same as @adev's. 
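The net effect of the three ACPI hunks above is that glue code now hands its static property table to acpi_create_platform_device() and lets the platform core attach it, instead of calling device_add_properties() on the ACPI node and unwinding on failure. A minimal caller-side sketch of the new contract; the property names, values and the example_properties table are illustrative only, not taken from this series:

#include <linux/property.h>

/* Hypothetical built-in property table; the empty terminator is required. */
static struct property_entry example_properties[] = {
	PROPERTY_ENTRY_U32("reg-shift", 2),
	PROPERTY_ENTRY_BOOL("example,skip-reset"),
	{ }
};

/* In a create_device() callback, in place of device_add_properties(): */
pdev = acpi_create_platform_device(adev, example_properties);
return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;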
*/ -struct platform_device *acpi_create_platform_device(struct acpi_device *adev) +struct platform_device *acpi_create_platform_device(struct acpi_device *adev, + struct property_entry *properties) { struct platform_device *pdev = NULL; struct platform_device_info pdevinfo; @@ -106,6 +108,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev) pdevinfo.res = resources; pdevinfo.num_res = count; pdevinfo.fwnode = acpi_fwnode_handle(adev); + pdevinfo.properties = properties; if (acpi_dma_supported(adev)) pdevinfo.dma_mask = DMA_BIT_MASK(32); diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c index 33505c651f62..86364097e236 100644 --- a/drivers/acpi/dptf/int340x_thermal.c +++ b/drivers/acpi/dptf/int340x_thermal.c @@ -34,11 +34,11 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev, const struct acpi_device_id *id) { if (IS_ENABLED(CONFIG_INT340X_THERMAL)) - acpi_create_platform_device(adev); + acpi_create_platform_device(adev, NULL); /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) && id->driver_data == INT3401_DEVICE) - acpi_create_platform_device(adev); + acpi_create_platform_device(adev, NULL); return 1; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 035ac646d8db..3d1856f1f4d0 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1734,7 +1734,7 @@ static void acpi_default_enumeration(struct acpi_device *device) &is_spi_i2c_slave); acpi_dev_free_resource_list(&resource_list); if (!is_spi_i2c_slave) { - acpi_create_platform_device(device); + acpi_create_platform_device(device, NULL); acpi_device_set_enumerated(device); } else { blocking_notifier_call_chain(&acpi_reconfig_chain, diff --git a/drivers/base/dd.c b/drivers/base/dd.c index d22a7260f42b..d76cd97a98b6 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -324,7 +324,8 @@ static int really_probe(struct device *dev, struct device_driver *drv) { int ret = -EPROBE_DEFER; int local_trigger_count = atomic_read(&deferred_trigger_count); - bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE); + bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) && + !drv->suppress_bind_attrs; if (defer_all_probes) { /* @@ -383,7 +384,7 @@ re_probe: if (test_remove) { test_remove = false; - if (dev->bus && dev->bus->remove) + if (dev->bus->remove) dev->bus->remove(dev); else if (drv->remove) drv->remove(dev); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index e44944f4be77..2932a5bd892f 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1027,6 +1027,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a TRACE_DEVICE(dev); TRACE_SUSPEND(0); + dpm_wait_for_children(dev, async); + if (async_error) goto Complete; @@ -1038,8 +1040,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a if (dev->power.syscore || dev->power.direct_complete) goto Complete; - dpm_wait_for_children(dev, async); - if (dev->pm_domain) { info = "noirq power domain "; callback = pm_noirq_op(&dev->pm_domain->ops, state); @@ -1174,6 +1174,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as __pm_runtime_disable(dev, false); + dpm_wait_for_children(dev, async); + if (async_error) goto Complete; @@ -1185,8 +1187,6 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as if (dev->power.syscore || dev->power.direct_complete) goto 
Complete; - dpm_wait_for_children(dev, async); - if (dev->pm_domain) { info = "late power domain "; callback = pm_late_early_op(&dev->pm_domain->ops, state); diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index ab19adb07a12..3c606c09fd5a 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -853,45 +853,6 @@ rqbiocnt(struct request *r) return n; } -/* This can be removed if we are certain that no users of the block - * layer will ever use zero-count pages in bios. Otherwise we have to - * protect against the put_page sometimes done by the network layer. - * - * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for - * discussion. - * - * We cannot use get_page in the workaround, because it insists on a - * positive page count as a precondition. So we use _refcount directly. - */ -static void -bio_pageinc(struct bio *bio) -{ - struct bio_vec bv; - struct page *page; - struct bvec_iter iter; - - bio_for_each_segment(bv, bio, iter) { - /* Non-zero page count for non-head members of - * compound pages is no longer allowed by the kernel. - */ - page = compound_head(bv.bv_page); - page_ref_inc(page); - } -} - -static void -bio_pagedec(struct bio *bio) -{ - struct page *page; - struct bio_vec bv; - struct bvec_iter iter; - - bio_for_each_segment(bv, bio, iter) { - page = compound_head(bv.bv_page); - page_ref_dec(page); - } -} - static void bufinit(struct buf *buf, struct request *rq, struct bio *bio) { @@ -899,7 +860,6 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio) buf->rq = rq; buf->bio = bio; buf->iter = bio->bi_iter; - bio_pageinc(bio); } static struct buf * @@ -1127,7 +1087,6 @@ aoe_end_buf(struct aoedev *d, struct buf *buf) if (buf == d->ip.buf) d->ip.buf = NULL; rq = buf->rq; - bio_pagedec(buf->bio); mempool_free(buf, d->bufpool); n = (unsigned long) rq->special; rq->special = (void *) --n; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 100be556e613..83482721bc01 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1871,7 +1871,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock, drbd_update_congested(connection); } do { - rv = kernel_sendmsg(sock, &msg, &iov, 1, size); + rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); if (rv == -EAGAIN) { if (we_should_drop_the_connection(connection, sock)) break; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 19a16b2dbb91..7a1048755914 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -599,7 +599,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, return -EINVAL; sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0); - if (!sreq) + if (IS_ERR(sreq)) return -ENOMEM; mutex_unlock(&nbd->tx_lock); diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index d23368874710..6af1ce04b3da 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -748,10 +748,7 @@ static int pp_release(struct inode *inode, struct file *file) } if (pp->pdev) { - const char *name = pp->pdev->name; - parport_unregister_device(pp->pdev); - kfree(name); pp->pdev = NULL; pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); } diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 20b105584f82..80ae2a51452d 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -700,6 +700,7 @@ static struct clk * __init create_mux_common(struct clockgen *cg, struct mux_hwclock *hwc, const struct clk_ops *ops, unsigned long min_rate, + unsigned 
long max_rate, unsigned long pct80_rate, const char *fmt, int idx) { @@ -728,6 +729,8 @@ static struct clk * __init create_mux_common(struct clockgen *cg, continue; if (rate < min_rate) continue; + if (rate > max_rate) + continue; parent_names[j] = div->name; hwc->parent_to_clksel[j] = i; @@ -759,7 +762,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) struct mux_hwclock *hwc; const struct clockgen_pll_div *div; unsigned long plat_rate, min_rate; - u64 pct80_rate; + u64 max_rate, pct80_rate; u32 clksel; hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); @@ -787,8 +790,8 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) return NULL; } - pct80_rate = clk_get_rate(div->clk); - pct80_rate *= 8; + max_rate = clk_get_rate(div->clk); + pct80_rate = max_rate * 8; do_div(pct80_rate, 10); plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk); @@ -798,7 +801,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) else min_rate = plat_rate / 2; - return create_mux_common(cg, hwc, &cmux_ops, min_rate, + return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate, pct80_rate, "cg-cmux%d", idx); } @@ -813,7 +816,7 @@ static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx) hwc->reg = cg->regs + 0x20 * idx + 0x10; hwc->info = cg->info.hwaccel[idx]; - return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0, + return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0, "cg-hwaccel%d", idx); } diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c index 5daddf5ecc4b..bc37030e38ba 100644 --- a/drivers/clk/clk-xgene.c +++ b/drivers/clk/clk-xgene.c @@ -463,22 +463,20 @@ static int xgene_clk_enable(struct clk_hw *hw) struct xgene_clk *pclk = to_xgene_clk(hw); unsigned long flags = 0; u32 data; - phys_addr_t reg; if (pclk->lock) spin_lock_irqsave(pclk->lock, flags); if (pclk->param.csr_reg != NULL) { pr_debug("%s clock enabled\n", clk_hw_get_name(hw)); - reg = __pa(pclk->param.csr_reg); /* First enable the clock */ data = xgene_clk_read(pclk->param.csr_reg + pclk->param.reg_clk_offset); data |= pclk->param.reg_clk_mask; xgene_clk_write(data, pclk->param.csr_reg + pclk->param.reg_clk_offset); - pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n", - clk_hw_get_name(hw), ®, + pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n", + clk_hw_get_name(hw), pclk->param.reg_clk_offset, pclk->param.reg_clk_mask, data); @@ -488,8 +486,8 @@ static int xgene_clk_enable(struct clk_hw *hw) data &= ~pclk->param.reg_csr_mask; xgene_clk_write(data, pclk->param.csr_reg + pclk->param.reg_csr_offset); - pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n", - clk_hw_get_name(hw), ®, + pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n", + clk_hw_get_name(hw), pclk->param.reg_csr_offset, pclk->param.reg_csr_mask, data); } diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c index 19f9b622981a..7a6acc3e4a92 100644 --- a/drivers/clk/imx/clk-pllv3.c +++ b/drivers/clk/imx/clk-pllv3.c @@ -223,7 +223,7 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw, temp64 *= mfn; do_div(temp64, mfd); - return (parent_rate * div) + (u32)temp64; + return parent_rate * div + (unsigned long)temp64; } static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate, @@ -247,7 +247,11 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate, do_div(temp64, parent_rate); mfn = temp64; - return parent_rate * div + 
parent_rate * mfn / mfd; + temp64 = (u64)parent_rate; + temp64 *= mfn; + do_div(temp64, mfd); + + return parent_rate * div + (unsigned long)temp64; } static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate, diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index 3a51fff1b0e7..9adaf48aea23 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -313,7 +313,7 @@ static void __init mmp2_clk_init(struct device_node *np) } pxa_unit->apmu_base = of_iomap(np, 1); - if (!pxa_unit->mpmu_base) { + if (!pxa_unit->apmu_base) { pr_err("failed to map apmu registers\n"); return; } diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c index 87f2317b2a00..f110c02e83cb 100644 --- a/drivers/clk/mmp/clk-of-pxa168.c +++ b/drivers/clk/mmp/clk-of-pxa168.c @@ -262,7 +262,7 @@ static void __init pxa168_clk_init(struct device_node *np) } pxa_unit->apmu_base = of_iomap(np, 1); - if (!pxa_unit->mpmu_base) { + if (!pxa_unit->apmu_base) { pr_err("failed to map apmu registers\n"); return; } diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c index e22a67f76d93..64d1ef49caeb 100644 --- a/drivers/clk/mmp/clk-of-pxa910.c +++ b/drivers/clk/mmp/clk-of-pxa910.c @@ -282,7 +282,7 @@ static void __init pxa910_clk_init(struct device_node *np) } pxa_unit->apmu_base = of_iomap(np, 1); - if (!pxa_unit->mpmu_base) { + if (!pxa_unit->apmu_base) { pr_err("failed to map apmu registers\n"); return; } @@ -294,7 +294,7 @@ static void __init pxa910_clk_init(struct device_node *np) } pxa_unit->apbcp_base = of_iomap(np, 3); - if (!pxa_unit->mpmu_base) { + if (!pxa_unit->apbcp_base) { pr_err("failed to map apbcp registers\n"); return; } diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c index 8feba93672c5..e8075359366b 100644 --- a/drivers/clk/rockchip/clk-ddr.c +++ b/drivers/clk/rockchip/clk-ddr.c @@ -144,11 +144,8 @@ struct clk *rockchip_clk_register_ddrclk(const char *name, int flags, ddrclk->ddr_flag = ddr_flag; clk = clk_register(NULL, &ddrclk->hw); - if (IS_ERR(clk)) { - pr_err("%s: could not register ddrclk %s\n", __func__, name); + if (IS_ERR(clk)) kfree(ddrclk); - return NULL; - } return clk; } diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c index 96fab6cfb202..6c6afb87b4ce 100644 --- a/drivers/clk/samsung/clk-exynos-clkout.c +++ b/drivers/clk/samsung/clk-exynos-clkout.c @@ -132,28 +132,34 @@ free_clkout: pr_err("%s: failed to register clkout clock\n", __func__); } +/* + * We use the CLK_OF_DECLARE_DRIVER initialization method to avoid setting + * the OF_POPULATED flag on the pmu device tree node, so later the + * Exynos PMU platform device can be properly probed by the PMU driver. 
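The clk-pllv3 hunk above replaces "return parent_rate * div + parent_rate * mfn / mfd;", whose parent_rate * mfn product can overflow unsigned long on 32-bit machines, with 64-bit intermediate math. A standalone sketch of the same idiom, with standard C types standing in for the kernel's u64 and do_div(); the helper name is illustrative:

#include <stdint.h>

/* Fractional PLL output: rate = parent * div + parent * mfn / mfd. */
static unsigned long pllv3_av_rate(unsigned long parent, unsigned int div,
				   unsigned int mfn, unsigned int mfd)
{
	uint64_t frac = (uint64_t)parent * mfn;	/* widen before multiplying */

	frac /= mfd;		/* the driver uses do_div(temp64, mfd) here */
	return parent * div + (unsigned long)frac;
}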
+ */ + static void __init exynos4_clkout_init(struct device_node *node) { exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK); } -CLK_OF_DECLARE(exynos4210_clkout, "samsung,exynos4210-pmu", +CLK_OF_DECLARE_DRIVER(exynos4210_clkout, "samsung,exynos4210-pmu", exynos4_clkout_init); -CLK_OF_DECLARE(exynos4212_clkout, "samsung,exynos4212-pmu", +CLK_OF_DECLARE_DRIVER(exynos4212_clkout, "samsung,exynos4212-pmu", exynos4_clkout_init); -CLK_OF_DECLARE(exynos4412_clkout, "samsung,exynos4412-pmu", +CLK_OF_DECLARE_DRIVER(exynos4412_clkout, "samsung,exynos4412-pmu", exynos4_clkout_init); -CLK_OF_DECLARE(exynos3250_clkout, "samsung,exynos3250-pmu", +CLK_OF_DECLARE_DRIVER(exynos3250_clkout, "samsung,exynos3250-pmu", exynos4_clkout_init); static void __init exynos5_clkout_init(struct device_node *node) { exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK); } -CLK_OF_DECLARE(exynos5250_clkout, "samsung,exynos5250-pmu", +CLK_OF_DECLARE_DRIVER(exynos5250_clkout, "samsung,exynos5250-pmu", exynos5_clkout_init); -CLK_OF_DECLARE(exynos5410_clkout, "samsung,exynos5410-pmu", +CLK_OF_DECLARE_DRIVER(exynos5410_clkout, "samsung,exynos5410-pmu", exynos5_clkout_init); -CLK_OF_DECLARE(exynos5420_clkout, "samsung,exynos5420-pmu", +CLK_OF_DECLARE_DRIVER(exynos5420_clkout, "samsung,exynos5420-pmu", exynos5_clkout_init); -CLK_OF_DECLARE(exynos5433_clkout, "samsung,exynos5433-pmu", +CLK_OF_DECLARE_DRIVER(exynos5433_clkout, "samsung,exynos5433-pmu", exynos5_clkout_init); diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 69d8ef98d34c..6d802f2d2881 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -308,8 +308,7 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait) poll_wait(file, &sync_file->wq, wait); - if (!poll_does_not_wait(wait) && - !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) { + if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) { if (dma_fence_add_callback(sync_file->fence, &sync_file->cb, fence_check_cb_func) < 0) wake_up_all(&sync_file->wq); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 2f9f96cc9f65..06879d1dcabd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -395,9 +395,12 @@ static int acp_hw_fini(void *handle) { int i, ret; struct device *dev; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* return early if no ACP */ + if (!adev->acp.acp_genpd) + return 0; + for (i = 0; i < ACP_DEVS ; i++) { dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 017556ca22e6..7ded61e6dd81 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -809,10 +809,19 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, if (!adev->pm.fw) { switch (adev->asic_type) { case CHIP_TOPAZ: - strcpy(fw_name, "amdgpu/topaz_smc.bin"); + if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) || + ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) || + ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) + strcpy(fw_name, "amdgpu/topaz_k_smc.bin"); + else + strcpy(fw_name, "amdgpu/topaz_smc.bin"); break; case CHIP_TONGA: - strcpy(fw_name, "amdgpu/tonga_smc.bin"); + if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) || + 
((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) + strcpy(fw_name, "amdgpu/tonga_k_smc.bin"); + else + strcpy(fw_name, "amdgpu/tonga_smc.bin"); break; case CHIP_FIJI: strcpy(fw_name, "amdgpu/fiji_smc.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 3af8ffb45b64..8d1cf2d3e663 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -769,7 +769,7 @@ static void amdgpu_connector_unregister(struct drm_connector *connector) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - if (amdgpu_connector->ddc_bus->has_aux) { + if (amdgpu_connector->ddc_bus && amdgpu_connector->ddc_bus->has_aux) { drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux); amdgpu_connector->ddc_bus->has_aux = false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 6bb4d9e9afe4..42da6163b893 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -742,8 +742,20 @@ static struct pci_driver amdgpu_kms_pci_driver = { static int __init amdgpu_init(void) { - amdgpu_sync_init(); - amdgpu_fence_slab_init(); + int r; + + r = amdgpu_sync_init(); + if (r) + goto error_sync; + + r = amdgpu_fence_slab_init(); + if (r) + goto error_fence; + + r = amd_sched_fence_slab_init(); + if (r) + goto error_sched; + if (vgacon_text_force()) { DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); return -EINVAL; @@ -755,6 +767,15 @@ static int __init amdgpu_init(void) amdgpu_register_atpx_handler(); /* let modprobe override vga console setting */ return drm_pci_init(driver, pdriver); + +error_sched: + amdgpu_fence_slab_fini(); + +error_fence: + amdgpu_sync_fini(); + +error_sync: + return r; } static void __exit amdgpu_exit(void) @@ -763,6 +784,7 @@ static void __exit amdgpu_exit(void) drm_pci_exit(driver, pdriver); amdgpu_unregister_atpx_handler(); amdgpu_sync_fini(); + amd_sched_fence_slab_fini(); amdgpu_fence_slab_fini(); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index ad908612aff9..d1cf9ac0dff1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -99,6 +99,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) if ((amdgpu_runtime_pm != 0) && amdgpu_has_atpx() && + (amdgpu_is_atpx_hybrid() || + amdgpu_has_atpx_dgpu_power_cntl()) && ((flags & AMD_IS_APU) == 0)) flags |= AMD_IS_PX; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 52d0a83e6ad1..0b21e7beda91 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -80,7 +80,9 @@ #include "dce_virtual.h" MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); +MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin"); MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); +MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin"); MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index 14f8c1f4da3d..0723758ed065 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -272,7 +272,7 @@ bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hw PHM_FUNC_CHECK(hwmgr); if 
(hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL) - return -EINVAL; + return false; return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 2ba7937d2545..e03dcb6ea9c1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -710,8 +710,10 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t vol; int ret = 0; - if (hwmgr->chip_id < CHIP_POLARIS10) { - atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage); + if (hwmgr->chip_id < CHIP_TONGA) { + ret = atomctrl_get_voltage_evv(hwmgr, id, voltage); + } else if (hwmgr->chip_id < CHIP_POLARIS10) { + ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage); if (*voltage >= 2000 || *voltage == 0) *voltage = 1150; } else { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 9e49f2777143..28e748d688e2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1474,19 +1474,19 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; - if (table_info == NULL) - return -EINVAL; - - sclk_table = table_info->vdd_dep_on_sclk; - for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { - if (0 == phm_get_sclk_for_voltage_evv(hwmgr, + if ((hwmgr->pp_table_version == PP_TABLE_V1) + && !phm_get_sclk_for_voltage_evv(hwmgr, table_info->vddgfx_lookup_table, vv_id, &sclk)) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ClockStretcher)) { + if (table_info == NULL) + return -EINVAL; + sclk_table = table_info->vdd_dep_on_sclk; + for (j = 1; j < sclk_table->count; j++) { if (sclk_table->entries[j].clk == sclk && sclk_table->entries[j].cks_enable == 0) { @@ -1512,12 +1512,15 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) } } } else { - if ((hwmgr->pp_table_version == PP_TABLE_V0) || !phm_get_sclk_for_voltage_evv(hwmgr, table_info->vddc_lookup_table, vv_id, &sclk)) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ClockStretcher)) { + if (table_info == NULL) + return -EINVAL; + sclk_table = table_info->vdd_dep_on_sclk; + for (j = 1; j < sclk_table->count; j++) { if (sclk_table->entries[j].clk == sclk && sclk_table->entries[j].cks_enable == 0) { @@ -2147,9 +2150,11 @@ static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr, struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (tab) { + vddc = tab->vddc; smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage); tab->vddc = vddc; + vddci = tab->vddci; smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci, &data->vddci_leakage); tab->vddci = vddci; @@ -4247,18 +4252,26 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) { struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)hwmgr->pptable; - struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL; + struct phm_clock_voltage_dependency_table *sclk_table; int i; - if (table_info == NULL) - return -EINVAL; - - dep_sclk_table = table_info->vdd_dep_on_sclk; - - 
for (i = 0; i < dep_sclk_table->count; i++) { - clocks->clock[i] = dep_sclk_table->entries[i].clk; - clocks->count++; + if (hwmgr->pp_table_version == PP_TABLE_V1) { + if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL) + return -EINVAL; + dep_sclk_table = table_info->vdd_dep_on_sclk; + for (i = 0; i < dep_sclk_table->count; i++) { + clocks->clock[i] = dep_sclk_table->entries[i].clk; + clocks->count++; + } + } else if (hwmgr->pp_table_version == PP_TABLE_V0) { + sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk; + for (i = 0; i < sclk_table->count; i++) { + clocks->clock[i] = sclk_table->entries[i].clk; + clocks->count++; + } } + return 0; } @@ -4280,17 +4293,24 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) (struct phm_ppt_v1_information *)hwmgr->pptable; struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; int i; + struct phm_clock_voltage_dependency_table *mclk_table; - if (table_info == NULL) - return -EINVAL; - - dep_mclk_table = table_info->vdd_dep_on_mclk; - - for (i = 0; i < dep_mclk_table->count; i++) { - clocks->clock[i] = dep_mclk_table->entries[i].clk; - clocks->latency[i] = smu7_get_mem_latency(hwmgr, + if (hwmgr->pp_table_version == PP_TABLE_V1) { + if (table_info == NULL) + return -EINVAL; + dep_mclk_table = table_info->vdd_dep_on_mclk; + for (i = 0; i < dep_mclk_table->count; i++) { + clocks->clock[i] = dep_mclk_table->entries[i].clk; + clocks->latency[i] = smu7_get_mem_latency(hwmgr, dep_mclk_table->entries[i].clk); - clocks->count++; + clocks->count++; + } + } else if (hwmgr->pp_table_version == PP_TABLE_V0) { + mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk; + for (i = 0; i < mclk_table->count; i++) { + clocks->clock[i] = mclk_table->entries[i].clk; + clocks->count++; + } } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c index fb6c6f6106d5..29d0319b22e6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c @@ -30,7 +30,7 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info) { if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; + return -ENODEV; fan_speed_info->supports_percent_read = true; fan_speed_info->supports_percent_write = true; @@ -60,7 +60,7 @@ int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint64_t tmp64; if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; + return -ENODEV; duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); @@ -89,7 +89,7 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) if (hwmgr->thermal_controller.fanInfo.bNoFan || (hwmgr->thermal_controller.fanInfo. 
ucTachometerPulsesPerRevolution == 0)) - return 0; + return -ENODEV; tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_TACH_STATUS, TACH_PERIOD); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 09b2cf6ccfa4..1bf83ed113b3 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -34,9 +34,6 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb); -struct kmem_cache *sched_fence_slab; -atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); - /* Initialize a given run queue struct */ static void amd_sched_rq_init(struct amd_sched_rq *rq) { @@ -619,13 +616,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched, INIT_LIST_HEAD(&sched->ring_mirror_list); spin_lock_init(&sched->job_list_lock); atomic_set(&sched->hw_rq_count, 0); - if (atomic_inc_return(&sched_fence_slab_ref) == 1) { - sched_fence_slab = kmem_cache_create( - "amd_sched_fence", sizeof(struct amd_sched_fence), 0, - SLAB_HWCACHE_ALIGN, NULL); - if (!sched_fence_slab) - return -ENOMEM; - } /* Each scheduler will run on a separate kernel thread */ sched->thread = kthread_run(amd_sched_main, sched, sched->name); @@ -646,7 +636,4 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched) { if (sched->thread) kthread_stop(sched->thread); - rcu_barrier(); - if (atomic_dec_and_test(&sched_fence_slab_ref)) - kmem_cache_destroy(sched_fence_slab); } diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 876aa43b57df..d8dc681bcda6 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -30,9 +30,6 @@ struct amd_gpu_scheduler; struct amd_sched_rq; -extern struct kmem_cache *sched_fence_slab; -extern atomic_t sched_fence_slab_ref; - /** * A scheduler entity is a wrapper around a job queue or a group * of other entities. 
Entities take turns emitting jobs from their @@ -145,6 +142,9 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity); void amd_sched_entity_push_job(struct amd_sched_job *sched_job); +int amd_sched_fence_slab_init(void); +void amd_sched_fence_slab_fini(void); + struct amd_sched_fence *amd_sched_fence_create( struct amd_sched_entity *s_entity, void *owner); void amd_sched_fence_scheduled(struct amd_sched_fence *fence); diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index 91530e25aaff..33f54d0a5c4f 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c @@ -27,6 +27,25 @@ #include <drm/drmP.h> #include "gpu_scheduler.h" +static struct kmem_cache *sched_fence_slab; + +int amd_sched_fence_slab_init(void) +{ + sched_fence_slab = kmem_cache_create( + "amd_sched_fence", sizeof(struct amd_sched_fence), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!sched_fence_slab) + return -ENOMEM; + + return 0; +} + +void amd_sched_fence_slab_fini(void) +{ + rcu_barrier(); + kmem_cache_destroy(sched_fence_slab); +} + struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, void *owner) { diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index a424e03b3007..eb8688ec6f18 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -39,6 +39,15 @@ config DRM_DW_HDMI_AHB_AUDIO Designware HDMI block. This is used in conjunction with the i.MX6 HDMI driver. +config DRM_DW_HDMI_I2S_AUDIO + tristate "Synopsys DesignWare I2S Audio interface" + depends on SND_SOC + depends on DRM_DW_HDMI + select SND_SOC_HDMI_CODEC + help + Support the I2S Audio interface which is part of the Synopsys + DesignWare HDMI block. + config DRM_NXP_PTN3460 tristate "NXP PTN3460 DP/LVDS bridge" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 8b065d9c7652..2e83a7855399 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o +obj-$(CONFIG_DRM_DW_HDMI_I2S_AUDIO) += dw-hdmi-i2s-audio.o obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig index d2b0499ab7d7..2fed567f9943 100644 --- a/drivers/gpu/drm/bridge/adv7511/Kconfig +++ b/drivers/gpu/drm/bridge/adv7511/Kconfig @@ -6,6 +6,14 @@ config DRM_I2C_ADV7511 help Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders. +config DRM_I2C_ADV7511_AUDIO + bool "ADV7511 HDMI Audio driver" + depends on DRM_I2C_ADV7511 && SND_SOC + select SND_SOC_HDMI_CODEC + help + Support the ADV7511 HDMI Audio interface. This is used in + conjunction with the ADV7511 HDMI driver. 
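The adv7511_audio.c code added below derives its audio clock regeneration values from the relation the HDMI spec defines, 128 * fs = f_TMDS * N / CTS, with the recommended N of 4096, 6272 and 6144 for 32, 44.1 and 48 kHz. A worked check of that arithmetic, assuming f_TMDS is given in Hz (the driver itself keeps f_tmds in kHz and multiplies the result by 1000); the helper name is illustrative:

#include <stdint.h>

/* CTS for 48 kHz audio on a 148.5 MHz TMDS clock:
 * 148500000 * 6144 / (128 * 48000) = 148500 */
static unsigned int hdmi_audio_cts(unsigned int f_tmds_hz, unsigned int fs,
				   unsigned int n)
{
	return (unsigned int)(((uint64_t)f_tmds_hz * n) / (128ull * fs));
}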
+ config DRM_I2C_ADV7533 bool "ADV7533 encoder" depends on DRM_I2C_ADV7511 diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile index 9019327fff4c..5ba675534f6e 100644 --- a/drivers/gpu/drm/bridge/adv7511/Makefile +++ b/drivers/gpu/drm/bridge/adv7511/Makefile @@ -1,3 +1,4 @@ adv7511-y := adv7511_drv.o +adv7511-$(CONFIG_DRM_I2C_ADV7511_AUDIO) += adv7511_audio.o adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index 161c923d6162..992d76ce02bb 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -309,6 +309,8 @@ struct adv7511 { struct drm_display_mode curr_mode; unsigned int f_tmds; + unsigned int f_audio; + unsigned int audio_source; unsigned int current_edid_segment; uint8_t edid_buf[256]; @@ -334,6 +336,7 @@ struct adv7511 { bool use_timing_gen; enum adv7511_type type; + struct platform_device *audio_pdev; }; #ifdef CONFIG_DRM_I2C_ADV7533 @@ -389,4 +392,17 @@ static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv) } #endif +#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO +int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511); +void adv7511_audio_exit(struct adv7511 *adv7511); +#else /*CONFIG_DRM_I2C_ADV7511_AUDIO */ +static inline int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511) +{ + return 0; +} +static inline void adv7511_audio_exit(struct adv7511 *adv7511) +{ +} +#endif /* CONFIG_DRM_I2C_ADV7511_AUDIO */ + #endif /* __DRM_I2C_ADV7511_H__ */ diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c new file mode 100644 index 000000000000..cf92ebfe6ab7 --- /dev/null +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c @@ -0,0 +1,213 @@ +/* + * Analog Devices ADV7511 HDMI transmitter driver + * + * Copyright 2012 Analog Devices Inc. + * Copyright (c) 2016, Linaro Limited + * + * Licensed under the GPL-2. 
+ */ + +#include <sound/core.h> +#include <sound/hdmi-codec.h> +#include <sound/pcm.h> +#include <sound/soc.h> + +#include "adv7511.h" + +static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs, + unsigned int *cts, unsigned int *n) +{ + switch (fs) { + case 32000: + *n = 4096; + break; + case 44100: + *n = 6272; + break; + case 48000: + *n = 6144; + break; + } + + *cts = ((f_tmds * *n) / (128 * fs)) * 1000; +} + +static int adv7511_update_cts_n(struct adv7511 *adv7511) +{ + unsigned int cts = 0; + unsigned int n = 0; + + adv7511_calc_cts_n(adv7511->f_tmds, adv7511->f_audio, &cts, &n); + + regmap_write(adv7511->regmap, ADV7511_REG_N0, (n >> 16) & 0xf); + regmap_write(adv7511->regmap, ADV7511_REG_N1, (n >> 8) & 0xff); + regmap_write(adv7511->regmap, ADV7511_REG_N2, n & 0xff); + + regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL0, + (cts >> 16) & 0xf); + regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL1, + (cts >> 8) & 0xff); + regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL2, + cts & 0xff); + + return 0; +} + +int adv7511_hdmi_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms) +{ + struct adv7511 *adv7511 = dev_get_drvdata(dev); + unsigned int audio_source, i2s_format = 0; + unsigned int invert_clock; + unsigned int rate; + unsigned int len; + + switch (hparms->sample_rate) { + case 32000: + rate = ADV7511_SAMPLE_FREQ_32000; + break; + case 44100: + rate = ADV7511_SAMPLE_FREQ_44100; + break; + case 48000: + rate = ADV7511_SAMPLE_FREQ_48000; + break; + case 88200: + rate = ADV7511_SAMPLE_FREQ_88200; + break; + case 96000: + rate = ADV7511_SAMPLE_FREQ_96000; + break; + case 176400: + rate = ADV7511_SAMPLE_FREQ_176400; + break; + case 192000: + rate = ADV7511_SAMPLE_FREQ_192000; + break; + default: + return -EINVAL; + } + + switch (hparms->sample_width) { + case 16: + len = ADV7511_I2S_SAMPLE_LEN_16; + break; + case 18: + len = ADV7511_I2S_SAMPLE_LEN_18; + break; + case 20: + len = ADV7511_I2S_SAMPLE_LEN_20; + break; + case 24: + len = ADV7511_I2S_SAMPLE_LEN_24; + break; + default: + return -EINVAL; + } + + switch (fmt->fmt) { + case HDMI_I2S: + audio_source = ADV7511_AUDIO_SOURCE_I2S; + i2s_format = ADV7511_I2S_FORMAT_I2S; + break; + case HDMI_RIGHT_J: + audio_source = ADV7511_AUDIO_SOURCE_I2S; + i2s_format = ADV7511_I2S_FORMAT_RIGHT_J; + break; + case HDMI_LEFT_J: + audio_source = ADV7511_AUDIO_SOURCE_I2S; + i2s_format = ADV7511_I2S_FORMAT_LEFT_J; + break; + default: + return -EINVAL; + } + + invert_clock = fmt->bit_clk_inv; + + regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_SOURCE, 0x70, + audio_source << 4); + regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, BIT(6), + invert_clock << 6); + regmap_update_bits(adv7511->regmap, ADV7511_REG_I2S_CONFIG, 0x03, + i2s_format); + + adv7511->audio_source = audio_source; + + adv7511->f_audio = hparms->sample_rate; + + adv7511_update_cts_n(adv7511); + + regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG3, + ADV7511_AUDIO_CFG3_LEN_MASK, len); + regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG, + ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4); + regmap_write(adv7511->regmap, 0x73, 0x1); + + return 0; +} + +static int audio_startup(struct device *dev, void *data) +{ + struct adv7511 *adv7511 = dev_get_drvdata(dev); + + regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, + BIT(7), 0); + + /* hide Audio infoframe updates */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE, + BIT(5), BIT(5)); + /* enable 
N/CTS, enable Audio sample packets */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1, + BIT(5), BIT(5)); + /* enable N/CTS */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1, + BIT(6), BIT(6)); + /* not copyrighted */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG1, + BIT(5), BIT(5)); + /* enable audio infoframes */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1, + BIT(3), BIT(3)); + /* AV mute disable */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0), + BIT(7) | BIT(6), BIT(7)); + /* use Audio infoframe updated info */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1), + BIT(5), 0); + return 0; +} + +static void audio_shutdown(struct device *dev, void *data) +{ +} + +static const struct hdmi_codec_ops adv7511_codec_ops = { + .hw_params = adv7511_hdmi_hw_params, + .audio_shutdown = audio_shutdown, + .audio_startup = audio_startup, +}; + +static struct hdmi_codec_pdata codec_data = { + .ops = &adv7511_codec_ops, + .max_i2s_channels = 2, + .i2s = 1, +}; + +int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511) +{ + adv7511->audio_pdev = platform_device_register_data(dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + return PTR_ERR_OR_ZERO(adv7511->audio_pdev); +} + +void adv7511_audio_exit(struct adv7511 *adv7511) +{ + if (adv7511->audio_pdev) { + platform_device_unregister(adv7511->audio_pdev); + adv7511->audio_pdev = NULL; + } +} diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 8ed3906dd411..8dba729f6ef9 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -1037,6 +1037,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) goto err_unregister_cec; } + adv7511_audio_init(dev, adv7511); + return 0; err_unregister_cec: @@ -1058,6 +1060,8 @@ static int adv7511_remove(struct i2c_client *i2c) drm_bridge_remove(&adv7511->bridge); + adv7511_audio_exit(adv7511); + i2c_unregister_device(adv7511->i2c_edid); kfree(adv7511->edid); diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c index d7f7b7ce8ebe..8b210373cfa2 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7533.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c @@ -29,6 +29,7 @@ static const struct reg_sequence adv7533_cec_fixed_registers[] = { { 0x17, 0xd0 }, { 0x24, 0x20 }, { 0x57, 0x11 }, + { 0x05, 0xc8 }, }; static const struct regmap_config adv7533_cec_regmap_config = { diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c index afec232185a7..e5706981c934 100644 --- a/drivers/gpu/drm/bridge/dumb-vga-dac.c +++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c @@ -12,6 +12,7 @@ #include <linux/module.h> #include <linux/of_graph.h> +#include <linux/regulator/consumer.h> #include <drm/drmP.h> #include <drm/drm_atomic_helper.h> @@ -23,6 +24,7 @@ struct dumb_vga { struct drm_connector connector; struct i2c_adapter *ddc; + struct regulator *vdd; }; static inline struct dumb_vga * @@ -124,8 +126,30 @@ static int dumb_vga_attach(struct drm_bridge *bridge) return 0; } +static void dumb_vga_enable(struct drm_bridge *bridge) +{ + struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge); + int ret = 0; + + if (vga->vdd) + ret = regulator_enable(vga->vdd); + + if (ret) + DRM_ERROR("Failed to enable vdd regulator: %d\n", ret); +} + +static void dumb_vga_disable(struct drm_bridge *bridge) +{ + struct dumb_vga 
*vga = drm_bridge_to_dumb_vga(bridge); + + if (vga->vdd) + regulator_disable(vga->vdd); +} + static const struct drm_bridge_funcs dumb_vga_bridge_funcs = { .attach = dumb_vga_attach, + .enable = dumb_vga_enable, + .disable = dumb_vga_disable, }; static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev) @@ -169,6 +193,15 @@ static int dumb_vga_probe(struct platform_device *pdev) return -ENOMEM; platform_set_drvdata(pdev, vga); + vga->vdd = devm_regulator_get_optional(&pdev->dev, "vdd"); + if (IS_ERR(vga->vdd)) { + ret = PTR_ERR(vga->vdd); + if (ret == -EPROBE_DEFER) + return -EPROBE_DEFER; + vga->vdd = NULL; + dev_dbg(&pdev->dev, "No vdd regulator found: %d\n", ret); + } + vga->ddc = dumb_vga_retrieve_ddc(&pdev->dev); if (IS_ERR(vga->ddc)) { if (PTR_ERR(vga->ddc) == -ENODEV) { diff --git a/drivers/gpu/drm/bridge/dw-hdmi-audio.h b/drivers/gpu/drm/bridge/dw-hdmi-audio.h index 91f631beecc7..fd1f745c6073 100644 --- a/drivers/gpu/drm/bridge/dw-hdmi-audio.h +++ b/drivers/gpu/drm/bridge/dw-hdmi-audio.h @@ -11,4 +11,11 @@ struct dw_hdmi_audio_data { u8 *eld; }; +struct dw_hdmi_i2s_audio_data { + struct dw_hdmi *hdmi; + + void (*write)(struct dw_hdmi *hdmi, u8 val, int offset); + u8 (*read)(struct dw_hdmi *hdmi, int offset); +}; + #endif diff --git a/drivers/gpu/drm/bridge/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/dw-hdmi-i2s-audio.c new file mode 100644 index 000000000000..aaf287d2e91d --- /dev/null +++ b/drivers/gpu/drm/bridge/dw-hdmi-i2s-audio.c @@ -0,0 +1,141 @@ +/* + * dw-hdmi-i2s-audio.c + * + * Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include <drm/bridge/dw_hdmi.h> + +#include <sound/hdmi-codec.h> + +#include "dw-hdmi.h" +#include "dw-hdmi-audio.h" + +#define DRIVER_NAME "dw-hdmi-i2s-audio" + +static inline void hdmi_write(struct dw_hdmi_i2s_audio_data *audio, + u8 val, int offset) +{ + struct dw_hdmi *hdmi = audio->hdmi; + + audio->write(hdmi, val, offset); +} + +static inline u8 hdmi_read(struct dw_hdmi_i2s_audio_data *audio, int offset) +{ + struct dw_hdmi *hdmi = audio->hdmi; + + return audio->read(hdmi, offset); +} + +static int dw_hdmi_i2s_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms) +{ + struct dw_hdmi_i2s_audio_data *audio = data; + struct dw_hdmi *hdmi = audio->hdmi; + u8 conf0 = 0; + u8 conf1 = 0; + u8 inputclkfs = 0; + + /* only I2S is supported */ + if ((fmt->fmt != HDMI_I2S) || + (fmt->bit_clk_master | fmt->frame_clk_master)) { + dev_err(dev, "unsupported format/settings\n"); + return -EINVAL; + } + + inputclkfs = HDMI_AUD_INPUTCLKFS_64FS; + conf0 = HDMI_AUD_CONF0_I2S_ALL_ENABLE; + + switch (hparms->sample_width) { + case 16: + conf1 = HDMI_AUD_CONF1_WIDTH_16; + break; + case 24: + case 32: + conf1 = HDMI_AUD_CONF1_WIDTH_24; + break; + } + + dw_hdmi_set_sample_rate(hdmi, hparms->sample_rate); + + hdmi_write(audio, inputclkfs, HDMI_AUD_INPUTCLKFS); + hdmi_write(audio, conf0, HDMI_AUD_CONF0); + hdmi_write(audio, conf1, HDMI_AUD_CONF1); + + dw_hdmi_audio_enable(hdmi); + + return 0; +} + +static void dw_hdmi_i2s_audio_shutdown(struct device *dev, void *data) +{ + struct dw_hdmi_i2s_audio_data *audio = data; + struct dw_hdmi *hdmi = audio->hdmi; + + dw_hdmi_audio_disable(hdmi); + + hdmi_write(audio, HDMI_AUD_CONF0_SW_RESET, HDMI_AUD_CONF0); +} + +static struct hdmi_codec_ops dw_hdmi_i2s_ops = { + .hw_params = dw_hdmi_i2s_hw_params, + .audio_shutdown = dw_hdmi_i2s_audio_shutdown, +}; + +static int snd_dw_hdmi_probe(struct platform_device *pdev) +{ + struct dw_hdmi_i2s_audio_data *audio = pdev->dev.platform_data; + struct platform_device_info pdevinfo; + struct hdmi_codec_pdata pdata; + struct platform_device *platform; + + pdata.ops = &dw_hdmi_i2s_ops; + pdata.i2s = 1; + pdata.max_i2s_channels = 6; + pdata.data = audio; + + memset(&pdevinfo, 0, sizeof(pdevinfo)); + pdevinfo.parent = pdev->dev.parent; + pdevinfo.id = PLATFORM_DEVID_AUTO; + pdevinfo.name = HDMI_CODEC_DRV_NAME; + pdevinfo.data = &pdata; + pdevinfo.size_data = sizeof(pdata); + pdevinfo.dma_mask = DMA_BIT_MASK(32); + + platform = platform_device_register_full(&pdevinfo); + if (IS_ERR(platform)) + return PTR_ERR(platform); + + dev_set_drvdata(&pdev->dev, platform); + + return 0; +} + +static int snd_dw_hdmi_remove(struct platform_device *pdev) +{ + struct platform_device *platform = dev_get_drvdata(&pdev->dev); + + platform_device_unregister(platform); + + return 0; +} + +static struct platform_driver snd_dw_hdmi_driver = { + .probe = snd_dw_hdmi_probe, + .remove = snd_dw_hdmi_remove, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; +module_platform_driver(snd_dw_hdmi_driver); + +MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); +MODULE_DESCRIPTION("Synopsys DesignWare HDMI I2S ALSA SoC interface"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c index b71088dab268..235ce7d1583d 100644 --- a/drivers/gpu/drm/bridge/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/dw-hdmi.c @@ -1871,10 +1871,11 @@ int dw_hdmi_bind(struct device *dev, struct device *master, 
struct device_node *np = dev->of_node; struct platform_device_info pdevinfo; struct device_node *ddc_node; - struct dw_hdmi_audio_data audio; struct dw_hdmi *hdmi; int ret; u32 val = 1; + u8 config0; + u8 config1; hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); if (!hdmi) @@ -2011,7 +2012,12 @@ int dw_hdmi_bind(struct device *dev, struct device *master, pdevinfo.parent = dev; pdevinfo.id = PLATFORM_DEVID_AUTO; - if (hdmi_readb(hdmi, HDMI_CONFIG1_ID) & HDMI_CONFIG1_AHB) { + config0 = hdmi_readb(hdmi, HDMI_CONFIG0_ID); + config1 = hdmi_readb(hdmi, HDMI_CONFIG1_ID); + + if (config1 & HDMI_CONFIG1_AHB) { + struct dw_hdmi_audio_data audio; + audio.phys = iores->start; audio.base = hdmi->regs; audio.irq = irq; @@ -2023,6 +2029,18 @@ int dw_hdmi_bind(struct device *dev, struct device *master, pdevinfo.size_data = sizeof(audio); pdevinfo.dma_mask = DMA_BIT_MASK(32); hdmi->audio = platform_device_register_full(&pdevinfo); + } else if (config0 & HDMI_CONFIG0_I2S) { + struct dw_hdmi_i2s_audio_data audio; + + audio.hdmi = hdmi; + audio.write = hdmi_writeb; + audio.read = hdmi_readb; + + pdevinfo.name = "dw-hdmi-i2s-audio"; + pdevinfo.data = &audio; + pdevinfo.size_data = sizeof(audio); + pdevinfo.dma_mask = DMA_BIT_MASK(32); + hdmi->audio = platform_device_register_full(&pdevinfo); } /* Reset HDMI DDC I2C master controller and mute I2CM interrupts */ diff --git a/drivers/gpu/drm/bridge/dw-hdmi.h b/drivers/gpu/drm/bridge/dw-hdmi.h index 6aadc840e888..55135bbd0c16 100644 --- a/drivers/gpu/drm/bridge/dw-hdmi.h +++ b/drivers/gpu/drm/bridge/dw-hdmi.h @@ -545,6 +545,9 @@ #define HDMI_I2CM_FS_SCL_LCNT_0_ADDR 0x7E12 enum { +/* CONFIG0_ID field values */ + HDMI_CONFIG0_I2S = 0x10, + /* CONFIG1_ID field values */ HDMI_CONFIG1_AHB = 0x01, @@ -891,6 +894,17 @@ enum { HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL = 0x08, HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_MASK = 0x04, +/* AUD_CONF0 field values */ + HDMI_AUD_CONF0_SW_RESET = 0x80, + HDMI_AUD_CONF0_I2S_ALL_ENABLE = 0x2F, + +/* AUD_CONF1 field values */ + HDMI_AUD_CONF1_MODE_I2S = 0x00, + HDMI_AUD_CONF1_MODE_RIGHT_J = 0x02, + HDMI_AUD_CONF1_MODE_LEFT_J = 0x04, + HDMI_AUD_CONF1_WIDTH_16 = 0x10, + HDMI_AUD_CONF1_WIDTH_24 = 0x18, + /* AUD_CTS3 field values */ HDMI_AUD_CTS3_N_SHIFT_OFFSET = 5, HDMI_AUD_CTS3_N_SHIFT_MASK = 0xe0, @@ -905,6 +919,12 @@ enum { HDMI_AUD_CTS3_CTS_MANUAL = 0x10, HDMI_AUD_CTS3_AUDCTS19_16_MASK = 0x0f, +/* HDMI_AUD_INPUTCLKFS field values */ + HDMI_AUD_INPUTCLKFS_128FS = 0, + HDMI_AUD_INPUTCLKFS_256FS = 1, + HDMI_AUD_INPUTCLKFS_512FS = 2, + HDMI_AUD_INPUTCLKFS_64FS = 4, + /* AHB_DMA_CONF0 field values */ HDMI_AHB_DMA_CONF0_SW_FIFO_RST_OFFSET = 7, HDMI_AHB_DMA_CONF0_SW_FIFO_RST_MASK = 0x80, diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index b476ec585547..19d7bcb88217 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -965,12 +965,12 @@ static void drm_atomic_plane_print_state(struct drm_printer *p, drm_printf(p, "\t\tformat=%s\n", drm_get_format_name(fb->pixel_format, &format_name)); + drm_printf(p, "\t\t\tmodifier=0x%llx\n", fb->modifier); drm_printf(p, "\t\tsize=%dx%d\n", fb->width, fb->height); drm_printf(p, "\t\tlayers:\n"); for (i = 0; i < n; i++) { drm_printf(p, "\t\t\tpitch[%d]=%u\n", i, fb->pitches[i]); drm_printf(p, "\t\t\toffset[%d]=%u\n", i, fb->offsets[i]); - drm_printf(p, "\t\t\tmodifier[%d]=0x%llx\n", i, fb->modifier[i]); } } drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest)); @@ -1246,18 +1246,14 @@ void drm_atomic_set_fb_for_plane(struct drm_plane_state 
*plane_state, struct drm_framebuffer *fb) { - if (plane_state->fb) - drm_framebuffer_unreference(plane_state->fb); - if (fb) - drm_framebuffer_reference(fb); - plane_state->fb = fb; - if (fb) DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n", fb->base.id, plane_state); else DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n", plane_state); + + drm_framebuffer_assign(&plane_state->fb, fb); } EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); @@ -1686,6 +1682,13 @@ int drm_atomic_debugfs_init(struct drm_minor *minor) ARRAY_SIZE(drm_atomic_debugfs_list), minor->debugfs_root, minor); } + +int drm_atomic_debugfs_cleanup(struct drm_minor *minor) +{ + return drm_debugfs_remove_files(drm_atomic_debugfs_list, + ARRAY_SIZE(drm_atomic_debugfs_list), + minor); +} #endif /* @@ -1809,6 +1812,58 @@ void drm_atomic_clean_old_fb(struct drm_device *dev, } EXPORT_SYMBOL(drm_atomic_clean_old_fb); +/** + * DOC: explicit fencing properties + * + * Explicit fencing allows userspace to control the buffer synchronization + * between devices. A Fence or a group of fences are transferred to/from + * userspace using Sync File fds and there are two DRM properties for that. + * IN_FENCE_FD on each DRM Plane to send fences to the kernel and + * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel. + * + * As a contrast, with implicit fencing the kernel keeps track of any + * ongoing rendering, and automatically ensures that the atomic update waits + * for any pending rendering to complete. For shared buffers represented with + * a struct &dma_buf this is tracked in &reservation_object structures. + * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org), + * whereas explicit fencing is what Android wants. + * + * "IN_FENCE_FD": + * Use this property to pass a fence that DRM should wait on before + * proceeding with the Atomic Commit request and show the framebuffer for + * the plane on the screen. The fence can be either a normal fence or a + * merged one, the sync_file framework will handle both cases and use a + * fence_array if a merged fence is received. Passing -1 here means no + * fences to wait on. + * + * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag + * it will only check if the Sync File is a valid one. + * + * On the driver side the fence is stored on the @fence parameter of + * struct &drm_plane_state. Drivers which also support implicit fencing + * should set the implicit fence using drm_atomic_set_fence_for_plane(), + * to make sure there's consistent behaviour between drivers in precedence + * of implicit vs. explicit fencing. + * + * "OUT_FENCE_PTR": + * Use this property to pass a file descriptor pointer to DRM. Once the + * Atomic Commit request call returns OUT_FENCE_PTR will be filled with + * the file descriptor number of a Sync File. This Sync File contains the + * CRTC fence that will be signaled when all framebuffers present on the + * Atomic Commit request for that given CRTC are scanned out on the + * screen. + * + * The Atomic Commit request fails if an invalid pointer is passed. If the + * Atomic Commit request fails for any other reason the out fence fd + * returned will be -1. On an Atomic Commit with the + * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1. 
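On the userspace side, the two properties documented above are driven through the libdrm atomic API roughly as follows. This is a sketch only: the plane, CRTC and property IDs are assumed to have been looked up via drmModeObjectGetProperties(), the usual FB_ID/CRTC_ID plumbing is omitted, and the function name is illustrative:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int commit_with_fences(int fd, uint32_t plane_id, uint32_t prop_in_fence_fd,
		       uint32_t crtc_id, uint32_t prop_out_fence_ptr,
		       int in_fence_fd)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int out_fence_fd = -1;
	int ret;

	/* IN_FENCE_FD: the kernel waits on this sync_file before scanout. */
	drmModeAtomicAddProperty(req, plane_id, prop_in_fence_fd, in_fence_fd);
	/* OUT_FENCE_PTR: the kernel fills in a sync_file fd through this
	 * pointer by the time the commit call returns. */
	drmModeAtomicAddProperty(req, crtc_id, prop_out_fence_ptr,
				 (uint64_t)(uintptr_t)&out_fence_fd);

	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
	drmModeAtomicFree(req);
	return ret ? ret : out_fence_fd;
}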
+ * + * Note that out-fences don't have a special interface to drivers and are + * internally represented by a struct &drm_pending_vblank_event in struct + * &drm_crtc_state, which is also used by the nonblocking atomic commit + * helpers and for the DRM event handling for existing userspace. + */ + static struct dma_fence *get_crtc_fence(struct drm_crtc *crtc) { struct dma_fence *fence; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 0b16587cdc62..494680c9056e 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1006,13 +1006,21 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables); * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state * @dev: DRM device * @state: atomic state object with old state structures - * @pre_swap: if true, do an interruptible wait + * @pre_swap: If true, do an interruptible wait, and @state is the new state. + * Otherwise @state is the old state. * * For implicit sync, driver should fish the exclusive fence out from the * incoming fb's and stash it in the drm_plane_state. This is called after * drm_atomic_helper_swap_state() so it uses the current plane state (and * just uses the atomic state to find the changed planes) * + * Note that @pre_swap is needed since the point where we block for fences moves + * around depending upon whether an atomic commit is blocking or + * non-blocking. For async commit all waiting needs to happen after + * drm_atomic_helper_swap_state() is called, but for synchronous commits we want + * to wait **before** we do anything that can't be easily rolled back. That is + * before we call drm_atomic_helper_swap_state(). + * * Returns zero if success or < 0 if dma_fence_wait() fails. */ int drm_atomic_helper_wait_for_fences(struct drm_device *dev, @@ -1147,7 +1155,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); /** * drm_atomic_helper_commit_tail - commit atomic update to hardware - * @state: new modeset state to be committed + * @old_state: atomic state object with old state structures * * This is the default implementation for the ->atomic_commit_tail() hook of the * &drm_mode_config_helper_funcs vtable. @@ -1158,53 +1166,53 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); * * For drivers supporting runtime PM the recommended sequence is instead :: * - * drm_atomic_helper_commit_modeset_disables(dev, state); + * drm_atomic_helper_commit_modeset_disables(dev, old_state); * - * drm_atomic_helper_commit_modeset_enables(dev, state); + * drm_atomic_helper_commit_modeset_enables(dev, old_state); * - * drm_atomic_helper_commit_planes(dev, state, + * drm_atomic_helper_commit_planes(dev, old_state, * DRM_PLANE_COMMIT_ACTIVE_ONLY); * * for committing the atomic update to hardware. See the kerneldoc entries for * these three functions for more details. 
*/ -void drm_atomic_helper_commit_tail(struct drm_atomic_state *state) +void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state) { - struct drm_device *dev = state->dev; + struct drm_device *dev = old_state->dev; - drm_atomic_helper_commit_modeset_disables(dev, state); + drm_atomic_helper_commit_modeset_disables(dev, old_state); - drm_atomic_helper_commit_planes(dev, state, 0); + drm_atomic_helper_commit_planes(dev, old_state, 0); - drm_atomic_helper_commit_modeset_enables(dev, state); + drm_atomic_helper_commit_modeset_enables(dev, old_state); - drm_atomic_helper_commit_hw_done(state); + drm_atomic_helper_commit_hw_done(old_state); - drm_atomic_helper_wait_for_vblanks(dev, state); + drm_atomic_helper_wait_for_vblanks(dev, old_state); - drm_atomic_helper_cleanup_planes(dev, state); + drm_atomic_helper_cleanup_planes(dev, old_state); } EXPORT_SYMBOL(drm_atomic_helper_commit_tail); -static void commit_tail(struct drm_atomic_state *state) +static void commit_tail(struct drm_atomic_state *old_state) { - struct drm_device *dev = state->dev; + struct drm_device *dev = old_state->dev; struct drm_mode_config_helper_funcs *funcs; funcs = dev->mode_config.helper_private; - drm_atomic_helper_wait_for_fences(dev, state, false); + drm_atomic_helper_wait_for_fences(dev, old_state, false); - drm_atomic_helper_wait_for_dependencies(state); + drm_atomic_helper_wait_for_dependencies(old_state); if (funcs && funcs->atomic_commit_tail) - funcs->atomic_commit_tail(state); + funcs->atomic_commit_tail(old_state); else - drm_atomic_helper_commit_tail(state); + drm_atomic_helper_commit_tail(old_state); - drm_atomic_helper_commit_cleanup_done(state); + drm_atomic_helper_commit_cleanup_done(old_state); - drm_atomic_state_put(state); + drm_atomic_state_put(old_state); } static void commit_work(struct work_struct *work) @@ -1498,10 +1506,10 @@ static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc) /** * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits - * @state: new modeset state to be committed + * @old_state: atomic state object with old state structures * * This function waits for all preceding commits that touch the same CRTC as - * @state to both be committed to the hardware (as signalled by + * @old_state to both be committed to the hardware (as signalled by * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled * by calling drm_crtc_vblank_send_event on the event member of * &drm_crtc_state). @@ -1509,7 +1517,7 @@ static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc) * This is part of the atomic helper support for nonblocking commits, see * drm_atomic_helper_setup_commit() for an overview.
*/ -void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state) +void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state) { struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; @@ -1517,7 +1525,7 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state) int i; long ret; - for_each_crtc_in_state(state, crtc, crtc_state, i) { + for_each_crtc_in_state(old_state, crtc, crtc_state, i) { spin_lock(&crtc->commit_lock); commit = preceeding_commit(crtc); if (commit) @@ -1548,7 +1556,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies); /** * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step - * @state: new modeset state to be committed + * @old_state: atomic state object with old state structures * * This function is used to signal completion of the hardware commit step. After * this step the driver is not allowed to read or change any permanent software @@ -1561,15 +1569,15 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies); * This is part of the atomic helper support for nonblocking commits, see * drm_atomic_helper_setup_commit() for an overview. */ -void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state) +void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state) { struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; struct drm_crtc_commit *commit; int i; - for_each_crtc_in_state(state, crtc, crtc_state, i) { - commit = state->crtcs[i].commit; + for_each_crtc_in_state(old_state, crtc, crtc_state, i) { + commit = old_state->crtcs[i].commit; if (!commit) continue; @@ -1584,16 +1592,16 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done); /** * drm_atomic_helper_commit_cleanup_done - signal completion of commit - * @state: new modeset state to be committed + * @old_state: atomic state object with old state structures * - * This signals completion of the atomic update @state, including any cleanup - * work. If used, it must be called right before calling + * This signals completion of the atomic update @old_state, including any + * cleanup work. If used, it must be called right before calling * drm_atomic_state_put(). * * This is part of the atomic helper support for nonblocking commits, see * drm_atomic_helper_setup_commit() for an overview. */ -void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state) +void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state) { struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; @@ -1601,8 +1609,8 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state) int i; long ret; - for_each_crtc_in_state(state, crtc, crtc_state, i) { - commit = state->crtcs[i].commit; + for_each_crtc_in_state(old_state, crtc, crtc_state, i) { + commit = old_state->crtcs[i].commit; if (WARN_ON(!commit)) continue; diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index b5c6a8ee831e..5a4526289392 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -588,6 +588,50 @@ static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = { DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, drm_tv_subconnector_enum_list) +/** + * DOC: standard connector properties + * + * DRM connectors have a few standardized properties: + * + * EDID: + * Blob property which contains the current EDID read from the sink. This + * is useful to parse sink identification information like vendor, model + * and serial.
Drivers should update this property by calling + * drm_mode_connector_update_edid_property(), usually after having parsed + * the EDID using drm_add_edid_modes(). Userspace cannot change this + * property. + * DPMS: + * Legacy property for setting the power state of the connector. For atomic + * drivers this is only provided for backwards compatibility with existing + * drivers; it remaps to controlling the "ACTIVE" property on the CRTC the + * connector is linked to. Drivers should never set this property directly; + * it is handled by the DRM core by calling the ->dpms() callback in + * &drm_connector_funcs. Atomic drivers should implement this hook using + * drm_atomic_helper_connector_dpms(). This is the only standard + * connector property that userspace can change. + * PATH: + * Connector path property to identify how this sink is physically + * connected. Used by DP MST. This should be set by calling + * drm_mode_connector_set_path_property(), in the case of DP MST with the + * path property the MST manager created. Userspace cannot change this + * property. + * TILE: + * Connector tile group property to indicate how a set of DRM connectors + * compose together into one logical screen. This is used by both high-res + * external screens (often only using a single cable, but exposing multiple + * DP MST sinks) and high-res integrated panels (like dual-link DSI) which + * are not gen-locked. Note that for tiled panels which are genlocked, like + * dual-link LVDS or dual-link DSI, the driver should try to not expose the + * tiling and virtualize both &drm_crtc and &drm_plane if needed. Drivers + * should update this value using drm_mode_connector_set_tile_property(). + * Userspace cannot change this property. + * + * Connectors also have one standardized atomic property: + * + * CRTC_ID: + * Mode object ID of the &drm_crtc this connector should be connected to. + */ + int drm_connector_create_standard_properties(struct drm_device *dev) { struct drm_property *prop; diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 206a4fe7ea26..2e3e46a53805 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -228,6 +228,7 @@ EXPORT_SYMBOL(drm_debugfs_remove_files); int drm_debugfs_cleanup(struct drm_minor *minor) { struct drm_device *dev = minor->dev; + int ret; if (!minor->debugfs_root) return 0; @@ -235,6 +236,14 @@ int drm_debugfs_cleanup(struct drm_minor *minor) if (dev->driver->debugfs_cleanup) dev->driver->debugfs_cleanup(minor); + if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { + ret = drm_atomic_debugfs_cleanup(minor); + if (ret) { + DRM_ERROR("DRM: Failed to remove atomic debugfs entries\n"); + return ret; + } + } + drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor); debugfs_remove(minor->debugfs_root); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 6dbb98649795..f74b7d06ec01 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -303,9 +303,10 @@ void drm_minor_release(struct drm_minor *minor) * callbacks implemented by the driver. The driver then needs to initialize all * the various subsystems for the drm device like memory management, vblank * handling, modesetting support and initial output configuration plus obviously - * initialize all the corresponding hardware bits.
An important part of this is + * also calling drm_dev_set_unique() to set the userspace-visible unique name of + * this device instance. Finally when everything is up and running and ready for + * userspace the device instance can be published using drm_dev_register(). * * There is also deprecated support for initializing device instances using * bus-specific helpers and the ->load() callback. But due to @@ -327,17 +328,6 @@ void drm_minor_release(struct drm_minor *minor) * dev_priv field of &drm_device. */ -static int drm_dev_set_unique(struct drm_device *dev, const char *name) -{ - if (!name) - return -EINVAL; - - kfree(dev->unique); - dev->unique = kstrdup(name, GFP_KERNEL); - - return dev->unique ? 0 : -ENOMEM; -} - /** * drm_put_dev - Unregister and release a DRM device * @dev: DRM device @@ -507,12 +497,6 @@ int drm_dev_init(struct drm_device *dev, goto err_free; } - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL); - if (ret) - goto err_minors; - } - if (drm_core_check_feature(dev, DRIVER_RENDER)) { ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); if (ret) @@ -760,6 +744,26 @@ void drm_dev_unregister(struct drm_device *dev) } EXPORT_SYMBOL(drm_dev_unregister); +/** + * drm_dev_set_unique - Set the unique name of a DRM device + * @dev: device of which to set the unique name + * @name: unique name + * + * Sets the unique name of a DRM device using the specified string. Drivers + * can use this at driver probe time if the unique name of the devices they + * drive is static. + * + * Return: 0 on success or a negative error code on failure. + */ +int drm_dev_set_unique(struct drm_device *dev, const char *name) +{ + kfree(dev->unique); + dev->unique = kstrdup(name, GFP_KERNEL); + + return dev->unique ? 0 : -ENOMEM; +} +EXPORT_SYMBOL(drm_dev_set_unique); + /* * DRM Core * The DRM core module initializes all global DRM objects and makes them diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 7eec18925b70..336be31ff3de 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2613,6 +2613,41 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode) return clock; } +static bool +cea_mode_alternate_timings(u8 vic, struct drm_display_mode *mode) +{ + /* + * For certain VICs the spec allows the vertical + * front porch to vary by one or two lines. + * + * cea_modes[] stores the variant with the shortest + * vertical front porch. We can adjust the mode to + * get the other variants by simply increasing the + * vertical front porch length.
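+ *
+ * Concretely (as encoded in the checks below): VICs 8/9/12/13 may use a
+ * vtotal of 262 or 263 lines, and VICs 23/24/27/28 may use 312, 313 or
+ * 314 lines; all other timing parameters stay the same.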
+ */ + BUILD_BUG_ON(edid_cea_modes[8].vtotal != 262 || + edid_cea_modes[9].vtotal != 262 || + edid_cea_modes[12].vtotal != 262 || + edid_cea_modes[13].vtotal != 262 || + edid_cea_modes[23].vtotal != 312 || + edid_cea_modes[24].vtotal != 312 || + edid_cea_modes[27].vtotal != 312 || + edid_cea_modes[28].vtotal != 312); + + if (((vic == 8 || vic == 9 || + vic == 12 || vic == 13) && mode->vtotal < 263) || + ((vic == 23 || vic == 24 || + vic == 27 || vic == 28) && mode->vtotal < 314)) { + mode->vsync_start++; + mode->vsync_end++; + mode->vtotal++; + + return true; + } + + return false; +} + static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match, unsigned int clock_tolerance) { @@ -2622,19 +2657,21 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m return 0; for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) { - const struct drm_display_mode *cea_mode = &edid_cea_modes[vic]; + struct drm_display_mode cea_mode = edid_cea_modes[vic]; unsigned int clock1, clock2; /* Check both 60Hz and 59.94Hz */ - clock1 = cea_mode->clock; - clock2 = cea_mode_alternate_clock(cea_mode); + clock1 = cea_mode.clock; + clock2 = cea_mode_alternate_clock(&cea_mode); if (abs(to_match->clock - clock1) > clock_tolerance && abs(to_match->clock - clock2) > clock_tolerance) continue; - if (drm_mode_equal_no_clocks(to_match, cea_mode)) - return vic; + do { + if (drm_mode_equal_no_clocks_no_stereo(to_match, &cea_mode)) + return vic; + } while (cea_mode_alternate_timings(vic, &cea_mode)); } return 0; @@ -2655,18 +2692,23 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match) return 0; for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) { - const struct drm_display_mode *cea_mode = &edid_cea_modes[vic]; + struct drm_display_mode cea_mode = edid_cea_modes[vic]; unsigned int clock1, clock2; /* Check both 60Hz and 59.94Hz */ - clock1 = cea_mode->clock; - clock2 = cea_mode_alternate_clock(cea_mode); + clock1 = cea_mode.clock; + clock2 = cea_mode_alternate_clock(&cea_mode); - if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) || - KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) && - drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode)) - return vic; + if (KHZ2PICOS(to_match->clock) != KHZ2PICOS(clock1) && + KHZ2PICOS(to_match->clock) != KHZ2PICOS(clock2)) + continue; + + do { + if (drm_mode_equal_no_clocks_no_stereo(to_match, &cea_mode)) + return vic; + } while (cea_mode_alternate_timings(vic, &cea_mode)); } + return 0; } EXPORT_SYMBOL(drm_match_cea_mode); @@ -3611,32 +3653,6 @@ int drm_av_sync_delay(struct drm_connector *connector, EXPORT_SYMBOL(drm_av_sync_delay); /** - * drm_select_eld - select one ELD from multiple HDMI/DP sinks - * @encoder: the encoder just changed display mode - * - * It's possible for one encoder to be associated with multiple HDMI/DP sinks. - * The policy is now hard coded to simply use the first HDMI/DP sink's ELD. - * - * Return: The connector associated with the first HDMI/DP sink that has ELD - * attached to it. 
- */ -struct drm_connector *drm_select_eld(struct drm_encoder *encoder) -{ - struct drm_connector *connector; - struct drm_device *dev = encoder->dev; - - WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); - - drm_for_each_connector(connector, dev) - if (connector->encoder == encoder && connector->eld[0]) - return connector; - - return NULL; -} -EXPORT_SYMBOL(drm_select_eld); - -/** * drm_detect_hdmi_monitor - detect whether monitor is HDMI * @edid: monitor EDID information * diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 14547817566d..1f26634f53d8 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1959,19 +1959,20 @@ static bool drm_target_preferred(struct drm_fb_helper *fb_helper, bool *enabled, int width, int height) { struct drm_fb_helper_connector *fb_helper_conn; - int i; - uint64_t conn_configured = 0, mask; + const u64 mask = BIT_ULL(fb_helper->connector_count) - 1; + u64 conn_configured = 0; int tile_pass = 0; - mask = (1 << fb_helper->connector_count) - 1; + int i; + retry: for (i = 0; i < fb_helper->connector_count; i++) { fb_helper_conn = fb_helper->connector_info[i]; - if (conn_configured & (1 << i)) + if (conn_configured & BIT_ULL(i)) continue; if (enabled[i] == false) { - conn_configured |= (1 << i); + conn_configured |= BIT_ULL(i); continue; } @@ -2012,7 +2013,7 @@ retry: } DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name : "none"); - conn_configured |= (1 << i); + conn_configured |= BIT_ULL(i); } if ((conn_configured & mask) != mask) { diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 06ad3d1350c4..cbf0c893f426 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -177,6 +177,13 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) return -EINVAL; } + if (r->flags & DRM_MODE_FB_MODIFIERS && + r->modifier[i] != r->modifier[0]) { + DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n", + r->modifier[i], i); + return -EINVAL; + } + /* modifier specific checks: */ switch (r->modifier[i]) { case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE: diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index c901f3c5b269..32d43f86a8f2 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -176,7 +176,8 @@ int drm_legacy_lock(struct drm_device *dev, void *data, DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", lock->context, task_pid_nr(current), - master->lock.hw_lock->lock, lock->flags); + master->lock.hw_lock ? 
master->lock.hw_lock->lock : -1, + lock->flags); add_wait_queue(&master->lock.lock_queue, &entry); spin_lock_bh(&master->lock.spinlock); diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 632473beb40c..025dcd8cadcb 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -174,19 +174,12 @@ INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, START, LAST, static inline, drm_mm_interval_tree) struct drm_mm_node * -drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last) +__drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last) { return drm_mm_interval_tree_iter_first(&mm->interval_tree, start, last); } -EXPORT_SYMBOL(drm_mm_interval_first); - -struct drm_mm_node * -drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last) -{ - return drm_mm_interval_tree_iter_next(node, start, last); -} -EXPORT_SYMBOL(drm_mm_interval_next); +EXPORT_SYMBOL(__drm_mm_interval_first); static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node, struct drm_mm_node *node) @@ -313,6 +306,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) u64 end = node->start + node->size; struct drm_mm_node *hole; u64 hole_start, hole_end; + u64 adj_start, adj_end; if (WARN_ON(node->size == 0)) return -EINVAL; @@ -334,9 +328,13 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) if (!hole->hole_follows) return -ENOSPC; - hole_start = __drm_mm_hole_node_start(hole); - hole_end = __drm_mm_hole_node_end(hole); - if (hole_start > node->start || hole_end < end) + adj_start = hole_start = __drm_mm_hole_node_start(hole); + adj_end = hole_end = __drm_mm_hole_node_end(hole); + + if (mm->color_adjust) + mm->color_adjust(hole, node->color, &adj_start, &adj_end); + + if (adj_start > node->start || adj_end < end) return -ENOSPC; node->mm = mm; diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c index 2f452b3dd40e..cc232ac6c950 100644 --- a/drivers/gpu/drm/drm_modeset_helper.c +++ b/drivers/gpu/drm/drm_modeset_helper.c @@ -38,7 +38,7 @@ * Some userspace presumes that the first connected connector is the main * display, where it's supposed to display e.g. the login screen. For * laptops, this should be the main panel. Use this function to sort all - * (eDP/LVDS) panels to the front of the connector list, instead of + * (eDP/LVDS/DSI) panels to the front of the connector list, instead of * painstakingly trying to initialize them in the right order. 
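 *
 * A typical (illustrative) call site is the end of a driver's output setup,
 * once all connectors have been registered; foo_setup_outputs() below is a
 * hypothetical driver function:
 *
 *     foo_setup_outputs(dev);
 *     drm_helper_move_panel_connectors_to_head(dev);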
*/ void drm_helper_move_panel_connectors_to_head(struct drm_device *dev) @@ -51,7 +51,8 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *dev) list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS || - connector->connector_type == DRM_MODE_CONNECTOR_eDP) + connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_DSI) list_move_tail(&connector->head, &panel_list); } @@ -93,8 +94,8 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, for (i = 0; i < 4; i++) { fb->pitches[i] = mode_cmd->pitches[i]; fb->offsets[i] = mode_cmd->offsets[i]; - fb->modifier[i] = mode_cmd->modifier[i]; } + fb->modifier = mode_cmd->modifier[0]; fb->pixel_format = mode_cmd->pixel_format; fb->flags = mode_cmd->flags; } diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index a4d81cf4ffa0..24be69d29964 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c @@ -65,9 +65,9 @@ static bool drm_property_type_valid(struct drm_property *property) * @num_values: number of pre-defined values * * This creates a new generic drm property which can then be attached to a drm - * object with drm_object_attach_property. The returned property object must be - * freed with drm_property_destroy(), which is done automatically when calling - * drm_mode_config_cleanup(). + * object with drm_object_attach_property(). The returned property object must + * be freed with drm_property_destroy(), which is done automatically when + * calling drm_mode_config_cleanup(). * * Returns: * A pointer to the newly created property on success, NULL on failure. @@ -125,9 +125,9 @@ EXPORT_SYMBOL(drm_property_create); * @num_values: number of pre-defined values * * This creates a new generic drm property which can then be attached to a drm - * object with drm_object_attach_property. The returned property object must be - * freed with drm_property_destroy(), which is done automatically when calling - * drm_mode_config_cleanup(). + * object with drm_object_attach_property(). The returned property object must + * be freed with drm_property_destroy(), which is done automatically when + * calling drm_mode_config_cleanup(). * * Userspace is only allowed to set one of the predefined values for enumeration * properties. @@ -173,9 +173,9 @@ EXPORT_SYMBOL(drm_property_create_enum); * @supported_bits: bitmask of all supported enumeration values * * This creates a new bitmask drm property which can then be attached to a drm - * object with drm_object_attach_property. The returned property object must be - * freed with drm_property_destroy(), which is done automatically when calling - * drm_mode_config_cleanup(). + * object with drm_object_attach_property(). The returned property object must + * be freed with drm_property_destroy(), which is done automatically when + * calling drm_mode_config_cleanup(). * * Compared to plain enumeration properties userspace is allowed to set any * or'ed together combination of the predefined property bitflag values @@ -245,9 +245,9 @@ static struct drm_property *property_create_range(struct drm_device *dev, * @max: maximum value of the property * * This creates a new generic drm property which can then be attached to a drm - * object with drm_object_attach_property. The returned property object must be - * freed with drm_property_destroy(), which is done automatically when calling - * drm_mode_config_cleanup(). 
+ * object with drm_object_attach_property(). The returned property object must + * be freed with drm_property_destroy(), which is done automatically when + * calling drm_mode_config_cleanup(). * * Userspace is allowed to set any unsigned integer value in the (min, max) * range inclusive. @@ -273,9 +273,9 @@ EXPORT_SYMBOL(drm_property_create_range); * @max: maximum value of the property * * This creates a new generic drm property which can then be attached to a drm - * object with drm_object_attach_property. The returned property object must be - * freed with drm_property_destroy(), which is done automatically when calling - * drm_mode_config_cleanup(). + * object with drm_object_attach_property(). The returned property object must + * be freed with drm_property_destroy(), which is done automatically when + * calling drm_mode_config_cleanup(). * * Userspace is allowed to set any signed integer value in the (min, max) * range inclusive. @@ -300,9 +300,9 @@ EXPORT_SYMBOL(drm_property_create_signed_range); * @type: object type from DRM_MODE_OBJECT_* defines * * This creates a new generic drm property which can then be attached to a drm - * object with drm_object_attach_property. The returned property object must be - * freed with drm_property_destroy(), which is done automatically when calling - * drm_mode_config_cleanup(). + * object with drm_object_attach_property(). The returned property object must + * be freed with drm_property_destroy(), which is done automatically when + * calling drm_mode_config_cleanup(). * * Userspace is only allowed to set this to any property value of the given * @type. Only useful for atomic properties, which is enforced. @@ -338,9 +338,9 @@ EXPORT_SYMBOL(drm_property_create_object); * @name: name of the property * * This creates a new generic drm property which can then be attached to a drm - * object with drm_object_attach_property. The returned property object must be - * freed with drm_property_destroy(), which is done automatically when calling - * drm_mode_config_cleanup(). + * object with drm_object_attach_property(). The returned property object must + * be freed with drm_property_destroy(), which is done automatically when + * calling drm_mode_config_cleanup(). * * This is implemented as a ranged property with only {0, 1} as valid values. 
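 *
 * A minimal usage sketch (illustrative only, not part of this patch; the
 * property name "dither" is just an example):
 *
 *     prop = drm_property_create_bool(dev, 0, "dither");
 *     if (prop)
 *             drm_object_attach_property(&connector->base, prop, 0);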
* @@ -729,7 +729,6 @@ int drm_mode_getblob_ioctl(struct drm_device *dev, struct drm_mode_get_blob *out_resp = data; struct drm_property_blob *blob; int ret = 0; - void __user *blob_ptr; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; @@ -739,8 +738,9 @@ int drm_mode_getblob_ioctl(struct drm_device *dev, return -ENOENT; if (out_resp->length == blob->length) { - blob_ptr = (void __user *)(unsigned long)out_resp->data; - if (copy_to_user(blob_ptr, blob->data, blob->length)) { + if (copy_to_user(u64_to_user_ptr(out_resp->data), + blob->data, + blob->length)) { ret = -EFAULT; goto unref; } @@ -757,7 +757,6 @@ int drm_mode_createblob_ioctl(struct drm_device *dev, { struct drm_mode_create_blob *out_resp = data; struct drm_property_blob *blob; - void __user *blob_ptr; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) @@ -767,8 +766,9 @@ int drm_mode_createblob_ioctl(struct drm_device *dev, if (IS_ERR(blob)) return PTR_ERR(blob); - blob_ptr = (void __user *)(unsigned long)out_resp->data; - if (copy_from_user(blob->data, blob_ptr, out_resp->length)) { + if (copy_from_user(blob->data, + u64_to_user_ptr(out_resp->data), + out_resp->length)) { ret = -EFAULT; goto out_blob; } diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 73ba8b05f1da..7e2043f4348c 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -94,7 +94,7 @@ static struct drm_driver hibmc_driver = { .irq_handler = hibmc_drm_interrupt, }; -static int hibmc_pm_suspend(struct device *dev) +static int __maybe_unused hibmc_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); @@ -112,7 +112,7 @@ static int hibmc_pm_suspend(struct device *dev) return 0; } -static int hibmc_pm_resume(struct device *dev) +static int __maybe_unused hibmc_pm_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); @@ -377,9 +377,9 @@ static int hibmc_pci_probe(struct pci_dev *pdev, int ret; dev = drm_dev_alloc(&hibmc_driver, &pdev->dev); - if (!dev) { + if (IS_ERR(dev)) { DRM_ERROR("failed to allocate drm_device\n"); - return -ENOMEM; + return PTR_ERR(dev); } dev->pdev = pdev; diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index df96aed6975a..5ddde7349fbd 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -36,15 +36,20 @@ config DRM_I915 If "M" is selected, the module will be called i915. -config DRM_I915_PRELIMINARY_HW_SUPPORT - bool "Enable preliminary support for prerelease Intel hardware by default" +config DRM_I915_ALPHA_SUPPORT + bool "Enable alpha quality support for new Intel hardware by default" depends on DRM_I915 default n help - Choose this option if you have prerelease Intel hardware and want the - i915 driver to support it by default. You can enable such support at - runtime with the module option i915.preliminary_hw_support=1; this - option changes the default for that module option. + Choose this option if you have new Intel hardware and want to enable + the alpha quality i915 driver support for the hardware in this kernel + version. You can also enable the support at runtime using the module + parameter i915.alpha_support=1; this option changes the default for + that module parameter. + + It is recommended to upgrade to a kernel version with proper support + as soon as it is available. 
Generally fixes for platforms with alpha + support are not backported to older kernels. If in doubt, say "N". @@ -107,6 +112,15 @@ config DRM_I915_GVT If in doubt, say "N". +config DRM_I915_GVT_KVMGT + tristate "Enable KVM/VFIO support for Intel GVT-g" + depends on DRM_I915_GVT + depends on KVM + default n + help + Choose this option if you want to enable KVMGT support for + Intel GVT-g. + menu "drm/i915 Debugging" depends on DRM_I915 depends on EXPERT diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 0857e5035f4d..3dea46af9fe6 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -33,7 +33,7 @@ i915-y += i915_cmd_parser.o \ i915_gem_dmabuf.o \ i915_gem_evict.o \ i915_gem_execbuffer.o \ - i915_gem_fence.o \ + i915_gem_fence_reg.o \ i915_gem_gtt.o \ i915_gem_internal.o \ i915_gem.o \ @@ -45,6 +45,7 @@ i915-y += i915_cmd_parser.o \ i915_gem_timeline.o \ i915_gem_userptr.o \ i915_trace_points.o \ + i915_vma.o \ intel_breadcrumbs.o \ intel_engine_cs.o \ intel_hangcheck.o \ diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile index 34ea4776af70..8a46a7f31d53 100644 --- a/drivers/gpu/drm/i915/gvt/Makefile +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -3,5 +3,8 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \ interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \ execlist.o scheduler.o sched_policy.o render.o cmd_parser.o -ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall -i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE)) +ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall +i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE)) + +CFLAGS_kvmgt.o := -Wno-unused-function +obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index 4c687740f5f1..db516382a4d4 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -47,11 +47,9 @@ enum { * Returns: * Zero on success, negative error code if failed. */ -int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset, +int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - struct intel_vgpu *vgpu = __vgpu; - if (WARN_ON(bytes > 4)) return -EINVAL; @@ -82,9 +80,8 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map) ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn, first_mfn, - vgpu_aperture_sz(vgpu) - >> PAGE_SHIFT, map, - GVT_MAP_APERTURE); + vgpu_aperture_sz(vgpu) >> + PAGE_SHIFT, map); if (ret) return ret; @@ -235,10 +232,9 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, * Returns: * Zero on success, negative error code if failed. 
*/ -int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset, +int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - struct intel_vgpu *vgpu = __vgpu; int ret; if (WARN_ON(bytes > 4)) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 0084ece8d8ff..d26a092c70e8 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1418,8 +1418,8 @@ static int cmd_handler_mi_op_2e(struct parser_exec_state *s) static int cmd_handler_mi_op_2f(struct parser_exec_state *s) { int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; - int op_size = ((1 << (cmd_val(s, 0) & GENMASK(20, 19) >> 19)) * - sizeof(u32)); + int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) * + sizeof(u32); unsigned long gma, gma_high; int ret = 0; @@ -2537,7 +2537,8 @@ static int scan_workload(struct intel_vgpu_workload *workload) s.rb_va = workload->shadow_ring_buffer_va; s.workload = workload; - if (bypass_scan_mask & (1 << workload->ring_id)) + if ((bypass_scan_mask & (1 << workload->ring_id)) || + gma_head == gma_tail) return 0; ret = ip_gma_set(&s, gma_head); diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index 7e1da1c563ca..bda85dff7b2a 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -502,8 +502,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, * ACK of I2C_WRITE * returned byte if it is READ */ - - aux_data_for_write |= (GVT_AUX_I2C_REPLY_ACK & 0xff) << 24; + aux_data_for_write |= GVT_AUX_I2C_REPLY_ACK << 24; vgpu_vreg(vgpu, offset + 4) = aux_data_for_write; } diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h index de366b1d5196..f6dfc8b795ec 100644 --- a/drivers/gpu/drm/i915/gvt/edid.h +++ b/drivers/gpu/drm/i915/gvt/edid.h @@ -44,7 +44,7 @@ #define GVT_AUX_I2C_READ 0x1 #define GVT_AUX_I2C_STATUS 0x2 #define GVT_AUX_I2C_MOT 0x4 -#define GVT_AUX_I2C_REPLY_ACK (0x0 << 6) +#define GVT_AUX_I2C_REPLY_ACK 0x0 struct intel_vgpu_edid_data { bool data_valid; diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index c1f6019d8895..f32bb6f6495c 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -838,23 +838,21 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) } void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu, - unsigned long ring_bitmap) + unsigned long engine_mask) { - int bit; - struct list_head *pos, *n; - struct intel_vgpu_workload *workload = NULL; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct intel_engine_cs *engine; + struct intel_vgpu_workload *pos, *n; + unsigned int tmp; - for_each_set_bit(bit, &ring_bitmap, sizeof(ring_bitmap) * 8) { - if (bit >= I915_NUM_ENGINES) - break; + for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { /* free the unsubmitted workload in the queue */ - list_for_each_safe(pos, n, &vgpu->workload_q_head[bit]) { - workload = container_of(pos, - struct intel_vgpu_workload, list); - list_del_init(&workload->list); - free_workload(workload); + list_for_each_entry_safe(pos, n, + &vgpu->workload_q_head[engine->id], list) { + list_del_init(&pos->list); + free_workload(pos); } - init_vgpu_execlist(vgpu, bit); + init_vgpu_execlist(vgpu, engine->id); } } diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h index 635f31c6dcc1..7eced40a1e30 100644 ---
a/drivers/gpu/drm/i915/gvt/execlist.h +++ b/drivers/gpu/drm/i915/gvt/execlist.h @@ -183,6 +183,6 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu); int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id); void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu, - unsigned long ring_bitmap); + unsigned long engine_mask); #endif /*_GVT_EXECLIST_H_*/ diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 6554da9f9f5b..7eaaf1c9ed2b 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -138,36 +138,6 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, memcpy(&(e)->val64, &v, sizeof(v)); \ } while (0) -enum { - GTT_TYPE_INVALID = -1, - - GTT_TYPE_GGTT_PTE, - - GTT_TYPE_PPGTT_PTE_4K_ENTRY, - GTT_TYPE_PPGTT_PTE_2M_ENTRY, - GTT_TYPE_PPGTT_PTE_1G_ENTRY, - - GTT_TYPE_PPGTT_PTE_ENTRY, - - GTT_TYPE_PPGTT_PDE_ENTRY, - GTT_TYPE_PPGTT_PDP_ENTRY, - GTT_TYPE_PPGTT_PML4_ENTRY, - - GTT_TYPE_PPGTT_ROOT_ENTRY, - - GTT_TYPE_PPGTT_ROOT_L3_ENTRY, - GTT_TYPE_PPGTT_ROOT_L4_ENTRY, - - GTT_TYPE_PPGTT_ENTRY, - - GTT_TYPE_PPGTT_PTE_PT, - GTT_TYPE_PPGTT_PDE_PT, - GTT_TYPE_PPGTT_PDP_PT, - GTT_TYPE_PPGTT_PML4_PT, - - GTT_TYPE_MAX, -}; - /* * Mappings between GTT_TYPE* enumerations. * Following information can be found according to the given type: @@ -842,13 +812,18 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu, { struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s; + intel_gvt_gtt_type_t cur_pt_type; if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type)))) return -EINVAL; - if (ops->get_pfn(e) == vgpu->gtt.scratch_page_mfn) - return 0; - + if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY + && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { + cur_pt_type = get_next_pt_type(e->type) + 1; + if (ops->get_pfn(e) == + vgpu->gtt.scratch_pt[cur_pt_type].page_mfn) + return 0; + } s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); if (!s) { gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n", @@ -999,7 +974,7 @@ fail: } static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt, - struct intel_gvt_gtt_entry *we, unsigned long index) + unsigned long index) { struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt); struct intel_vgpu_shadow_page *sp = &spt->shadow_page; @@ -1008,34 +983,35 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt, struct intel_gvt_gtt_entry e; int ret; - trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, - we->val64, index); - ppgtt_get_shadow_entry(spt, &e, index); + + trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64, + index); + if (!ops->test_present(&e)) return 0; - if (ops->get_pfn(&e) == vgpu->gtt.scratch_page_mfn) + if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn) return 0; - if (gtt_type_is_pt(get_next_pt_type(we->type))) { - struct intel_vgpu_guest_page *g = - intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we)); - if (!g) { + if (gtt_type_is_pt(get_next_pt_type(e.type))) { + struct intel_vgpu_ppgtt_spt *s = + ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e)); + if (!s) { gvt_err("fail to find guest page\n"); ret = -ENXIO; goto fail; } - ret = ppgtt_invalidate_shadow_page(guest_page_to_ppgtt_spt(g)); + ret = ppgtt_invalidate_shadow_page(s); if (ret) goto fail; } - ops->set_pfn(&e, vgpu->gtt.scratch_page_mfn); + ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn); ppgtt_set_shadow_entry(spt, &e, index); return 0; fail: 
gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", - vgpu->id, spt, we->val64, we->type); + vgpu->id, spt, e.val64, e.type); return ret; } @@ -1256,23 +1232,16 @@ static int ppgtt_handle_guest_write_page_table( struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt); struct intel_vgpu *vgpu = spt->vgpu; struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; - struct intel_gvt_gtt_entry ge; - int old_present, new_present; int ret; + int new_present; - ppgtt_get_guest_entry(spt, &ge, index); - - old_present = ops->test_present(&ge); new_present = ops->test_present(we); - ppgtt_set_guest_entry(spt, we, index); + ret = ppgtt_handle_guest_entry_removal(gpt, index); + if (ret) + goto fail; - if (old_present) { - ret = ppgtt_handle_guest_entry_removal(gpt, &ge, index); - if (ret) - goto fail; - } if (new_present) { ret = ppgtt_handle_guest_entry_add(gpt, we, index); if (ret) @@ -1318,7 +1287,7 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu) { struct list_head *pos, *n; struct intel_vgpu_ppgtt_spt *spt; - struct intel_gvt_gtt_entry ge, e; + struct intel_gvt_gtt_entry ge; unsigned long index; int ret; @@ -1329,9 +1298,6 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu) for_each_set_bit(index, spt->post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE) { ppgtt_get_guest_entry(spt, &ge, index); - e = ge; - e.val64 = 0; - ppgtt_set_guest_entry(spt, &e, index); ret = ppgtt_handle_guest_write_page_table( &spt->guest_page, &ge, index); @@ -1359,8 +1325,6 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp, index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift; ppgtt_get_guest_entry(spt, &we, index); - memcpy((void *)&we.val64 + (pa & (info->gtt_entry_size - 1)), - p_data, bytes); ops->test_pse(&we); @@ -1369,19 +1333,13 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp, if (ret) return ret; } else { - struct intel_gvt_gtt_entry ge; - - ppgtt_get_guest_entry(spt, &ge, index); - if (!test_bit(index, spt->post_shadow_bitmap)) { - ret = ppgtt_handle_guest_entry_removal(gpt, - &ge, index); + ret = ppgtt_handle_guest_entry_removal(gpt, index); if (ret) return ret; } ppgtt_set_post_shadow(spt, index); - ppgtt_set_guest_entry(spt, &we, index); } if (!enable_out_of_sync) @@ -1921,47 +1879,101 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, return ret; } -static int create_scratch_page(struct intel_vgpu *vgpu) +static int alloc_scratch_pages(struct intel_vgpu *vgpu, + intel_gvt_gtt_type_t type) { struct intel_vgpu_gtt *gtt = &vgpu->gtt; - void *p; - void *vaddr; + struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + int page_entry_num = GTT_PAGE_SIZE >> + vgpu->gvt->device_info.gtt_entry_size_shift; + struct page *scratch_pt; unsigned long mfn; + int i; + void *p; + + if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) + return -EINVAL; - gtt->scratch_page = alloc_page(GFP_KERNEL); - if (!gtt->scratch_page) { - gvt_err("Failed to allocate scratch page.\n"); + scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); + if (!scratch_pt) { + gvt_err("fail to allocate scratch page\n"); return -ENOMEM; } - /* set to zero */ - p = kmap_atomic(gtt->scratch_page); - memset(p, 0, PAGE_SIZE); + p = kmap_atomic(scratch_pt); + mfn = intel_gvt_hypervisor_virt_to_mfn(p); + if (mfn == INTEL_GVT_INVALID_ADDR) { + gvt_err("fail to translate vaddr:0x%llx\n", (u64)p); + kunmap_atomic(p); + __free_page(scratch_pt); + return -EFAULT; + } + gtt->scratch_pt[type].page_mfn = mfn; + 
gtt->scratch_pt[type].page = scratch_pt; + gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", + vgpu->id, type, mfn); + + /* Build the tree by fully filling the scratch pt with entries which + * point to the next level scratch pt or scratch page. The + * scratch_pt[type] indicates the scratch pt/scratch page used by the + * 'type' pt. + * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a + * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself + * is GTT_TYPE_PPGTT_PTE_PT, and is fully filled with the scratch page mfn. + */ + if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { + struct intel_gvt_gtt_entry se; + + memset(&se, 0, sizeof(struct intel_gvt_gtt_entry)); + se.type = get_entry_type(type - 1); + ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn); + + /* The entry parameters like present/writeable/cache type + * are set the same as i915's scratch page tree. + */ + se.val64 |= _PAGE_PRESENT | _PAGE_RW; + if (type == GTT_TYPE_PPGTT_PDE_PT) + se.val64 |= PPAT_CACHED_INDEX; + + for (i = 0; i < page_entry_num; i++) + ops->set_entry(p, &se, i, false, 0, vgpu); + } + kunmap_atomic(p); - /* translate page to mfn */ - vaddr = page_address(gtt->scratch_page); - mfn = intel_gvt_hypervisor_virt_to_mfn(vaddr); + return 0; +} + +static int release_scratch_page_tree(struct intel_vgpu *vgpu) +{ + int i; - if (mfn == INTEL_GVT_INVALID_ADDR) { - gvt_err("fail to translate vaddr: 0x%p\n", vaddr); - __free_page(gtt->scratch_page); - gtt->scratch_page = NULL; - return -ENXIO; + for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { + if (vgpu->gtt.scratch_pt[i].page != NULL) { + __free_page(vgpu->gtt.scratch_pt[i].page); + vgpu->gtt.scratch_pt[i].page = NULL; + vgpu->gtt.scratch_pt[i].page_mfn = 0; + } } - gtt->scratch_page_mfn = mfn; - gvt_dbg_core("vgpu%d create scratch page: mfn=0x%lx\n", vgpu->id, mfn); return 0; } -static void release_scratch_page(struct intel_vgpu *vgpu) +static int create_scratch_page_tree(struct intel_vgpu *vgpu) { - if (vgpu->gtt.scratch_page != NULL) { - __free_page(vgpu->gtt.scratch_page); - vgpu->gtt.scratch_page = NULL; - vgpu->gtt.scratch_page_mfn = 0; + int i, ret; + + for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { + ret = alloc_scratch_pages(vgpu, i); + if (ret) + goto err; } + + return 0; + +err: + release_scratch_page_tree(vgpu); + return ret; } /** @@ -1995,7 +2007,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) gtt->ggtt_mm = ggtt_mm; - return create_scratch_page(vgpu); + return create_scratch_page_tree(vgpu); } /** @@ -2014,7 +2026,7 @@ void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu) struct intel_vgpu_mm *mm; ppgtt_free_all_shadow_page(vgpu); - release_scratch_page(vgpu); + release_scratch_page_tree(vgpu); list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) { mm = container_of(pos, struct intel_vgpu_mm, list); diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index e4dcde78f3f9..d250013bc37b 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -88,6 +88,36 @@ enum { INTEL_GVT_MM_PPGTT, }; +typedef enum { + GTT_TYPE_INVALID = -1, + + GTT_TYPE_GGTT_PTE, + + GTT_TYPE_PPGTT_PTE_4K_ENTRY, + GTT_TYPE_PPGTT_PTE_2M_ENTRY, + GTT_TYPE_PPGTT_PTE_1G_ENTRY, + + GTT_TYPE_PPGTT_PTE_ENTRY, + + GTT_TYPE_PPGTT_PDE_ENTRY, + GTT_TYPE_PPGTT_PDP_ENTRY, + GTT_TYPE_PPGTT_PML4_ENTRY, + + GTT_TYPE_PPGTT_ROOT_ENTRY, + + GTT_TYPE_PPGTT_ROOT_L3_ENTRY, + GTT_TYPE_PPGTT_ROOT_L4_ENTRY, + + GTT_TYPE_PPGTT_ENTRY, + + GTT_TYPE_PPGTT_PTE_PT, + GTT_TYPE_PPGTT_PDE_PT, + GTT_TYPE_PPGTT_PDP_PT, +
GTT_TYPE_PPGTT_PML4_PT, + + GTT_TYPE_MAX, +} intel_gvt_gtt_type_t; + struct intel_vgpu_mm { int type; bool initialized; @@ -151,6 +181,12 @@ extern void intel_vgpu_destroy_mm(struct kref *mm_ref); struct intel_vgpu_guest_page; +struct intel_vgpu_scratch_pt { + struct page *page; + unsigned long page_mfn; +}; + + struct intel_vgpu_gtt { struct intel_vgpu_mm *ggtt_mm; unsigned long active_ppgtt_mm_bitmap; @@ -160,8 +196,8 @@ struct intel_vgpu_gtt { atomic_t n_write_protected_guest_page; struct list_head oos_page_list_head; struct list_head post_shadow_list_head; - struct page *scratch_page; - unsigned long scratch_page_mfn; + struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX]; + }; extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 385969a89216..398877c3d2fd 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -44,11 +44,14 @@ static const char * const supported_hypervisors[] = { [INTEL_GVT_HYPERVISOR_KVM] = "KVM", }; -struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops = { +static const struct intel_gvt_ops intel_gvt_ops = { .emulate_cfg_read = intel_vgpu_emulate_cfg_read, .emulate_cfg_write = intel_vgpu_emulate_cfg_write, .emulate_mmio_read = intel_vgpu_emulate_mmio_read, .emulate_mmio_write = intel_vgpu_emulate_mmio_write, + .vgpu_create = intel_gvt_create_vgpu, + .vgpu_destroy = intel_gvt_destroy_vgpu, + .vgpu_reset = intel_gvt_reset_vgpu, }; /** @@ -81,10 +84,12 @@ int intel_gvt_init_host(void) symbol_get(xengt_mpt), "xengt"); intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN; } else { +#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) /* not in Xen. Try KVMGT */ intel_gvt_host.mpt = try_then_request_module( - symbol_get(kvmgt_mpt), "kvm"); + symbol_get(kvmgt_mpt), "kvmgt"); intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM; +#endif } /* Fail to load MPT modules - bail out */ @@ -193,6 +198,9 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) intel_gvt_clean_mmio_info(gvt); intel_gvt_free_firmware(gvt); + intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); + intel_gvt_clean_vgpu_types(gvt); + kfree(dev_priv->gvt); dev_priv->gvt = NULL; } @@ -270,10 +278,25 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) if (ret) goto out_clean_cmd_parser; - gvt_dbg_core("gvt device creation is done\n"); + ret = intel_gvt_init_vgpu_types(gvt); + if (ret) + goto out_clean_thread; + + ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt, + &intel_gvt_ops); + if (ret) { + gvt_err("failed to register gvt-g host device: %d\n", ret); + goto out_clean_types; + } + + gvt_dbg_core("gvt device initialization is done\n"); dev_priv->gvt = gvt; return 0; +out_clean_types: + intel_gvt_clean_vgpu_types(gvt); +out_clean_thread: + clean_service_thread(gvt); out_clean_cmd_parser: intel_gvt_clean_cmd_parser(gvt); out_clean_sched_policy: diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 62fc9e3ac5c6..3d4223e8ebe3 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -161,6 +161,20 @@ struct intel_vgpu { DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); struct i915_gem_context *shadow_ctx; struct notifier_block shadow_ctx_notifier_block; + +#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) + struct { + struct device *mdev; + struct vfio_region *region; + int num_regions; + struct eventfd_ctx *intx_trigger; + struct eventfd_ctx *msi_trigger; + struct rb_root cache; + struct 
mutex cache_lock; + void *vfio_group; + struct notifier_block iommu_notifier; + } vdev; +#endif }; struct intel_gvt_gm { @@ -190,6 +204,16 @@ struct intel_gvt_opregion { u32 opregion_pa; }; +#define NR_MAX_INTEL_VGPU_TYPES 20 +struct intel_vgpu_type { + char name[16]; + unsigned int max_instance; + unsigned int avail_instance; + unsigned int low_gm_size; + unsigned int high_gm_size; + unsigned int fence; +}; + struct intel_gvt { struct mutex lock; struct drm_i915_private *dev_priv; @@ -205,6 +229,8 @@ struct intel_gvt { struct intel_gvt_opregion opregion; struct intel_gvt_workload_scheduler scheduler; DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS); + struct intel_vgpu_type *types; + unsigned int num_types; struct task_struct *service_thread; wait_queue_head_t service_thread_wq; @@ -231,6 +257,14 @@ void intel_gvt_free_firmware(struct intel_gvt *gvt); int intel_gvt_load_firmware(struct intel_gvt *gvt); /* Aperture/GM space definitions for GVT device */ +#define MB_TO_BYTES(mb) ((mb) << 20ULL) +#define BYTES_TO_MB(b) ((b) >> 20ULL) + +#define HOST_LOW_GM_SIZE MB_TO_BYTES(128) +#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384) +#define HOST_FENCE 4 + +/* Aperture/GM space definitions for GVT device */ #define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end) #define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base) @@ -330,11 +364,14 @@ static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu, } } -struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, - struct intel_vgpu_creation_params * - param); +int intel_gvt_init_vgpu_types(struct intel_gvt *gvt); +void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt); +struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, + struct intel_vgpu_type *type); void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); +void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); + /* validating GM functions */ #define vgpu_gmadr_is_aperture(vgpu, gmadr) \ @@ -369,10 +406,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, unsigned long *g_index); -int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset, +int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); -int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset, +int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); void intel_gvt_clean_opregion(struct intel_gvt *gvt); @@ -385,6 +422,22 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); int setup_vgpu_mmio(struct intel_vgpu *vgpu); void populate_pvinfo_page(struct intel_vgpu *vgpu); +struct intel_gvt_ops { + int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *, + unsigned int); + int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *, + unsigned int); + int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *, + unsigned int); + int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *, + unsigned int); + struct intel_vgpu *(*vgpu_create)(struct intel_gvt *, + struct intel_vgpu_type *); + void (*vgpu_destroy)(struct intel_vgpu *); + void (*vgpu_reset)(struct intel_vgpu *); +}; + + #include "mpt.h" #endif diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 9ab1f95dddc5..522809710312 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1158,7 
+1158,10 @@ static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu, static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - u32 mode = *(u32 *)p_data; + u32 mode; + + write_vreg(vgpu, offset, p_data, bytes); + mode = vgpu_vreg(vgpu, offset); if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) { WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n", @@ -1275,19 +1278,18 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset, switch (offset) { case 0x4ddc: vgpu_vreg(vgpu, offset) = 0x8000003c; + /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */ + I915_WRITE(reg, vgpu_vreg(vgpu, offset)); break; case 0x42080: vgpu_vreg(vgpu, offset) = 0x8000; + /* WaCompressedResourceDisplayNewHashMode:skl */ + I915_WRITE(reg, vgpu_vreg(vgpu, offset)); break; default: return -EINVAL; } - /** - * TODO: need detect stepping info after gvt contain such information - * 0x4ddc enabled after C0, 0x42080 enabled after E0. - */ - I915_WRITE(reg, vgpu_vreg(vgpu, offset)); return 0; } @@ -1367,6 +1369,9 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, int rc = 0; unsigned int id = 0; + write_vreg(vgpu, offset, p_data, bytes); + vgpu_vreg(vgpu, offset) = 0; + switch (offset) { case 0x4260: id = RCS; @@ -1392,6 +1397,23 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, return rc; } +static int ring_reset_ctl_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 data; + + write_vreg(vgpu, offset, p_data, bytes); + data = vgpu_vreg(vgpu, offset); + + if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)) + data |= RESET_CTL_READY_TO_RESET; + else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)) + data &= ~RESET_CTL_READY_TO_RESET; + + vgpu_vreg(vgpu, offset) = data; + return 0; +} + #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \ ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \ f, s, am, rm, d, r, w); \ @@ -1485,7 +1507,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL); MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL); - MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK, NULL, NULL); + MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL); MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL); @@ -1494,7 +1516,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL); MMIO_D(GAM_ECOCHK, D_ALL); MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL); - MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK, NULL, NULL); + MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_D(0x9030, D_ALL); MMIO_D(0x20a0, D_ALL); MMIO_D(0x2420, D_ALL); @@ -1503,7 +1525,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(0x2438, D_ALL); MMIO_D(0x243c, D_ALL); MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL); - MMIO_DFH(0xe184, D_ALL, F_MODE_MASK, NULL, NULL); + MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL); /* display */ @@ -2116,6 +2138,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(GEN6_MBCTL, D_ALL); MMIO_D(0x911c, D_ALL); MMIO_D(0x9120, D_ALL); + MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_D(GAB_CTL, D_ALL); MMIO_D(0x48800, D_ALL); @@ -2298,6 +2321,15 @@ static int init_broadwell_mmio_info(struct
intel_gvt *gvt) MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS); +#define RING_REG(base) (base + 0xd0) + MMIO_RING_F(RING_REG, 4, F_RO, 0, + ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL, + ring_reset_ctl_write); + MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, + ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL, + ring_reset_ctl_write); +#undef RING_REG + #define RING_REG(base) (base + 0x230) MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write); MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write); @@ -2345,7 +2377,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL); MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL); - MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); + MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW); MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW); @@ -2364,7 +2396,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS); MMIO_D(0xfdc, D_BDW); - MMIO_D(GEN8_ROW_CHICKEN, D_BDW_PLUS); + MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS); MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS); @@ -2375,10 +2407,10 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_D(0xb10c, D_BDW); MMIO_D(0xb110, D_BDW); - MMIO_DH(0x24d0, D_BDW_PLUS, NULL, NULL); - MMIO_DH(0x24d4, D_BDW_PLUS, NULL, NULL); - MMIO_DH(0x24d8, D_BDW_PLUS, NULL, NULL); - MMIO_DH(0x24dc, D_BDW_PLUS, NULL, NULL); + MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_D(0x83a4, D_BDW); MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS); @@ -2392,9 +2424,9 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_D(0x6e570, D_BDW_PLUS); MMIO_D(0x65f10, D_BDW_PLUS); - MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); - MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); - MMIO_DFH(0xe180, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); + MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); MMIO_D(0x2248, D_BDW); @@ -2425,6 +2457,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(0xa210, D_SKL_PLUS); MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); + MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write); MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write); MMIO_D(0x45504, D_SKL); @@ -2574,8 +2607,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(0x51000, D_SKL); MMIO_D(0x6c00c, D_SKL); - MMIO_F(0xc800, 0x7f8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(0xb020, 0x80, 0, 0, 0, D_SKL, NULL, NULL); + MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL); + MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL); MMIO_D(0xd08, D_SKL); MMIO_D(0x20e0, D_SKL); diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 027ef558d91c..30e543f5a703 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -33,21 +33,14 @@ #ifndef 
_GVT_HYPERCALL_H_ #define _GVT_HYPERCALL_H_ -struct intel_gvt_io_emulation_ops { - int (*emulate_cfg_read)(void *, unsigned int, void *, unsigned int); - int (*emulate_cfg_write)(void *, unsigned int, void *, unsigned int); - int (*emulate_mmio_read)(void *, u64, void *, unsigned int); - int (*emulate_mmio_write)(void *, u64, void *, unsigned int); -}; - -extern struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops; - /* * Specific GVT-g MPT modules function collections. Currently GVT-g supports * both Xen and KVM by providing dedicated hypervisor-related MPT modules. */ struct intel_gvt_mpt { int (*detect_host)(void); + int (*host_init)(struct device *dev, void *gvt, const void *ops); + void (*host_exit)(struct device *dev, void *gvt); int (*attach_vgpu)(void *vgpu, unsigned long *handle); void (*detach_vgpu)(unsigned long handle); int (*inject_msi)(unsigned long handle, u32 addr, u16 data); @@ -60,8 +53,7 @@ struct intel_gvt_mpt { unsigned long len); unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn); int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn, - unsigned long mfn, unsigned int nr, bool map, - int type); + unsigned long mfn, unsigned int nr, bool map); int (*set_trap_area)(unsigned long handle, u64 start, u64 end, bool map); }; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c new file mode 100644 index 000000000000..dc0365033157 --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -0,0 +1,597 @@ +/* + * KVMGT - the implementation of Intel mediated pass-through framework for KVM + * + * Copyright(c) 2014-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
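Stepping back briefly: the intel_gvt_mpt table in hypercall.h above is the whole hypervisor abstraction that this new kvmgt.c file fills in, mirroring the existing Xen backend. A minimal sketch of the pattern, using hypothetical demo_* names rather than the driver's real types:

    #include <linux/device.h>

    /* Illustrative only: each backend exports one ops table; the GVT-g
     * core dispatches through whichever table it was initialized with. */
    struct demo_mpt {
            int (*detect_host)(void);
            int (*host_init)(struct device *dev, void *gvt, const void *ops);
            void (*host_exit)(struct device *dev, void *gvt);
    };

    static int demo_detect_host(void)
    {
            return 0;       /* pretend the host environment checks out */
    }

    struct demo_mpt demo_backend_mpt = {
            .detect_host = demo_detect_host,
            /* hooks left NULL are optional; see the mpt.h wrappers below */
    };
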
+ * + * Authors: + * Kevin Tian <kevin.tian@intel.com> + * Jike Song <jike.song@intel.com> + * Xiaoguang Chen <xiaoguang.chen@intel.com> + */ + +#include <linux/init.h> +#include <linux/device.h> +#include <linux/mm.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/rbtree.h> +#include <linux/spinlock.h> +#include <linux/eventfd.h> +#include <linux/uuid.h> +#include <linux/kvm_host.h> +#include <linux/vfio.h> + +#include "i915_drv.h" +#include "gvt.h" + +static inline long kvmgt_pin_pages(struct device *dev, unsigned long *user_pfn, + long npage, int prot, unsigned long *phys_pfn) +{ + return 0; +} +static inline long kvmgt_unpin_pages(struct device *dev, unsigned long *pfn, + long npage) +{ + return 0; +} + +static const struct intel_gvt_ops *intel_gvt_ops; + + +/* helper macros copied from vfio-pci */ +#define VFIO_PCI_OFFSET_SHIFT 40 +#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT) +#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT) +#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1) + +struct vfio_region { + u32 type; + u32 subtype; + size_t size; + u32 flags; +}; + +struct kvmgt_pgfn { + gfn_t gfn; + struct hlist_node hnode; +}; + +struct kvmgt_guest_info { + struct kvm *kvm; + struct intel_vgpu *vgpu; + struct kvm_page_track_notifier_node track_node; +#define NR_BKT (1 << 18) + struct hlist_head ptable[NR_BKT]; +#undef NR_BKT +}; + +struct gvt_dma { + struct rb_node node; + gfn_t gfn; + kvm_pfn_t pfn; +}; + +static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) +{ + struct rb_node *node = vgpu->vdev.cache.rb_node; + struct gvt_dma *ret = NULL; + + while (node) { + struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node); + + if (gfn < itr->gfn) + node = node->rb_left; + else if (gfn > itr->gfn) + node = node->rb_right; + else { + ret = itr; + goto out; + } + } + +out: + return ret; +} + +static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) +{ + struct gvt_dma *entry; + + mutex_lock(&vgpu->vdev.cache_lock); + entry = __gvt_cache_find(vgpu, gfn); + mutex_unlock(&vgpu->vdev.cache_lock); + + return entry == NULL ? 
0 : entry->pfn; +} + +static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn) +{ + struct gvt_dma *new, *itr; + struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL; + + new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL); + if (!new) + return; + + new->gfn = gfn; + new->pfn = pfn; + + mutex_lock(&vgpu->vdev.cache_lock); + while (*link) { + parent = *link; + itr = rb_entry(parent, struct gvt_dma, node); + + if (gfn == itr->gfn) + goto out; + else if (gfn < itr->gfn) + link = &parent->rb_left; + else + link = &parent->rb_right; + } + + rb_link_node(&new->node, parent, link); + rb_insert_color(&new->node, &vgpu->vdev.cache); + mutex_unlock(&vgpu->vdev.cache_lock); + return; + +out: + mutex_unlock(&vgpu->vdev.cache_lock); + kfree(new); +} + +static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu, + struct gvt_dma *entry) +{ + rb_erase(&entry->node, &vgpu->vdev.cache); + kfree(entry); +} + +static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn) +{ + struct device *dev = vgpu->vdev.mdev; + struct gvt_dma *this; + unsigned long pfn; + + mutex_lock(&vgpu->vdev.cache_lock); + this = __gvt_cache_find(vgpu, gfn); + if (!this) { + mutex_unlock(&vgpu->vdev.cache_lock); + return; + } + + pfn = this->pfn; + WARN_ON((kvmgt_unpin_pages(dev, &pfn, 1) != 1)); + __gvt_cache_remove_entry(vgpu, this); + mutex_unlock(&vgpu->vdev.cache_lock); +} + +static void gvt_cache_init(struct intel_vgpu *vgpu) +{ + vgpu->vdev.cache = RB_ROOT; + mutex_init(&vgpu->vdev.cache_lock); +} + +static void gvt_cache_destroy(struct intel_vgpu *vgpu) +{ + struct gvt_dma *dma; + struct rb_node *node = NULL; + struct device *dev = vgpu->vdev.mdev; + unsigned long pfn; + + mutex_lock(&vgpu->vdev.cache_lock); + while ((node = rb_first(&vgpu->vdev.cache))) { + dma = rb_entry(node, struct gvt_dma, node); + pfn = dma->pfn; + + kvmgt_unpin_pages(dev, &pfn, 1); + __gvt_cache_remove_entry(vgpu, dma); + } + mutex_unlock(&vgpu->vdev.cache_lock); +} + +static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, + const char *name) +{ + int i; + struct intel_vgpu_type *t; + const char *driver_name = dev_driver_string( + &gvt->dev_priv->drm.pdev->dev); + + for (i = 0; i < gvt->num_types; i++) { + t = &gvt->types[i]; + if (!strncmp(t->name, name + strlen(driver_name) + 1, + sizeof(t->name))) + return t; + } + + return NULL; +} + +static struct attribute *type_attrs[] = { + NULL, +}; + +static struct attribute_group *intel_vgpu_type_groups[] = { + [0 ... 
NR_MAX_INTEL_VGPU_TYPES - 1] = NULL, +}; + +static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) +{ + int i, j; + struct intel_vgpu_type *type; + struct attribute_group *group; + + for (i = 0; i < gvt->num_types; i++) { + type = &gvt->types[i]; + + group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL); + if (WARN_ON(!group)) + goto unwind; + + group->name = type->name; + group->attrs = type_attrs; + intel_vgpu_type_groups[i] = group; + } + + return true; + +unwind: + for (j = 0; j < i; j++) { + group = intel_vgpu_type_groups[j]; + kfree(group); + } + + return false; +} + +static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) +{ + int i; + struct attribute_group *group; + + for (i = 0; i < gvt->num_types; i++) { + group = intel_vgpu_type_groups[i]; + kfree(group); + } +} + +static void kvmgt_protect_table_init(struct kvmgt_guest_info *info) +{ + hash_init(info->ptable); +} + +static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info) +{ + struct kvmgt_pgfn *p; + struct hlist_node *tmp; + int i; + + hash_for_each_safe(info->ptable, i, tmp, p, hnode) { + hash_del(&p->hnode); + kfree(p); + } +} + +static struct kvmgt_pgfn * +__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn) +{ + struct kvmgt_pgfn *p, *res = NULL; + + hash_for_each_possible(info->ptable, p, hnode, gfn) { + if (gfn == p->gfn) { + res = p; + break; + } + } + + return res; +} + +static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info, + gfn_t gfn) +{ + struct kvmgt_pgfn *p; + + p = __kvmgt_protect_table_find(info, gfn); + return !!p; +} + +static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn) +{ + struct kvmgt_pgfn *p; + + if (kvmgt_gfn_is_write_protected(info, gfn)) + return; + + p = kmalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC); + if (WARN(!p, "gfn: 0x%llx\n", gfn)) + return; + + p->gfn = gfn; + hash_add(info->ptable, &p->hnode, gfn); +} + +static void kvmgt_protect_table_del(struct kvmgt_guest_info *info, + gfn_t gfn) +{ + struct kvmgt_pgfn *p; + + p = __kvmgt_protect_table_find(info, gfn); + if (p) { + hash_del(&p->hnode); + kfree(p); + } +} + +static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) +{ + if (!intel_gvt_init_vgpu_type_groups(gvt)) + return -EFAULT; + + intel_gvt_ops = ops; + + /* MDEV is not yet available */ + return -ENODEV; +} + +static void kvmgt_host_exit(struct device *dev, void *gvt) +{ + intel_gvt_cleanup_vgpu_type_groups(gvt); +} + +static int kvmgt_write_protect_add(unsigned long handle, u64 gfn) +{ + struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle; + struct kvm *kvm = info->kvm; + struct kvm_memory_slot *slot; + int idx; + + idx = srcu_read_lock(&kvm->srcu); + slot = gfn_to_memslot(kvm, gfn); + + spin_lock(&kvm->mmu_lock); + + if (kvmgt_gfn_is_write_protected(info, gfn)) + goto out; + + kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE); + kvmgt_protect_table_add(info, gfn); + +out: + spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, idx); + return 0; +} + +static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn) +{ + struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle; + struct kvm *kvm = info->kvm; + struct kvm_memory_slot *slot; + int idx; + + idx = srcu_read_lock(&kvm->srcu); + slot = gfn_to_memslot(kvm, gfn); + + spin_lock(&kvm->mmu_lock); + + if (!kvmgt_gfn_is_write_protected(info, gfn)) + goto out; + + kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE); + 
kvmgt_protect_table_del(info, gfn); + +out: + spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, idx); + return 0; +} + +static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, + const u8 *val, int len, + struct kvm_page_track_notifier_node *node) +{ + struct kvmgt_guest_info *info = container_of(node, + struct kvmgt_guest_info, track_node); + + if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa))) + intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa, + (void *)val, len); +} + +static void kvmgt_page_track_flush_slot(struct kvm *kvm, + struct kvm_memory_slot *slot, + struct kvm_page_track_notifier_node *node) +{ + int i; + gfn_t gfn; + struct kvmgt_guest_info *info = container_of(node, + struct kvmgt_guest_info, track_node); + + spin_lock(&kvm->mmu_lock); + for (i = 0; i < slot->npages; i++) { + gfn = slot->base_gfn + i; + if (kvmgt_gfn_is_write_protected(info, gfn)) { + kvm_slot_page_track_remove_page(kvm, slot, gfn, + KVM_PAGE_TRACK_WRITE); + kvmgt_protect_table_del(info, gfn); + } + } + spin_unlock(&kvm->mmu_lock); +} + +static bool kvmgt_check_guest(void) +{ + unsigned int eax, ebx, ecx, edx; + char s[12]; + unsigned int *i; + + eax = KVM_CPUID_SIGNATURE; + ebx = ecx = edx = 0; + + asm volatile ("cpuid" + : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) + : + : "cc", "memory"); + i = (unsigned int *)s; + i[0] = ebx; + i[1] = ecx; + i[2] = edx; + + return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM")); +} + +/** + * NOTE: + * It's actually impossible to check if we are running in a KVM host, + * since the "KVM host" is simply native. So we only detect the guest here. + */ +static int kvmgt_detect_host(void) +{ +#ifdef CONFIG_INTEL_IOMMU + if (intel_iommu_gfx_mapped) { + gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n"); + return -ENODEV; + } +#endif + return kvmgt_check_guest() ?
-ENODEV : 0; +} + +static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle) +{ + /* nothing to do here */ + return 0; +} + +static void kvmgt_detach_vgpu(unsigned long handle) +{ + /* nothing to do here */ +} + +static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) +{ + struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle; + struct intel_vgpu *vgpu = info->vgpu; + + if (vgpu->vdev.msi_trigger) + return eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1; + + return false; +} + +static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) +{ + unsigned long pfn; + struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle; + int rc; + + pfn = gvt_cache_find(info->vgpu, gfn); + if (pfn != 0) + return pfn; + + rc = kvmgt_pin_pages(info->vgpu->vdev.mdev, &gfn, 1, + IOMMU_READ | IOMMU_WRITE, &pfn); + if (rc != 1) { + gvt_err("vfio_pin_pages failed for gfn: 0x%lx\n", gfn); + return 0; + } + + gvt_cache_add(info->vgpu, gfn, pfn); + return pfn; +} + +static void *kvmgt_gpa_to_hva(unsigned long handle, unsigned long gpa) +{ + unsigned long pfn; + gfn_t gfn = gpa_to_gfn(gpa); + + pfn = kvmgt_gfn_to_pfn(handle, gfn); + if (!pfn) + return NULL; + + return (char *)pfn_to_kaddr(pfn) + offset_in_page(gpa); +} + +static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, + void *buf, unsigned long len, bool write) +{ + void *hva = NULL; + + hva = kvmgt_gpa_to_hva(handle, gpa); + if (!hva) + return -EFAULT; + + if (write) + memcpy(hva, buf, len); + else + memcpy(buf, hva, len); + + return 0; +} + +static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa, + void *buf, unsigned long len) +{ + return kvmgt_rw_gpa(handle, gpa, buf, len, false); +} + +static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa, + void *buf, unsigned long len) +{ + return kvmgt_rw_gpa(handle, gpa, buf, len, true); +} + +static unsigned long kvmgt_virt_to_pfn(void *addr) +{ + return PFN_DOWN(__pa(addr)); +} + +struct intel_gvt_mpt kvmgt_mpt = { + .detect_host = kvmgt_detect_host, + .host_init = kvmgt_host_init, + .host_exit = kvmgt_host_exit, + .attach_vgpu = kvmgt_attach_vgpu, + .detach_vgpu = kvmgt_detach_vgpu, + .inject_msi = kvmgt_inject_msi, + .from_virt_to_mfn = kvmgt_virt_to_pfn, + .set_wp_page = kvmgt_write_protect_add, + .unset_wp_page = kvmgt_write_protect_remove, + .read_gpa = kvmgt_read_gpa, + .write_gpa = kvmgt_write_gpa, + .gfn_to_mfn = kvmgt_gfn_to_pfn, +}; +EXPORT_SYMBOL_GPL(kvmgt_mpt); + +static int __init kvmgt_init(void) +{ + return 0; +} + +static void __exit kvmgt_exit(void) +{ +} + +module_init(kvmgt_init); +module_exit(kvmgt_exit); + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Intel Corporation"); diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 585b01f63254..09c9450a1946 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -67,10 +67,9 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa) * Returns: * Zero on success, negative error code if failed */ -int intel_vgpu_emulate_mmio_read(void *__vgpu, uint64_t pa, +int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, void *p_data, unsigned int bytes) { - struct intel_vgpu *vgpu = __vgpu; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_mmio_info *mmio; unsigned int offset = 0; @@ -179,10 +178,9 @@ err: * Returns: * Zero on success, negative error code if failed */ -int intel_vgpu_emulate_mmio_write(void *__vgpu, uint64_t pa, +int 
intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, void *p_data, unsigned int bytes) { - struct intel_vgpu *vgpu = __vgpu; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_mmio_info *mmio; unsigned int offset = 0; diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 9dc739a01892..87d5b5e366a3 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -87,10 +87,11 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, }) int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); -int intel_vgpu_emulate_mmio_read(void *__vgpu, u64 pa, void *p_data, - unsigned int bytes); -int intel_vgpu_emulate_mmio_write(void *__vgpu, u64 pa, void *p_data, - unsigned int bytes); + +int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, + void *p_data, unsigned int bytes); +int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa, + void *p_data, unsigned int bytes); bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt, unsigned int offset); bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset); diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 67858782d327..1af5830c0a56 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -56,6 +56,35 @@ static inline int intel_gvt_hypervisor_detect_host(void) } /** + * intel_gvt_hypervisor_host_init - init GVT-g host side + * + * Returns: + * Zero on success, negative error code if failed + */ +static inline int intel_gvt_hypervisor_host_init(struct device *dev, + void *gvt, const void *ops) +{ + /* optional to provide */ + if (!intel_gvt_host.mpt->host_init) + return 0; + + return intel_gvt_host.mpt->host_init(dev, gvt, ops); +} + +/** + * intel_gvt_hypervisor_host_exit - exit GVT-g host side + */ +static inline void intel_gvt_hypervisor_host_exit(struct device *dev, + void *gvt) +{ + /* optional to provide */ + if (!intel_gvt_host.mpt->host_exit) + return; + + intel_gvt_host.mpt->host_exit(dev, gvt); +} + +/** * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU * related stuffs inside hypervisor. * @@ -64,6 +93,10 @@ static inline int intel_gvt_hypervisor_detect_host(void) */ static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu) { + /* optional to provide */ + if (!intel_gvt_host.mpt->attach_vgpu) + return 0; + return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle); } @@ -76,6 +109,10 @@ static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu) */ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu) { + /* optional to provide */ + if (!intel_gvt_host.mpt->detach_vgpu) + return; + intel_gvt_host.mpt->detach_vgpu(vgpu->handle); } @@ -224,11 +261,6 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn( return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn); } -enum { - GVT_MAP_APERTURE = 0, - GVT_MAP_OPREGION, -}; - /** * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN * @vgpu: a vGPU @@ -236,7 +268,6 @@ enum { * @mfn: host PFN * @nr: amount of PFNs * @map: map or unmap - * @type: map type * * Returns: * Zero on success, negative error code if failed. 
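One convention running through all of these mpt.h wrappers deserves a note: hooks that a backend may legitimately omit are NULL-checked, and an absent hook simply reads as success (or a no-op), so core code never has to branch on which hypervisor it runs under. Condensed into a self-contained sketch (demo_* names are illustrative):

    /* Sketch of the optional-hook convention used by the wrappers above. */
    struct demo_mpt_table {
            int (*attach_vgpu)(void *vgpu, unsigned long *handle);
    };

    static const struct demo_mpt_table *demo_mpt;

    static inline int demo_hypervisor_attach_vgpu(void *vgpu,
                                                  unsigned long *handle)
    {
            if (!demo_mpt->attach_vgpu)     /* optional to provide */
                    return 0;               /* absent hook counts as success */
            return demo_mpt->attach_vgpu(vgpu, handle);
    }
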
@@ -244,10 +275,14 @@ enum { static inline int intel_gvt_hypervisor_map_gfn_to_mfn( struct intel_vgpu *vgpu, unsigned long gfn, unsigned long mfn, unsigned int nr, - bool map, int type) + bool map) { + /* a MPT implementation could have MMIO mapped elsewhere */ + if (!intel_gvt_host.mpt->map_gfn_to_mfn) + return 0; + return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr, - map, type); + map); } /** @@ -263,6 +298,10 @@ static inline int intel_gvt_hypervisor_map_gfn_to_mfn( static inline int intel_gvt_hypervisor_set_trap_area( struct intel_vgpu *vgpu, u64 start, u64 end, bool map) { + /* a MPT implementation could have MMIO trapped elsewhere */ + if (!intel_gvt_host.mpt->set_trap_area) + return 0; + return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map); } diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 95218913b0bc..d2a0fbc896c3 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -73,7 +73,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) } ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, vgpu_opregion(vgpu)->gfn[i], - mfn, 1, map, GVT_MAP_OPREGION); + mfn, 1, map); if (ret) { gvt_err("fail to map GFN to MFN, errno: %d\n", ret); return ret; @@ -89,28 +89,18 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) */ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu) { - int i; - gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id); if (!vgpu_opregion(vgpu)->va) return; - if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) { - vunmap(vgpu_opregion(vgpu)->va); - for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) { - if (vgpu_opregion(vgpu)->pages[i]) { - put_page(vgpu_opregion(vgpu)->pages[i]); - vgpu_opregion(vgpu)->pages[i] = NULL; - } - } - } else { + if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { map_vgpu_opregion(vgpu, false); free_pages((unsigned long)vgpu_opregion(vgpu)->va, INTEL_GVT_OPREGION_PORDER); - } - vgpu_opregion(vgpu)->va = NULL; + vgpu_opregion(vgpu)->va = NULL; + } } /** @@ -137,22 +127,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa) ret = map_vgpu_opregion(vgpu, true); if (ret) return ret; - } else { - gvt_dbg_core("emulate opregion from userspace\n"); - - /* - * If opregion pages are not allocated from host kenrel, - * most of the params are meaningless - */ - ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, - 0, /* not used */ - 0, /* not used */ - 2, /* not used */ - 1, - GVT_MAP_OPREGION); - if (ret) - return ret; } + return 0; } diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c index 3af894b3d257..44136b1f3aab 100644 --- a/drivers/gpu/drm/i915/gvt/render.c +++ b/drivers/gpu/drm/i915/gvt/render.c @@ -152,6 +152,8 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) if (wait_for_atomic((I915_READ_FW(reg) == 0), 50)) gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id); + else + vgpu_vreg(vgpu, regs[ring_id]) = 0; intel_uncore_forcewake_put(dev_priv, fw); diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 1df6a5460f3e..678b0be85376 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -36,12 +36,10 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu) { - struct intel_vgpu_execlist *execlist; enum intel_engine_id i; struct intel_engine_cs *engine; for_each_engine(engine, vgpu->gvt->dev_priv, i) { - 
execlist = &vgpu->execlist[i]; if (!list_empty(workload_q_head(vgpu, i))) return true; } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 18acb45dd14d..f898df38dd9a 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -89,15 +89,15 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) } page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i); - dst = kmap_atomic(page); + dst = kmap(page); intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, GTT_PAGE_SIZE); - kunmap_atomic(dst); + kunmap(page); i++; } page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); - shadow_ring_context = kmap_atomic(page); + shadow_ring_context = kmap(page); #define COPY_REG(name) \ intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ @@ -123,7 +123,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) sizeof(*shadow_ring_context), GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); - kunmap_atomic(shadow_ring_context); + kunmap(page); return 0; } @@ -160,8 +160,6 @@ static int shadow_context_status_change(struct notifier_block *nb, static int dispatch_workload(struct intel_vgpu_workload *workload) { - struct intel_vgpu *vgpu = workload->vgpu; - struct intel_gvt *gvt = vgpu->gvt; int ring_id = workload->ring_id; struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; @@ -174,6 +172,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) shadow_ctx->desc_template = workload->ctx_desc.addressing_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; + mutex_lock(&dev_priv->drm.struct_mutex); + rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); if (IS_ERR(rq)) { gvt_err("fail to allocate gem request\n"); @@ -185,40 +185,35 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) workload->req = i915_gem_request_get(rq); - mutex_lock(&gvt->lock); - ret = intel_gvt_scan_and_shadow_workload(workload); if (ret) - goto err; + goto out; ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); if (ret) - goto err; + goto out; ret = populate_shadow_context(workload); if (ret) - goto err; + goto out; if (workload->prepare) { ret = workload->prepare(workload); if (ret) - goto err; + goto out; } - mutex_unlock(&gvt->lock); - gvt_dbg_sched("ring id %d submit workload to i915 %p\n", ring_id, workload->req); - i915_add_request_no_flush(rq); + ret = 0; workload->dispatched = true; - return 0; -err: - workload->status = ret; - - mutex_unlock(&gvt->lock); +out: + if (ret) + workload->status = ret; i915_add_request_no_flush(rq); + mutex_unlock(&dev_priv->drm.struct_mutex); return ret; } @@ -318,10 +313,10 @@ static void update_guest_context(struct intel_vgpu_workload *workload) } page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i); - src = kmap_atomic(page); + src = kmap(page); intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, GTT_PAGE_SIZE); - kunmap_atomic(src); + kunmap(page); i++; } @@ -329,7 +324,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4); page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); - shadow_ring_context = kmap_atomic(page); + shadow_ring_context = kmap(page); #define COPY_REG(name) \ intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \ @@ -347,7 +342,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) 
sizeof(*shadow_ring_context), GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); - kunmap_atomic(shadow_ring_context); + kunmap(page); } static void complete_current_workload(struct intel_gvt *gvt, int ring_id) @@ -438,9 +433,9 @@ static int workload_thread(void *priv) intel_uncore_forcewake_get(gvt->dev_priv, FORCEWAKE_ALL); - mutex_lock(&gvt->dev_priv->drm.struct_mutex); + mutex_lock(&gvt->lock); ret = dispatch_workload(workload); - mutex_unlock(&gvt->dev_priv->drm.struct_mutex); + mutex_unlock(&gvt->lock); if (ret) { gvt_err("fail to dispatch workload, skip\n"); @@ -455,15 +450,15 @@ static int workload_thread(void *priv) if (lret < 0) { workload->status = lret; gvt_err("fail to wait workload, skip\n"); + } else { + workload->status = 0; } complete: gvt_dbg_sched("will complete workload %p\n, status: %d\n", workload, workload->status); - mutex_lock(&gvt->dev_priv->drm.struct_mutex); complete_current_workload(gvt, ring_id); - mutex_unlock(&gvt->dev_priv->drm.struct_mutex); i915_gem_request_put(fetch_and_zero(&workload->req)); diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 4f54005b976d..4f64845d8a4c 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -46,9 +46,13 @@ int setup_vgpu_mmio(struct intel_vgpu *vgpu) struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_device_info *info = &gvt->device_info; - vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); - if (!vgpu->mmio.vreg) - return -ENOMEM; + if (vgpu->mmio.vreg) + memset(vgpu->mmio.vreg, 0, info->mmio_size * 2); + else { + vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); + if (!vgpu->mmio.vreg) + return -ENOMEM; + } vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; @@ -95,6 +99,7 @@ static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu, */ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); + memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { vgpu->cfg_space.bar[i].size = pci_resource_len( @@ -133,6 +138,106 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) } /** + * intel_gvt_init_vgpu_types - initialize vGPU type list + * @gvt: GVT device + * + * Initialize the vGPU type list based on the available resources. + * + */ +int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) +{ + unsigned int num_types; + unsigned int i, low_avail; + unsigned int min_low; + + /* vGPU type names have the form GVTg_Vx_y, where 'x' encodes the + * physical GPU generation and 'y' is the maximum number of vGPU + * instances a user can create on one physical GPU for this type. + * + * Depending on the physical SKU resources, vGPU types such as + * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. may be exposed. Different + * types of vGPU can be created on the same physical GPU, depending + * on the available resources. Each vGPU type has an "avail_instance" + * field to indicate how many vGPU instances can still be created for + * this type. + * + * Currently static sizes are used here, since the types are + * initialized early.
+ */ + low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE; + num_types = 4; + + gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type), + GFP_KERNEL); + if (!gvt->types) + return -ENOMEM; + + min_low = MB_TO_BYTES(32); + for (i = 0; i < num_types; ++i) { + if (low_avail / min_low == 0) + break; + gvt->types[i].low_gm_size = min_low; + gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size; + gvt->types[i].fence = 4; + gvt->types[i].max_instance = low_avail / min_low; + gvt->types[i].avail_instance = gvt->types[i].max_instance; + + if (IS_GEN8(gvt->dev_priv)) + sprintf(gvt->types[i].name, "GVTg_V4_%u", + gvt->types[i].max_instance); + else if (IS_GEN9(gvt->dev_priv)) + sprintf(gvt->types[i].name, "GVTg_V5_%u", + gvt->types[i].max_instance); + + min_low <<= 1; + gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n", + i, gvt->types[i].name, gvt->types[i].max_instance, + gvt->types[i].avail_instance, + gvt->types[i].low_gm_size, + gvt->types[i].high_gm_size, gvt->types[i].fence); + } + + gvt->num_types = i; + return 0; +} + +void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt) +{ + kfree(gvt->types); +} + +static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) +{ + int i; + unsigned int low_gm_avail, high_gm_avail, fence_avail; + unsigned int low_gm_min, high_gm_min, fence_min, total_min; + + /* This should depend on the maximum hw resource size, but keep the + * static config for now. + */ + low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - + gvt->gm.vgpu_allocated_low_gm_size; + high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE - + gvt->gm.vgpu_allocated_high_gm_size; + fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - + gvt->fence.vgpu_allocated_fence_num; + + for (i = 0; i < gvt->num_types; i++) { + low_gm_min = low_gm_avail / gvt->types[i].low_gm_size; + high_gm_min = high_gm_avail / gvt->types[i].high_gm_size; + fence_min = fence_avail / gvt->types[i].fence; + total_min = min(min(low_gm_min, high_gm_min), fence_min); + gvt->types[i].avail_instance = min(gvt->types[i].max_instance, + total_min); + + gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n", + i, gvt->types[i].name, gvt->types[i].max_instance, + gvt->types[i].avail_instance, gvt->types[i].low_gm_size, + gvt->types[i].high_gm_size, gvt->types[i].fence); + } +} + +/** * intel_gvt_destroy_vgpu - destroy a virtual GPU * @vgpu: virtual GPU * @@ -166,20 +271,11 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) clean_vgpu_mmio(vgpu); vfree(vgpu); + intel_gvt_update_vgpu_types(gvt); mutex_unlock(&gvt->lock); } -/** - * intel_gvt_create_vgpu - create a virtual GPU - * @gvt: GVT device - * @param: vGPU creation parameters - * - * This function is called when user wants to create a virtual GPU. - * - * Returns: - * pointer to intel_vgpu, error pointer if failed.
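To make the sizing loop in intel_gvt_init_vgpu_types above concrete: with HOST_LOW_GM_SIZE = 128MB reserved out of the assumed 256MB aperture, low_avail starts at 128MB and min_low doubles from 32MB, so on a gen8 part the loop yields three types. The same arithmetic as a standalone userspace sketch (illustrative only):

    #include <stdio.h>

    int main(void)
    {
            unsigned int low_avail = 128u << 20;    /* 256MB - HOST_LOW_GM_SIZE */
            unsigned int min_low = 32u << 20;

            while (low_avail / min_low) {
                    printf("GVTg_V4_%u: low %uMB, high %uMB, max %u\n",
                           low_avail / min_low, min_low >> 20,
                           (3 * min_low) >> 20, low_avail / min_low);
                    min_low <<= 1;
            }
            return 0;
    }
    /* Output: GVTg_V4_4: low 32MB, high 96MB, max 4
     *         GVTg_V4_2: low 64MB, high 192MB, max 2
     *         GVTg_V4_1: low 128MB, high 384MB, max 1 */
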
- */ -struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, +static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, struct intel_vgpu_creation_params *param) { struct intel_vgpu *vgpu; @@ -224,15 +320,9 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_detach_hypervisor_vgpu; - if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) { - ret = intel_vgpu_init_opregion(vgpu, 0); - if (ret) - goto out_clean_gtt; - } - ret = intel_vgpu_init_display(vgpu); if (ret) - goto out_clean_opregion; + goto out_clean_gtt; ret = intel_vgpu_init_execlist(vgpu); if (ret) @@ -257,8 +347,6 @@ out_clean_execlist: intel_vgpu_clean_execlist(vgpu); out_clean_display: intel_vgpu_clean_display(vgpu); -out_clean_opregion: - intel_vgpu_clean_opregion(vgpu); out_clean_gtt: intel_vgpu_clean_gtt(vgpu); out_detach_hypervisor_vgpu: @@ -272,3 +360,49 @@ out_free_vgpu: mutex_unlock(&gvt->lock); return ERR_PTR(ret); } + +/** + * intel_gvt_create_vgpu - create a virtual GPU + * @gvt: GVT device + * @type: type of the vGPU to create + * + * This function is called when a user wants to create a virtual GPU. + * + * Returns: + * pointer to intel_vgpu, error pointer if failed. + */ +struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, + struct intel_vgpu_type *type) +{ + struct intel_vgpu_creation_params param; + struct intel_vgpu *vgpu; + + param.handle = 0; + param.low_gm_sz = type->low_gm_size; + param.high_gm_sz = type->high_gm_size; + param.fence_sz = type->fence; + + /* XXX current param based on MB */ + param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz); + param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz); + + vgpu = __intel_gvt_create_vgpu(gvt, &param); + if (IS_ERR(vgpu)) + return vgpu; + + /* recalculate the remaining available instances for each type */ + intel_gvt_update_vgpu_types(gvt); + + return vgpu; +} + +/** + * intel_gvt_reset_vgpu - reset a virtual GPU + * @vgpu: virtual GPU + * + * This function is called when a user wants to reset a virtual GPU. + * + */ +void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) +{ +} diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 7166a1a6265d..b7f42c448a44 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -547,11 +547,11 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) pipe, plane); } if (work->flip_queued_req) { - struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req); + struct intel_engine_cs *engine = work->flip_queued_req->engine; seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n", engine->name, - i915_gem_request_get_seqno(work->flip_queued_req), + work->flip_queued_req->global_seqno, atomic_read(&dev_priv->gt.global_timeline.next_seqno), intel_engine_get_seqno(engine), i915_gem_request_completed(work->flip_queued_req)); @@ -631,8 +631,9 @@ static void print_request(struct seq_file *m, struct drm_i915_gem_request *rq, const char *prefix) { - seq_printf(m, "%s%x [%x:%x] @ %d: %s\n", prefix, + seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix, rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno, + rq->priotree.priority, jiffies_to_msecs(jiffies - rq->emitted_jiffies), rq->timeline->common->name); } @@ -1761,8 +1762,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); intel_runtime_pm_put(dev_priv); - seq_printf(m, "self-refresh: %s\n", sr_enabled ?
"enabled" : "disabled"); + seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled)); return 0; } @@ -1896,7 +1896,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) fbdev_fb->base.height, fbdev_fb->base.depth, fbdev_fb->base.bits_per_pixel, - fbdev_fb->base.modifier[0], + fbdev_fb->base.modifier, drm_framebuffer_read_refcount(&fbdev_fb->base)); describe_obj(m, fbdev_fb->obj); seq_putc(m, '\n'); @@ -1914,7 +1914,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) fb->base.height, fb->base.depth, fb->base.bits_per_pixel, - fb->base.modifier[0], + fb->base.modifier, drm_framebuffer_read_refcount(&fb->base)); describe_obj(m, fb->obj); seq_putc(m, '\n'); @@ -3216,6 +3216,7 @@ static int i915_engine_info(struct seq_file *m, void *unused) if (i915.enable_execlists) { u32 ptr, read, write; + struct rb_node *rb; seq_printf(m, "\tExeclist status: 0x%08x %08x\n", I915_READ(RING_EXECLIST_STATUS_LO(engine)), @@ -3254,11 +3255,12 @@ static int i915_engine_info(struct seq_file *m, void *unused) seq_printf(m, "\t\tELSP[1] idle\n"); rcu_read_unlock(); - spin_lock_irq(&engine->execlist_lock); - list_for_each_entry(rq, &engine->execlist_queue, execlist_link) { + spin_lock_irq(&engine->timeline->lock); + for (rb = engine->execlist_first; rb; rb = rb_next(rb)) { + rq = rb_entry(rb, typeof(*rq), priotree.node); print_request(m, rq, "\t\tQ "); } - spin_unlock_irq(&engine->execlist_lock); + spin_unlock_irq(&engine->timeline->lock); } else if (INTEL_GEN(dev_priv) > 6) { seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(engine))); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0213a3090ab3..445fec9c2841 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -150,7 +150,7 @@ static void intel_detect_pch(struct drm_device *dev) /* In all current cases, num_pipes is equivalent to the PCH_NOP setting * (which really amounts to a PCH but no South Display). */ - if (INTEL_INFO(dev)->num_pipes == 0) { + if (INTEL_INFO(dev_priv)->num_pipes == 0) { dev_priv->pch_type = PCH_NOP; return; } @@ -323,6 +323,10 @@ static int i915_getparam(struct drm_device *dev, void *data, */ value = i915_gem_mmap_gtt_version(); break; + case I915_PARAM_HAS_SCHEDULER: + value = dev_priv->engine[RCS] && + dev_priv->engine[RCS]->schedule; + break; case I915_PARAM_MMAP_VERSION: /* Remember to bump this if the version changes! */ case I915_PARAM_HAS_GEM: @@ -374,12 +378,12 @@ static int intel_alloc_mchbar_resource(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; + int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; u32 temp_lo, temp_hi = 0; u64 mchbar_addr; int ret; - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); mchbar_addr = ((u64)temp_hi << 32) | temp_lo; @@ -406,7 +410,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev) return ret; } - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) pci_write_config_dword(dev_priv->bridge_dev, reg + 4, upper_32_bits(dev_priv->mch_res.start)); @@ -420,7 +424,7 @@ static void intel_setup_mchbar(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; + int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; u32 temp; bool enabled; @@ -460,7 +464,7 @@ static void intel_teardown_mchbar(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; + int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; if (dev_priv->mchbar_need_disable) { if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) { @@ -491,7 +495,7 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state) { struct drm_device *dev = cookie; - intel_modeset_vga_set_state(dev, state); + intel_modeset_vga_set_state(to_i915(dev), state); if (state) return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; @@ -607,7 +611,7 @@ static int i915_load_modeset_init(struct drm_device *dev) intel_modeset_gem_init(dev); - if (INTEL_INFO(dev)->num_pipes == 0) + if (INTEL_INFO(dev_priv)->num_pipes == 0) return 0; ret = intel_fbdev_init(dev); @@ -879,7 +883,7 @@ static int i915_mmio_setup(struct drm_device *dev) * the register BAR remains the same size for all the earlier * generations up to Ironlake. */ - if (INTEL_INFO(dev)->gen < 5) + if (INTEL_GEN(dev_priv) < 5) mmio_size = 512 * 1024; else mmio_size = 2 * 1024 * 1024; @@ -1168,8 +1172,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) /** * i915_driver_load - setup chip and create an initial config - * @dev: DRM device - * @flags: startup flags + * @pdev: PCI device + * @ent: matching PCI ID entry * * The driver load routine has to do several things: * - drive output discovery via intel_modeset_init() @@ -1438,7 +1442,7 @@ static int i915_drm_suspend(struct drm_device *dev) intel_suspend_hw(dev_priv); - i915_gem_suspend_gtt_mappings(dev); + i915_gem_suspend_gtt_mappings(dev_priv); i915_save_state(dev); @@ -1512,7 +1516,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) * Fujitsu FSC S7110 * Acer Aspire 1830T */ - if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6)) + if (!(hibernation && INTEL_GEN(dev_priv) < 6)) pci_set_power_state(pdev, PCI_D3hot); dev_priv->suspended_to_idle = suspend_to_idle(dev_priv); @@ -2422,7 +2426,7 @@ static int intel_runtime_resume(struct device *kdev) * No point of rolling back things in case of an error, as the best * we can do is to hope that things will still work (and disable RPM). */ - i915_gem_init_swizzling(dev); + i915_gem_init_swizzling(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6f4a6bcf6ed4..56002a52936d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -60,11 +60,15 @@ #include "intel_ringbuffer.h" #include "i915_gem.h" +#include "i915_gem_fence_reg.h" +#include "i915_gem_object.h" #include "i915_gem_gtt.h" #include "i915_gem_render_state.h" #include "i915_gem_request.h" #include "i915_gem_timeline.h" +#include "i915_vma.h" + #include "intel_gvt.h" /* General customization: @@ -72,8 +76,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20161108" -#define DRIVER_TIMESTAMP 1478587895 +#define DRIVER_DATE "20161121" +#define DRIVER_TIMESTAMP 1479717903 #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@ -125,6 +129,11 @@ static inline const char *onoff(bool v) return v ? "on" : "off"; } +static inline const char *enableddisabled(bool v) +{ + return v ? 
"enabled" : "disabled"; +} + enum pipe { INVALID_PIPE = -1, PIPE_A = 0, @@ -459,23 +468,6 @@ struct intel_opregion { struct intel_overlay; struct intel_overlay_error_state; -struct drm_i915_fence_reg { - struct list_head link; - struct drm_i915_private *i915; - struct i915_vma *vma; - int pin_count; - int id; - /** - * Whether the tiling parameters for the currently - * associated fence register have changed. Note that - * for the purposes of tracking tiling changes we also - * treat the unfenced register, the register slot that - * the object occupies whilst it executes a fenced - * command (such as BLT on gen2/3), as a "fence". - */ - bool dirty; -}; - struct sdvo_device_mapping { u8 initialized; u8 dvo_port; @@ -487,6 +479,7 @@ struct sdvo_device_mapping { struct intel_connector; struct intel_encoder; +struct intel_atomic_state; struct intel_crtc_state; struct intel_initial_plane_config; struct intel_crtc; @@ -500,8 +493,12 @@ struct drm_i915_display_funcs { int (*compute_intermediate_wm)(struct drm_device *dev, struct intel_crtc *intel_crtc, struct intel_crtc_state *newstate); - void (*initial_watermarks)(struct intel_crtc_state *cstate); - void (*optimize_watermarks)(struct intel_crtc_state *cstate); + void (*initial_watermarks)(struct intel_atomic_state *state, + struct intel_crtc_state *cstate); + void (*atomic_update_watermarks)(struct intel_atomic_state *state, + struct intel_crtc_state *cstate); + void (*optimize_watermarks)(struct intel_atomic_state *state, + struct intel_crtc_state *cstate); int (*compute_global_watermarks)(struct drm_atomic_state *state); void (*update_wm)(struct intel_crtc *crtc); int (*modeset_calc_cdclk)(struct drm_atomic_state *state); @@ -562,6 +559,18 @@ enum forcewake_domains { #define FW_REG_READ (1) #define FW_REG_WRITE (2) +enum decoupled_power_domain { + GEN9_DECOUPLED_PD_BLITTER = 0, + GEN9_DECOUPLED_PD_RENDER, + GEN9_DECOUPLED_PD_MEDIA, + GEN9_DECOUPLED_PD_ALL +}; + +enum decoupled_ops { + GEN9_DECOUPLED_OP_WRITE = 0, + GEN9_DECOUPLED_OP_READ +}; + enum forcewake_domains intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, i915_reg_t reg, unsigned int op); @@ -668,7 +677,7 @@ struct intel_csr { func(is_skylake); \ func(is_broxton); \ func(is_kabylake); \ - func(is_preliminary); \ + func(is_alpha_support); \ /* Keep has_* in alphabetical order */ \ func(has_64bit_reloc); \ func(has_csr); \ @@ -696,7 +705,8 @@ struct intel_csr { func(cursor_needs_physical); \ func(hws_needs_physical); \ func(overlay_needs_physical); \ - func(supports_tv) + func(supports_tv); \ + func(has_decoupled_mmio) struct sseu_dev_info { u8 slice_mask; @@ -949,6 +959,7 @@ struct i915_gem_context { /* Unique identifier for this context, used by the hw for tracking */ unsigned int hw_id; u32 user_handle; + int priority; /* greater priorities are serviced first */ u32 ggtt_alignment; @@ -1791,6 +1802,7 @@ struct drm_i915_private { struct kmem_cache *objects; struct kmem_cache *vmas; struct kmem_cache *requests; + struct kmem_cache *dependencies; const struct intel_device_info info; @@ -2053,13 +2065,6 @@ struct drm_i915_private { */ uint16_t skl_latency[8]; - /* - * The skl_wm_values structure is a bit too big for stack - * allocation, so we keep the staging struct where we store - * intermediate results here instead. 
- */ - struct skl_wm_values skl_results; - /* current hardware state */ union { struct ilk_wm_values hw; @@ -2179,31 +2184,6 @@ enum hdmi_force_audio { #define I915_GTT_OFFSET_NONE ((u32)-1) -struct drm_i915_gem_object_ops { - unsigned int flags; -#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1 -#define I915_GEM_OBJECT_IS_SHRINKABLE 0x2 - - /* Interface between the GEM object and its backing storage. - * get_pages() is called once prior to the use of the associated set - * of pages before to binding them into the GTT, and put_pages() is - * called after we no longer need them. As we expect there to be - * associated cost with migrating pages between the backing storage - * and making them available for the GPU (e.g. clflush), we may hold - * onto the pages after they are no longer referenced by the GPU - * in case they may be used again shortly (for example migrating the - * pages to a different memory domain within the GTT). put_pages() - * will therefore most likely be called when the object itself is - * being released or under memory pressure (where we attempt to - * reap pages for the shrinker). - */ - struct sg_table *(*get_pages)(struct drm_i915_gem_object *); - void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *); - - int (*dmabuf_export)(struct drm_i915_gem_object *); - void (*release)(struct drm_i915_gem_object *); -}; - /* * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is * considered to be the frontbuffer for the given plane interface-wise. This @@ -2225,292 +2205,6 @@ struct drm_i915_gem_object_ops { #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) -struct drm_i915_gem_object { - struct drm_gem_object base; - - const struct drm_i915_gem_object_ops *ops; - - /** List of VMAs backed by this object */ - struct list_head vma_list; - struct rb_root vma_tree; - - /** Stolen memory for this object, instead of being backed by shmem. */ - struct drm_mm_node *stolen; - struct list_head global_link; - union { - struct rcu_head rcu; - struct llist_node freed; - }; - - /** - * Whether the object is currently in the GGTT mmap. - */ - struct list_head userfault_link; - - /** Used in execbuf to temporarily hold a ref */ - struct list_head obj_exec_link; - - struct list_head batch_pool_link; - - unsigned long flags; - - /** - * Have we taken a reference for the object for incomplete GPU - * activity? - */ -#define I915_BO_ACTIVE_REF 0 - - /* - * Is the object to be mapped as read-only to the GPU - * Only honoured if hardware has relevant pte bit - */ - unsigned long gt_ro:1; - unsigned int cache_level:3; - unsigned int cache_dirty:1; - - atomic_t frontbuffer_bits; - unsigned int frontbuffer_ggtt_origin; /* write once */ - - /** Current tiling stride for the object, if it's tiled. */ - unsigned int tiling_and_stride; -#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */ -#define TILING_MASK (FENCE_MINIMUM_STRIDE-1) -#define STRIDE_MASK (~TILING_MASK) - - /** Count of VMA actually bound by this object */ - unsigned int bind_count; - unsigned int active_count; - unsigned int pin_display; - - struct { - struct mutex lock; /* protects the pages and their use */ - atomic_t pages_pin_count; - - struct sg_table *pages; - void *mapping; - - struct i915_gem_object_page_iter { - struct scatterlist *sg_pos; - unsigned int sg_idx; /* in pages, but 32bit eek! */ - - struct radix_tree_root radix; - struct mutex lock; /* protects this cache */ - } get_page; - - /** - * Advice: are the backing pages purgeable? 
- */ - unsigned int madv:2; - - /** - * This is set if the object has been written to since the - * pages were last acquired. - */ - bool dirty:1; - - /** - * This is set if the object has been pinned due to unknown - * swizzling. - */ - bool quirked:1; - } mm; - - /** Breadcrumb of last rendering to the buffer. - * There can only be one writer, but we allow for multiple readers. - * If there is a writer that necessarily implies that all other - * read requests are complete - but we may only be lazily clearing - * the read requests. A read request is naturally the most recent - * request on a ring, so we may have two different write and read - * requests on one ring where the write request is older than the - * read request. This allows for the CPU to read from an active - * buffer by only waiting for the write to complete. - */ - struct reservation_object *resv; - - /** References from framebuffers, locks out tiling changes. */ - unsigned long framebuffer_references; - - /** Record of address bit 17 of each page at last unbind. */ - unsigned long *bit_17; - - struct i915_gem_userptr { - uintptr_t ptr; - unsigned read_only :1; - - struct i915_mm_struct *mm; - struct i915_mmu_object *mmu_object; - struct work_struct *work; - } userptr; - - /** for phys allocated objects */ - struct drm_dma_handle *phys_handle; - - struct reservation_object __builtin_resv; -}; - -static inline struct drm_i915_gem_object * -to_intel_bo(struct drm_gem_object *gem) -{ - /* Assert that to_intel_bo(NULL) == NULL */ - BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base)); - - return container_of(gem, struct drm_i915_gem_object, base); -} - -/** - * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle - * @filp: DRM file private date - * @handle: userspace handle - * - * Returns: - * - * A pointer to the object named by the handle if such exists on @filp, NULL - * otherwise. This object is only valid whilst under the RCU read lock, and - * note carefully the object may be in the process of being destroyed. 
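These lookup helpers (being moved verbatim into the new i915_gem_object.h) split the job in two: the RCU variant returns a pointer that is only stable inside the RCU read section, while i915_gem_object_lookup() hides the kref_get_unless_zero() dance and hands back a fully referenced object. A typical caller, sketched here as a hypothetical demo_use_object():

    /* Hypothetical caller of the helpers above: take the reference,
     * use the object, drop the reference. */
    static int demo_use_object(struct drm_file *file, u32 handle)
    {
            struct drm_i915_gem_object *obj;

            obj = i915_gem_object_lookup(file, handle);
            if (!obj)
                    return -ENOENT; /* no such handle, or already being freed */

            /* ... obj holds a strong reference and is safe to use ... */

            i915_gem_object_put(obj);
            return 0;
    }
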
- */ -static inline struct drm_i915_gem_object * -i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle) -{ -#ifdef CONFIG_LOCKDEP - WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map)); -#endif - return idr_find(&file->object_idr, handle); -} - -static inline struct drm_i915_gem_object * -i915_gem_object_lookup(struct drm_file *file, u32 handle) -{ - struct drm_i915_gem_object *obj; - - rcu_read_lock(); - obj = i915_gem_object_lookup_rcu(file, handle); - if (obj && !kref_get_unless_zero(&obj->base.refcount)) - obj = NULL; - rcu_read_unlock(); - - return obj; -} - -__deprecated -extern struct drm_gem_object * -drm_gem_object_lookup(struct drm_file *file, u32 handle); - -__attribute__((nonnull)) -static inline struct drm_i915_gem_object * -i915_gem_object_get(struct drm_i915_gem_object *obj) -{ - drm_gem_object_reference(&obj->base); - return obj; -} - -__deprecated -extern void drm_gem_object_reference(struct drm_gem_object *); - -__attribute__((nonnull)) -static inline void -i915_gem_object_put(struct drm_i915_gem_object *obj) -{ - __drm_gem_object_unreference(&obj->base); -} - -__deprecated -extern void drm_gem_object_unreference(struct drm_gem_object *); - -__deprecated -extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *); - -static inline bool -i915_gem_object_is_dead(const struct drm_i915_gem_object *obj) -{ - return atomic_read(&obj->base.refcount.refcount) == 0; -} - -static inline bool -i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) -{ - return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE; -} - -static inline bool -i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj) -{ - return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE; -} - -static inline bool -i915_gem_object_is_active(const struct drm_i915_gem_object *obj) -{ - return obj->active_count; -} - -static inline bool -i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj) -{ - return test_bit(I915_BO_ACTIVE_REF, &obj->flags); -} - -static inline void -i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj) -{ - lockdep_assert_held(&obj->base.dev->struct_mutex); - __set_bit(I915_BO_ACTIVE_REF, &obj->flags); -} - -static inline void -i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj) -{ - lockdep_assert_held(&obj->base.dev->struct_mutex); - __clear_bit(I915_BO_ACTIVE_REF, &obj->flags); -} - -void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj); - -static inline unsigned int -i915_gem_object_get_tiling(struct drm_i915_gem_object *obj) -{ - return obj->tiling_and_stride & TILING_MASK; -} - -static inline bool -i915_gem_object_is_tiled(struct drm_i915_gem_object *obj) -{ - return i915_gem_object_get_tiling(obj) != I915_TILING_NONE; -} - -static inline unsigned int -i915_gem_object_get_stride(struct drm_i915_gem_object *obj) -{ - return obj->tiling_and_stride & STRIDE_MASK; -} - -static inline struct intel_engine_cs * -i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj) -{ - struct intel_engine_cs *engine = NULL; - struct dma_fence *fence; - - rcu_read_lock(); - fence = reservation_object_get_excl_rcu(obj->resv); - rcu_read_unlock(); - - if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence)) - engine = to_request(fence)->engine; - dma_fence_put(fence); - - return engine; -} - -static inline struct i915_vma *i915_vma_get(struct i915_vma *vma) -{ - i915_gem_object_get(vma->obj); - return vma; -} - -static inline void i915_vma_put(struct i915_vma *vma) 
-{ - i915_gem_object_put(vma->obj); -} - /* * Optimised SGL iterator for GEM objects */ @@ -2683,24 +2377,19 @@ struct drm_i915_cmd_table { int count; }; -/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */ -#define __I915__(p) ({ \ - struct drm_i915_private *__p; \ - if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \ - __p = (struct drm_i915_private *)p; \ - else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \ - __p = to_i915((struct drm_device *)p); \ - else \ - BUILD_BUG(); \ - __p; \ -}) -#define INTEL_INFO(p) (&__I915__(p)->info) +static inline const struct intel_device_info * +intel_info(const struct drm_i915_private *dev_priv) +{ + return &dev_priv->info; +} + +#define INTEL_INFO(dev_priv) intel_info((dev_priv)) #define INTEL_GEN(dev_priv) ((dev_priv)->info.gen) #define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id) #define REVID_FOREVER 0xff -#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision) +#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision) #define GEN_FOREVER (0) /* @@ -2797,7 +2486,7 @@ struct drm_i915_cmd_table { #define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030) -#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) +#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support) #define SKL_REVID_A0 0x0 #define SKL_REVID_B0 0x1 @@ -2851,28 +2540,31 @@ struct drm_i915_cmd_table { #define ALL_ENGINES (~0) #define HAS_ENGINE(dev_priv, id) \ - (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id))) + (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id))) #define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS) #define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2) #define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS) #define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS) -#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) -#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop) -#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED)) +#define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc) +#define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop) +#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED)) #define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \ IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv)) -#define HWS_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->hws_needs_physical) -#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->has_hw_contexts) -#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->has_logical_ring_contexts) -#define USES_PPGTT(dev) (i915.enable_ppgtt) -#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2) -#define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3) +#define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical) -#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) -#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) +#define HAS_HW_CONTEXTS(dev_priv) ((dev_priv)->info.has_hw_contexts) +#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \ + ((dev_priv)->info.has_logical_ring_contexts) +#define USES_PPGTT(dev_priv) (i915.enable_ppgtt) +#define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2) +#define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3) + +#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay) +#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \ + ((dev_priv)->info.overlay_needs_physical) /* Early gen2 have a totally busted CS tlb and require pinned batches. 
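/*
 * A minimal, standalone sketch of the two idioms in the hunk above:
 * the removed __I915__() macro dispatched on the argument's static
 * type with __builtin_types_compatible_p(), while the replacement
 * intel_info() is a plain, type-checked inline. All names below
 * (priv, dev, PRIV, priv_info) are invented for illustration; the
 * kernel macro additionally BUILD_BUG()s on unknown types.
 */
#include <stdio.h>

struct priv { int gen; };
struct dev { struct priv p; };

static inline struct priv *to_priv(struct dev *d) { return &d->p; }

/* Old style: accepts either pointer type, resolved at compile time. */
#define PRIV(x) (__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(*(x)), struct priv), \
	(struct priv *)(x), \
	to_priv((struct dev *)(x))))

/* New style: exactly one type, so misuse is an ordinary compile error. */
static inline const struct priv *priv_info(const struct priv *p) { return p; }

int main(void)
{
	struct dev d = { .p = { .gen = 9 } };

	printf("%d %d\n", PRIV(&d)->gen, priv_info(&d.p)->gen);
	return 0;
}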
*/ #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv)) @@ -2889,8 +2581,8 @@ struct drm_i915_cmd_table { * legacy irq no. is shared with another device. The kernel then disables that * interrupt source and so prevents the other device from working properly. */ -#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) -#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->has_gmbus_irq) +#define HAS_AUX_IRQ(dev_priv) ((dev_priv)->info.gen >= 5) +#define HAS_GMBUS_IRQ(dev_priv) ((dev_priv)->info.has_gmbus_irq) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. @@ -2898,24 +2590,24 @@ struct drm_i915_cmd_table { #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \ !(IS_I915G(dev_priv) || \ IS_I915GM(dev_priv))) -#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) -#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) +#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv) +#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug) -#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2) -#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) -#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) +#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2) +#define HAS_PIPE_CXSR(dev_priv) ((dev_priv)->info.has_pipe_cxsr) +#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc) #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) -#define HAS_DP_MST(dev) (INTEL_INFO(dev)->has_dp_mst) +#define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst) -#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi) -#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) -#define HAS_PSR(dev) (INTEL_INFO(dev)->has_psr) -#define HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) -#define HAS_RC6p(dev) (INTEL_INFO(dev)->has_rc6p) +#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi) +#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg) +#define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr) +#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6) +#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p) -#define HAS_CSR(dev) (INTEL_INFO(dev)->has_csr) +#define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr) #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm) #define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc) @@ -2925,13 +2617,13 @@ struct drm_i915_cmd_table { * command submission once loaded. But these are logically independent * properties, so we have separate macros to test them. 
*/ -#define HAS_GUC(dev) (INTEL_INFO(dev)->has_guc) -#define HAS_GUC_UCODE(dev) (HAS_GUC(dev)) -#define HAS_GUC_SCHED(dev) (HAS_GUC(dev)) +#define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc) +#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) +#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv)) -#define HAS_RESOURCE_STREAMER(dev) (INTEL_INFO(dev)->has_resource_streamer) +#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer) -#define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu) +#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu) #define INTEL_PCH_DEVICE_ID_MASK 0xff00 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 @@ -2971,6 +2663,8 @@ struct drm_i915_cmd_table { #define GT_FREQUENCY_MULTIPLIER 50 #define GEN9_FREQ_SCALER 3 +#define HAS_DECOUPLED_MMIO(dev_priv) (INTEL_INFO(dev_priv)->has_decoupled_mmio) + #include "i915_trace.h" static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) @@ -3222,13 +2916,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, u64 alignment, u64 flags); -int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, - u32 flags); -void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); -int __must_check i915_vma_unbind(struct i915_vma *vma); -void i915_vma_close(struct i915_vma *vma); -void i915_vma_destroy(struct i915_vma *vma); - int i915_gem_object_unbind(struct drm_i915_gem_object *obj); void i915_gem_release_mmap(struct drm_i915_gem_object *obj); @@ -3405,10 +3092,10 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error) void i915_gem_reset(struct drm_i915_private *dev_priv); void i915_gem_set_wedged(struct drm_i915_private *dev_priv); -bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); +void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); int __must_check i915_gem_init(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); -void i915_gem_init_swizzling(struct drm_device *dev); +void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); void i915_gem_cleanup_engines(struct drm_device *dev); int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, unsigned int flags); @@ -3419,6 +3106,11 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj, unsigned int flags, long timeout, struct intel_rps_client *rps); +int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, + unsigned int flags, + int priority); +#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX + int __must_check i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write); @@ -3478,57 +3170,13 @@ i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o, return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view)); } -/* i915_gem_fence.c */ +/* i915_gem_fence_reg.c */ int __must_check i915_vma_get_fence(struct i915_vma *vma); int __must_check i915_vma_put_fence(struct i915_vma *vma); -/** - * i915_vma_pin_fence - pin fencing state - * @vma: vma to pin fencing for - * - * This pins the fencing state (whether tiled or untiled) to make sure the - * vma (and its object) is ready to be used as a scanout target. Fencing - * status must be synchronize first by calling i915_vma_get_fence(): - * - * The resulting fence pin reference must be released again with - * i915_vma_unpin_fence(). - * - * Returns: - * - * True if the vma has a fence, false otherwise. 
- */ -static inline bool -i915_vma_pin_fence(struct i915_vma *vma) -{ - lockdep_assert_held(&vma->vm->dev->struct_mutex); - if (vma->fence) { - vma->fence->pin_count++; - return true; - } else - return false; -} - -/** - * i915_vma_unpin_fence - unpin fencing state - * @vma: vma to unpin fencing for - * - * This releases the fence pin reference acquired through - * i915_vma_pin_fence. It will handle both objects with and without an - * attached fence correctly, callers do not need to distinguish this. - */ -static inline void -i915_vma_unpin_fence(struct i915_vma *vma) -{ - lockdep_assert_held(&vma->vm->dev->struct_mutex); - if (vma->fence) { - GEM_BUG_ON(vma->fence->pin_count <= 0); - vma->fence->pin_count--; - } -} - -void i915_gem_restore_fences(struct drm_device *dev); +void i915_gem_restore_fences(struct drm_i915_private *dev_priv); -void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); +void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv); void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj, struct sg_table *pages); void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, @@ -3631,7 +3279,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, u64 end); void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, struct drm_mm_node *node); -int i915_gem_init_stolen(struct drm_device *dev); +int i915_gem_init_stolen(struct drm_i915_private *dev_priv); void i915_gem_cleanup_stolen(struct drm_device *dev); struct drm_i915_gem_object * i915_gem_object_create_stolen(struct drm_device *dev, u32 size); @@ -3833,10 +3481,11 @@ extern void intel_modeset_gem_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); extern int intel_connector_register(struct drm_connector *); extern void intel_connector_unregister(struct drm_connector *); -extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); +extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, + bool state); extern void intel_display_resume(struct drm_device *dev); -extern void i915_redisable_vga(struct drm_device *dev); -extern void i915_redisable_vga_power_on(struct drm_device *dev); +extern void i915_redisable_vga(struct drm_i915_private *dev_priv); +extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); extern void intel_init_pch_refclk(struct drm_device *dev); extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val); @@ -3855,7 +3504,7 @@ extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, extern struct intel_display_error_state * intel_display_capture_error_state(struct drm_i915_private *dev_priv); extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, - struct drm_device *dev, + struct drm_i915_private *dev_priv, struct intel_display_error_state *error); int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 41e697e5dbcd..902fa427c196 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -34,6 +34,7 @@ #include "intel_drv.h" #include "intel_frontbuffer.h" #include "intel_mocs.h" +#include <linux/dma-fence-array.h> #include <linux/reservation.h> #include <linux/shmem_fs.h> #include <linux/slab.h> @@ -48,7 +49,7 @@ static void i915_gem_object_flush_cpu_write_domain(struct 
drm_i915_gem_object *o static bool cpu_cache_is_coherent(struct drm_device *dev, enum i915_cache_level level) { - return HAS_LLC(dev) || level != I915_CACHE_NONE; + return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE; } static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) @@ -220,15 +221,17 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) } static void -__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj) +__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, + struct sg_table *pages) { GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED); if (obj->mm.madv == I915_MADV_DONTNEED) obj->mm.dirty = false; - if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) - i915_gem_clflush_object(obj, false); + if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 && + !cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) + drm_clflush_sg(pages); obj->base.read_domains = I915_GEM_DOMAIN_CPU; obj->base.write_domain = I915_GEM_DOMAIN_CPU; @@ -238,7 +241,7 @@ static void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, struct sg_table *pages) { - __i915_gem_object_release_shmem(obj); + __i915_gem_object_release_shmem(obj, pages); if (obj->mm.dirty) { struct address_space *mapping = obj->base.filp->f_mapping; @@ -433,6 +436,70 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, return timeout; } +static void __fence_set_priority(struct dma_fence *fence, int prio) +{ + struct drm_i915_gem_request *rq; + struct intel_engine_cs *engine; + + if (!dma_fence_is_i915(fence)) + return; + + rq = to_request(fence); + engine = rq->engine; + if (!engine->schedule) + return; + + engine->schedule(rq, prio); +} + +static void fence_set_priority(struct dma_fence *fence, int prio) +{ + /* Recurse once into a fence-array */ + if (dma_fence_is_array(fence)) { + struct dma_fence_array *array = to_dma_fence_array(fence); + int i; + + for (i = 0; i < array->num_fences; i++) + __fence_set_priority(array->fences[i], prio); + } else { + __fence_set_priority(fence, prio); + } +} + +int +i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, + unsigned int flags, + int prio) +{ + struct dma_fence *excl; + + if (flags & I915_WAIT_ALL) { + struct dma_fence **shared; + unsigned int count, i; + int ret; + + ret = reservation_object_get_fences_rcu(obj->resv, + &excl, &count, &shared); + if (ret) + return ret; + + for (i = 0; i < count; i++) { + fence_set_priority(shared[i], prio); + dma_fence_put(shared[i]); + } + + kfree(shared); + } else { + excl = reservation_object_get_excl_rcu(obj->resv); + } + + if (excl) { + fence_set_priority(excl, prio); + dma_fence_put(excl); + } + return 0; +} + /** * Waits for rendering to the object to be completed * @obj: i915 gem object @@ -1757,7 +1824,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf) goto err_rpm; /* Access to snoopable pages through the GTT is incoherent. 
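/*
 * The priority-bump above recurses exactly one level into a
 * dma_fence_array and hands each child to engine->schedule(). A
 * self-contained model of that one-level shape, with invented types
 * (struct fence here is a stand-in, not the kernel's dma-fence, and
 * the raise-only check is this sketch's simplification of what the
 * scheduler side enforces):
 */
#include <stdbool.h>
#include <stdio.h>

struct fence {
	bool is_array;
	int prio;
	struct fence **children;
	unsigned int nchild;
};

static void bump_one(struct fence *f, int prio)
{
	if (f->prio < prio)		/* only ever raise a priority */
		f->prio = prio;
}

static void bump(struct fence *f, int prio)
{
	if (f->is_array) {
		/* Visit each child once; a nested array would be left
		 * alone, mirroring the single-level recursion above. */
		for (unsigned int i = 0; i < f->nchild; i++)
			bump_one(f->children[i], prio);
	} else {
		bump_one(f, prio);
	}
}

int main(void)
{
	struct fence a = { .prio = 0 }, b = { .prio = 2 };
	struct fence *kids[] = { &a, &b };
	struct fence arr = { .is_array = true, .children = kids, .nchild = 2 };

	bump(&arr, 1);
	printf("%d %d\n", a.prio, b.prio);	/* 1 2: never lowered */
	return 0;
}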
*/ - if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) { + if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) { ret = -EFAULT; goto err_unlock; } @@ -2150,7 +2217,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, struct sgt_iter sgt_iter; struct page *page; - __i915_gem_object_release_shmem(obj); + __i915_gem_object_release_shmem(obj, pages); i915_gem_gtt_finish_pages(obj, pages); @@ -2232,6 +2299,30 @@ static unsigned int swiotlb_max_size(void) #endif } +static void i915_sg_trim(struct sg_table *orig_st) +{ + struct sg_table new_st; + struct scatterlist *sg, *new_sg; + unsigned int i; + + if (orig_st->nents == orig_st->orig_nents) + return; + + if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL)) + return; + + new_sg = new_st.sgl; + for_each_sg(orig_st->sgl, sg, orig_st->nents, i) { + sg_set_page(new_sg, sg_page(sg), sg->length, 0); + /* called before being DMA mapped, no need to copy sg->dma_* */ + new_sg = sg_next(new_sg); + } + + sg_free_table(orig_st); + + *orig_st = new_st; +} + static struct sg_table * i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) { @@ -2296,7 +2387,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) page = shmem_read_mapping_page(mapping, i); if (IS_ERR(page)) { ret = PTR_ERR(page); - goto err_pages; + goto err_sg; } } if (!i || @@ -2317,6 +2408,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) if (sg) /* loop terminated early; short sg table */ sg_mark_end(sg); + /* Trim unused sg entries to avoid wasting memory. */ + i915_sg_trim(st); + ret = i915_gem_gtt_prepare_pages(obj, st); if (ret) goto err_pages; @@ -2326,8 +2420,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) return st; -err_pages: +err_sg: sg_mark_end(sg); +err_pages: for_each_sgt_page(page, sgt_iter, st) put_page(page); sg_free_table(st); @@ -2657,7 +2752,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv) for_each_engine(engine, dev_priv, id) i915_gem_reset_engine(engine); - i915_gem_restore_fences(&dev_priv->drm); + i915_gem_restore_fences(dev_priv); if (dev_priv->gt.awake) { intel_sanitize_gt_powersave(dev_priv); @@ -2689,12 +2784,17 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine) */ if (i915.enable_execlists) { - spin_lock(&engine->execlist_lock); - INIT_LIST_HEAD(&engine->execlist_queue); + unsigned long flags; + + spin_lock_irqsave(&engine->timeline->lock, flags); + i915_gem_request_put(engine->execlist_port[0].request); i915_gem_request_put(engine->execlist_port[1].request); memset(engine->execlist_port, 0, sizeof(engine->execlist_port)); - spin_unlock(&engine->execlist_lock); + engine->execlist_queue = RB_ROOT; + engine->execlist_first = NULL; + + spin_unlock_irqrestore(&engine->timeline->lock, flags); } } @@ -2892,117 +2992,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) return ret; } -static void __i915_vma_iounmap(struct i915_vma *vma) -{ - GEM_BUG_ON(i915_vma_is_pinned(vma)); - - if (vma->iomap == NULL) - return; - - io_mapping_unmap(vma->iomap); - vma->iomap = NULL; -} - -int i915_vma_unbind(struct i915_vma *vma) -{ - struct drm_i915_gem_object *obj = vma->obj; - unsigned long active; - int ret; - - lockdep_assert_held(&obj->base.dev->struct_mutex); - - /* First wait upon any activity as retiring the request may - * have side-effects such as unpinning or even unbinding this vma. - */ - active = i915_vma_get_active(vma); - if (active) { - int idx; - - /* When a closed VMA is retired, it is unbound - eek. 
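/*
 * i915_sg_trim() above rebuilds a scatterlist whose coalescing left
 * the table over-allocated (nents < orig_nents) into one of exactly
 * the used size, then swaps it in place. The same allocate-copy-swap
 * shape in plain C, with invented names and an int payload standing
 * in for sg entries; trimming stays best-effort, as in the patch:
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct table {
	int *ents;
	unsigned int used;	/* entries actually populated */
	unsigned int capacity;	/* entries originally allocated */
};

static void table_trim(struct table *t)
{
	int *tight;

	if (t->used == t->capacity)	/* nothing to reclaim */
		return;

	tight = malloc(t->used * sizeof(*tight));
	if (!tight)			/* best-effort: keep the old table */
		return;

	memcpy(tight, t->ents, t->used * sizeof(*tight));
	free(t->ents);

	t->ents = tight;		/* swap in the right-sized copy */
	t->capacity = t->used;
}

int main(void)
{
	struct table t = { .ents = calloc(8, sizeof(int)), .used = 3, .capacity = 8 };

	table_trim(&t);
	printf("%u/%u\n", t.used, t.capacity);	/* 3/3 */
	free(t.ents);
	return 0;
}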
- * In order to prevent it from being recursively closed, - * take a pin on the vma so that the second unbind is - * aborted. - * - * Even more scary is that the retire callback may free - * the object (last active vma). To prevent the explosion - * we defer the actual object free to a worker that can - * only proceed once it acquires the struct_mutex (which - * we currently hold, therefore it cannot free this object - * before we are finished). - */ - __i915_vma_pin(vma); - - for_each_active(active, idx) { - ret = i915_gem_active_retire(&vma->last_read[idx], - &vma->vm->dev->struct_mutex); - if (ret) - break; - } - - __i915_vma_unpin(vma); - if (ret) - return ret; - - GEM_BUG_ON(i915_vma_is_active(vma)); - } - - if (i915_vma_is_pinned(vma)) - return -EBUSY; - - if (!drm_mm_node_allocated(&vma->node)) - goto destroy; - - GEM_BUG_ON(obj->bind_count == 0); - GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); - - if (i915_vma_is_map_and_fenceable(vma)) { - /* release the fence reg _after_ flushing */ - ret = i915_vma_put_fence(vma); - if (ret) - return ret; - - /* Force a pagefault for domain tracking on next user access */ - i915_gem_release_mmap(obj); - - __i915_vma_iounmap(vma); - vma->flags &= ~I915_VMA_CAN_FENCE; - } - - if (likely(!vma->vm->closed)) { - trace_i915_vma_unbind(vma); - vma->vm->unbind_vma(vma); - } - vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); - - drm_mm_remove_node(&vma->node); - list_move_tail(&vma->vm_link, &vma->vm->unbound_list); - - if (vma->pages != obj->mm.pages) { - GEM_BUG_ON(!vma->pages); - sg_free_table(vma->pages); - kfree(vma->pages); - } - vma->pages = NULL; - - /* Since the unbound list is global, only move to that list if - * no more VMAs exist. */ - if (--obj->bind_count == 0) - list_move_tail(&obj->global_link, - &to_i915(obj->base.dev)->mm.unbound_list); - - /* And finally now the object is completely decoupled from this vma, - * we can drop its hold on the backing storage and allow it to be - * reaped by the shrinker. - */ - i915_gem_object_unpin_pages(obj); - -destroy: - if (unlikely(i915_vma_is_closed(vma))) - i915_vma_destroy(vma); - - return 0; -} - static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags) { int ret, i; @@ -3018,201 +3007,43 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags) int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) { - struct i915_gem_timeline *tl; int ret; - list_for_each_entry(tl, &i915->gt.timelines, link) { - ret = wait_for_timeline(tl, flags); - if (ret) - return ret; - } - - return 0; -} - -static bool i915_gem_valid_gtt_space(struct i915_vma *vma, - unsigned long cache_level) -{ - struct drm_mm_node *gtt_space = &vma->node; - struct drm_mm_node *other; - - /* - * On some machines we have to be careful when putting differing types - * of snoopable memory together to avoid the prefetcher crossing memory - * domains and dying. During vm initialisation, we decide whether or not - * these constraints apply and set the drm_mm.color_adjust - * appropriately. 
- */
-	if (vma->vm->mm.color_adjust == NULL)
-		return true;
-
-	if (!drm_mm_node_allocated(gtt_space))
-		return true;
-
-	if (list_empty(&gtt_space->node_list))
-		return true;
-
-	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
-	if (other->allocated && !other->hole_follows && other->color != cache_level)
-		return false;
+	if (flags & I915_WAIT_LOCKED) {
+		struct i915_gem_timeline *tl;
 
-	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
-	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
-		return false;
+		lockdep_assert_held(&i915->drm.struct_mutex);
 
-	return true;
-}
-
-/**
- * i915_vma_insert - finds a slot for the vma in its address space
- * @vma: the vma
- * @size: requested size in bytes (can be larger than the VMA)
- * @alignment: required alignment
- * @flags: mask of PIN_* flags to use
- *
- * First we try to allocate some free space that meets the requirements for
- * the VMA. Failing that, if the flags permit, it will evict an old VMA,
- * preferably the oldest idle entry to make room for the new VMA.
- *
- * Returns:
- * 0 on success, negative error code otherwise.
- */
-static int
-i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
-	struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
-	struct drm_i915_gem_object *obj = vma->obj;
-	u64 start, end;
-	int ret;
-
-	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
-	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
-
-	size = max(size, vma->size);
-	if (flags & PIN_MAPPABLE)
-		size = i915_gem_get_ggtt_size(dev_priv, size,
-					      i915_gem_object_get_tiling(obj));
-
-	alignment = max(max(alignment, vma->display_alignment),
-			i915_gem_get_ggtt_alignment(dev_priv, size,
-						    i915_gem_object_get_tiling(obj),
-						    flags & PIN_MAPPABLE));
-
-	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
-
-	end = vma->vm->total;
-	if (flags & PIN_MAPPABLE)
-		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
-	if (flags & PIN_ZONE_4G)
-		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
-
-	/* If binding the object/GGTT view requires more space than the entire
-	 * aperture has, reject it early before evicting everything in a vain
-	 * attempt to find space.
-	 */
-	if (size > end) {
-		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
-			  size, obj->base.size,
-			  flags & PIN_MAPPABLE ? "mappable" : "total",
-			  end);
-		return -E2BIG;
-	}
-
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		return ret;
-
-	if (flags & PIN_OFFSET_FIXED) {
-		u64 offset = flags & PIN_OFFSET_MASK;
-		if (offset & (alignment - 1) || offset > end - size) {
-			ret = -EINVAL;
-			goto err_unpin;
-		}
-
-		vma->node.start = offset;
-		vma->node.size = size;
-		vma->node.color = obj->cache_level;
-		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
-		if (ret) {
-			ret = i915_gem_evict_for_vma(vma);
-			if (ret == 0)
-				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+		list_for_each_entry(tl, &i915->gt.timelines, link) {
+			ret = wait_for_timeline(tl, flags);
 			if (ret)
-				goto err_unpin;
+				return ret;
 		}
 	} else {
-		u32 search_flag, alloc_flag;
-
-		if (flags & PIN_HIGH) {
-			search_flag = DRM_MM_SEARCH_BELOW;
-			alloc_flag = DRM_MM_CREATE_TOP;
-		} else {
-			search_flag = DRM_MM_SEARCH_DEFAULT;
-			alloc_flag = DRM_MM_CREATE_DEFAULT;
-		}
-
-		/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
-		 * so we know that we always have a minimum alignment of 4096.
- * The drm_mm range manager is optimised to return results - * with zero alignment, so where possible use the optimal - * path. - */ - if (alignment <= 4096) - alignment = 0; - -search_free: - ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm, - &vma->node, - size, alignment, - obj->cache_level, - start, end, - search_flag, - alloc_flag); - if (ret) { - ret = i915_gem_evict_something(vma->vm, size, alignment, - obj->cache_level, - start, end, - flags); - if (ret == 0) - goto search_free; - - goto err_unpin; - } - - GEM_BUG_ON(vma->node.start < start); - GEM_BUG_ON(vma->node.start + vma->node.size > end); + ret = wait_for_timeline(&i915->gt.global_timeline, flags); + if (ret) + return ret; } - GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level)); - - list_move_tail(&obj->global_link, &dev_priv->mm.bound_list); - list_move_tail(&vma->vm_link, &vma->vm->inactive_list); - obj->bind_count++; - GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); return 0; - -err_unpin: - i915_gem_object_unpin_pages(obj); - return ret; } -bool -i915_gem_clflush_object(struct drm_i915_gem_object *obj, - bool force) +void i915_gem_clflush_object(struct drm_i915_gem_object *obj, + bool force) { /* If we don't have a page list set up, then we're not pinned * to GPU, and we can ignore the cache flush because it'll happen * again at bind time. */ if (!obj->mm.pages) - return false; + return; /* * Stolen memory is always coherent with the GPU as it is explicitly * marked as wc by the system, or the system is cache-coherent. */ if (obj->stolen || obj->phys_handle) - return false; + return; /* If the GPU is snooping the contents of the CPU cache, * we do not need to manually clear the CPU cache lines. However, @@ -3224,14 +3055,12 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj, */ if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) { obj->cache_dirty = true; - return false; + return; } trace_i915_gem_object_clflush(obj); drm_clflush_sg(obj->mm.pages); obj->cache_dirty = false; - - return true; } /** Flushes the GTT write domain for the object if it's dirty. */ @@ -3277,9 +3106,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) return; - if (i915_gem_clflush_object(obj, obj->pin_display)) - i915_gem_chipset_flush(to_i915(obj->base.dev)); - + i915_gem_clflush_object(obj, obj->pin_display); intel_fb_obj_flush(obj, false, ORIGIN_CPU); obj->base.write_domain = 0; @@ -3378,12 +3205,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { struct i915_vma *vma; - int ret = 0; + int ret; lockdep_assert_held(&obj->base.dev->struct_mutex); if (obj->cache_level == cache_level) - goto out; + return 0; /* Inspect the list of currently bound VMA and unbind any that would * be invalid given the new cache-level. This is principally to @@ -3435,7 +3262,8 @@ restart: if (ret) return ret; - if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) { + if (!HAS_LLC(to_i915(obj->base.dev)) && + cache_level != I915_CACHE_NONE) { /* Access to snoopable pages through the GTT is * incoherent and on some machines causes a hard * lockup. 
Relinquish the CPU mmaping to force @@ -3477,20 +3305,14 @@ restart: } } + if (obj->base.write_domain == I915_GEM_DOMAIN_CPU && + cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) + obj->cache_dirty = true; + list_for_each_entry(vma, &obj->vma_list, obj_link) vma->node.color = cache_level; obj->cache_level = cache_level; -out: - /* Flush the dirty CPU caches to the backing storage so that the - * object is now coherent at its new cache level (with respect - * to the access domain). - */ - if (obj->cache_dirty && cpu_write_needs_clflush(obj)) { - if (i915_gem_clflush_object(obj, true)) - i915_gem_chipset_flush(to_i915(obj->base.dev)); - } - return 0; } @@ -3646,7 +3468,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, vma->display_alignment = max_t(u64, vma->display_alignment, alignment); - i915_gem_object_flush_cpu_write_domain(obj); + /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */ + if (obj->cache_dirty) { + i915_gem_clflush_object(obj, true); + intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB); + } old_write_domain = obj->base.write_domain; old_read_domains = obj->base.read_domains; @@ -3798,100 +3624,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) return ret < 0 ? ret : 0; } -static bool -i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) -{ - if (!drm_mm_node_allocated(&vma->node)) - return false; - - if (vma->node.size < size) - return true; - - if (alignment && vma->node.start & (alignment - 1)) - return true; - - if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma)) - return true; - - if (flags & PIN_OFFSET_BIAS && - vma->node.start < (flags & PIN_OFFSET_MASK)) - return true; - - if (flags & PIN_OFFSET_FIXED && - vma->node.start != (flags & PIN_OFFSET_MASK)) - return true; - - return false; -} - -void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) -{ - struct drm_i915_gem_object *obj = vma->obj; - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - bool mappable, fenceable; - u32 fence_size, fence_alignment; - - fence_size = i915_gem_get_ggtt_size(dev_priv, - vma->size, - i915_gem_object_get_tiling(obj)); - fence_alignment = i915_gem_get_ggtt_alignment(dev_priv, - vma->size, - i915_gem_object_get_tiling(obj), - true); - - fenceable = (vma->node.size == fence_size && - (vma->node.start & (fence_alignment - 1)) == 0); - - mappable = (vma->node.start + fence_size <= - dev_priv->ggtt.mappable_end); - - /* - * Explicitly disable for rotated VMA since the display does not - * need the fence and the VMA is not accessible to other users. 
- */ - if (mappable && fenceable && - vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED) - vma->flags |= I915_VMA_CAN_FENCE; - else - vma->flags &= ~I915_VMA_CAN_FENCE; -} - -int __i915_vma_do_pin(struct i915_vma *vma, - u64 size, u64 alignment, u64 flags) -{ - unsigned int bound = vma->flags; - int ret; - - lockdep_assert_held(&vma->vm->dev->struct_mutex); - GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0); - GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma)); - - if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) { - ret = -EBUSY; - goto err; - } - - if ((bound & I915_VMA_BIND_MASK) == 0) { - ret = i915_vma_insert(vma, size, alignment, flags); - if (ret) - goto err; - } - - ret = i915_vma_bind(vma, vma->obj->cache_level, flags); - if (ret) - goto err; - - if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND) - __i915_vma_set_map_and_fenceable(vma); - - GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); - return 0; - -err: - __i915_vma_unpin(vma); - return ret; -} - struct i915_vma * i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view, @@ -4156,6 +3888,16 @@ out: return err; } +static void +frontbuffer_retire(struct i915_gem_active *active, + struct drm_i915_gem_request *request) +{ + struct drm_i915_gem_object *obj = + container_of(active, typeof(*obj), frontbuffer_write); + + intel_fb_obj_flush(obj, true, ORIGIN_CS); +} + void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops) { @@ -4173,6 +3915,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, obj->resv = &obj->__builtin_resv; obj->frontbuffer_ggtt_origin = ORIGIN_GTT; + init_request_active(&obj->frontbuffer_write, frontbuffer_retire); obj->mm.madv = I915_MADV_WILLNEED; INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); @@ -4235,7 +3978,7 @@ i915_gem_object_create(struct drm_device *dev, u64 size) obj->base.write_domain = I915_GEM_DOMAIN_CPU; obj->base.read_domains = I915_GEM_DOMAIN_CPU; - if (HAS_LLC(dev)) { + if (HAS_LLC(dev_priv)) { /* On some devices, we can have the GPU use the LLC (the CPU * cache) for about a 10% performance improvement * compared to uncached. Graphics requests other than @@ -4481,7 +4224,7 @@ int i915_gem_suspend(struct drm_device *dev) * machines is a good idea, we don't - just in case it leaves the * machine in an unusable condition. */ - if (HAS_HW_CONTEXTS(dev)) { + if (HAS_HW_CONTEXTS(dev_priv)) { int reset = intel_gpu_reset(dev_priv, ALL_ENGINES); WARN_ON(reset && reset != -ENODEV); } @@ -4500,7 +4243,7 @@ void i915_gem_resume(struct drm_device *dev) WARN_ON(dev_priv->gt.awake); mutex_lock(&dev->struct_mutex); - i915_gem_restore_gtt_mappings(dev); + i915_gem_restore_gtt_mappings(dev_priv); /* As we didn't flush the kernel context before suspend, we cannot * guarantee that the context image is complete. 
So let's just reset @@ -4511,11 +4254,9 @@ void i915_gem_resume(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); } -void i915_gem_init_swizzling(struct drm_device *dev) +void i915_gem_init_swizzling(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - - if (INTEL_INFO(dev)->gen < 5 || + if (INTEL_GEN(dev_priv) < 5 || dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) return; @@ -4574,7 +4315,7 @@ i915_gem_init_hw(struct drm_device *dev) /* Double layer security blanket, see i915_gem_init() */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9) + if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9) I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); if (IS_HASWELL(dev_priv)) @@ -4586,14 +4327,14 @@ i915_gem_init_hw(struct drm_device *dev) u32 temp = I915_READ(GEN7_MSG_CTL); temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); I915_WRITE(GEN7_MSG_CTL, temp); - } else if (INTEL_INFO(dev)->gen >= 7) { + } else if (INTEL_GEN(dev_priv) >= 7) { u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT); temp &= ~RESET_PCH_HANDSHAKE_ENABLE; I915_WRITE(HSW_NDE_RSTWRN_OPT, temp); } } - i915_gem_init_swizzling(dev); + i915_gem_init_swizzling(dev_priv); /* * At least 830 can leave some of the unused rings @@ -4605,7 +4346,7 @@ i915_gem_init_hw(struct drm_device *dev) BUG_ON(!dev_priv->kernel_context); - ret = i915_ppgtt_init_hw(dev); + ret = i915_ppgtt_init_hw(dev_priv); if (ret) { DRM_ERROR("PPGTT enable HW failed %d\n", ret); goto out; @@ -4720,7 +4461,6 @@ i915_gem_cleanup_engines(struct drm_device *dev) void i915_gem_load_init_fences(struct drm_i915_private *dev_priv) { - struct drm_device *dev = &dev_priv->drm; int i; if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && @@ -4744,9 +4484,9 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) fence->id = i; list_add_tail(&fence->link, &dev_priv->mm.fence_list); } - i915_gem_restore_fences(dev); + i915_gem_restore_fences(dev_priv); - i915_gem_detect_bit_6_swizzle(dev); + i915_gem_detect_bit_6_swizzle(dev_priv); } int @@ -4770,14 +4510,18 @@ i915_gem_load_init(struct drm_device *dev) if (!dev_priv->requests) goto err_vmas; + dev_priv->dependencies = KMEM_CACHE(i915_dependency, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT); + if (!dev_priv->dependencies) + goto err_requests; + mutex_lock(&dev_priv->drm.struct_mutex); INIT_LIST_HEAD(&dev_priv->gt.timelines); - err = i915_gem_timeline_init(dev_priv, - &dev_priv->gt.global_timeline, - "[execution]"); + err = i915_gem_timeline_init__global(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); if (err) - goto err_requests; + goto err_dependencies; INIT_LIST_HEAD(&dev_priv->context_list); INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work); @@ -4805,6 +4549,8 @@ i915_gem_load_init(struct drm_device *dev) return 0; +err_dependencies: + kmem_cache_destroy(dev_priv->dependencies); err_requests: kmem_cache_destroy(dev_priv->requests); err_vmas: @@ -4821,6 +4567,12 @@ void i915_gem_load_cleanup(struct drm_device *dev) WARN_ON(!llist_empty(&dev_priv->mm.free_list)); + mutex_lock(&dev_priv->drm.struct_mutex); + i915_gem_timeline_fini(&dev_priv->gt.global_timeline); + WARN_ON(!list_empty(&dev_priv->gt.timelines)); + mutex_unlock(&dev_priv->drm.struct_mutex); + + kmem_cache_destroy(dev_priv->dependencies); kmem_cache_destroy(dev_priv->requests); kmem_cache_destroy(dev_priv->vmas); kmem_cache_destroy(dev_priv->objects); @@ -4905,7 +4657,7 @@ int i915_gem_open(struct drm_device *dev, struct 
drm_file *file) struct drm_i915_file_private *file_priv; int ret; - DRM_DEBUG_DRIVER("\n"); + DRM_DEBUG("\n"); file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); if (!file_priv) diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index 735580d72eb1..51ec793f2e20 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -28,7 +28,7 @@ #ifdef CONFIG_DRM_I915_DEBUG_GEM #define GEM_BUG_ON(expr) BUG_ON(expr) #else -#define GEM_BUG_ON(expr) +#define GEM_BUG_ON(expr) do { } while (0) #endif #define I915_NUM_ENGINES 5 diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 6dd475735f0a..1f94b8d6d83d 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -476,6 +476,7 @@ int i915_gem_context_init(struct drm_device *dev) return PTR_ERR(ctx); } + ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */ dev_priv->kernel_context = ctx; DRM_DEBUG_DRIVER("%s context support initialized\n", diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index fb5b44339f71..097d9d8c2315 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -287,7 +287,7 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) if (DBG_USE_CPU_RELOC) return DBG_USE_CPU_RELOC > 0; - return (HAS_LLC(obj->base.dev) || + return (HAS_LLC(to_i915(obj->base.dev)) || obj->base.write_domain == I915_GEM_DOMAIN_CPU || obj->cache_level != I915_CACHE_NONE); } @@ -833,7 +833,7 @@ need_reloc_mappable(struct i915_vma *vma) return false; /* See also use_cpu_reloc() */ - if (HAS_LLC(vma->obj->base.dev)) + if (HAS_LLC(to_i915(vma->obj->base.dev))) return false; if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU) @@ -1276,9 +1276,8 @@ void i915_vma_move_to_active(struct i915_vma *vma, list_move_tail(&vma->vm_link, &vma->vm->active_list); if (flags & EXEC_OBJECT_WRITE) { - i915_gem_active_set(&vma->last_write, req); - - intel_fb_obj_invalidate(obj, ORIGIN_CS); + if (intel_fb_obj_invalidate(obj, ORIGIN_CS)) + i915_gem_active_set(&obj->frontbuffer_write, req); /* update for the implicit flush after a batch */ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; @@ -1624,7 +1623,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } if (args->flags & I915_EXEC_RESOURCE_STREAMER) { - if (!HAS_RESOURCE_STREAMER(dev)) { + if (!HAS_RESOURCE_STREAMER(dev_priv)) { DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n"); return -EINVAL; } @@ -1878,7 +1877,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; exec2_list[i].alignment = exec_list[i].alignment; exec2_list[i].offset = exec_list[i].offset; - if (INTEL_INFO(dev)->gen < 4) + if (INTEL_GEN(to_i915(dev)) < 4) exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; else exec2_list[i].flags = 0; diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index cd59dbc6588c..0efa3571afc3 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -368,15 +368,14 @@ i915_vma_get_fence(struct i915_vma *vma) /** * i915_gem_restore_fences - restore fence state - * @dev: DRM device + * @dev_priv: i915 device private * * Restore the hw fence state to match the software tracking again, to be called * after a gpu reset and on resume. 
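/*
 * The GEM_BUG_ON() hunk above is the usual fix for an empty statement
 * macro: expanding to nothing leaves a bare ';' as the body of any
 * enclosing if, which compilers flag under -Wempty-body, whereas
 * do { } while (0) remains a genuine no-op statement. Standalone
 * illustration (CHECK_* names invented):
 */
#include <stdio.h>

#define CHECK_EMPTY(expr)			/* expands to nothing at all */
#define CHECK_STMT(expr) do { } while (0)	/* a real (empty) statement */

int main(void)
{
	int x = 1;

	if (x)
		CHECK_EMPTY(x);	/* body is just ';': -Wempty-body territory */

	if (x)
		CHECK_STMT(x);	/* no warning, same statement semantics */

	printf("ok\n");
	return 0;
}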
Note that on runtime suspend we only cancel * the fences, to be reacquired by the user later. */ -void i915_gem_restore_fences(struct drm_device *dev) +void i915_gem_restore_fences(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); int i; for (i = 0; i < dev_priv->num_fence_regs; i++) { @@ -451,15 +450,14 @@ void i915_gem_restore_fences(struct drm_device *dev) /** * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern - * @dev: DRM device + * @dev_priv: i915 device private * * Detects bit 6 swizzling of address lookup between IGD access and CPU * access through main memory. */ void -i915_gem_detect_bit_6_swizzle(struct drm_device *dev) +i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; @@ -473,7 +471,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) */ swizzle_x = I915_BIT_6_SWIZZLE_NONE; swizzle_y = I915_BIT_6_SWIZZLE_NONE; - } else if (INTEL_INFO(dev)->gen >= 6) { + } else if (INTEL_GEN(dev_priv) >= 6) { if (dev_priv->preserve_bios_swizzle) { if (I915_READ(DISP_ARB_CTL) & DISP_TILE_SURFACE_SWIZZLING) { diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h new file mode 100644 index 000000000000..22c4a2d01adf --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h @@ -0,0 +1,51 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __I915_FENCE_REG_H__ +#define __I915_FENCE_REG_H__ + +#include <linux/list.h> + +struct drm_i915_private; +struct i915_vma; + +struct drm_i915_fence_reg { + struct list_head link; + struct drm_i915_private *i915; + struct i915_vma *vma; + int pin_count; + int id; + /** + * Whether the tiling parameters for the currently + * associated fence register have changed. Note that + * for the purposes of tracking tiling changes we also + * treat the unfenced register, the register slot that + * the object occupies whilst it executes a fenced + * command (such as BLT on gen2/3), as a "fence". 
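/*
 * The new i915_gem_fence_reg.h above needs only <linux/list.h>
 * because drm_i915_private and i915_vma are forward-declared: a
 * struct may hold pointers to incomplete types without seeing their
 * definitions, which keeps the header dependency one-way. A
 * standalone model of that layering (every name below is invented):
 */
#include <stdio.h>

struct owner;			/* forward declaration only */

struct fence_reg {
	struct owner *owner;	/* fine: the size of *owner is never needed */
	int pin_count;
	int id;
};

/* The full definition can live entirely elsewhere. */
struct owner { int num_fence_regs; };

int main(void)
{
	struct owner o = { .num_fence_regs = 16 };
	struct fence_reg reg = { .owner = &o, .pin_count = 0, .id = 0 };

	printf("%d\n", reg.owner->num_fence_regs);
	return 0;
}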
+ */ + bool dirty; +}; + +#endif + diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index a5fafa3d4fc8..b4bde1452f2a 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -96,13 +96,6 @@ * */ -static inline struct i915_ggtt * -i915_vm_to_ggtt(struct i915_address_space *vm) -{ - GEM_BUG_ON(!i915_is_ggtt(vm)); - return container_of(vm, struct i915_ggtt, base); -} - static int i915_get_ggtt_vma_pages(struct i915_vma *vma); @@ -327,10 +320,10 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr, return pte; } -static int __setup_page_dma(struct drm_device *dev, +static int __setup_page_dma(struct drm_i915_private *dev_priv, struct i915_page_dma *p, gfp_t flags) { - struct device *kdev = &dev->pdev->dev; + struct device *kdev = &dev_priv->drm.pdev->dev; p->page = alloc_page(flags); if (!p->page) @@ -347,14 +340,16 @@ static int __setup_page_dma(struct drm_device *dev, return 0; } -static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p) +static int setup_page_dma(struct drm_i915_private *dev_priv, + struct i915_page_dma *p) { - return __setup_page_dma(dev, p, I915_GFP_DMA); + return __setup_page_dma(dev_priv, p, I915_GFP_DMA); } -static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p) +static void cleanup_page_dma(struct drm_i915_private *dev_priv, + struct i915_page_dma *p) { - struct pci_dev *pdev = dev->pdev; + struct pci_dev *pdev = dev_priv->drm.pdev; if (WARN_ON(!p->page)) return; @@ -387,8 +382,8 @@ static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr) #define kunmap_px(ppgtt, vaddr) \ kunmap_page_dma(to_i915((ppgtt)->base.dev), (vaddr)) -#define setup_px(dev, px) setup_page_dma((dev), px_base(px)) -#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px)) +#define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px)) +#define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px)) #define fill_px(dev_priv, px, v) fill_page_dma((dev_priv), px_base(px), (v)) #define fill32_px(dev_priv, px, v) \ fill_page_dma_32((dev_priv), px_base(px), (v)) @@ -416,24 +411,23 @@ static void fill_page_dma_32(struct drm_i915_private *dev_priv, } static int -setup_scratch_page(struct drm_device *dev, +setup_scratch_page(struct drm_i915_private *dev_priv, struct i915_page_dma *scratch, gfp_t gfp) { - return __setup_page_dma(dev, scratch, gfp | __GFP_ZERO); + return __setup_page_dma(dev_priv, scratch, gfp | __GFP_ZERO); } -static void cleanup_scratch_page(struct drm_device *dev, +static void cleanup_scratch_page(struct drm_i915_private *dev_priv, struct i915_page_dma *scratch) { - cleanup_page_dma(dev, scratch); + cleanup_page_dma(dev_priv, scratch); } -static struct i915_page_table *alloc_pt(struct drm_device *dev) +static struct i915_page_table *alloc_pt(struct drm_i915_private *dev_priv) { struct i915_page_table *pt; - const size_t count = INTEL_INFO(dev)->gen >= 8 ? - GEN8_PTES : GEN6_PTES; + const size_t count = INTEL_GEN(dev_priv) >= 8 ? 
GEN8_PTES : GEN6_PTES; int ret = -ENOMEM; pt = kzalloc(sizeof(*pt), GFP_KERNEL); @@ -446,7 +440,7 @@ static struct i915_page_table *alloc_pt(struct drm_device *dev) if (!pt->used_ptes) goto fail_bitmap; - ret = setup_px(dev, pt); + ret = setup_px(dev_priv, pt); if (ret) goto fail_page_m; @@ -460,9 +454,10 @@ fail_bitmap: return ERR_PTR(ret); } -static void free_pt(struct drm_device *dev, struct i915_page_table *pt) +static void free_pt(struct drm_i915_private *dev_priv, + struct i915_page_table *pt) { - cleanup_px(dev, pt); + cleanup_px(dev_priv, pt); kfree(pt->used_ptes); kfree(pt); } @@ -491,7 +486,7 @@ static void gen6_initialize_pt(struct i915_address_space *vm, fill32_px(to_i915(vm->dev), pt, scratch_pte); } -static struct i915_page_directory *alloc_pd(struct drm_device *dev) +static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv) { struct i915_page_directory *pd; int ret = -ENOMEM; @@ -505,7 +500,7 @@ static struct i915_page_directory *alloc_pd(struct drm_device *dev) if (!pd->used_pdes) goto fail_bitmap; - ret = setup_px(dev, pd); + ret = setup_px(dev_priv, pd); if (ret) goto fail_page_m; @@ -519,10 +514,11 @@ fail_bitmap: return ERR_PTR(ret); } -static void free_pd(struct drm_device *dev, struct i915_page_directory *pd) +static void free_pd(struct drm_i915_private *dev_priv, + struct i915_page_directory *pd) { if (px_page(pd)) { - cleanup_px(dev, pd); + cleanup_px(dev_priv, pd); kfree(pd->used_pdes); kfree(pd); } @@ -538,10 +534,10 @@ static void gen8_initialize_pd(struct i915_address_space *vm, fill_px(to_i915(vm->dev), pd, scratch_pde); } -static int __pdp_init(struct drm_device *dev, +static int __pdp_init(struct drm_i915_private *dev_priv, struct i915_page_directory_pointer *pdp) { - size_t pdpes = I915_PDPES_PER_PDP(dev); + size_t pdpes = I915_PDPES_PER_PDP(dev_priv); pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), @@ -570,22 +566,22 @@ static void __pdp_fini(struct i915_page_directory_pointer *pdp) } static struct -i915_page_directory_pointer *alloc_pdp(struct drm_device *dev) +i915_page_directory_pointer *alloc_pdp(struct drm_i915_private *dev_priv) { struct i915_page_directory_pointer *pdp; int ret = -ENOMEM; - WARN_ON(!USES_FULL_48BIT_PPGTT(dev)); + WARN_ON(!USES_FULL_48BIT_PPGTT(dev_priv)); pdp = kzalloc(sizeof(*pdp), GFP_KERNEL); if (!pdp) return ERR_PTR(-ENOMEM); - ret = __pdp_init(dev, pdp); + ret = __pdp_init(dev_priv, pdp); if (ret) goto fail_bitmap; - ret = setup_px(dev, pdp); + ret = setup_px(dev_priv, pdp); if (ret) goto fail_page_m; @@ -599,12 +595,12 @@ fail_bitmap: return ERR_PTR(ret); } -static void free_pdp(struct drm_device *dev, +static void free_pdp(struct drm_i915_private *dev_priv, struct i915_page_directory_pointer *pdp) { __pdp_fini(pdp); - if (USES_FULL_48BIT_PPGTT(dev)) { - cleanup_px(dev, pdp); + if (USES_FULL_48BIT_PPGTT(dev_priv)) { + cleanup_px(dev_priv, pdp); kfree(pdp); } } @@ -638,7 +634,7 @@ gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt, { gen8_ppgtt_pdpe_t *page_directorypo; - if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) + if (!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev))) return; page_directorypo = kmap_px(pdp); @@ -654,7 +650,7 @@ gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt, { gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4); - WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)); + WARN_ON(!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev))); pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC); kunmap_px(ppgtt, pagemap); } @@ -714,7 +710,7 @@ static int 
gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt, */ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) { - ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask; + ppgtt->pd_dirty_rings = INTEL_INFO(to_i915(ppgtt->base.dev))->ring_mask; } /* Removes entries from a single page table, releasing it if it's empty. @@ -741,7 +737,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm, bitmap_clear(pt->used_ptes, pte, num_entries); if (bitmap_empty(pt->used_ptes, GEN8_PTES)) { - free_pt(vm->dev, pt); + free_pt(to_i915(vm->dev), pt); return true; } @@ -783,7 +779,7 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm, } if (bitmap_empty(pd->used_pdes, I915_PDES)) { - free_pd(vm->dev, pd); + free_pd(to_i915(vm->dev), pd); return true; } @@ -799,6 +795,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm, uint64_t length) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct drm_i915_private *dev_priv = to_i915(vm->dev); struct i915_page_directory *pd; uint64_t pdpe; gen8_ppgtt_pdpe_t *pdpe_vaddr; @@ -811,7 +808,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm, if (gen8_ppgtt_clear_pd(vm, pd, start, length)) { __clear_bit(pdpe, pdp->used_pdpes); - if (USES_FULL_48BIT_PPGTT(vm->dev)) { + if (USES_FULL_48BIT_PPGTT(dev_priv)) { pdpe_vaddr = kmap_px(pdp); pdpe_vaddr[pdpe] = scratch_pdpe; kunmap_px(ppgtt, pdpe_vaddr); @@ -821,9 +818,9 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm, mark_tlbs_dirty(ppgtt); - if (USES_FULL_48BIT_PPGTT(vm->dev) && - bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(vm->dev))) { - free_pdp(vm->dev, pdp); + if (USES_FULL_48BIT_PPGTT(dev_priv) && + bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv))) { + free_pdp(dev_priv, pdp); return true; } @@ -846,7 +843,7 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm, gen8_ppgtt_pml4e_t scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC); - GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->dev)); + GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))); gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { if (WARN_ON(!pml4->pdps[pml4e])) @@ -866,7 +863,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - if (USES_FULL_48BIT_PPGTT(vm->dev)) + if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length); else gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length); @@ -901,7 +898,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm, kunmap_px(ppgtt, pt_vaddr); pt_vaddr = NULL; if (++pde == I915_PDES) { - if (++pdpe == I915_PDPES_PER_PDP(vm->dev)) + if (++pdpe == I915_PDPES_PER_PDP(to_i915(vm->dev))) break; pde = 0; } @@ -924,7 +921,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0); - if (!USES_FULL_48BIT_PPGTT(vm->dev)) { + if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) { gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start, cache_level); } else { @@ -939,7 +936,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, } } -static void gen8_free_page_tables(struct drm_device *dev, +static void gen8_free_page_tables(struct drm_i915_private *dev_priv, struct i915_page_directory *pd) { int i; @@ -951,34 +948,34 @@ static void gen8_free_page_tables(struct drm_device *dev, if (WARN_ON(!pd->page_table[i])) continue; - free_pt(dev, pd->page_table[i]); + free_pt(dev_priv, 
pd->page_table[i]); pd->page_table[i] = NULL; } } static int gen8_init_scratch(struct i915_address_space *vm) { - struct drm_device *dev = vm->dev; + struct drm_i915_private *dev_priv = to_i915(vm->dev); int ret; - ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA); + ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA); if (ret) return ret; - vm->scratch_pt = alloc_pt(dev); + vm->scratch_pt = alloc_pt(dev_priv); if (IS_ERR(vm->scratch_pt)) { ret = PTR_ERR(vm->scratch_pt); goto free_scratch_page; } - vm->scratch_pd = alloc_pd(dev); + vm->scratch_pd = alloc_pd(dev_priv); if (IS_ERR(vm->scratch_pd)) { ret = PTR_ERR(vm->scratch_pd); goto free_pt; } - if (USES_FULL_48BIT_PPGTT(dev)) { - vm->scratch_pdp = alloc_pdp(dev); + if (USES_FULL_48BIT_PPGTT(dev_priv)) { + vm->scratch_pdp = alloc_pdp(dev_priv); if (IS_ERR(vm->scratch_pdp)) { ret = PTR_ERR(vm->scratch_pdp); goto free_pd; @@ -987,17 +984,17 @@ static int gen8_init_scratch(struct i915_address_space *vm) gen8_initialize_pt(vm, vm->scratch_pt); gen8_initialize_pd(vm, vm->scratch_pd); - if (USES_FULL_48BIT_PPGTT(dev)) + if (USES_FULL_48BIT_PPGTT(dev_priv)) gen8_initialize_pdp(vm, vm->scratch_pdp); return 0; free_pd: - free_pd(dev, vm->scratch_pd); + free_pd(dev_priv, vm->scratch_pd); free_pt: - free_pt(dev, vm->scratch_pt); + free_pt(dev_priv, vm->scratch_pt); free_scratch_page: - cleanup_scratch_page(dev, &vm->scratch_page); + cleanup_scratch_page(dev_priv, &vm->scratch_page); return ret; } @@ -1035,54 +1032,56 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) static void gen8_free_scratch(struct i915_address_space *vm) { - struct drm_device *dev = vm->dev; + struct drm_i915_private *dev_priv = to_i915(vm->dev); - if (USES_FULL_48BIT_PPGTT(dev)) - free_pdp(dev, vm->scratch_pdp); - free_pd(dev, vm->scratch_pd); - free_pt(dev, vm->scratch_pt); - cleanup_scratch_page(dev, &vm->scratch_page); + if (USES_FULL_48BIT_PPGTT(dev_priv)) + free_pdp(dev_priv, vm->scratch_pdp); + free_pd(dev_priv, vm->scratch_pd); + free_pt(dev_priv, vm->scratch_pt); + cleanup_scratch_page(dev_priv, &vm->scratch_page); } -static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev, +static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv, struct i915_page_directory_pointer *pdp) { int i; - for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) { + for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)) { if (WARN_ON(!pdp->page_directory[i])) continue; - gen8_free_page_tables(dev, pdp->page_directory[i]); - free_pd(dev, pdp->page_directory[i]); + gen8_free_page_tables(dev_priv, pdp->page_directory[i]); + free_pd(dev_priv, pdp->page_directory[i]); } - free_pdp(dev, pdp); + free_pdp(dev_priv, pdp); } static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt) { + struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); int i; for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) { if (WARN_ON(!ppgtt->pml4.pdps[i])) continue; - gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]); + gen8_ppgtt_cleanup_3lvl(dev_priv, ppgtt->pml4.pdps[i]); } - cleanup_px(ppgtt->base.dev, &ppgtt->pml4); + cleanup_px(dev_priv, &ppgtt->pml4); } static void gen8_ppgtt_cleanup(struct i915_address_space *vm) { + struct drm_i915_private *dev_priv = to_i915(vm->dev); struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - if (intel_vgpu_active(to_i915(vm->dev))) + if (intel_vgpu_active(dev_priv)) gen8_ppgtt_notify_vgt(ppgtt, false); - if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) - 
gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp); + if (!USES_FULL_48BIT_PPGTT(dev_priv)) + gen8_ppgtt_cleanup_3lvl(dev_priv, &ppgtt->pdp); else gen8_ppgtt_cleanup_4lvl(ppgtt); @@ -1113,7 +1112,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm, uint64_t length, unsigned long *new_pts) { - struct drm_device *dev = vm->dev; + struct drm_i915_private *dev_priv = to_i915(vm->dev); struct i915_page_table *pt; uint32_t pde; @@ -1125,7 +1124,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm, continue; } - pt = alloc_pt(dev); + pt = alloc_pt(dev_priv); if (IS_ERR(pt)) goto unwind_out; @@ -1139,7 +1138,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm, unwind_out: for_each_set_bit(pde, new_pts, I915_PDES) - free_pt(dev, pd->page_table[pde]); + free_pt(dev_priv, pd->page_table[pde]); return -ENOMEM; } @@ -1174,10 +1173,10 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm, uint64_t length, unsigned long *new_pds) { - struct drm_device *dev = vm->dev; + struct drm_i915_private *dev_priv = to_i915(vm->dev); struct i915_page_directory *pd; uint32_t pdpe; - uint32_t pdpes = I915_PDPES_PER_PDP(dev); + uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv); WARN_ON(!bitmap_empty(new_pds, pdpes)); @@ -1185,7 +1184,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm, if (test_bit(pdpe, pdp->used_pdpes)) continue; - pd = alloc_pd(dev); + pd = alloc_pd(dev_priv); if (IS_ERR(pd)) goto unwind_out; @@ -1199,7 +1198,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm, unwind_out: for_each_set_bit(pdpe, new_pds, pdpes) - free_pd(dev, pdp->page_directory[pdpe]); + free_pd(dev_priv, pdp->page_directory[pdpe]); return -ENOMEM; } @@ -1227,7 +1226,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm, uint64_t length, unsigned long *new_pdps) { - struct drm_device *dev = vm->dev; + struct drm_i915_private *dev_priv = to_i915(vm->dev); struct i915_page_directory_pointer *pdp; uint32_t pml4e; @@ -1235,7 +1234,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm, gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { if (!test_bit(pml4e, pml4->used_pml4es)) { - pdp = alloc_pdp(dev); + pdp = alloc_pdp(dev_priv); if (IS_ERR(pdp)) goto unwind_out; @@ -1253,7 +1252,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm, unwind_out: for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4) - free_pdp(dev, pml4->pdps[pml4e]); + free_pdp(dev_priv, pml4->pdps[pml4e]); return -ENOMEM; } @@ -1302,12 +1301,12 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); unsigned long *new_page_dirs, *new_page_tables; - struct drm_device *dev = vm->dev; + struct drm_i915_private *dev_priv = to_i915(vm->dev); struct i915_page_directory *pd; const uint64_t orig_start = start; const uint64_t orig_length = length; uint32_t pdpe; - uint32_t pdpes = I915_PDPES_PER_PDP(dev); + uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv); int ret; /* Wrap is never okay since we can only represent 48b, and we don't @@ -1395,11 +1394,12 @@ err_out: for_each_set_bit(temp, new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES), I915_PDES) - free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]); + free_pt(dev_priv, + pdp->page_directory[pdpe]->page_table[temp]); } for_each_set_bit(pdpe, new_page_dirs, pdpes) - free_pd(dev, pdp->page_directory[pdpe]); + free_pd(dev_priv, pdp->page_directory[pdpe]); free_gen8_temp_bitmaps(new_page_dirs, 
new_page_tables); mark_tlbs_dirty(ppgtt); @@ -1450,7 +1450,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm, err_out: for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4) - gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]); + gen8_ppgtt_cleanup_3lvl(to_i915(vm->dev), pml4->pdps[pml4e]); return ret; } @@ -1460,7 +1460,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm, { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - if (USES_FULL_48BIT_PPGTT(vm->dev)) + if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length); else return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length); @@ -1531,7 +1531,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC); - if (!USES_FULL_48BIT_PPGTT(vm->dev)) { + if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) { gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m); } else { uint64_t pml4e; @@ -1551,7 +1551,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt) { unsigned long *new_page_dirs, *new_page_tables; - uint32_t pdpes = I915_PDPES_PER_PDP(dev); + uint32_t pdpes = I915_PDPES_PER_PDP(to_i915(ppgtt->base.dev)); int ret; /* We allocate temp bitmap for page tables for no gain @@ -1584,6 +1584,7 @@ static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt) */ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) { + struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); int ret; ret = gen8_init_scratch(&ppgtt->base); @@ -1599,8 +1600,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) ppgtt->base.bind_vma = ppgtt_bind_vma; ppgtt->debug_dump = gen8_dump_ppgtt; - if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { - ret = setup_px(ppgtt->base.dev, &ppgtt->pml4); + if (USES_FULL_48BIT_PPGTT(dev_priv)) { + ret = setup_px(dev_priv, &ppgtt->pml4); if (ret) goto free_scratch; @@ -1609,7 +1610,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) ppgtt->base.total = 1ULL << 48; ppgtt->switch_mm = gen8_48b_mm_switch; } else { - ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp); + ret = __pdp_init(dev_priv, &ppgtt->pdp); if (ret) goto free_scratch; @@ -1619,14 +1620,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) 0, 0, GEN8_PML4E_SHIFT); - if (intel_vgpu_active(to_i915(ppgtt->base.dev))) { + if (intel_vgpu_active(dev_priv)) { ret = gen8_preallocate_top_level_pdps(ppgtt); if (ret) goto free_scratch; } } - if (intel_vgpu_active(to_i915(ppgtt->base.dev))) + if (intel_vgpu_active(dev_priv)) gen8_ppgtt_notify_vgt(ppgtt, true); return 0; @@ -1801,22 +1802,21 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, return 0; } -static void gen8_ppgtt_enable(struct drm_device *dev) +static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; enum intel_engine_id id; for_each_engine(engine, dev_priv, id) { - u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0; + u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ? 
+ GEN8_GFX_PPGTT_48B : 0; I915_WRITE(RING_MODE_GEN7(engine), _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level)); } } -static void gen7_ppgtt_enable(struct drm_device *dev) +static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; uint32_t ecochk, ecobits; enum intel_engine_id id; @@ -1840,9 +1840,8 @@ static void gen7_ppgtt_enable(struct drm_device *dev) } } -static void gen6_ppgtt_enable(struct drm_device *dev) +static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); uint32_t ecochk, gab_ctl, ecobits; ecobits = I915_READ(GAC_ECO_BITS); @@ -1928,8 +1927,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, uint64_t start_in, uint64_t length_in) { DECLARE_BITMAP(new_page_tables, I915_PDES); - struct drm_device *dev = vm->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(vm->dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_page_table *pt; @@ -1959,7 +1957,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, /* We've already allocated a page table */ WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES)); - pt = alloc_pt(dev); + pt = alloc_pt(dev_priv); if (IS_ERR(pt)) { ret = PTR_ERR(pt); goto unwind_out; @@ -2007,7 +2005,7 @@ unwind_out: struct i915_page_table *pt = ppgtt->pd.page_table[pde]; ppgtt->pd.page_table[pde] = vm->scratch_pt; - free_pt(vm->dev, pt); + free_pt(dev_priv, pt); } mark_tlbs_dirty(ppgtt); @@ -2016,16 +2014,16 @@ unwind_out: static int gen6_init_scratch(struct i915_address_space *vm) { - struct drm_device *dev = vm->dev; + struct drm_i915_private *dev_priv = to_i915(vm->dev); int ret; - ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA); + ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA); if (ret) return ret; - vm->scratch_pt = alloc_pt(dev); + vm->scratch_pt = alloc_pt(dev_priv); if (IS_ERR(vm->scratch_pt)) { - cleanup_scratch_page(dev, &vm->scratch_page); + cleanup_scratch_page(dev_priv, &vm->scratch_page); return PTR_ERR(vm->scratch_pt); } @@ -2036,17 +2034,17 @@ static int gen6_init_scratch(struct i915_address_space *vm) static void gen6_free_scratch(struct i915_address_space *vm) { - struct drm_device *dev = vm->dev; + struct drm_i915_private *dev_priv = to_i915(vm->dev); - free_pt(dev, vm->scratch_pt); - cleanup_scratch_page(dev, &vm->scratch_page); + free_pt(dev_priv, vm->scratch_pt); + cleanup_scratch_page(dev_priv, &vm->scratch_page); } static void gen6_ppgtt_cleanup(struct i915_address_space *vm) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_page_directory *pd = &ppgtt->pd; - struct drm_device *dev = vm->dev; + struct drm_i915_private *dev_priv = to_i915(vm->dev); struct i915_page_table *pt; uint32_t pde; @@ -2054,7 +2052,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) gen6_for_all_pdes(pt, pd, pde) if (pt != vm->scratch_pt) - free_pt(dev, pt); + free_pt(dev_priv, pt); gen6_free_scratch(vm); } @@ -2062,8 +2060,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) { struct i915_address_space *vm = &ppgtt->base; - struct drm_device *dev = ppgtt->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; bool 
retried = false; int ret; @@ -2128,8 +2125,7 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt, static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) { - struct drm_device *dev = ppgtt->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; int ret; @@ -2200,10 +2196,15 @@ static void i915_address_space_init(struct i915_address_space *vm, list_add_tail(&vm->global_link, &dev_priv->vm_list); } -static void gtt_write_workarounds(struct drm_device *dev) +static void i915_address_space_fini(struct i915_address_space *vm) { - struct drm_i915_private *dev_priv = to_i915(dev); + i915_gem_timeline_fini(&vm->timeline); + drm_mm_takedown(&vm->mm); + list_del(&vm->global_link); +} +static void gtt_write_workarounds(struct drm_i915_private *dev_priv) +{ /* This function is for gtt related workarounds. This function is * called on driver load and after a GPU reset, so you can place * workarounds here even if they get overwritten by GPU reset. @@ -2236,11 +2237,9 @@ static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt, return ret; } -int i915_ppgtt_init_hw(struct drm_device *dev) +int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - - gtt_write_workarounds(dev); + gtt_write_workarounds(dev_priv); /* In the case of execlists, PPGTT is enabled by the context descriptor * and the PDPs are contained within the context itself. We don't @@ -2248,17 +2247,17 @@ int i915_ppgtt_init_hw(struct drm_device *dev) if (i915.enable_execlists) return 0; - if (!USES_PPGTT(dev)) + if (!USES_PPGTT(dev_priv)) return 0; if (IS_GEN6(dev_priv)) - gen6_ppgtt_enable(dev); + gen6_ppgtt_enable(dev_priv); else if (IS_GEN7(dev_priv)) - gen7_ppgtt_enable(dev); - else if (INTEL_INFO(dev)->gen >= 8) - gen8_ppgtt_enable(dev); + gen7_ppgtt_enable(dev_priv); + else if (INTEL_GEN(dev_priv) >= 8) + gen8_ppgtt_enable(dev_priv); else - MISSING_CASE(INTEL_INFO(dev)->gen); + MISSING_CASE(INTEL_GEN(dev_priv)); return 0; } @@ -2286,7 +2285,7 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv, return ppgtt; } -void i915_ppgtt_release(struct kref *kref) +void i915_ppgtt_release(struct kref *kref) { struct i915_hw_ppgtt *ppgtt = container_of(kref, struct i915_hw_ppgtt, ref); @@ -2298,9 +2297,7 @@ void i915_ppgtt_release(struct kref *kref) WARN_ON(!list_empty(&ppgtt->base.inactive_list)); WARN_ON(!list_empty(&ppgtt->base.unbound_list)); - i915_gem_timeline_fini(&ppgtt->base.timeline); - list_del(&ppgtt->base.global_link); - drm_mm_takedown(&ppgtt->base.mm); + i915_address_space_fini(&ppgtt->base); ppgtt->base.cleanup(&ppgtt->base); kfree(ppgtt); @@ -2362,15 +2359,14 @@ static void i915_ggtt_flush(struct drm_i915_private *dev_priv) } } -void i915_gem_suspend_gtt_mappings(struct drm_device *dev) +void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; /* Don't bother messing with faults pre GEN6 as we have little * documentation supporting that it's a good idea. 
*/ - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_GEN(dev_priv) < 6) return; i915_check_and_clear_faults(dev_priv); @@ -2842,8 +2838,9 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) if (drm_mm_initialized(&ggtt->base.mm)) { intel_vgt_deballoon(dev_priv); - drm_mm_takedown(&ggtt->base.mm); - list_del(&ggtt->base.global_link); + mutex_lock(&dev_priv->drm.struct_mutex); + i915_address_space_fini(&ggtt->base); + mutex_unlock(&dev_priv->drm.struct_mutex); } ggtt->base.cleanup(&ggtt->base); @@ -2932,6 +2929,7 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl) static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) { + struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev); struct pci_dev *pdev = ggtt->base.dev->pdev; phys_addr_t phys_addr; int ret; @@ -2946,7 +2944,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) * resort to an uncached mapping. The WC issue is easily caught by the * readback check when writing GTT PTE entries. */ - if (IS_BROXTON(to_i915(ggtt->base.dev))) + if (IS_BROXTON(dev_priv)) ggtt->gsm = ioremap_nocache(phys_addr, size); else ggtt->gsm = ioremap_wc(phys_addr, size); @@ -2955,9 +2953,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) return -ENOMEM; } - ret = setup_scratch_page(ggtt->base.dev, - &ggtt->base.scratch_page, - GFP_DMA32); + ret = setup_scratch_page(dev_priv, &ggtt->base.scratch_page, GFP_DMA32); if (ret) { DRM_ERROR("Scratch setup failed\n"); /* iounmap will also get called at remove, but meh */ @@ -3046,7 +3042,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm) struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); iounmap(ggtt->gsm); - cleanup_scratch_page(vm->dev, &vm->scratch_page); + cleanup_scratch_page(to_i915(vm->dev), &vm->scratch_page); } static int gen8_gmch_probe(struct i915_ggtt *ggtt) @@ -3262,7 +3258,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) * Initialise stolen early so that we may reserve preallocated * objects for the BIOS to KMS transition. 
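With i915_address_space_fini() factored out (see the i915_gem_gtt.c hunk above), the GGTT and PPGTT teardown paths now share a single inverse of i915_address_space_init(). A hedged sketch of the resulting lifecycle — the init signature is abbreviated here, and the locking is taken from the cleanup hunk above:

/* Probe: set up the VA allocator and publish on dev_priv->vm_list. */
i915_address_space_init(&ggtt->base, dev_priv);

/* Remove: the exact inverse, under struct_mutex since vm_list and the
 * timeline are guarded by it. */
mutex_lock(&dev_priv->drm.struct_mutex);
i915_address_space_fini(&ggtt->base);	/* timeline fini, drm_mm takedown, list_del */
mutex_unlock(&dev_priv->drm.struct_mutex);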
*/ - ret = i915_gem_init_stolen(&dev_priv->drm); + ret = i915_gem_init_stolen(dev_priv); if (ret) goto out_gtt_cleanup; @@ -3281,9 +3277,8 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv) return 0; } -void i915_gem_restore_gtt_mappings(struct drm_device *dev) +void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_object *obj, *on; @@ -3318,7 +3313,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) ggtt->base.closed = false; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv)) chv_setup_private_ppat(dev_priv); else @@ -3327,7 +3322,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) return; } - if (USES_PPGTT(dev)) { + if (USES_PPGTT(dev_priv)) { struct i915_address_space *vm; list_for_each_entry(vm, &dev_priv->vm_list, global_link) { @@ -3348,176 +3343,6 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) i915_ggtt_flush(dev_priv); } -static void -i915_vma_retire(struct i915_gem_active *active, - struct drm_i915_gem_request *rq) -{ - const unsigned int idx = rq->engine->id; - struct i915_vma *vma = - container_of(active, struct i915_vma, last_read[idx]); - struct drm_i915_gem_object *obj = vma->obj; - - GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx)); - - i915_vma_clear_active(vma, idx); - if (i915_vma_is_active(vma)) - return; - - list_move_tail(&vma->vm_link, &vma->vm->inactive_list); - if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma))) - WARN_ON(i915_vma_unbind(vma)); - - GEM_BUG_ON(!i915_gem_object_is_active(obj)); - if (--obj->active_count) - return; - - /* Bump our place on the bound list to keep it roughly in LRU order - * so that we don't steal from recently used but inactive objects - * (unless we are forced to ofc!) 
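The "bump our place on the bound list" idiom in the comment above is a plain O(1) LRU trick; a self-contained sketch (kernel linked lists; the list head here is declared only for illustration):

#include <linux/list.h>

struct ex_obj {
	struct list_head global_link;
};

static LIST_HEAD(ex_bound_list);	/* head == coldest, tail == most recently used */

static void ex_mark_used(struct ex_obj *obj)
{
	/* Re-queue at the tail on every retirement so that a head-first
	 * shrinker scan reaps the coldest objects first. */
	list_move_tail(&obj->global_link, &ex_bound_list);
}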
- */ - if (obj->bind_count) - list_move_tail(&obj->global_link, &rq->i915->mm.bound_list); - - obj->mm.dirty = true; /* be paranoid */ - - if (i915_gem_object_has_active_reference(obj)) { - i915_gem_object_clear_active_reference(obj); - i915_gem_object_put(obj); - } -} - -static void -i915_ggtt_retire__write(struct i915_gem_active *active, - struct drm_i915_gem_request *request) -{ - struct i915_vma *vma = - container_of(active, struct i915_vma, last_write); - - intel_fb_obj_flush(vma->obj, true, ORIGIN_CS); -} - -void i915_vma_destroy(struct i915_vma *vma) -{ - GEM_BUG_ON(vma->node.allocated); - GEM_BUG_ON(i915_vma_is_active(vma)); - GEM_BUG_ON(!i915_vma_is_closed(vma)); - GEM_BUG_ON(vma->fence); - - list_del(&vma->vm_link); - if (!i915_vma_is_ggtt(vma)) - i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); - - kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma); -} - -void i915_vma_close(struct i915_vma *vma) -{ - GEM_BUG_ON(i915_vma_is_closed(vma)); - vma->flags |= I915_VMA_CLOSED; - - list_del(&vma->obj_link); - rb_erase(&vma->obj_node, &vma->obj->vma_tree); - - if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma)) - WARN_ON(i915_vma_unbind(vma)); -} - -static inline long vma_compare(struct i915_vma *vma, - struct i915_address_space *vm, - const struct i915_ggtt_view *view) -{ - GEM_BUG_ON(view && !i915_is_ggtt(vm)); - - if (vma->vm != vm) - return vma->vm - vm; - - if (!view) - return vma->ggtt_view.type; - - if (vma->ggtt_view.type != view->type) - return vma->ggtt_view.type - view->type; - - return memcmp(&vma->ggtt_view.params, - &view->params, - sizeof(view->params)); -} - -static struct i915_vma * -__i915_vma_create(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - const struct i915_ggtt_view *view) -{ - struct i915_vma *vma; - struct rb_node *rb, **p; - int i; - - GEM_BUG_ON(vm->closed); - - vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL); - if (vma == NULL) - return ERR_PTR(-ENOMEM); - - INIT_LIST_HEAD(&vma->exec_list); - for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) - init_request_active(&vma->last_read[i], i915_vma_retire); - init_request_active(&vma->last_write, - i915_is_ggtt(vm) ? 
i915_ggtt_retire__write : NULL); - init_request_active(&vma->last_fence, NULL); - list_add(&vma->vm_link, &vm->unbound_list); - vma->vm = vm; - vma->obj = obj; - vma->size = obj->base.size; - - if (view) { - vma->ggtt_view = *view; - if (view->type == I915_GGTT_VIEW_PARTIAL) { - vma->size = view->params.partial.size; - vma->size <<= PAGE_SHIFT; - } else if (view->type == I915_GGTT_VIEW_ROTATED) { - vma->size = - intel_rotation_info_size(&view->params.rotated); - vma->size <<= PAGE_SHIFT; - } - } - - if (i915_is_ggtt(vm)) { - vma->flags |= I915_VMA_GGTT; - list_add(&vma->obj_link, &obj->vma_list); - } else { - i915_ppgtt_get(i915_vm_to_ppgtt(vm)); - list_add_tail(&vma->obj_link, &obj->vma_list); - } - - rb = NULL; - p = &obj->vma_tree.rb_node; - while (*p) { - struct i915_vma *pos; - - rb = *p; - pos = rb_entry(rb, struct i915_vma, obj_node); - if (vma_compare(pos, vm, view) < 0) - p = &rb->rb_right; - else - p = &rb->rb_left; - } - rb_link_node(&vma->obj_node, rb, p); - rb_insert_color(&vma->obj_node, &obj->vma_tree); - - return vma; -} - -struct i915_vma * -i915_vma_create(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - const struct i915_ggtt_view *view) -{ - lockdep_assert_held(&obj->base.dev->struct_mutex); - GEM_BUG_ON(view && !i915_is_ggtt(vm)); - GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view)); - - return __i915_vma_create(obj, vm, view); -} - struct i915_vma * i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, struct i915_address_space *vm, @@ -3530,7 +3355,7 @@ i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node); long cmp; - cmp = vma_compare(vma, vm, view); + cmp = i915_vma_compare(vma, vm, view); if (cmp == 0) return vma; @@ -3555,7 +3380,7 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, vma = i915_gem_obj_to_vma(obj, vm, view); if (!vma) { - vma = __i915_vma_create(obj, vm, view); + vma = i915_vma_create(obj, vm, view); GEM_BUG_ON(vma != i915_gem_obj_to_vma(obj, vm, view)); } @@ -3747,99 +3572,3 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma) return ret; } -/** - * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space. - * @vma: VMA to map - * @cache_level: mapping cache level - * @flags: flags like global or local mapping - * - * DMA addresses are taken from the scatter-gather table of this object (or of - * this VMA in case of non-default GGTT views) and PTE entries set up. - * Note that DMA addresses are also the only part of the SG table we care about. - */ -int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, - u32 flags) -{ - u32 bind_flags; - u32 vma_flags; - int ret; - - if (WARN_ON(flags == 0)) - return -EINVAL; - - bind_flags = 0; - if (flags & PIN_GLOBAL) - bind_flags |= I915_VMA_GLOBAL_BIND; - if (flags & PIN_USER) - bind_flags |= I915_VMA_LOCAL_BIND; - - vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); - if (flags & PIN_UPDATE) - bind_flags |= vma_flags; - else - bind_flags &= ~vma_flags; - if (bind_flags == 0) - return 0; - - if (vma_flags == 0 && vma->vm->allocate_va_range) { - trace_i915_va_alloc(vma); - ret = vma->vm->allocate_va_range(vma->vm, - vma->node.start, - vma->node.size); - if (ret) - return ret; - } - - ret = vma->vm->bind_vma(vma, cache_level, bind_flags); - if (ret) - return ret; - - vma->flags |= bind_flags; - return 0; -} - -void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) -{ - void __iomem *ptr; - - /* Access through the GTT requires the device to be awake. 
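i915_vma_pin_iomap(), deleted here on its way to i915_vma.c, is an instance of the map-once-and-cache idiom. Stripped to its essentials, with illustrative names:

struct ex_vma {
	void __iomem *iomap;	/* cached mapping, NULL until first use */
};

void __iomem *ex_expensive_map(struct ex_vma *v);	/* may return NULL */

static void __iomem *ex_get_iomap(struct ex_vma *v)
{
	/* The NULL check and the store are serialised by struct_mutex in
	 * the real code; the mapping then lives until the VMA is destroyed. */
	if (!v->iomap)
		v->iomap = ex_expensive_map(v);

	return v->iomap;	/* caller must handle NULL */
}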
*/ - assert_rpm_wakelock_held(to_i915(vma->vm->dev)); - - lockdep_assert_held(&vma->vm->dev->struct_mutex); - if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) - return IO_ERR_PTR(-ENODEV); - - GEM_BUG_ON(!i915_vma_is_ggtt(vma)); - GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0); - - ptr = vma->iomap; - if (ptr == NULL) { - ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable, - vma->node.start, - vma->node.size); - if (ptr == NULL) - return IO_ERR_PTR(-ENOMEM); - - vma->iomap = ptr; - } - - __i915_vma_pin(vma); - return ptr; -} - -void i915_vma_unpin_and_release(struct i915_vma **p_vma) -{ - struct i915_vma *vma; - struct drm_i915_gem_object *obj; - - vma = fetch_and_zero(p_vma); - if (!vma) - return; - - obj = vma->obj; - - i915_vma_unpin(vma); - i915_vma_close(vma); - - __i915_gem_object_release_unless_active(obj); -} diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index c23ef9db1f53..4f35be4c26c7 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -35,7 +35,9 @@ #define __I915_GEM_GTT_H__ #include <linux/io-mapping.h> +#include <linux/mm.h> +#include "i915_gem_timeline.h" #include "i915_gem_request.h" #define I915_FENCE_REG_NONE -1 @@ -118,8 +120,8 @@ typedef uint64_t gen8_ppgtt_pml4e_t; #define GEN8_LEGACY_PDPES 4 #define GEN8_PTES I915_PTES(sizeof(gen8_pte_t)) -#define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ?\ - GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES) +#define I915_PDPES_PER_PDP(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\ + GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES) #define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD) #define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */ @@ -138,6 +140,8 @@ typedef uint64_t gen8_ppgtt_pml4e_t; #define GEN8_PPAT_ELLC_OVERRIDE (0<<2) #define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8)) +struct sg_table; + enum i915_ggtt_view_type { I915_GGTT_VIEW_NORMAL = 0, I915_GGTT_VIEW_ROTATED, @@ -168,135 +172,7 @@ extern const struct i915_ggtt_view i915_ggtt_view_rotated; enum i915_cache_level; -/** - * A VMA represents a GEM BO that is bound into an address space. Therefore, a - * VMA's presence cannot be guaranteed before binding, or after unbinding the - * object into/from the address space. - * - * To make things as simple as possible (ie. no refcounting), a VMA's lifetime - * will always be <= an objects lifetime. So object refcounting should cover us. - */ -struct i915_vma { - struct drm_mm_node node; - struct drm_i915_gem_object *obj; - struct i915_address_space *vm; - struct drm_i915_fence_reg *fence; - struct sg_table *pages; - void __iomem *iomap; - u64 size; - u64 display_alignment; - - unsigned int flags; - /** - * How many users have pinned this object in GTT space. The following - * users can each hold at most one reference: pwrite/pread, execbuffer - * (objects are not allowed multiple times for the same batchbuffer), - * and the framebuffer code. When switching/pageflipping, the - * framebuffer code has at most two buffers pinned per crtc. - * - * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 - * bits with absolutely no headroom. So use 4 bits. 
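The flag layout that follows packs a pin counter into the low bits of vma->flags, so pinning is a bare increment; a condensed sketch of the arithmetic (constants renamed, mirroring __i915_vma_pin()/__i915_vma_unpin() further down in this header):

#define EX_PIN_MASK	0xf		/* low bits: pin count */
#define EX_PIN_OVERFLOW	(1 << 5)	/* must-be-zero guard bit */

static inline void ex_pin(unsigned int *flags)
{
	(*flags)++;	/* a carry out of the counter would eventually set
			 * the guard bit, which the driver traps with
			 * GEM_BUG_ON() instead of corrupting the real
			 * flag bits stored above the counter */
}

static inline int ex_is_pinned(unsigned int flags)
{
	return flags & EX_PIN_MASK;
}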
- */ -#define I915_VMA_PIN_MASK 0xf -#define I915_VMA_PIN_OVERFLOW BIT(5) - - /** Flags and address space this VMA is bound to */ -#define I915_VMA_GLOBAL_BIND BIT(6) -#define I915_VMA_LOCAL_BIND BIT(7) -#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW) - -#define I915_VMA_GGTT BIT(8) -#define I915_VMA_CAN_FENCE BIT(9) -#define I915_VMA_CLOSED BIT(10) - - unsigned int active; - struct i915_gem_active last_read[I915_NUM_ENGINES]; - struct i915_gem_active last_write; - struct i915_gem_active last_fence; - - /** - * Support different GGTT views into the same object. - * This means there can be multiple VMA mappings per object and per VM. - * i915_ggtt_view_type is used to distinguish between those entries. - * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also - * assumed in GEM functions which take no ggtt view parameter. - */ - struct i915_ggtt_view ggtt_view; - - /** This object's place on the active/inactive lists */ - struct list_head vm_link; - - struct list_head obj_link; /* Link in the object's VMA list */ - struct rb_node obj_node; - - /** This vma's place in the batchbuffer or on the eviction list */ - struct list_head exec_list; - - /** - * Used for performing relocations during execbuffer insertion. - */ - struct hlist_node exec_node; - unsigned long exec_handle; - struct drm_i915_gem_exec_object2 *exec_entry; -}; - -struct i915_vma * -i915_vma_create(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - const struct i915_ggtt_view *view); -void i915_vma_unpin_and_release(struct i915_vma **p_vma); - -static inline bool i915_vma_is_ggtt(const struct i915_vma *vma) -{ - return vma->flags & I915_VMA_GGTT; -} - -static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma) -{ - return vma->flags & I915_VMA_CAN_FENCE; -} - -static inline bool i915_vma_is_closed(const struct i915_vma *vma) -{ - return vma->flags & I915_VMA_CLOSED; -} - -static inline unsigned int i915_vma_get_active(const struct i915_vma *vma) -{ - return vma->active; -} - -static inline bool i915_vma_is_active(const struct i915_vma *vma) -{ - return i915_vma_get_active(vma); -} - -static inline void i915_vma_set_active(struct i915_vma *vma, - unsigned int engine) -{ - vma->active |= BIT(engine); -} - -static inline void i915_vma_clear_active(struct i915_vma *vma, - unsigned int engine) -{ - vma->active &= ~BIT(engine); -} - -static inline bool i915_vma_has_active_engine(const struct i915_vma *vma, - unsigned int engine) -{ - return vma->active & BIT(engine); -} - -static inline u32 i915_ggtt_offset(const struct i915_vma *vma) -{ - GEM_BUG_ON(!i915_vma_is_ggtt(vma)); - GEM_BUG_ON(!vma->node.allocated); - GEM_BUG_ON(upper_32_bits(vma->node.start)); - GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1)); - return lower_32_bits(vma->node.start); -} +struct i915_vma; struct i915_page_dma { struct page *page; @@ -606,13 +482,20 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n) px_dma(ppgtt->base.scratch_pd); } +static inline struct i915_ggtt * +i915_vm_to_ggtt(struct i915_address_space *vm) +{ + GEM_BUG_ON(!i915_is_ggtt(vm)); + return container_of(vm, struct i915_ggtt, base); +} + int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv); int i915_ggtt_init_hw(struct drm_i915_private *dev_priv); int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv); int i915_gem_init_ggtt(struct drm_i915_private *dev_priv); void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv); -int 
i915_ppgtt_init_hw(struct drm_device *dev); +int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv); void i915_ppgtt_release(struct kref *kref); struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv, struct drm_i915_file_private *fpriv, @@ -629,8 +512,8 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt) } void i915_check_and_clear_faults(struct drm_i915_private *dev_priv); -void i915_gem_suspend_gtt_mappings(struct drm_device *dev); -void i915_gem_restore_gtt_mappings(struct drm_device *dev); +void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv); +void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv); int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, struct sg_table *pages); @@ -653,88 +536,4 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, #define PIN_OFFSET_FIXED BIT(11) #define PIN_OFFSET_MASK (~4095) -int __i915_vma_do_pin(struct i915_vma *vma, - u64 size, u64 alignment, u64 flags); -static inline int __must_check -i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) -{ - BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW); - BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND); - BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND); - - /* Pin early to prevent the shrinker/eviction logic from destroying - * our vma as we insert and bind. - */ - if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) - return 0; - - return __i915_vma_do_pin(vma, size, alignment, flags); -} - -static inline int i915_vma_pin_count(const struct i915_vma *vma) -{ - return vma->flags & I915_VMA_PIN_MASK; -} - -static inline bool i915_vma_is_pinned(const struct i915_vma *vma) -{ - return i915_vma_pin_count(vma); -} - -static inline void __i915_vma_pin(struct i915_vma *vma) -{ - vma->flags++; - GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW); -} - -static inline void __i915_vma_unpin(struct i915_vma *vma) -{ - GEM_BUG_ON(!i915_vma_is_pinned(vma)); - vma->flags--; -} - -static inline void i915_vma_unpin(struct i915_vma *vma) -{ - GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); - __i915_vma_unpin(vma); -} - -/** - * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture - * @vma: VMA to iomap - * - * The passed in VMA has to be pinned in the global GTT mappable region. - * An extra pinning of the VMA is acquired for the return iomapping, - * the caller must call i915_vma_unpin_iomap to relinquish the pinning - * after the iomapping is no longer required. - * - * Callers must hold the struct_mutex. - * - * Returns a valid iomapped pointer or ERR_PTR. - */ -void __iomem *i915_vma_pin_iomap(struct i915_vma *vma); -#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x)) - -/** - * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap - * @vma: VMA to unpin - * - * Unpins the previously iomapped VMA from i915_vma_pin_iomap(). - * - * Callers must hold the struct_mutex. This function is only valid to be - * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap(). 
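A hedged usage sketch of the iomap contract documented above (the register offset and value are invented for illustration):

void __iomem *map;

map = i915_vma_pin_iomap(vma);	/* takes an extra pin on the VMA */
if (IS_ERR(map))
	return PTR_ERR(map);

writel(0xc0ffee, map + 0x40);	/* CPU access through the GTT aperture */

i915_vma_unpin_iomap(vma);	/* drops the pin; the mapping stays cached */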
- */ -static inline void i915_vma_unpin_iomap(struct i915_vma *vma) -{ - lockdep_assert_held(&vma->vm->dev->struct_mutex); - GEM_BUG_ON(vma->iomap == NULL); - i915_vma_unpin(vma); -} - -static inline struct page *i915_vma_first_page(struct i915_vma *vma) -{ - GEM_BUG_ON(!vma->pages); - return sg_page(vma->pages->sgl); -} - #endif diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h new file mode 100644 index 000000000000..6a368de9d81e --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -0,0 +1,338 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __I915_GEM_OBJECT_H__ +#define __I915_GEM_OBJECT_H__ + +#include <linux/reservation.h> + +#include <drm/drm_vma_manager.h> +#include <drm/drm_gem.h> +#include <drm/drmP.h> + +#include <drm/i915_drm.h> + +struct drm_i915_gem_object_ops { + unsigned int flags; +#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1 +#define I915_GEM_OBJECT_IS_SHRINKABLE 0x2 + + /* Interface between the GEM object and its backing storage. + * get_pages() is called once prior to the use of the associated set + * of pages, before binding them into the GTT, and put_pages() is + * called after we no longer need them. As we expect there to be + * associated cost with migrating pages between the backing storage + * and making them available for the GPU (e.g. clflush), we may hold + * onto the pages after they are no longer referenced by the GPU + * in case they may be used again shortly (for example migrating the + * pages to a different memory domain within the GTT). put_pages() + * will therefore most likely be called when the object itself is + * being released or under memory pressure (where we attempt to + * reap pages for the shrinker). + */ + struct sg_table *(*get_pages)(struct drm_i915_gem_object *); + void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *); + + int (*dmabuf_export)(struct drm_i915_gem_object *); + void (*release)(struct drm_i915_gem_object *); +}; + +struct drm_i915_gem_object { + struct drm_gem_object base; + + const struct drm_i915_gem_object_ops *ops; + + /** List of VMAs backed by this object */ + struct list_head vma_list; + struct rb_root vma_tree; + + /** Stolen memory for this object, instead of being backed by shmem. 
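As a reader's aid, a minimal, hypothetical backend against the ops interface above — the shape follows the stolen-memory ops later in this diff, with "ex_" names as placeholders and error handling trimmed to the essentials:

static struct sg_table *ex_get_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *st;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	/* ... point st->sgl at the backing storage ... */
	return st;
}

static void ex_put_pages(struct drm_i915_gem_object *obj,
			 struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static const struct drm_i915_gem_object_ops ex_object_ops = {
	.get_pages = ex_get_pages,
	.put_pages = ex_put_pages,
};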
*/ + struct drm_mm_node *stolen; + struct list_head global_link; + union { + struct rcu_head rcu; + struct llist_node freed; + }; + + /** + * Whether the object is currently in the GGTT mmap. + */ + struct list_head userfault_link; + + /** Used in execbuf to temporarily hold a ref */ + struct list_head obj_exec_link; + + struct list_head batch_pool_link; + + unsigned long flags; + + /** + * Have we taken a reference for the object for incomplete GPU + * activity? + */ +#define I915_BO_ACTIVE_REF 0 + + /* + * Is the object to be mapped as read-only to the GPU + * Only honoured if hardware has relevant pte bit + */ + unsigned long gt_ro:1; + unsigned int cache_level:3; + unsigned int cache_dirty:1; + + atomic_t frontbuffer_bits; + unsigned int frontbuffer_ggtt_origin; /* write once */ + struct i915_gem_active frontbuffer_write; + + /** Current tiling stride for the object, if it's tiled. */ + unsigned int tiling_and_stride; +#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */ +#define TILING_MASK (FENCE_MINIMUM_STRIDE-1) +#define STRIDE_MASK (~TILING_MASK) + + /** Count of VMA actually bound by this object */ + unsigned int bind_count; + unsigned int active_count; + unsigned int pin_display; + + struct { + struct mutex lock; /* protects the pages and their use */ + atomic_t pages_pin_count; + + struct sg_table *pages; + void *mapping; + + struct i915_gem_object_page_iter { + struct scatterlist *sg_pos; + unsigned int sg_idx; /* in pages, but 32bit eek! */ + + struct radix_tree_root radix; + struct mutex lock; /* protects this cache */ + } get_page; + + /** + * Advice: are the backing pages purgeable? + */ + unsigned int madv:2; + + /** + * This is set if the object has been written to since the + * pages were last acquired. + */ + bool dirty:1; + + /** + * This is set if the object has been pinned due to unknown + * swizzling. + */ + bool quirked:1; + } mm; + + /** Breadcrumb of last rendering to the buffer. + * There can only be one writer, but we allow for multiple readers. + * If there is a writer that necessarily implies that all other + * read requests are complete - but we may only be lazily clearing + * the read requests. A read request is naturally the most recent + * request on a ring, so we may have two different write and read + * requests on one ring where the write request is older than the + * read request. This allows for the CPU to read from an active + * buffer by only waiting for the write to complete. + */ + struct reservation_object *resv; + + /** References from framebuffers, locks out tiling changes. */ + unsigned long framebuffer_references; + + /** Record of address bit 17 of each page at last unbind. */ + unsigned long *bit_17; + + struct i915_gem_userptr { + uintptr_t ptr; + unsigned read_only :1; + + struct i915_mm_struct *mm; + struct i915_mmu_object *mmu_object; + struct work_struct *work; + } userptr; + + /** for phys allocated objects */ + struct drm_dma_handle *phys_handle; + + struct reservation_object __builtin_resv; +}; + +static inline struct drm_i915_gem_object * +to_intel_bo(struct drm_gem_object *gem)
{ + /* Assert that to_intel_bo(NULL) == NULL */ + BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base)); + + return container_of(gem, struct drm_i915_gem_object, base); +} + +/** + * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle + * @file: DRM file private data + * @handle: userspace handle + * + * Returns: + * + * A pointer to the object named by the handle if such exists on @file, NULL + * otherwise. 
This object is only valid whilst under the RCU read lock, and + * note carefully the object may be in the process of being destroyed. + */ +static inline struct drm_i915_gem_object * +i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle) +{ +#ifdef CONFIG_LOCKDEP + WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map)); +#endif + return idr_find(&file->object_idr, handle); +} + +static inline struct drm_i915_gem_object * +i915_gem_object_lookup(struct drm_file *file, u32 handle) +{ + struct drm_i915_gem_object *obj; + + rcu_read_lock(); + obj = i915_gem_object_lookup_rcu(file, handle); + if (obj && !kref_get_unless_zero(&obj->base.refcount)) + obj = NULL; + rcu_read_unlock(); + + return obj; +} + +__deprecated +extern struct drm_gem_object * +drm_gem_object_lookup(struct drm_file *file, u32 handle); + +__attribute__((nonnull)) +static inline struct drm_i915_gem_object * +i915_gem_object_get(struct drm_i915_gem_object *obj) +{ + drm_gem_object_reference(&obj->base); + return obj; +} + +__deprecated +extern void drm_gem_object_reference(struct drm_gem_object *); + +__attribute__((nonnull)) +static inline void +i915_gem_object_put(struct drm_i915_gem_object *obj) +{ + __drm_gem_object_unreference(&obj->base); +} + +__deprecated +extern void drm_gem_object_unreference(struct drm_gem_object *); + +__deprecated +extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *); + +static inline bool +i915_gem_object_is_dead(const struct drm_i915_gem_object *obj) +{ + return atomic_read(&obj->base.refcount.refcount) == 0; +} + +static inline bool +i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) +{ + return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE; +} + +static inline bool +i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj) +{ + return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE; +} + +static inline bool +i915_gem_object_is_active(const struct drm_i915_gem_object *obj) +{ + return obj->active_count; +} + +static inline bool +i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj) +{ + return test_bit(I915_BO_ACTIVE_REF, &obj->flags); +} + +static inline void +i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj) +{ + lockdep_assert_held(&obj->base.dev->struct_mutex); + __set_bit(I915_BO_ACTIVE_REF, &obj->flags); +} + +static inline void +i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj) +{ + lockdep_assert_held(&obj->base.dev->struct_mutex); + __clear_bit(I915_BO_ACTIVE_REF, &obj->flags); +} + +void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj); + +static inline unsigned int +i915_gem_object_get_tiling(struct drm_i915_gem_object *obj) +{ + return obj->tiling_and_stride & TILING_MASK; +} + +static inline bool +i915_gem_object_is_tiled(struct drm_i915_gem_object *obj) +{ + return i915_gem_object_get_tiling(obj) != I915_TILING_NONE; +} + +static inline unsigned int +i915_gem_object_get_stride(struct drm_i915_gem_object *obj) +{ + return obj->tiling_and_stride & STRIDE_MASK; +} + +static inline struct intel_engine_cs * +i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj) +{ + struct intel_engine_cs *engine = NULL; + struct dma_fence *fence; + + rcu_read_lock(); + fence = reservation_object_get_excl_rcu(obj->resv); + rcu_read_unlock(); + + if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence)) + engine = to_request(fence)->engine; + dma_fence_put(fence); + + return engine; +} + +#endif + diff --git 
a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 0b3b051a5683..27e8f257fb39 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -113,6 +113,82 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) spin_unlock(&file_priv->mm.lock); } +static struct i915_dependency * +i915_dependency_alloc(struct drm_i915_private *i915) +{ + return kmem_cache_alloc(i915->dependencies, GFP_KERNEL); +} + +static void +i915_dependency_free(struct drm_i915_private *i915, + struct i915_dependency *dep) +{ + kmem_cache_free(i915->dependencies, dep); +} + +static void +__i915_priotree_add_dependency(struct i915_priotree *pt, + struct i915_priotree *signal, + struct i915_dependency *dep, + unsigned long flags) +{ + INIT_LIST_HEAD(&dep->dfs_link); + list_add(&dep->wait_link, &signal->waiters_list); + list_add(&dep->signal_link, &pt->signalers_list); + dep->signaler = signal; + dep->flags = flags; +} + +static int +i915_priotree_add_dependency(struct drm_i915_private *i915, + struct i915_priotree *pt, + struct i915_priotree *signal) +{ + struct i915_dependency *dep; + + dep = i915_dependency_alloc(i915); + if (!dep) + return -ENOMEM; + + __i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC); + return 0; +} + +static void +i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt) +{ + struct i915_dependency *dep, *next; + + GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node)); + + /* Everyone we depended upon (the fences we wait to be signaled) + * should retire before us and remove themselves from our list. + * However, retirement is run independently on each timeline and + * so we may be called out-of-order. + */ + list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) { + list_del(&dep->wait_link); + if (dep->flags & I915_DEPENDENCY_ALLOC) + i915_dependency_free(i915, dep); + } + + /* Remove ourselves from everyone who depends upon us */ + list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) { + list_del(&dep->signal_link); + if (dep->flags & I915_DEPENDENCY_ALLOC) + i915_dependency_free(i915, dep); + } +} + +static void +i915_priotree_init(struct i915_priotree *pt) +{ + INIT_LIST_HEAD(&pt->signalers_list); + INIT_LIST_HEAD(&pt->waiters_list); + RB_CLEAR_NODE(&pt->node); + pt->priority = INT_MIN; +} + void i915_gem_retire_noop(struct i915_gem_active *active, struct drm_i915_gem_request *request) { @@ -124,7 +200,10 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request) struct i915_gem_active *active, *next; lockdep_assert_held(&request->i915->drm.struct_mutex); + GEM_BUG_ON(!i915_sw_fence_done(&request->submit)); + GEM_BUG_ON(!i915_sw_fence_done(&request->execute)); GEM_BUG_ON(!i915_gem_request_completed(request)); + GEM_BUG_ON(!request->i915->gt.active_requests); trace_i915_gem_request_retire(request); @@ -142,7 +221,12 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request) */ list_del(&request->ring_link); request->ring->last_retired_head = request->postfix; - request->i915->gt.active_requests--; + if (!--request->i915->gt.active_requests) { + GEM_BUG_ON(!request->i915->gt.awake); + mod_delayed_work(request->i915->wq, + &request->i915->gt.idle_work, + msecs_to_jiffies(100)); + } /* Walk through the active list, calling retire on each. 
This allows * objects to track their GPU activity and mark themselves as idle @@ -182,6 +266,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request) i915_gem_context_put(request->ctx); dma_fence_signal(&request->fence); + + i915_priotree_fini(request->i915, &request->priotree); i915_gem_request_put(request); } @@ -241,9 +327,8 @@ static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno) /* If the seqno wraps around, we need to clear the breadcrumb rbtree */ if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) { - while (intel_kick_waiters(i915) || intel_kick_signalers(i915)) - yield(); - yield(); + while (intel_breadcrumbs_busy(i915)) + cond_resched(); /* spin until threads are complete */ } atomic_set(&timeline->next_seqno, seqno); @@ -307,25 +392,16 @@ static u32 timeline_get_seqno(struct i915_gem_timeline *tl) return atomic_inc_return(&tl->next_seqno); } -static int __i915_sw_fence_call -submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) +void __i915_gem_request_submit(struct drm_i915_gem_request *request) { - struct drm_i915_gem_request *request = - container_of(fence, typeof(*request), submit); struct intel_engine_cs *engine = request->engine; struct intel_timeline *timeline; - unsigned long flags; u32 seqno; - if (state != FENCE_COMPLETE) - return NOTIFY_DONE; - /* Transfer from per-context onto the global per-engine timeline */ timeline = engine->timeline; GEM_BUG_ON(timeline == request->timeline); - - /* Will be called from irq-context when using foreign DMA fences */ - spin_lock_irqsave(&timeline->lock, flags); + assert_spin_locked(&timeline->lock); seqno = timeline_get_seqno(timeline->common); GEM_BUG_ON(!seqno); @@ -345,14 +421,43 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) GEM_BUG_ON(!request->global_seqno); engine->emit_breadcrumb(request, request->ring->vaddr + request->postfix); - engine->submit_request(request); - spin_lock_nested(&request->timeline->lock, SINGLE_DEPTH_NESTING); + spin_lock(&request->timeline->lock); list_move_tail(&request->link, &timeline->requests); spin_unlock(&request->timeline->lock); - spin_unlock_irqrestore(&timeline->lock, flags); + i915_sw_fence_commit(&request->execute); +} + +void i915_gem_request_submit(struct drm_i915_gem_request *request) +{ + struct intel_engine_cs *engine = request->engine; + unsigned long flags; + + /* Will be called from irq-context when using foreign fences. */ + spin_lock_irqsave(&engine->timeline->lock, flags); + + __i915_gem_request_submit(request); + + spin_unlock_irqrestore(&engine->timeline->lock, flags); +} + +static int __i915_sw_fence_call +submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) +{ + if (state == FENCE_COMPLETE) { + struct drm_i915_gem_request *request = + container_of(fence, typeof(*request), submit); + + request->engine->submit_request(request); + } + + return NOTIFY_DONE; +} +static int __i915_sw_fence_call +execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) +{ return NOTIFY_DONE; } @@ -441,6 +546,14 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, __timeline_get_seqno(req->timeline->common)); i915_sw_fence_init(&req->submit, submit_notify); + i915_sw_fence_init(&req->execute, execute_notify); + /* Ensure that the execute fence completes after the submit fence - + * as we complete the execute fence from within the submit fence + * callback, its completion would otherwise be visible first. 
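Condensed, the guarantee this await establishes is exactly what the wait path relies on: once the execute fence is done, the submit fence must be done too. The assertions appear verbatim in i915_wait_request() further down:

timeout = __i915_request_wait_for_execute(req, flags, timeout);
/* done(execute) implies done(submit): */
GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
GEM_BUG_ON(!i915_sw_fence_done(&req->submit));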
+ */ + i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq); + + i915_priotree_init(&req->priotree); INIT_LIST_HEAD(&req->active_list); req->i915 = dev_priv; @@ -495,6 +608,14 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to, GEM_BUG_ON(to == from); + if (to->engine->schedule) { + ret = i915_priotree_add_dependency(to->i915, + &to->priotree, + &from->priotree); + if (ret < 0) + return ret; + } + if (to->timeline == from->timeline) return 0; @@ -650,6 +771,8 @@ static void i915_gem_mark_busy(const struct intel_engine_cs *engine) if (dev_priv->gt.awake) return; + GEM_BUG_ON(!dev_priv->gt.active_requests); + intel_runtime_pm_get_noresume(dev_priv); dev_priv->gt.awake = true; @@ -718,9 +841,15 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches) prev = i915_gem_active_raw(&timeline->last_request, &request->i915->drm.struct_mutex); - if (prev) + if (prev) { i915_sw_fence_await_sw_fence(&request->submit, &prev->submit, &request->submitq); + if (engine->schedule) + __i915_priotree_add_dependency(&request->priotree, + &prev->priotree, + &request->dep, + 0); + } spin_lock_irq(&timeline->lock); list_add_tail(&request->link, &timeline->requests); @@ -737,6 +866,19 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches) i915_gem_mark_busy(engine); + /* Let the backend know a new request has arrived that may need + * to adjust the existing execution schedule due to a high priority + * request - i.e. we may want to preempt the current request in order + * to run a high priority dependency chain *before* we can execute this + * request. + * + * This is called before the request is ready to run so that we can + * decide whether to preempt the entire chain so that it is ready to + * run at the earliest possible convenience. + */ + if (engine->schedule) + engine->schedule(request, request->ctx->priority); + local_bh_disable(); i915_sw_fence_commit(&request->submit); local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ @@ -817,9 +959,9 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req, } static long -__i915_request_wait_for_submit(struct drm_i915_gem_request *request, - unsigned int flags, - long timeout) +__i915_request_wait_for_execute(struct drm_i915_gem_request *request, + unsigned int flags, + long timeout) { const int state = flags & I915_WAIT_INTERRUPTIBLE ? 
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; @@ -831,9 +973,9 @@ __i915_request_wait_for_submit(struct drm_i915_gem_request *request, add_wait_queue(q, &reset); do { - prepare_to_wait(&request->submit.wait, &wait, state); + prepare_to_wait(&request->execute.wait, &wait, state); - if (i915_sw_fence_done(&request->submit)) + if (i915_sw_fence_done(&request->execute)) break; if (flags & I915_WAIT_LOCKED && @@ -851,7 +993,7 @@ __i915_request_wait_for_submit(struct drm_i915_gem_request *request, timeout = io_schedule_timeout(timeout); } while (timeout); - finish_wait(&request->submit.wait, &wait); + finish_wait(&request->execute.wait, &wait); if (flags & I915_WAIT_LOCKED) remove_wait_queue(q, &reset); @@ -903,13 +1045,14 @@ long i915_wait_request(struct drm_i915_gem_request *req, trace_i915_gem_request_wait_begin(req); - if (!i915_sw_fence_done(&req->submit)) { - timeout = __i915_request_wait_for_submit(req, flags, timeout); + if (!i915_sw_fence_done(&req->execute)) { + timeout = __i915_request_wait_for_execute(req, flags, timeout); if (timeout < 0) goto complete; - GEM_BUG_ON(!i915_sw_fence_done(&req->submit)); + GEM_BUG_ON(!i915_sw_fence_done(&req->execute)); } + GEM_BUG_ON(!i915_sw_fence_done(&req->submit)); GEM_BUG_ON(!req->global_seqno); /* Optimistic short spin before touching IRQs */ @@ -1013,13 +1156,6 @@ void i915_gem_retire_requests(struct drm_i915_private *dev_priv) if (!dev_priv->gt.active_requests) return; - GEM_BUG_ON(!dev_priv->gt.awake); - for_each_engine(engine, dev_priv, id) engine_retire_requests(engine); - - if (!dev_priv->gt.active_requests) - mod_delayed_work(dev_priv->wq, - &dev_priv->gt.idle_work, - msecs_to_jiffies(100)); } diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index 75f8360b3421..e2b077df2da0 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h @@ -30,6 +30,9 @@ #include "i915_gem.h" #include "i915_sw_fence.h" +struct drm_file; +struct drm_i915_gem_object; + struct intel_wait { struct rb_node node; struct task_struct *tsk; @@ -41,6 +44,33 @@ struct intel_signal_node { struct intel_wait wait; }; +struct i915_dependency { + struct i915_priotree *signaler; + struct list_head signal_link; + struct list_head wait_link; + struct list_head dfs_link; + unsigned long flags; +#define I915_DEPENDENCY_ALLOC BIT(0) +}; + +/* Requests exist in a complex web of interdependencies. Each request + * has to wait for some other request to complete before it is ready to be run + * (e.g. we have to wait until the pixels have been rendered into a texture + * before we can copy from it). We track the readiness of a request in terms + * of fences, but we also need to keep the dependency tree for the lifetime + * of the request (beyond the life of an individual fence). We use the tree + * at various points to reorder the requests whilst keeping the requests + * in order with respect to their various dependencies. + */ +struct i915_priotree { + struct list_head signalers_list; /* those before us, we depend upon */ + struct list_head waiters_list; /* those after us, they depend upon us */ + struct rb_node node; + int priority; +#define I915_PRIORITY_MAX 1024 +#define I915_PRIORITY_MIN (-I915_PRIORITY_MAX) +}; + /** * Request queue structure. * @@ -84,8 +114,34 @@ struct drm_i915_gem_request { struct intel_timeline *timeline; struct intel_signal_node signaling; + /* Fences for the various phases in the request's lifetime. 
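One plausible consumer of the priotree defined above, sketched for orientation only (the scheduler backend itself is not part of this section): raise the priority of everything a request depends on before raising its own, so a high-priority request drags its prerequisites forward. The recursion is for brevity; a real implementation would iterate, e.g. over dfs_link:

static void ex_bump_priority(struct i915_priotree *pt, int prio)
{
	struct i915_dependency *dep;

	if (pt->priority >= prio)
		return;		/* already at least this urgent */

	pt->priority = prio;
	list_for_each_entry(dep, &pt->signalers_list, signal_link)
		ex_bump_priority(dep->signaler, prio);
}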
+ * + * The submit fence is used to await upon all of the request's + * dependencies. When it is signaled, the request is ready to run. + * It is used by the driver to then queue the request for execution. + * + * The execute fence is used to signal when the request has been + * sent to hardware. + * + * It is illegal for the submit fence of one request to wait upon the + * execute fence of an earlier request. It should be sufficient to + * wait upon the submit fence of the earlier request. + */ struct i915_sw_fence submit; + struct i915_sw_fence execute; wait_queue_t submitq; + wait_queue_t execq; + + /* A list of everyone we wait upon, and everyone who waits upon us. + * Even though we will not be submitted to the hardware before the + * submit fence is signaled (it waits for all external events as well + * as our own requests), the scheduler still needs to know the + * dependency tree for the lifetime of the request (from execbuf + * to retirement), i.e. bidirectional dependency information for the + * request not tied to individual fences. + */ + struct i915_priotree priotree; + struct i915_dependency dep; u32 global_seqno; @@ -143,9 +199,6 @@ struct drm_i915_gem_request { struct drm_i915_file_private *file_priv; /** file_priv list entry for this request */ struct list_head client_list; - - /** Link in the execlist submission queue, guarded by execlist_lock. */ - struct list_head execlist_link; }; extern const struct dma_fence_ops i915_fence_ops; @@ -162,18 +215,6 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, struct drm_file *file); void i915_gem_request_retire_upto(struct drm_i915_gem_request *req); -static inline u32 -i915_gem_request_get_seqno(struct drm_i915_gem_request *req) -{ - return req ? req->global_seqno : 0; -} - -static inline struct intel_engine_cs * -i915_gem_request_get_engine(struct drm_i915_gem_request *req) -{ - return req ? req->engine : NULL; -} - static inline struct drm_i915_gem_request * to_request(struct dma_fence *fence) { @@ -226,6 +267,9 @@ void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches); #define i915_add_request_no_flush(req) \ __i915_add_request(req, false) +void __i915_gem_request_submit(struct drm_i915_gem_request *request); +void i915_gem_request_submit(struct drm_i915_gem_request *request); + struct intel_rps_client; #define NO_WAITBOOST ERR_PTR(-1) #define IS_RPS_CLIENT(p) (!IS_ERR(p)) diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index b1d367dba347..ebaa941c83af 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -89,9 +89,8 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, mutex_unlock(&dev_priv->mm.stolen_lock); } -static unsigned long i915_stolen_to_physical(struct drm_device *dev) +static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; struct i915_ggtt *ggtt = &dev_priv->ggtt; struct resource *r; @@ -253,7 +252,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) * kernel. So if the region is already marked as busy, something * is seriously wrong. 
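Reduced to its skeleton (dev_priv->drm.dev, base and the stolen size as in the surrounding function), the reservation logic that follows is a simple probe-then-retry:

r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
			    "Graphics Stolen Memory");
if (r == NULL) {
	/* Some BIOSes report the stolen region on the PCI bus with an
	 * off-by-one, so retry shifted by one byte before concluding
	 * that the range is genuinely busy. */
	r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
				    ggtt->stolen_size - 1,
				    "Graphics Stolen Memory");
}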
*/ - r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size, + r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size, "Graphics Stolen Memory"); if (r == NULL) { /* @@ -264,7 +263,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) * PCI bus, but have an off-by-one error. Hence retry the * reservation starting from 1 instead of 0. */ - r = devm_request_mem_region(dev->dev, base + 1, + r = devm_request_mem_region(dev_priv->drm.dev, base + 1, ggtt->stolen_size - 1, "Graphics Stolen Memory"); /* @@ -408,9 +407,8 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, *size = stolen_top - *base; } -int i915_gem_init_stolen(struct drm_device *dev) +int i915_gem_init_stolen(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; unsigned long reserved_total, reserved_base = 0, reserved_size; unsigned long stolen_top; @@ -418,7 +416,7 @@ int i915_gem_init_stolen(struct drm_device *dev) mutex_init(&dev_priv->mm.stolen_lock); #ifdef CONFIG_INTEL_IOMMU - if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) { + if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) { DRM_INFO("DMAR active, disabling use of stolen memory\n"); return 0; } @@ -427,7 +425,7 @@ int i915_gem_init_stolen(struct drm_device *dev) if (ggtt->stolen_size == 0) return 0; - dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); + dev_priv->mm.stolen_base = i915_stolen_to_physical(dev_priv); if (dev_priv->mm.stolen_base == 0) return 0; @@ -515,12 +513,10 @@ i915_pages_create_for_stolen(struct drm_device *dev, u32 offset, u32 size) { struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; struct sg_table *st; struct scatterlist *sg; - DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size); - BUG_ON(offset > ggtt->stolen_size - size); + GEM_BUG_ON(offset > dev_priv->ggtt.stolen_size - size); /* We hide that we have no struct page backing our stolen object * by wrapping the contiguous physical allocation with a fake @@ -529,11 +525,11 @@ i915_pages_create_for_stolen(struct drm_device *dev, st = kmalloc(sizeof(*st), GFP_KERNEL); if (st == NULL) - return NULL; + return ERR_PTR(-ENOMEM); if (sg_alloc_table(st, 1, GFP_KERNEL)) { kfree(st); - return NULL; + return ERR_PTR(-ENOMEM); } sg = st->sgl; @@ -557,7 +553,7 @@ i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj, struct sg_table *pages) { - /* Should only be called during free */ + /* Should only be called from i915_gem_object_release_stolen() */ sg_free_table(pages); kfree(pages); } @@ -566,15 +562,16 @@ static void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen); + + GEM_BUG_ON(!stolen); __i915_gem_object_unpin_pages(obj); - if (obj->stolen) { - i915_gem_stolen_remove_node(dev_priv, obj->stolen); - kfree(obj->stolen); - obj->stolen = NULL; - } + i915_gem_stolen_remove_node(dev_priv, stolen); + kfree(stolen); } + static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { .get_pages = i915_gem_object_get_pages_stolen, .put_pages = i915_gem_object_put_pages_stolen, @@ -596,7 +593,8 @@ _i915_gem_object_create_stolen(struct drm_device *dev, obj->stolen = stolen; obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; - obj->cache_level = HAS_LLC(dev) ? 
I915_CACHE_LLC : I915_CACHE_NONE; + obj->cache_level = HAS_LLC(to_i915(dev)) ? + I915_CACHE_LLC : I915_CACHE_NONE; if (i915_gem_object_pin_pages(obj)) goto cleanup; @@ -619,7 +617,6 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size) if (!drm_mm_initialized(&dev_priv->mm.stolen)) return NULL; - DRM_DEBUG_KMS("creating stolen object: size=%x\n", size); if (size == 0) return NULL; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 251d51b01174..c85e7b06bdba 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -60,9 +60,9 @@ /* Check pitch constriants for all chips & tiling formats */ static bool -i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) +i915_tiling_ok(struct drm_i915_private *dev_priv, + int stride, int size, int tiling_mode) { - struct drm_i915_private *dev_priv = to_i915(dev); int tile_width; /* Linear is always fine */ @@ -81,10 +81,10 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) /* check maximum stride & object size */ /* i965+ stores the end address of the gtt mapping in the fence * reg, so dont bother to check the size */ - if (INTEL_INFO(dev)->gen >= 7) { + if (INTEL_GEN(dev_priv) >= 7) { if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL) return false; - } else if (INTEL_INFO(dev)->gen >= 4) { + } else if (INTEL_GEN(dev_priv) >= 4) { if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) return false; } else { @@ -104,7 +104,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) return false; /* 965+ just needs multiples of tile width */ - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { if (stride & (tile_width - 1)) return false; return true; @@ -199,7 +199,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, if (!obj) return -ENOENT; - if (!i915_tiling_ok(dev, + if (!i915_tiling_ok(dev_priv, args->stride, obj->base.size, args->tiling_mode)) { i915_gem_object_put(obj); return -EINVAL; diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c index fc8f13a79f8f..bf8a471b61e6 100644 --- a/drivers/gpu/drm/i915/i915_gem_timeline.c +++ b/drivers/gpu/drm/i915/i915_gem_timeline.c @@ -24,9 +24,11 @@ #include "i915_drv.h" -int i915_gem_timeline_init(struct drm_i915_private *i915, - struct i915_gem_timeline *timeline, - const char *name) +static int __i915_gem_timeline_init(struct drm_i915_private *i915, + struct i915_gem_timeline *timeline, + const char *name, + struct lock_class_key *lockclass, + const char *lockname) { unsigned int i; u64 fences; @@ -47,8 +49,11 @@ int i915_gem_timeline_init(struct drm_i915_private *i915, tl->fence_context = fences++; tl->common = timeline; - +#ifdef CONFIG_DEBUG_SPINLOCK + __raw_spin_lock_init(&tl->lock.rlock, lockname, lockclass); +#else spin_lock_init(&tl->lock); +#endif init_request_active(&tl->last_request, NULL); INIT_LIST_HEAD(&tl->requests); } @@ -56,6 +61,26 @@ int i915_gem_timeline_init(struct drm_i915_private *i915, return 0; } +int i915_gem_timeline_init(struct drm_i915_private *i915, + struct i915_gem_timeline *timeline, + const char *name) +{ + static struct lock_class_key class; + + return __i915_gem_timeline_init(i915, timeline, name, + &class, "&timeline->lock"); +} + +int i915_gem_timeline_init__global(struct drm_i915_private *i915) +{ + static struct lock_class_key class; + + return __i915_gem_timeline_init(i915, + &i915->gt.global_timeline, + "[execution]", + &class, 
"&global_timeline->lock"); +} + void i915_gem_timeline_fini(struct i915_gem_timeline *tl) { lockdep_assert_held(&tl->i915->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h index f2bf7b1d49a1..98d99a62b4ae 100644 --- a/drivers/gpu/drm/i915/i915_gem_timeline.h +++ b/drivers/gpu/drm/i915/i915_gem_timeline.h @@ -67,6 +67,7 @@ struct i915_gem_timeline { int i915_gem_timeline_init(struct drm_i915_private *i915, struct i915_gem_timeline *tl, const char *name); +int i915_gem_timeline_init__global(struct drm_i915_private *i915); void i915_gem_timeline_fini(struct i915_gem_timeline *tl); #endif diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 64261639f547..107ddf51065e 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -753,12 +753,13 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_userptr *args = data; struct drm_i915_gem_object *obj; int ret; u32 handle; - if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) { + if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) { /* We cannot support coherent userptr objects on hw without * LLC and broken snooping. */ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 204093f3eaa5..ae84aa4b1467 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -528,8 +528,7 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m, int i915_error_state_to_str(struct drm_i915_error_state_buf *m, const struct i915_error_state_file_priv *error_priv) { - struct drm_device *dev = error_priv->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(error_priv->dev); struct pci_dev *pdev = dev_priv->drm.pdev; struct drm_i915_error_state *error = error_priv->error; struct drm_i915_error_object *obj; @@ -573,7 +572,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, pdev->subsystem_device); err_printf(m, "IOMMU enabled?: %d\n", error->iommu); - if (HAS_CSR(dev)) { + if (HAS_CSR(dev_priv)) { struct intel_csr *csr = &dev_priv->csr; err_printf(m, "DMC loaded: %s\n", @@ -585,7 +584,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, "EIR: 0x%08x\n", error->eir); err_printf(m, "IER: 0x%08x\n", error->ier); - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { for (i = 0; i < 4; i++) err_printf(m, "GTIER gt %d: 0x%08x\n", i, error->gtier[i]); @@ -600,10 +599,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, for (i = 0; i < dev_priv->num_fence_regs; i++) err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); - if (INTEL_INFO(dev)->gen >= 6) { + if (INTEL_GEN(dev_priv) >= 6) { err_printf(m, "ERROR: 0x%08x\n", error->error); - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_GEN(dev_priv) >= 8) err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", error->fault_data1, error->fault_data0); @@ -708,7 +707,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, intel_overlay_print_error_state(m, error->overlay); if (error->display) - intel_display_print_error_state(m, dev, error->display); + intel_display_print_error_state(m, dev_priv, error->display); out: if (m->bytes == 0 && m->err) @@ -861,16 +860,19 @@ out: static inline 
uint32_t __active_get_seqno(struct i915_gem_active *active) { - return i915_gem_request_get_seqno(__i915_gem_active_peek(active)); + struct drm_i915_gem_request *request; + + request = __i915_gem_active_peek(active); + return request ? request->global_seqno : 0; } static inline int __active_get_engine_id(struct i915_gem_active *active) { - struct intel_engine_cs *engine; + struct drm_i915_gem_request *request; - engine = i915_gem_request_get_engine(__i915_gem_active_peek(active)); - return engine ? engine->id : -1; + request = __i915_gem_active_peek(active); + return request ? request->engine->id : -1; } static void capture_bo(struct drm_i915_error_buffer *err, @@ -884,8 +886,8 @@ static void capture_bo(struct drm_i915_error_buffer *err, for (i = 0; i < I915_NUM_ENGINES; i++) err->rseqno[i] = __active_get_seqno(&vma->last_read[i]); - err->wseqno = __active_get_seqno(&vma->last_write); - err->engine = __active_get_engine_id(&vma->last_write); + err->wseqno = __active_get_seqno(&obj->frontbuffer_write); + err->engine = __active_get_engine_id(&obj->frontbuffer_write); err->gtt_offset = vma->node.start; err->read_domains = obj->base.read_domains; @@ -1440,7 +1442,6 @@ static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv, static void i915_capture_reg_state(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { - struct drm_device *dev = &dev_priv->drm; int i; /* General organization @@ -1461,7 +1462,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, if (IS_GEN7(dev_priv)) error->err_int = I915_READ(GEN7_ERR_INT); - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0); error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1); } @@ -1473,10 +1474,10 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, } /* 2: Registers which belong to multiple generations */ - if (INTEL_INFO(dev)->gen >= 7) + if (INTEL_GEN(dev_priv) >= 7) error->forcewake = I915_READ_FW(FORCEWAKE_MT); - if (INTEL_INFO(dev)->gen >= 6) { + if (INTEL_GEN(dev_priv) >= 6) { error->derrmr = I915_READ(DERRMR); error->error = I915_READ(ERROR_GEN6); error->done_reg = I915_READ(DONE_REG); @@ -1489,10 +1490,10 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, } /* 4: Everything else */ - if (HAS_HW_CONTEXTS(dev)) + if (HAS_HW_CONTEXTS(dev_priv)) error->ccid = I915_READ(CCID); - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { error->ier = I915_READ(GEN8_DE_MISC_IER); for (i = 0; i < 4; i++) error->gtier[i] = I915_READ(GEN8_GT_IER(i)); diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 666dab7a675a..4462112725ef 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -629,11 +629,23 @@ static int guc_ring_doorbell(struct i915_guc_client *gc) static void i915_guc_submit(struct drm_i915_gem_request *rq) { struct drm_i915_private *dev_priv = rq->i915; - unsigned int engine_id = rq->engine->id; + struct intel_engine_cs *engine = rq->engine; + unsigned int engine_id = engine->id; struct intel_guc *guc = &rq->i915->guc; struct i915_guc_client *client = guc->execbuf_client; int b_ret; + /* We keep the previous context alive until we retire the following + * request. This ensures that the context object is still pinned + * for any residual writes the HW makes into it on the context switch + * into the next object following the breadcrumb.
Otherwise, we may + * retire the context too early. + */ + rq->previous_context = engine->last_context; + engine->last_context = rq->ctx; + + i915_gem_request_submit(rq); + spin_lock(&client->wq_lock); guc_wq_item_append(client, rq); @@ -1520,6 +1532,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) /* Take over from manual control of ELSP (execlists) */ for_each_engine(engine, dev_priv, id) { engine->submit_request = i915_guc_submit; + engine->schedule = NULL; /* Replay the current set of previously submitted requests */ list_for_each_entry(request, diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6d7505b5c5e7..07ca71cabb2b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2848,10 +2848,8 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -static void ibx_irq_reset(struct drm_device *dev) +static void ibx_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - if (HAS_PCH_NOP(dev_priv)) return; @@ -2881,12 +2879,10 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev) POSTING_READ(SDEIER); } -static void gen5_gt_irq_reset(struct drm_device *dev) +static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - GEN5_IRQ_RESET(GT); - if (INTEL_INFO(dev)->gen >= 6) + if (INTEL_GEN(dev_priv) >= 6) GEN5_IRQ_RESET(GEN6_PM); } @@ -2951,9 +2947,9 @@ static void ironlake_irq_reset(struct drm_device *dev) if (IS_GEN7(dev_priv)) I915_WRITE(GEN7_ERR_INT, 0xffffffff); - gen5_gt_irq_reset(dev); + gen5_gt_irq_reset(dev_priv); - ibx_irq_reset(dev); + ibx_irq_reset(dev_priv); } static void valleyview_irq_preinstall(struct drm_device *dev) @@ -2963,7 +2959,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev) I915_WRITE(VLV_MASTER_IER, 0); POSTING_READ(VLV_MASTER_IER); - gen5_gt_irq_reset(dev); + gen5_gt_irq_reset(dev_priv); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display_irqs_enabled) @@ -2999,7 +2995,7 @@ static void gen8_irq_reset(struct drm_device *dev) GEN5_IRQ_RESET(GEN8_PCU_); if (HAS_PCH_SPLIT(dev_priv)) - ibx_irq_reset(dev); + ibx_irq_reset(dev_priv); } void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, @@ -3222,7 +3218,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); - if (INTEL_INFO(dev)->gen >= 6) { + if (INTEL_GEN(dev_priv) >= 6) { /* * RPS interrupts will get enabled/disabled on demand when RPS * itself is enabled/disabled. 
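With i915_guc_submit() above, the software bookkeeping now happens before the hardware doorbell is rung. A condensed sketch of the ordering, based only on the hunks shown here rather than the complete function:

	rq->previous_context = engine->last_context;	/* keep the old ctx pinned */
	engine->last_context = rq->ctx;

	i915_gem_request_submit(rq);	/* fence/global-seqno bookkeeping first */

	spin_lock(&client->wq_lock);
	guc_wq_item_append(client, rq);	/* queue the GuC work item... */
	b_ret = guc_ring_doorbell(client);	/* ...then presumably ring the doorbell */
	spin_unlock(&client->wq_lock);

Note also that i915_guc_submission_enable() clears engine->schedule, so the new priority-based scheduling hook stays confined to the execlists path.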
@@ -3242,7 +3238,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); u32 display_mask, extra_mask; - if (INTEL_INFO(dev)->gen >= 7) { + if (INTEL_GEN(dev_priv) >= 7) { display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | DE_PLANEB_FLIP_DONE_IVB | @@ -3466,7 +3462,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev) I915_WRITE(VLV_MASTER_IER, 0); POSTING_READ(VLV_MASTER_IER); - gen5_gt_irq_reset(dev); + gen5_gt_irq_reset(dev_priv); I915_WRITE(HWSTAM, 0xffffffff); @@ -3678,7 +3674,7 @@ static void i915_irq_preinstall(struct drm_device * dev) struct drm_i915_private *dev_priv = to_i915(dev); int pipe; - if (I915_HAS_HOTPLUG(dev)) { + if (I915_HAS_HOTPLUG(dev_priv)) { i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); } @@ -3712,7 +3708,7 @@ static int i915_irq_postinstall(struct drm_device *dev) I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_USER_INTERRUPT; - if (I915_HAS_HOTPLUG(dev)) { + if (I915_HAS_HOTPLUG(dev_priv)) { i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); POSTING_READ(PORT_HOTPLUG_EN); @@ -3880,7 +3876,7 @@ static void i915_irq_uninstall(struct drm_device * dev) struct drm_i915_private *dev_priv = to_i915(dev); int pipe; - if (I915_HAS_HOTPLUG(dev)) { + if (I915_HAS_HOTPLUG(dev_priv)) { i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); } @@ -4145,7 +4141,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); - if (HAS_GUC_SCHED(dev)) + if (HAS_GUC_SCHED(dev_priv)) dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT; /* Let's track the enabled rps events */ diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 629e4334719c..d46ffe7086bc 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -39,7 +39,7 @@ struct i915_params i915 __read_mostly = { .enable_hangcheck = true, .enable_ppgtt = -1, .enable_psr = -1, - .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), + .alpha_support = IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT), .disable_power_well = -1, .enable_ips = 1, .fastboot = 0, @@ -145,9 +145,10 @@ MODULE_PARM_DESC(enable_psr, "Enable PSR " "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) " "Default: -1 (use per-chip default)"); -module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0400); -MODULE_PARM_DESC(preliminary_hw_support, - "Enable preliminary hardware support."); +module_param_named_unsafe(alpha_support, i915.alpha_support, int, 0400); +MODULE_PARM_DESC(alpha_support, + "Enable alpha quality driver support for latest hardware. 
" + "See also CONFIG_DRM_I915_ALPHA_SUPPORT."); module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400); MODULE_PARM_DESC(disable_power_well, diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 94efc899c1ef..817ad959941e 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -40,7 +40,7 @@ struct i915_params { int enable_ppgtt; int enable_execlists; int enable_psr; - unsigned int preliminary_hw_support; + unsigned int alpha_support; int disable_power_well; int enable_ips; int invert_brightness; diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 2a419500b81a..fce8e198bc76 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -363,6 +363,7 @@ static const struct intel_device_info intel_broxton_info = { .has_hw_contexts = 1, .has_logical_ring_contexts = 1, .has_guc = 1, + .has_decoupled_mmio = 1, .ddb_size = 512, GEN_DEFAULT_PIPEOFFSETS, IVB_CURSOR_OFFSETS, @@ -439,9 +440,10 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct intel_device_info *intel_info = (struct intel_device_info *) ent->driver_data; - if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) { - DRM_INFO("This hardware requires preliminary hardware support.\n" - "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n"); + if (IS_ALPHA_SUPPORT(intel_info) && !i915.alpha_support) { + DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n" + "See CONFIG_DRM_I915_ALPHA_SUPPORT or i915.alpha_support module parameter\n" + "to enable support in this kernel version, or check for kernel updates.\n"); return -ENODEV; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3361d7ffc63e..c70c07a7b586 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7342,6 +7342,13 @@ enum { #define SKL_FUSE_PG1_DIST_STATUS (1<<26) #define SKL_FUSE_PG2_DIST_STATUS (1<<25) +/* Decoupled MMIO register pair for kernel driver */ +#define GEN9_DECOUPLED_REG0_DW0 _MMIO(0xF00) +#define GEN9_DECOUPLED_REG0_DW1 _MMIO(0xF04) +#define GEN9_DECOUPLED_DW1_GO (1<<31) +#define GEN9_DECOUPLED_PD_SHIFT 28 +#define GEN9_DECOUPLED_OP_SHIFT 24 + /* Per-pipe DDI Function Control */ #define _TRANS_DDI_FUNC_CTL_A 0x60400 #define _TRANS_DDI_FUNC_CTL_B 0x61400 diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 344cbf39cfa9..b0e1e7ca75da 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -29,12 +29,10 @@ #include "intel_drv.h" #include "i915_reg.h" -static void i915_save_display(struct drm_device *dev) +static void i915_save_display(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - /* Display arbitration control */ - if (INTEL_INFO(dev)->gen <= 4) + if (INTEL_GEN(dev_priv) <= 4) dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); /* save FBC interval */ @@ -42,12 +40,10 @@ static void i915_save_display(struct drm_device *dev) dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); } -static void i915_restore_display(struct drm_device *dev) +static void i915_restore_display(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - /* Display arbitration */ - if (INTEL_INFO(dev)->gen <= 4) + if (INTEL_GEN(dev_priv) <= 4) I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); /* only 
restore FBC info on the platform that supports FBC*/ @@ -57,7 +53,7 @@ static void i915_restore_display(struct drm_device *dev) if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv)) I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); - i915_redisable_vga(dev); + i915_redisable_vga(dev_priv); } int i915_save_state(struct drm_device *dev) @@ -68,14 +64,14 @@ int i915_save_state(struct drm_device *dev) mutex_lock(&dev->struct_mutex); - i915_save_display(dev); + i915_save_display(dev_priv); if (IS_GEN4(dev_priv)) pci_read_config_word(pdev, GCDGMBUS, &dev_priv->regfile.saveGCDGMBUS); /* Cache mode state */ - if (INTEL_INFO(dev)->gen < 7) + if (INTEL_GEN(dev_priv) < 7) dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); /* Memory Arbitration state */ @@ -114,15 +110,15 @@ int i915_restore_state(struct drm_device *dev) mutex_lock(&dev->struct_mutex); - i915_gem_restore_fences(dev); + i915_gem_restore_fences(dev_priv); if (IS_GEN4(dev_priv)) pci_write_config_word(pdev, GCDGMBUS, dev_priv->regfile.saveGCDGMBUS); - i915_restore_display(dev); + i915_restore_display(dev_priv); /* Cache mode state */ - if (INTEL_INFO(dev)->gen < 7) + if (INTEL_GEN(dev_priv) < 7) I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000); diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 95f2f12e0917..147420ccf49c 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -116,11 +116,14 @@ static void i915_sw_fence_await(struct i915_sw_fence *fence) WARN_ON(atomic_inc_return(&fence->pending) <= 1); } -void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn) +void __i915_sw_fence_init(struct i915_sw_fence *fence, + i915_sw_fence_notify_t fn, + const char *name, + struct lock_class_key *key) { BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK); - init_waitqueue_head(&fence->wait); + __init_waitqueue_head(&fence->wait, name, key); kref_init(&fence->kref); atomic_set(&fence->pending, 1); fence->flags = (unsigned long)fn; diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 707dfc4f0da5..7508d23f823b 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -40,7 +40,22 @@ typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *, enum i915_sw_fence_notify state); #define __i915_sw_fence_call __aligned(4) -void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn); +void __i915_sw_fence_init(struct i915_sw_fence *fence, + i915_sw_fence_notify_t fn, + const char *name, + struct lock_class_key *key); +#ifdef CONFIG_LOCKDEP +#define i915_sw_fence_init(fence, fn) \ +do { \ + static struct lock_class_key __key; \ + \ + __i915_sw_fence_init((fence), (fn), #fence, &__key); \ +} while (0) +#else +#define i915_sw_fence_init(fence, fn) \ + __i915_sw_fence_init((fence), (fn), NULL, NULL) +#endif + void i915_sw_fence_commit(struct i915_sw_fence *fence); int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c new file mode 100644 index 000000000000..a792dcb902b5 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -0,0 +1,638 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without 
limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include "i915_vma.h" + +#include "i915_drv.h" +#include "intel_ringbuffer.h" +#include "intel_frontbuffer.h" + +#include <drm/drm_gem.h> + +static void +i915_vma_retire(struct i915_gem_active *active, + struct drm_i915_gem_request *rq) +{ + const unsigned int idx = rq->engine->id; + struct i915_vma *vma = + container_of(active, struct i915_vma, last_read[idx]); + struct drm_i915_gem_object *obj = vma->obj; + + GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx)); + + i915_vma_clear_active(vma, idx); + if (i915_vma_is_active(vma)) + return; + + list_move_tail(&vma->vm_link, &vma->vm->inactive_list); + if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma))) + WARN_ON(i915_vma_unbind(vma)); + + GEM_BUG_ON(!i915_gem_object_is_active(obj)); + if (--obj->active_count) + return; + + /* Bump our place on the bound list to keep it roughly in LRU order + * so that we don't steal from recently used but inactive objects + * (unless we are forced to ofc!) 
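i915_vma_retire() above relies on one active bit per engine, cleared as each engine retires its last request using the vma. A sketch of the lifecycle, using the helpers declared later in i915_vma.h; illustrative only:

	/* on submission, for each engine that will read or write the vma */
	i915_vma_set_active(vma, engine->id);

	/* in i915_vma_retire(), as each engine's request completes */
	i915_vma_clear_active(vma, engine->id);
	if (!i915_vma_is_active(vma)) {
		/* last engine done: the vma drops to the inactive LRU */
		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	}

Only when the last bit clears does the object's active_count drop and the active reference get released.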
+ */ + if (obj->bind_count) + list_move_tail(&obj->global_link, &rq->i915->mm.bound_list); + + obj->mm.dirty = true; /* be paranoid */ + + if (i915_gem_object_has_active_reference(obj)) { + i915_gem_object_clear_active_reference(obj); + i915_gem_object_put(obj); + } +} + +static struct i915_vma * +__i915_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) +{ + struct i915_vma *vma; + struct rb_node *rb, **p; + int i; + + GEM_BUG_ON(vm->closed); + + vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL); + if (vma == NULL) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&vma->exec_list); + for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) + init_request_active(&vma->last_read[i], i915_vma_retire); + init_request_active(&vma->last_fence, NULL); + list_add(&vma->vm_link, &vm->unbound_list); + vma->vm = vm; + vma->obj = obj; + vma->size = obj->base.size; + + if (view) { + vma->ggtt_view = *view; + if (view->type == I915_GGTT_VIEW_PARTIAL) { + vma->size = view->params.partial.size; + vma->size <<= PAGE_SHIFT; + } else if (view->type == I915_GGTT_VIEW_ROTATED) { + vma->size = + intel_rotation_info_size(&view->params.rotated); + vma->size <<= PAGE_SHIFT; + } + } + + if (i915_is_ggtt(vm)) { + vma->flags |= I915_VMA_GGTT; + list_add(&vma->obj_link, &obj->vma_list); + } else { + i915_ppgtt_get(i915_vm_to_ppgtt(vm)); + list_add_tail(&vma->obj_link, &obj->vma_list); + } + + rb = NULL; + p = &obj->vma_tree.rb_node; + while (*p) { + struct i915_vma *pos; + + rb = *p; + pos = rb_entry(rb, struct i915_vma, obj_node); + if (i915_vma_compare(pos, vm, view) < 0) + p = &rb->rb_right; + else + p = &rb->rb_left; + } + rb_link_node(&vma->obj_node, rb, p); + rb_insert_color(&vma->obj_node, &obj->vma_tree); + + return vma; +} + +struct i915_vma * +i915_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) +{ + lockdep_assert_held(&obj->base.dev->struct_mutex); + GEM_BUG_ON(view && !i915_is_ggtt(vm)); + GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view)); + + return __i915_vma_create(obj, vm, view); +} + +/** + * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space. + * @vma: VMA to map + * @cache_level: mapping cache level + * @flags: flags like global or local mapping + * + * DMA addresses are taken from the scatter-gather table of this object (or of + * this VMA in case of non-default GGTT views) and PTE entries set up. + * Note that DMA addresses are also the only part of the SG table we care about.
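i915_vma_bind() below translates the caller's PIN_* request into the vma's bind-state bits. A sketch of that mapping, restating the logic visible in the function:

	u32 bind_flags = 0;

	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;	/* GGTT binding */
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;	/* ppGTT binding */

	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;	/* rewrite bindings that already exist */
	else
		bind_flags &= ~vma_flags;	/* only add the missing bindings */

If nothing remains to be bound the function returns early; otherwise the VA range is allocated on first bind and vma->vm->bind_vma() installs the PTEs.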
+ */ +int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, + u32 flags) +{ + u32 bind_flags; + u32 vma_flags; + int ret; + + if (WARN_ON(flags == 0)) + return -EINVAL; + + bind_flags = 0; + if (flags & PIN_GLOBAL) + bind_flags |= I915_VMA_GLOBAL_BIND; + if (flags & PIN_USER) + bind_flags |= I915_VMA_LOCAL_BIND; + + vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); + if (flags & PIN_UPDATE) + bind_flags |= vma_flags; + else + bind_flags &= ~vma_flags; + if (bind_flags == 0) + return 0; + + if (vma_flags == 0 && vma->vm->allocate_va_range) { + trace_i915_va_alloc(vma); + ret = vma->vm->allocate_va_range(vma->vm, + vma->node.start, + vma->node.size); + if (ret) + return ret; + } + + ret = vma->vm->bind_vma(vma, cache_level, bind_flags); + if (ret) + return ret; + + vma->flags |= bind_flags; + return 0; +} + +void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) +{ + void __iomem *ptr; + + /* Access through the GTT requires the device to be awake. */ + assert_rpm_wakelock_held(to_i915(vma->vm->dev)); + + lockdep_assert_held(&vma->vm->dev->struct_mutex); + if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) + return IO_ERR_PTR(-ENODEV); + + GEM_BUG_ON(!i915_vma_is_ggtt(vma)); + GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0); + + ptr = vma->iomap; + if (ptr == NULL) { + ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable, + vma->node.start, + vma->node.size); + if (ptr == NULL) + return IO_ERR_PTR(-ENOMEM); + + vma->iomap = ptr; + } + + __i915_vma_pin(vma); + return ptr; +} + +void i915_vma_unpin_and_release(struct i915_vma **p_vma) +{ + struct i915_vma *vma; + struct drm_i915_gem_object *obj; + + vma = fetch_and_zero(p_vma); + if (!vma) + return; + + obj = vma->obj; + + i915_vma_unpin(vma); + i915_vma_close(vma); + + __i915_gem_object_release_unless_active(obj); +} + +bool +i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) +{ + if (!drm_mm_node_allocated(&vma->node)) + return false; + + if (vma->node.size < size) + return true; + + if (alignment && vma->node.start & (alignment - 1)) + return true; + + if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma)) + return true; + + if (flags & PIN_OFFSET_BIAS && + vma->node.start < (flags & PIN_OFFSET_MASK)) + return true; + + if (flags & PIN_OFFSET_FIXED && + vma->node.start != (flags & PIN_OFFSET_MASK)) + return true; + + return false; +} + +void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) +{ + struct drm_i915_gem_object *obj = vma->obj; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + bool mappable, fenceable; + u32 fence_size, fence_alignment; + + fence_size = i915_gem_get_ggtt_size(dev_priv, + vma->size, + i915_gem_object_get_tiling(obj)); + fence_alignment = i915_gem_get_ggtt_alignment(dev_priv, + vma->size, + i915_gem_object_get_tiling(obj), + true); + + fenceable = (vma->node.size == fence_size && + (vma->node.start & (fence_alignment - 1)) == 0); + + mappable = (vma->node.start + fence_size <= + dev_priv->ggtt.mappable_end); + + /* + * Explicitly disable for rotated VMA since the display does not + * need the fence and the VMA is not accessible to other users. 
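i915_vma_pin_iomap() above caches the WC mapping in vma->iomap and leaves the vma holding one extra pin. A hypothetical caller, to show the required pairing; the function name and parameters here are illustrative only:

	static int sketch_poke_dword(struct i915_vma *vma, u32 offset, u32 value)
	{
		void __iomem *ptr;

		ptr = i915_vma_pin_iomap(vma);	/* extra pin taken on success */
		if (IS_ERR((void __force *)ptr))
			return PTR_ERR((void __force *)ptr);

		writel(value, ptr + offset);
		i915_vma_unpin_iomap(vma);	/* drops the pin; mapping stays cached */
		return 0;
	}

The mapping itself persists until __i915_vma_iounmap() at unbind time, so repeated pin/unpin cycles avoid remapping.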
+ */ + if (mappable && fenceable && + vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED) + vma->flags |= I915_VMA_CAN_FENCE; + else + vma->flags &= ~I915_VMA_CAN_FENCE; +} + +bool i915_gem_valid_gtt_space(struct i915_vma *vma, + unsigned long cache_level) +{ + struct drm_mm_node *gtt_space = &vma->node; + struct drm_mm_node *other; + + /* + * On some machines we have to be careful when putting differing types + * of snoopable memory together to avoid the prefetcher crossing memory + * domains and dying. During vm initialisation, we decide whether or not + * these constraints apply and set the drm_mm.color_adjust + * appropriately. + */ + if (vma->vm->mm.color_adjust == NULL) + return true; + + if (!drm_mm_node_allocated(gtt_space)) + return true; + + if (list_empty(&gtt_space->node_list)) + return true; + + other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list); + if (other->allocated && !other->hole_follows && other->color != cache_level) + return false; + + other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list); + if (other->allocated && !gtt_space->hole_follows && other->color != cache_level) + return false; + + return true; +} + +/** + * i915_vma_insert - finds a slot for the vma in its address space + * @vma: the vma + * @size: requested size in bytes (can be larger than the VMA) + * @alignment: required alignment + * @flags: mask of PIN_* flags to use + * + * First we try to allocate some free space that meets the requirements for + * the VMA. Failing that, if the flags permit, it will evict an old VMA, + * preferably the oldest idle entry to make room for the new VMA. + * + * Returns: + * 0 on success, negative error code otherwise. + */ +static int +i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) +{ + struct drm_i915_private *dev_priv = to_i915(vma->vm->dev); + struct drm_i915_gem_object *obj = vma->obj; + u64 start, end; + int ret; + + GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); + GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); + + size = max(size, vma->size); + if (flags & PIN_MAPPABLE) + size = i915_gem_get_ggtt_size(dev_priv, size, + i915_gem_object_get_tiling(obj)); + + alignment = max(max(alignment, vma->display_alignment), + i915_gem_get_ggtt_alignment(dev_priv, size, + i915_gem_object_get_tiling(obj), + flags & PIN_MAPPABLE)); + + start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; + + end = vma->vm->total; + if (flags & PIN_MAPPABLE) + end = min_t(u64, end, dev_priv->ggtt.mappable_end); + if (flags & PIN_ZONE_4G) + end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE); + + /* If binding the object/GGTT view requires more space than the entire + * aperture has, reject it early before evicting everything in a vain + * attempt to find space. + */ + if (size > end) { + DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n", + size, obj->base.size, + flags & PIN_MAPPABLE ?
"mappable" : "total", + end); + return -E2BIG; + } + + ret = i915_gem_object_pin_pages(obj); + if (ret) + return ret; + + if (flags & PIN_OFFSET_FIXED) { + u64 offset = flags & PIN_OFFSET_MASK; + if (offset & (alignment - 1) || offset > end - size) { + ret = -EINVAL; + goto err_unpin; + } + + vma->node.start = offset; + vma->node.size = size; + vma->node.color = obj->cache_level; + ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node); + if (ret) { + ret = i915_gem_evict_for_vma(vma); + if (ret == 0) + ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node); + if (ret) + goto err_unpin; + } + } else { + u32 search_flag, alloc_flag; + + if (flags & PIN_HIGH) { + search_flag = DRM_MM_SEARCH_BELOW; + alloc_flag = DRM_MM_CREATE_TOP; + } else { + search_flag = DRM_MM_SEARCH_DEFAULT; + alloc_flag = DRM_MM_CREATE_DEFAULT; + } + + /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks, + * so we know that we always have a minimum alignment of 4096. + * The drm_mm range manager is optimised to return results + * with zero alignment, so where possible use the optimal + * path. + */ + if (alignment <= 4096) + alignment = 0; + +search_free: + ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm, + &vma->node, + size, alignment, + obj->cache_level, + start, end, + search_flag, + alloc_flag); + if (ret) { + ret = i915_gem_evict_something(vma->vm, size, alignment, + obj->cache_level, + start, end, + flags); + if (ret == 0) + goto search_free; + + goto err_unpin; + } + + GEM_BUG_ON(vma->node.start < start); + GEM_BUG_ON(vma->node.start + vma->node.size > end); + } + GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level)); + + list_move_tail(&obj->global_link, &dev_priv->mm.bound_list); + list_move_tail(&vma->vm_link, &vma->vm->inactive_list); + obj->bind_count++; + GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); + + return 0; + +err_unpin: + i915_gem_object_unpin_pages(obj); + return ret; +} + +int __i915_vma_do_pin(struct i915_vma *vma, + u64 size, u64 alignment, u64 flags) +{ + unsigned int bound = vma->flags; + int ret; + + lockdep_assert_held(&vma->vm->dev->struct_mutex); + GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0); + GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma)); + + if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) { + ret = -EBUSY; + goto err; + } + + if ((bound & I915_VMA_BIND_MASK) == 0) { + ret = i915_vma_insert(vma, size, alignment, flags); + if (ret) + goto err; + } + + ret = i915_vma_bind(vma, vma->obj->cache_level, flags); + if (ret) + goto err; + + if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND) + __i915_vma_set_map_and_fenceable(vma); + + GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); + return 0; + +err: + __i915_vma_unpin(vma); + return ret; +} + +void i915_vma_destroy(struct i915_vma *vma) +{ + GEM_BUG_ON(vma->node.allocated); + GEM_BUG_ON(i915_vma_is_active(vma)); + GEM_BUG_ON(!i915_vma_is_closed(vma)); + GEM_BUG_ON(vma->fence); + + list_del(&vma->vm_link); + if (!i915_vma_is_ggtt(vma)) + i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); + + kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma); +} + +void i915_vma_close(struct i915_vma *vma) +{ + GEM_BUG_ON(i915_vma_is_closed(vma)); + vma->flags |= I915_VMA_CLOSED; + + list_del(&vma->obj_link); + rb_erase(&vma->obj_node, &vma->obj->vma_tree); + + if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma)) + WARN_ON(i915_vma_unbind(vma)); +} + +static void __i915_vma_iounmap(struct i915_vma *vma) +{ + GEM_BUG_ON(i915_vma_is_pinned(vma)); + + if (vma->iomap == NULL) + return; + + 
io_mapping_unmap(vma->iomap); + vma->iomap = NULL; +} + +int i915_vma_unbind(struct i915_vma *vma) +{ + struct drm_i915_gem_object *obj = vma->obj; + unsigned long active; + int ret; + + lockdep_assert_held(&obj->base.dev->struct_mutex); + + /* First wait upon any activity as retiring the request may + * have side-effects such as unpinning or even unbinding this vma. + */ + active = i915_vma_get_active(vma); + if (active) { + int idx; + + /* When a closed VMA is retired, it is unbound - eek. + * In order to prevent it from being recursively closed, + * take a pin on the vma so that the second unbind is + * aborted. + * + * Even more scary is that the retire callback may free + * the object (last active vma). To prevent the explosion + * we defer the actual object free to a worker that can + * only proceed once it acquires the struct_mutex (which + * we currently hold, therefore it cannot free this object + * before we are finished). + */ + __i915_vma_pin(vma); + + for_each_active(active, idx) { + ret = i915_gem_active_retire(&vma->last_read[idx], + &vma->vm->dev->struct_mutex); + if (ret) + break; + } + + __i915_vma_unpin(vma); + if (ret) + return ret; + + GEM_BUG_ON(i915_vma_is_active(vma)); + } + + if (i915_vma_is_pinned(vma)) + return -EBUSY; + + if (!drm_mm_node_allocated(&vma->node)) + goto destroy; + + GEM_BUG_ON(obj->bind_count == 0); + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + + if (i915_vma_is_map_and_fenceable(vma)) { + /* release the fence reg _after_ flushing */ + ret = i915_vma_put_fence(vma); + if (ret) + return ret; + + /* Force a pagefault for domain tracking on next user access */ + i915_gem_release_mmap(obj); + + __i915_vma_iounmap(vma); + vma->flags &= ~I915_VMA_CAN_FENCE; + } + + if (likely(!vma->vm->closed)) { + trace_i915_vma_unbind(vma); + vma->vm->unbind_vma(vma); + } + vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); + + drm_mm_remove_node(&vma->node); + list_move_tail(&vma->vm_link, &vma->vm->unbound_list); + + if (vma->pages != obj->mm.pages) { + GEM_BUG_ON(!vma->pages); + sg_free_table(vma->pages); + kfree(vma->pages); + } + vma->pages = NULL; + + /* Since the unbound list is global, only move to that list if + * no more VMAs exist. */ + if (--obj->bind_count == 0) + list_move_tail(&obj->global_link, + &to_i915(obj->base.dev)->mm.unbound_list); + + /* And finally now the object is completely decoupled from this vma, + * we can drop its hold on the backing storage and allow it to be + * reaped by the shrinker. + */ + i915_gem_object_unpin_pages(obj); + +destroy: + if (unlikely(i915_vma_is_closed(vma))) + i915_vma_destroy(vma); + + return 0; +} + diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h new file mode 100644 index 000000000000..85446f0b0b3f --- /dev/null +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -0,0 +1,341 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __I915_VMA_H__ +#define __I915_VMA_H__ + +#include <linux/io-mapping.h> + +#include <drm/drm_mm.h> + +#include "i915_gem_gtt.h" +#include "i915_gem_fence_reg.h" +#include "i915_gem_object.h" +#include "i915_gem_request.h" + + +enum i915_cache_level; + +/** + * A VMA represents a GEM BO that is bound into an address space. Therefore, a + * VMA's presence cannot be guaranteed before binding, or after unbinding the + * object into/from the address space. + * + * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime + * will always be <= an object's lifetime. So object refcounting should cover us. + */ +struct i915_vma { + struct drm_mm_node node; + struct drm_i915_gem_object *obj; + struct i915_address_space *vm; + struct drm_i915_fence_reg *fence; + struct sg_table *pages; + void __iomem *iomap; + u64 size; + u64 display_alignment; + + unsigned int flags; + /** + * How many users have pinned this object in GTT space. The following + * users can each hold at most one reference: pwrite/pread, execbuffer + * (objects are not allowed multiple times for the same batchbuffer), + * and the framebuffer code. When switching/pageflipping, the + * framebuffer code has at most two buffers pinned per crtc. + * + * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 + * bits with absolutely no headroom. So use 4 bits. + */ +#define I915_VMA_PIN_MASK 0xf +#define I915_VMA_PIN_OVERFLOW BIT(5) + + /** Flags and address space this VMA is bound to */ +#define I915_VMA_GLOBAL_BIND BIT(6) +#define I915_VMA_LOCAL_BIND BIT(7) +#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW) + +#define I915_VMA_GGTT BIT(8) +#define I915_VMA_CAN_FENCE BIT(9) +#define I915_VMA_CLOSED BIT(10) + + unsigned int active; + struct i915_gem_active last_read[I915_NUM_ENGINES]; + struct i915_gem_active last_fence; + + /** + * Support different GGTT views into the same object. + * This means there can be multiple VMA mappings per object and per VM. + * i915_ggtt_view_type is used to distinguish between those entries. + * The default value of zero (I915_GGTT_VIEW_NORMAL) is also the one + * assumed in GEM functions which take no ggtt view parameter. + */ + struct i915_ggtt_view ggtt_view; + + /** This object's place on the active/inactive lists */ + struct list_head vm_link; + + struct list_head obj_link; /* Link in the object's VMA list */ + struct rb_node obj_node; + + /** This vma's place in the batchbuffer or on the eviction list */ + struct list_head exec_list; + + /** + * Used for performing relocations during execbuffer insertion.
+ */ + struct hlist_node exec_node; + unsigned long exec_handle; + struct drm_i915_gem_exec_object2 *exec_entry; +}; + +struct i915_vma * +i915_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view); + +void i915_vma_unpin_and_release(struct i915_vma **p_vma); + +static inline bool i915_vma_is_ggtt(const struct i915_vma *vma) +{ + return vma->flags & I915_VMA_GGTT; +} + +static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma) +{ + return vma->flags & I915_VMA_CAN_FENCE; +} + +static inline bool i915_vma_is_closed(const struct i915_vma *vma) +{ + return vma->flags & I915_VMA_CLOSED; +} + +static inline unsigned int i915_vma_get_active(const struct i915_vma *vma) +{ + return vma->active; +} + +static inline bool i915_vma_is_active(const struct i915_vma *vma) +{ + return i915_vma_get_active(vma); +} + +static inline void i915_vma_set_active(struct i915_vma *vma, + unsigned int engine) +{ + vma->active |= BIT(engine); +} + +static inline void i915_vma_clear_active(struct i915_vma *vma, + unsigned int engine) +{ + vma->active &= ~BIT(engine); +} + +static inline bool i915_vma_has_active_engine(const struct i915_vma *vma, + unsigned int engine) +{ + return vma->active & BIT(engine); +} + +static inline u32 i915_ggtt_offset(const struct i915_vma *vma) +{ + GEM_BUG_ON(!i915_vma_is_ggtt(vma)); + GEM_BUG_ON(!vma->node.allocated); + GEM_BUG_ON(upper_32_bits(vma->node.start)); + GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1)); + return lower_32_bits(vma->node.start); +} + +static inline struct i915_vma *i915_vma_get(struct i915_vma *vma) +{ + i915_gem_object_get(vma->obj); + return vma; +} + +static inline void i915_vma_put(struct i915_vma *vma) +{ + i915_gem_object_put(vma->obj); +} + +static inline long +i915_vma_compare(struct i915_vma *vma, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) +{ + GEM_BUG_ON(view && !i915_is_ggtt(vm)); + + if (vma->vm != vm) + return vma->vm - vm; + + if (!view) + return vma->ggtt_view.type; + + if (vma->ggtt_view.type != view->type) + return vma->ggtt_view.type - view->type; + + return memcmp(&vma->ggtt_view.params, + &view->params, + sizeof(view->params)); +} + +int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, + u32 flags); +bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level); +bool +i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags); +void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); +int __must_check i915_vma_unbind(struct i915_vma *vma); +void i915_vma_close(struct i915_vma *vma); +void i915_vma_destroy(struct i915_vma *vma); + +int __i915_vma_do_pin(struct i915_vma *vma, + u64 size, u64 alignment, u64 flags); +static inline int __must_check +i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) +{ + BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW); + BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND); + BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND); + + /* Pin early to prevent the shrinker/eviction logic from destroying + * our vma as we insert and bind. 
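The fast path that follows packs the pin count into the low bits of vma->flags, so one increment both takes a pin and leaves the bind bits untouched. An annotated reading of the test, not additional code:

	/* ++vma->flags: the pin count lives in I915_VMA_PIN_MASK (the low
	 * four bits), so the increment is itself the pin.
	 *
	 * ((vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0: every binding
	 * the caller asked for (PIN_GLOBAL == I915_VMA_GLOBAL_BIND,
	 * PIN_USER == I915_VMA_LOCAL_BIND) is already present and the
	 * overflow bit is clear, so no insert or bind work is required.
	 */

If the test fails, __i915_vma_do_pin() performs the insert and bind with the pin already held, dropping it again on error.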
+ */ + if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) + return 0; + + return __i915_vma_do_pin(vma, size, alignment, flags); +} + +static inline int i915_vma_pin_count(const struct i915_vma *vma) +{ + return vma->flags & I915_VMA_PIN_MASK; +} + +static inline bool i915_vma_is_pinned(const struct i915_vma *vma) +{ + return i915_vma_pin_count(vma); +} + +static inline void __i915_vma_pin(struct i915_vma *vma) +{ + vma->flags++; + GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW); +} + +static inline void __i915_vma_unpin(struct i915_vma *vma) +{ + GEM_BUG_ON(!i915_vma_is_pinned(vma)); + vma->flags--; +} + +static inline void i915_vma_unpin(struct i915_vma *vma) +{ + GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); + __i915_vma_unpin(vma); +} + +/** + * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture + * @vma: VMA to iomap + * + * The passed in VMA has to be pinned in the global GTT mappable region. + * An extra pinning of the VMA is acquired for the returned iomapping; + * the caller must call i915_vma_unpin_iomap to relinquish the pinning + * after the iomapping is no longer required. + * + * Callers must hold the struct_mutex. + * + * Returns a valid iomapped pointer or ERR_PTR. + */ +void __iomem *i915_vma_pin_iomap(struct i915_vma *vma); +#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x)) + +/** + * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap + * @vma: VMA to unpin + * + * Unpins the previously iomapped VMA from i915_vma_pin_iomap(). + * + * Callers must hold the struct_mutex. This function is only valid to be + * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap(). + */ +static inline void i915_vma_unpin_iomap(struct i915_vma *vma) +{ + lockdep_assert_held(&vma->vm->dev->struct_mutex); + GEM_BUG_ON(vma->iomap == NULL); + i915_vma_unpin(vma); +} + +static inline struct page *i915_vma_first_page(struct i915_vma *vma) +{ + GEM_BUG_ON(!vma->pages); + return sg_page(vma->pages->sgl); +} + +/** + * i915_vma_pin_fence - pin fencing state + * @vma: vma to pin fencing for + * + * This pins the fencing state (whether tiled or untiled) to make sure the + * vma (and its object) is ready to be used as a scanout target. Fencing + * status must be synchronized first by calling i915_vma_get_fence(). + * + * The resulting fence pin reference must be released again with + * i915_vma_unpin_fence(). + * + * Returns: + * + * True if the vma has a fence, false otherwise. + */ +static inline bool +i915_vma_pin_fence(struct i915_vma *vma) +{ + lockdep_assert_held(&vma->vm->dev->struct_mutex); + if (vma->fence) { + vma->fence->pin_count++; + return true; + } else + return false; +} + +/** + * i915_vma_unpin_fence - unpin fencing state + * @vma: vma to unpin fencing for + * + * This releases the fence pin reference acquired through + * i915_vma_pin_fence. It will handle both objects with and without an + * attached fence correctly; callers do not need to distinguish this.
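A sketch of the pairing described above for a scanout pin, assuming the fence state was first brought up to date with i915_vma_get_fence():

	if (i915_vma_pin_fence(vma)) {
		/* ...use the vma as a fenced (detiled) scanout target... */
		i915_vma_unpin_fence(vma);
	}

Since i915_vma_unpin_fence() also tolerates a vma without a fence, callers may drop the reference unconditionally on teardown.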
+ */ +static inline void +i915_vma_unpin_fence(struct i915_vma *vma) +{ + lockdep_assert_held(&vma->vm->dev->struct_mutex); + if (vma->fence) { + GEM_BUG_ON(vma->fence->pin_count <= 0); + vma->fence->pin_count--; + } +} + +#endif + diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index 984a6b75c118..dbe9fb41ae53 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -106,6 +106,7 @@ intel_plane_destroy_state(struct drm_plane *plane, static int intel_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { + struct drm_i915_private *dev_priv = to_i915(plane->dev); struct drm_crtc *crtc = state->crtc; struct intel_crtc *intel_crtc; struct intel_crtc_state *crtc_state; @@ -143,8 +144,8 @@ static int intel_plane_atomic_check(struct drm_plane *plane, if (state->fb && drm_rotation_90_or_270(state->rotation)) { struct drm_format_name_buf format_name; - if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || - state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) { + if (state->fb->modifier != I915_FORMAT_MOD_Y_TILED && + state->fb->modifier != I915_FORMAT_MOD_Yf_TILED) { DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n"); return -EINVAL; } @@ -167,6 +168,14 @@ static int intel_plane_atomic_check(struct drm_plane *plane, } } + /* CHV ignores the mirror bit when the rotate bit is set :( */ + if (IS_CHERRYVIEW(dev_priv) && + state->rotation & DRM_ROTATE_180 && + state->rotation & DRM_REFLECT_X) { + DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n"); + return -EINVAL; + } + intel_state->base.visible = false; ret = intel_plane->check_plane(plane, crtc_state, intel_state); if (ret) diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 813fd74d9c8d..1c509f7410f5 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -574,23 +574,26 @@ static void ilk_audio_codec_enable(struct drm_connector *connector, /** * intel_audio_codec_enable - Enable the audio codec for HD audio * @intel_encoder: encoder on which to enable audio + * @crtc_state: pointer to the current crtc state. + * @conn_state: pointer to the current connector state. * * The enable sequences may only be performed after enabling the transcoder and * port, and after completed link training. 
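With the signature below, the audio enable hook receives the crtc and connector states explicitly instead of chasing encoder->crtc, which matches what atomic modeset callers already hold. A hypothetical call site, with pipe_config and conn_state standing in for the caller's state pointers:

	/* in an encoder's atomic enable hook */
	intel_audio_codec_enable(intel_encoder, pipe_config, conn_state);

The ELD is likewise taken from conn_state->connector and gated on eld[0] rather than looked up via drm_select_eld().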
*/ -void intel_audio_codec_enable(struct intel_encoder *intel_encoder) +void intel_audio_codec_enable(struct intel_encoder *intel_encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct drm_encoder *encoder = &intel_encoder->base; - struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); - const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; + const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; struct drm_connector *connector; struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct i915_audio_component *acomp = dev_priv->audio_component; enum port port = intel_encoder->port; - enum pipe pipe = crtc->pipe; + enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; - connector = drm_select_eld(encoder); - if (!connector) + connector = conn_state->connector; + if (!connector || !connector->eld[0]) return; DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", @@ -601,7 +604,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder) /* ELD Conn_Type */ connector->eld[5] &= ~(3 << 2); - if (intel_crtc_has_dp_encoder(crtc->config)) + if (intel_crtc_has_dp_encoder(crtc_state)) connector->eld[5] |= (1 << 2); connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 5ab646ef8c9f..7ffab1abc518 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1147,7 +1147,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, if (!child) return; - aux_channel = child->raw[25]; + aux_channel = child->common.aux_channel; ddc_pin = child->common.ddc_pin; is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; @@ -1677,7 +1677,8 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port) return false; } -bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port) +static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child, + enum port port) { static const struct { u16 dp, hdmi; @@ -1691,22 +1692,35 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, }; - int i; if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) return false; - if (!dev_priv->vbt.child_dev_num) + if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) != + (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) return false; + if (p_child->common.dvo_port == port_mapping[port].dp) + return true; + + /* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */ + if (p_child->common.dvo_port == port_mapping[port].hdmi && + p_child->common.aux_channel != 0) + return true; + + return false; +} + +bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, + enum port port) +{ + int i; + for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { const union child_device_config *p_child = &dev_priv->vbt.child_dev[i]; - if ((p_child->common.dvo_port == port_mapping[port].dp || - p_child->common.dvo_port == port_mapping[port].hdmi) && - (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) == - (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) + if (child_dev_is_dp_dual_mode(p_child, port)) return true; } diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c 
index c410d3d6465f..c9c46a538edb 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -629,35 +629,28 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) cancel_fake_irq(engine); } -unsigned int intel_kick_waiters(struct drm_i915_private *i915) +unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915) { struct intel_engine_cs *engine; enum intel_engine_id id; unsigned int mask = 0; - /* To avoid the task_struct disappearing beneath us as we wake up - * the process, we must first inspect the task_struct->state under the - * RCU lock, i.e. as we call wake_up_process() we must be holding the - * rcu_read_lock(). - */ - for_each_engine(engine, i915, id) - if (unlikely(intel_engine_wakeup(engine))) - mask |= intel_engine_flag(engine); + for_each_engine(engine, i915, id) { + struct intel_breadcrumbs *b = &engine->breadcrumbs; - return mask; -} + spin_lock_irq(&b->lock); -unsigned int intel_kick_signalers(struct drm_i915_private *i915) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - unsigned int mask = 0; + if (b->first_wait) { + wake_up_process(b->first_wait->tsk); + mask |= intel_engine_flag(engine); + } - for_each_engine(engine, i915, id) { - if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) { - wake_up_process(engine->breadcrumbs.signaler); + if (b->first_signal) { + wake_up_process(b->signaler); mask |= intel_engine_flag(engine); } + + spin_unlock_irq(&b->lock); } return mask; diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 445108855275..d81232b79f00 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -95,8 +95,7 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input) static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state) { struct drm_crtc *crtc = crtc_state->crtc; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int i, pipe = intel_crtc->pipe; uint16_t coeffs[9] = { 0, }; @@ -180,7 +179,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state) I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0); I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0); - if (INTEL_INFO(dev)->gen > 6) { + if (INTEL_GEN(dev_priv) > 6) { uint16_t postoff = 0; if (intel_crtc_state->limited_color_range) @@ -345,11 +344,10 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state) static void broadwell_load_luts(struct drm_crtc_state *state) { struct drm_crtc *crtc = state->crtc; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc_state *intel_state = to_intel_crtc_state(state); enum pipe pipe = to_intel_crtc(crtc)->pipe; - uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size; + uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; if (crtc_state_is_legacy(state)) { haswell_load_luts(state); @@ -428,8 +426,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state) static void cherryview_load_luts(struct drm_crtc_state *state) { struct drm_crtc *crtc = state->crtc; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; struct drm_color_lut *lut; uint32_t i, lut_size; @@ -446,7 
+443,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state) if (state->degamma_lut) { lut = (struct drm_color_lut *) state->degamma_lut->data; - lut_size = INTEL_INFO(dev)->color.degamma_lut_size; + lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; for (i = 0; i < lut_size; i++) { /* Write LUT in U0.14 format. */ word0 = @@ -461,7 +458,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state) if (state->gamma_lut) { lut = (struct drm_color_lut *) state->gamma_lut->data; - lut_size = INTEL_INFO(dev)->color.gamma_lut_size; + lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; for (i = 0; i < lut_size; i++) { /* Write LUT in U0.10 format. */ word0 = @@ -497,12 +494,12 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state) int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) { - struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); size_t gamma_length, degamma_length; - degamma_length = INTEL_INFO(dev)->color.degamma_lut_size * + degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size * sizeof(struct drm_color_lut); - gamma_length = INTEL_INFO(dev)->color.gamma_lut_size * + gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size * sizeof(struct drm_color_lut); /* @@ -529,8 +526,7 @@ int intel_color_check(struct drm_crtc *crtc, void intel_color_init(struct drm_crtc *crtc) { - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); drm_mode_crtc_set_gamma_size(crtc, 256); @@ -549,10 +545,10 @@ void intel_color_init(struct drm_crtc *crtc) } /* Enable color management support when we have degamma & gamma LUTs. */ - if (INTEL_INFO(dev)->color.degamma_lut_size != 0 && - INTEL_INFO(dev)->color.gamma_lut_size != 0) + if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 && + INTEL_INFO(dev_priv)->color.gamma_lut_size != 0) drm_crtc_enable_color_mgmt(crtc, - INTEL_INFO(dev)->color.degamma_lut_size, - true, - INTEL_INFO(dev)->color.gamma_lut_size); + INTEL_INFO(dev_priv)->color.degamma_lut_size, + true, + INTEL_INFO(dev_priv)->color.gamma_lut_size); } diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 30eb95b54dcf..86ecec5601d4 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -147,14 +147,13 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, int mode) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crt *crt = intel_encoder_to_crt(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; u32 adpa; - if (INTEL_INFO(dev)->gen >= 5) + if (INTEL_GEN(dev_priv) >= 5) adpa = ADPA_HOTPLUG_BITS; else adpa = 0; @@ -673,8 +672,7 @@ static const struct dmi_system_id intel_spurious_crt_detect[] = { static enum drm_connector_status intel_crt_detect(struct drm_connector *connector, bool force) { - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_crt *crt = intel_attached_crt(connector); struct intel_encoder *intel_encoder = &crt->base; enum intel_display_power_domain power_domain; @@ -693,7 +691,7 @@ intel_crt_detect(struct drm_connector *connector, 
bool force) power_domain = intel_display_port_power_domain(intel_encoder); intel_display_power_get(dev_priv, power_domain); - if (I915_HAS_HOTPLUG(dev)) { + if (I915_HAS_HOTPLUG(dev_priv)) { /* We can not rely on the HPD pin always being correctly wired * up, for example many KVM do not pass it through, and so * only trust an assertion that the monitor is connected. @@ -715,7 +713,7 @@ intel_crt_detect(struct drm_connector *connector, bool force) * broken monitor (without edid) to work behind a broken kvm (that fails * to have the right resistors for HP detection) needs to fix this up. * For now just bail out. */ - if (I915_HAS_HOTPLUG(dev) && !i915.load_detect_test) { + if (I915_HAS_HOTPLUG(dev_priv) && !i915.load_detect_test) { status = connector_status_disconnected; goto out; } @@ -731,7 +729,7 @@ intel_crt_detect(struct drm_connector *connector, bool force) if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) { if (intel_crt_detect_ddc(connector)) status = connector_status_connected; - else if (INTEL_INFO(dev)->gen < 4) + else if (INTEL_GEN(dev_priv) < 4) status = intel_crt_load_detect(crt, to_intel_crtc(connector->state->crtc)->pipe); else if (i915.load_detect_test) @@ -793,11 +791,10 @@ static int intel_crt_set_property(struct drm_connector *connector, void intel_crt_reset(struct drm_encoder *encoder) { - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder)); - if (INTEL_INFO(dev)->gen >= 5) { + if (INTEL_GEN(dev_priv) >= 5) { u32 adpa; adpa = I915_READ(crt->adpa_reg); @@ -915,7 +912,7 @@ void intel_crt_init(struct drm_device *dev) crt->base.disable = intel_disable_crt; } crt->base.enable = intel_enable_crt; - if (I915_HAS_HOTPLUG(dev) && + if (I915_HAS_HOTPLUG(dev_priv) && !dmi_check_system(intel_spurious_crt_detect)) crt->base.hpd_pin = HPD_CRT; if (HAS_DDI(dev_priv)) { @@ -932,7 +929,7 @@ void intel_crt_init(struct drm_device *dev) drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); - if (!I915_HAS_HOTPLUG(dev)) + if (!I915_HAS_HOTPLUG(dev_priv)) intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; /* diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 938ac4dbcb45..10ec9d4b7d45 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1753,8 +1753,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder, struct drm_connector_state *old_conn_state) { struct drm_encoder *encoder = &intel_encoder->base; - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->dev); enum port port = intel_ddi_get_encoder_port(intel_encoder); int type = intel_encoder->type; uint32_t val; @@ -1787,7 +1786,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder, if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) | DPLL_CTRL2_DDI_CLK_OFF(port))); - else if (INTEL_INFO(dev)->gen < 9) + else if (INTEL_GEN(dev_priv) < 9) I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); if (type == INTEL_OUTPUT_HDMI) { @@ -1837,8 +1836,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder, struct drm_encoder *encoder = &intel_encoder->base; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_device *dev = encoder->dev; - 
struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->dev); enum port port = intel_ddi_get_encoder_port(intel_encoder); int type = intel_encoder->type; @@ -1856,7 +1854,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder, } else if (type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (port == PORT_A && INTEL_INFO(dev)->gen < 9) + if (port == PORT_A && INTEL_GEN(dev_priv) < 9) intel_dp_stop_link_train(intel_dp); intel_edp_backlight_on(intel_dp); @@ -1866,7 +1864,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder, if (intel_crtc->config->has_audio) { intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); - intel_audio_codec_enable(intel_encoder); + intel_audio_codec_enable(intel_encoder, pipe_config, conn_state); } } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 6fd6283fa1f8..b7a7ed82c325 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1035,9 +1035,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, return crtc->config->cpu_transcoder; } -static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) +static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t reg = PIPEDSL(pipe); u32 line1, line2; u32 line_mask; @@ -1072,12 +1071,11 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) */ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; enum pipe pipe = crtc->pipe; - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { i915_reg_t reg = PIPECONF(cpu_transcoder); /* Wait for the Pipe State to go off */ @@ -1087,7 +1085,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) WARN(1, "pipe_off wait timed out\n"); } else { /* Wait for the display line to settle */ - if (wait_for(pipe_dsl_stopped(dev, pipe), 100)) + if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100)) WARN(1, "pipe_off wait timed out\n"); } } @@ -1293,11 +1291,10 @@ static void assert_plane(struct drm_i915_private *dev_priv, static void assert_planes_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = &dev_priv->drm; int i; /* Primary planes are fixed to pipes on gen4+ */ - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { u32 val = I915_READ(DSPCNTR(pipe)); I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE, "plane %c assertion failure, should be disabled but not\n", @@ -1319,10 +1316,9 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv, static void assert_sprites_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = &dev_priv->drm; int sprite; - if (INTEL_INFO(dev)->gen >= 9) { + if (INTEL_GEN(dev_priv) >= 9) { for_each_sprite(dev_priv, pipe, sprite) { u32 val = I915_READ(PLANE_CTL(pipe, sprite)); I915_STATE_WARN(val & PLANE_CTL_ENABLE, @@ -1336,12 +1332,12 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv, "sprite %c assertion failure, should be off on pipe %c but is still active\n", sprite_name(pipe, sprite), pipe_name(pipe)); } - } else if (INTEL_INFO(dev)->gen >= 7) { + } else if 
(INTEL_GEN(dev_priv) >= 7) { u32 val = I915_READ(SPRCTL(pipe)); I915_STATE_WARN(val & SPRITE_ENABLE, "sprite %c assertion failure, should be off on pipe %c but is still active\n", plane_name(pipe), pipe_name(pipe)); - } else if (INTEL_INFO(dev)->gen >= 5) { + } else if (INTEL_GEN(dev_priv) >= 5) { u32 val = I915_READ(DVSCNTR(pipe)); I915_STATE_WARN(val & DVS_ENABLE, "sprite %c assertion failure, should be off on pipe %c but is still active\n", @@ -1595,12 +1591,12 @@ static void chv_enable_pll(struct intel_crtc *crtc, } } -static int intel_num_dvo_pipes(struct drm_device *dev) +static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv) { struct intel_crtc *crtc; int count = 0; - for_each_intel_crtc(dev, crtc) { + for_each_intel_crtc(&dev_priv->drm, crtc) { count += crtc->base.state->active && intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO); } @@ -1610,8 +1606,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev) static void i9xx_enable_pll(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); i915_reg_t reg = DPLL(crtc->pipe); u32 dpll = crtc->config->dpll_hw_state.dpll; @@ -1622,7 +1617,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc) assert_panel_unlocked(dev_priv, crtc->pipe); /* Enable DVO 2x clock on both PLLs if necessary */ - if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev) > 0) { + if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) { /* * It appears to be important that we don't enable this * for the current pipe before otherwise configuring the @@ -1647,7 +1642,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc) POSTING_READ(reg); udelay(150); - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { I915_WRITE(DPLL_MD(crtc->pipe), crtc->config->dpll_hw_state.dpll_md); } else { @@ -1682,14 +1677,13 @@ static void i9xx_enable_pll(struct intel_crtc *crtc) */ static void i9xx_disable_pll(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; /* Disable DVO 2x clock on both PLLs if necessary */ if (IS_I830(dev_priv) && intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) && - !intel_num_dvo_pipes(dev)) { + !intel_num_dvo_pipes(dev_priv)) { I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); I915_WRITE(DPLL(PIPE_A), @@ -2195,7 +2189,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - alignment = intel_surf_alignment(dev_priv, fb->modifier[0]); + alignment = intel_surf_alignment(dev_priv, fb->modifier); intel_fill_fb_ggtt_view(&view, fb, rotation); @@ -2356,13 +2350,13 @@ static u32 intel_adjust_tile_offset(int *x, int *y, WARN_ON(new_offset > old_offset); - if (fb->modifier[plane] != DRM_FORMAT_MOD_NONE) { + if (fb->modifier != DRM_FORMAT_MOD_NONE) { unsigned int tile_size, tile_width, tile_height; unsigned int pitch_tiles; tile_size = intel_tile_size(dev_priv); intel_tile_dims(dev_priv, &tile_width, &tile_height, - fb->modifier[plane], cpp); + fb->modifier, cpp); if (drm_rotation_90_or_270(rotation)) { pitch_tiles = pitch / tile_height; @@ -2405,7 +2399,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv, unsigned int rotation, u32 alignment) { - uint64_t fb_modifier = fb->modifier[plane]; + uint64_t fb_modifier = 
fb->modifier; unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); u32 offset, offset_aligned; @@ -2464,7 +2458,7 @@ u32 intel_compute_tile_offset(int *x, int *y, if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1) alignment = 4096; else - alignment = intel_surf_alignment(dev_priv, fb->modifier[plane]); + alignment = intel_surf_alignment(dev_priv, fb->modifier); return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch, rotation, alignment); @@ -2546,13 +2540,13 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, DRM_ROTATE_0, tile_size); offset /= tile_size; - if (fb->modifier[i] != DRM_FORMAT_MOD_NONE) { + if (fb->modifier != DRM_FORMAT_MOD_NONE) { unsigned int tile_width, tile_height; unsigned int pitch_tiles; struct drm_rect r; intel_tile_dims(dev_priv, &tile_width, &tile_height, - fb->modifier[i], cpp); + fb->modifier, cpp); rot_info->plane[i].offset = offset; rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp); @@ -2711,7 +2705,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, mode_cmd.width = fb->width; mode_cmd.height = fb->height; mode_cmd.pitches[0] = fb->pitches[0]; - mode_cmd.modifier[0] = fb->modifier[0]; + mode_cmd.modifier[0] = fb->modifier; mode_cmd.flags = DRM_MODE_FB_MODIFIERS; if (intel_framebuffer_init(dev, to_intel_framebuffer(fb), @@ -2841,7 +2835,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane, { int cpp = drm_format_plane_cpp(fb->pixel_format, plane); - switch (fb->modifier[plane]) { + switch (fb->modifier) { case DRM_FORMAT_MOD_NONE: case I915_FORMAT_MOD_X_TILED: switch (cpp) { @@ -2872,7 +2866,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane, } break; default: - MISSING_CASE(fb->modifier[plane]); + MISSING_CASE(fb->modifier); } return 2048; @@ -2900,7 +2894,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) intel_add_fb_offsets(&x, &y, plane_state, 0); offset = intel_compute_tile_offset(&x, &y, plane_state, 0); - alignment = intel_surf_alignment(dev_priv, fb->modifier[0]); + alignment = intel_surf_alignment(dev_priv, fb->modifier); /* * AUX surface offset is specified as the distance from the @@ -2917,7 +2911,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) * * TODO: linear and Y-tiled seem fine, Yf untested, */ - if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) { + if (fb->modifier == I915_FORMAT_MOD_X_TILED) { int cpp = drm_format_plane_cpp(fb->pixel_format, 0); while ((x + w) * cpp > fb->pitches[0]) { @@ -3004,11 +2998,9 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct drm_device *dev = primary->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(primary->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); int plane = intel_crtc->plane; u32 linear_offset; u32 dspcntr; @@ -3021,7 +3013,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, dspcntr |= DISPLAY_PLANE_ENABLE; - if (INTEL_INFO(dev)->gen < 4) { + if (INTEL_GEN(dev_priv) < 4) { if (intel_crtc->pipe == PIPE_B) dspcntr |= DISPPLANE_SEL_PIPE_B; @@ -3067,28 +3059,34 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, } if (INTEL_GEN(dev_priv) >= 4 && - fb->modifier[0] == I915_FORMAT_MOD_X_TILED) + 
fb->modifier == I915_FORMAT_MOD_X_TILED) dspcntr |= DISPPLANE_TILED; + if (rotation & DRM_ROTATE_180) + dspcntr |= DISPPLANE_ROTATE_180; + + if (rotation & DRM_REFLECT_X) + dspcntr |= DISPPLANE_MIRROR; + if (IS_G4X(dev_priv)) dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; intel_add_fb_offsets(&x, &y, plane_state, 0); - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) intel_crtc->dspaddr_offset = intel_compute_tile_offset(&x, &y, plane_state, 0); - if (rotation == DRM_ROTATE_180) { - dspcntr |= DISPPLANE_ROTATE_180; - - x += (crtc_state->pipe_src_w - 1); - y += (crtc_state->pipe_src_h - 1); + if (rotation & DRM_ROTATE_180) { + x += crtc_state->pipe_src_w - 1; + y += crtc_state->pipe_src_h - 1; + } else if (rotation & DRM_REFLECT_X) { + x += crtc_state->pipe_src_w - 1; } linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); - if (INTEL_INFO(dev)->gen < 4) + if (INTEL_GEN(dev_priv) < 4) intel_crtc->dspaddr_offset = linear_offset; intel_crtc->adjusted_x = x; @@ -3097,14 +3095,17 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, I915_WRITE(reg, dspcntr); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { I915_WRITE(DSPSURF(plane), intel_fb_gtt_offset(fb, rotation) + intel_crtc->dspaddr_offset); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPLINOFF(plane), linear_offset); - } else - I915_WRITE(DSPADDR(plane), i915_gem_object_ggtt_offset(obj, NULL) + linear_offset); + } else { + I915_WRITE(DSPADDR(plane), + intel_fb_gtt_offset(fb, rotation) + + intel_crtc->dspaddr_offset); + } POSTING_READ(reg); } @@ -3169,9 +3170,12 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, BUG(); } - if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) + if (fb->modifier == I915_FORMAT_MOD_X_TILED) dspcntr |= DISPPLANE_TILED; + if (rotation & DRM_ROTATE_180) + dspcntr |= DISPPLANE_ROTATE_180; + if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; @@ -3180,13 +3184,11 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, intel_crtc->dspaddr_offset = intel_compute_tile_offset(&x, &y, plane_state, 0); - if (rotation == DRM_ROTATE_180) { - dspcntr |= DISPPLANE_ROTATE_180; - - if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { - x += (crtc_state->pipe_src_w - 1); - y += (crtc_state->pipe_src_h - 1); - } + /* HSW+ does this automagically in hardware */ + if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) && + rotation & DRM_ROTATE_180) { + x += crtc_state->pipe_src_w - 1; + y += crtc_state->pipe_src_h - 1; } linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); @@ -3278,9 +3280,9 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane, if (drm_rotation_90_or_270(rotation)) { int cpp = drm_format_plane_cpp(fb->pixel_format, plane); - stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp); + stride /= intel_tile_height(dev_priv, fb->modifier, cpp); } else { - stride /= intel_fb_stride_alignment(dev_priv, fb->modifier[0], + stride /= intel_fb_stride_alignment(dev_priv, fb->modifier, fb->pixel_format); } @@ -3376,9 +3378,6 @@ static void skylake_update_primary_plane(struct drm_plane *plane, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = plane_state->base.fb; - const struct skl_wm_values *wm = &dev_priv->wm.skl_results; - const struct skl_plane_wm *p_wm = - &crtc_state->wm.skl.optimal.planes[0]; int pipe = 
intel_crtc->pipe; u32 plane_ctl; unsigned int rotation = plane_state->base.rotation; @@ -3399,7 +3398,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane, PLANE_CTL_PIPE_CSC_ENABLE; plane_ctl |= skl_plane_ctl_format(fb->pixel_format); - plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]); + plane_ctl |= skl_plane_ctl_tiling(fb->modifier); plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; plane_ctl |= skl_plane_ctl_rotation(rotation); @@ -3414,9 +3413,6 @@ static void skylake_update_primary_plane(struct drm_plane *plane, intel_crtc->adjusted_x = src_x; intel_crtc->adjusted_y = src_y; - if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) - skl_write_plane_wm(intel_crtc, p_wm, &wm->ddb, 0); - I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x); I915_WRITE(PLANE_STRIDE(pipe, 0), stride); @@ -3449,18 +3445,8 @@ static void skylake_disable_primary_plane(struct drm_plane *primary, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); - const struct skl_plane_wm *p_wm = &cstate->wm.skl.optimal.planes[0]; int pipe = intel_crtc->pipe; - /* - * We only populate skl_results on watermark updates, and if the - * plane's visiblity isn't actually changing neither is its watermarks. - */ - if (!crtc->primary->state->visible) - skl_write_plane_wm(intel_crtc, p_wm, - &dev_priv->wm.skl_results.ddb, 0); - I915_WRITE(PLANE_CTL(pipe, 0), 0); I915_WRITE(PLANE_SURF(pipe, 0), 0); POSTING_READ(PLANE_SURF(pipe, 0)); @@ -3510,7 +3496,7 @@ __intel_display_resume(struct drm_device *dev, int i, ret; intel_modeset_setup_hw_state(dev); - i915_redisable_vga(dev); + i915_redisable_vga(to_i915(dev)); if (!state) return 0; @@ -3687,8 +3673,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) static void intel_update_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *old_crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->base.state); @@ -3713,7 +3698,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc, (pipe_config->pipe_src_h - 1)); /* on skylake this is done by detaching scalers */ - if (INTEL_INFO(dev)->gen >= 9) { + if (INTEL_GEN(dev_priv) >= 9) { skl_detach_scalers(crtc); if (pipe_config->pch_pfit.enabled) @@ -4734,13 +4719,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, */ int skl_update_scaler_crtc(struct intel_crtc_state *state) { - struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc); const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; - DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n", - intel_crtc->base.base.id, intel_crtc->base.name, - intel_crtc->pipe, SKL_CRTC_INDEX); - return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, &state->scaler_state.scaler_id, DRM_ROTATE_0, state->pipe_src_w, state->pipe_src_h, @@ -4761,7 +4741,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct intel_plane *intel_plane = to_intel_plane(plane_state->base.plane); struct drm_framebuffer *fb = plane_state->base.fb; @@ -4769,10 +4748,6 @@ static int 
skl_update_scaler_plane(struct intel_crtc_state *crtc_state, bool force_detach = !fb || !plane_state->base.visible; - DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n", - intel_plane->base.base.id, intel_plane->base.name, - intel_crtc->pipe, drm_plane_index(&intel_plane->base)); - ret = skl_update_scaler(crtc_state, force_detach, drm_plane_index(&intel_plane->base), &plane_state->scaler_id, @@ -5096,6 +5071,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) struct drm_plane_state *old_pri_state = drm_atomic_get_existing_plane_state(old_state, primary); bool modeset = needs_modeset(&pipe_config->base); + struct intel_atomic_state *old_intel_state = + to_intel_atomic_state(old_state); if (old_pri_state) { struct intel_plane_state *primary_state = @@ -5163,7 +5140,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) * us to. */ if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(pipe_config); + dev_priv->display.initial_watermarks(old_intel_state, + pipe_config); else if (pipe_config->update_wm_pre) intel_update_watermarks(crtc); } @@ -5319,6 +5297,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; + struct intel_atomic_state *old_intel_state = + to_intel_atomic_state(old_state); if (WARN_ON(intel_crtc->active)) return; @@ -5377,7 +5357,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, intel_color_load_luts(&pipe_config->base); if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(intel_crtc->config); + dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config); intel_enable_pipe(intel_crtc); if (intel_crtc->config->has_pch_encoder) @@ -5408,11 +5388,12 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, struct drm_atomic_state *old_state) { struct drm_crtc *crtc = pipe_config->base.crtc; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe, hsw_workaround_pipe; enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; + struct intel_atomic_state *old_intel_state = + to_intel_atomic_state(old_state); if (WARN_ON(intel_crtc->active)) return; @@ -5467,7 +5448,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_enable_pipe_clock(intel_crtc); - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) skylake_pfit_enable(intel_crtc); else ironlake_pfit_enable(intel_crtc); @@ -5483,7 +5464,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, intel_ddi_enable_transcoder_func(crtc); if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(pipe_config); + dev_priv->display.initial_watermarks(old_intel_state, + pipe_config); else intel_update_watermarks(intel_crtc); @@ -5494,7 +5476,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, if (intel_crtc->config->has_pch_encoder) lpt_pch_enable(crtc); - if (intel_crtc->config->dp_encoder_is_mst) + if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST)) intel_ddi_set_vc_payload_alloc(crtc, true); assert_vblank_disabled(crtc); @@ -5599,8 +5581,7 
@@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, struct drm_atomic_state *old_state) { struct drm_crtc *crtc = old_crtc_state->base.crtc; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; @@ -5617,13 +5598,13 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, if (!transcoder_is_dsi(cpu_transcoder)) intel_disable_pipe(intel_crtc); - if (intel_crtc->config->dp_encoder_is_mst) + if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST)) intel_ddi_set_vc_payload_alloc(crtc, false); if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) skylake_scaler_disable(intel_crtc); else ironlake_pfit_disable(intel_crtc, false); @@ -7051,7 +7032,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, } } - if (INTEL_INFO(dev)->num_pipes == 2) + if (INTEL_INFO(dev_priv)->num_pipes == 2) return 0; /* Ivybridge 3 pipe is really complicated */ @@ -7192,7 +7173,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; int clock_limit = dev_priv->max_dotclk_freq; - if (INTEL_INFO(dev)->gen < 4) { + if (INTEL_GEN(dev_priv) < 4) { clock_limit = dev_priv->max_cdclk_freq * 9 / 10; /* @@ -7786,12 +7767,11 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, struct intel_link_m_n *m_n, struct intel_link_m_n *m2_n2) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); int pipe = crtc->pipe; enum transcoder transcoder = crtc->config->cpu_transcoder; - if (INTEL_INFO(dev)->gen >= 5) { + if (INTEL_GEN(dev_priv) >= 5) { I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); @@ -8245,8 +8225,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) { - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); enum pipe pipe = intel_crtc->pipe; enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; @@ -8272,7 +8251,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) vsyncshift += adjusted_mode->crtc_htotal; } - if (INTEL_INFO(dev)->gen > 3) + if (INTEL_GEN(dev_priv) > 3) I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); I915_WRITE(HTOTAL(cpu_transcoder), @@ -8395,8 +8374,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) { - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); uint32_t pipeconf; pipeconf = 0; @@ -8432,7 +8410,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) } } - if (HAS_PIPE_CXSR(dev)) { + if (HAS_PIPE_CXSR(dev_priv)) { if (intel_crtc->lowfreq_avail) { 
DRM_DEBUG_KMS("enabling CxSR downclocking\n"); pipeconf |= PIPECONF_CXSR_DOWNCLOCK; @@ -8442,7 +8420,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) } if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { - if (INTEL_INFO(dev)->gen < 4 || + if (INTEL_GEN(dev_priv) < 4 || intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; else @@ -8650,8 +8628,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc, static void i9xx_get_pfit_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); uint32_t tmp; if (INTEL_GEN(dev_priv) <= 3 && @@ -8663,7 +8640,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc, return; /* Check whether the pfit is attached to our pipe. */ - if (INTEL_INFO(dev)->gen < 4) { + if (INTEL_GEN(dev_priv) < 4) { if (crtc->pipe != PIPE_B) return; } else { @@ -8727,10 +8704,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, fb = &intel_fb->base; - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { if (val & DISPPLANE_TILED) { plane_config->tiling = I915_TILING_X; - fb->modifier[0] = I915_FORMAT_MOD_X_TILED; + fb->modifier = I915_FORMAT_MOD_X_TILED; } } @@ -8739,7 +8716,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, fb->pixel_format = fourcc; fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8; - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { if (plane_config->tiling) offset = I915_READ(DSPTILEOFF(plane)); else @@ -8759,7 +8736,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, aligned_height = intel_fb_align_height(dev, fb->height, fb->pixel_format, - fb->modifier[0]); + fb->modifier); plane_config->size = fb->pitches[0] * aligned_height; @@ -8808,8 +8785,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, static bool i9xx_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain power_domain; uint32_t tmp; bool ret; @@ -8848,7 +8824,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, (tmp & PIPECONF_COLOR_RANGE_SELECT)) pipe_config->limited_color_range = true; - if (INTEL_INFO(dev)->gen < 4) + if (INTEL_GEN(dev_priv) < 4) pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; intel_get_pipe_timings(crtc, pipe_config); @@ -8856,7 +8832,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, i9xx_get_pfit_config(crtc, pipe_config); - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { /* No way to read it out on pipes B and C */ if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) tmp = dev_priv->chv_dpll_md[crtc->pipe]; @@ -9653,11 +9629,10 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, struct intel_link_m_n *m_n, struct intel_link_m_n *m2_n2) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - if (INTEL_INFO(dev)->gen >= 5) { + if (INTEL_GEN(dev_priv) >= 5) { m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) @@ 
-9669,7 +9644,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, * gen < 8) and if DRRS is supported (to make sure the * registers are not unnecessarily read). */ - if (m2_n2 && INTEL_INFO(dev)->gen < 8 && + if (m2_n2 && INTEL_GEN(dev_priv) < 8 && crtc->config->has_drrs) { m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); @@ -9773,17 +9748,17 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, tiling = val & PLANE_CTL_TILED_MASK; switch (tiling) { case PLANE_CTL_TILED_LINEAR: - fb->modifier[0] = DRM_FORMAT_MOD_NONE; + fb->modifier = DRM_FORMAT_MOD_NONE; break; case PLANE_CTL_TILED_X: plane_config->tiling = I915_TILING_X; - fb->modifier[0] = I915_FORMAT_MOD_X_TILED; + fb->modifier = I915_FORMAT_MOD_X_TILED; break; case PLANE_CTL_TILED_Y: - fb->modifier[0] = I915_FORMAT_MOD_Y_TILED; + fb->modifier = I915_FORMAT_MOD_Y_TILED; break; case PLANE_CTL_TILED_YF: - fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED; + fb->modifier = I915_FORMAT_MOD_Yf_TILED; break; default: MISSING_CASE(tiling); @@ -9800,13 +9775,13 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, fb->width = ((val >> 0) & 0x1fff) + 1; val = I915_READ(PLANE_STRIDE(pipe, 0)); - stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0], + stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier, fb->pixel_format); fb->pitches[0] = (val & 0x3ff) * stride_mult; aligned_height = intel_fb_align_height(dev, fb->height, fb->pixel_format, - fb->modifier[0]); + fb->modifier); plane_config->size = fb->pitches[0] * aligned_height; @@ -9871,10 +9846,10 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, fb = &intel_fb->base; - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { if (val & DISPPLANE_TILED) { plane_config->tiling = I915_TILING_X; - fb->modifier[0] = I915_FORMAT_MOD_X_TILED; + fb->modifier = I915_FORMAT_MOD_X_TILED; } } @@ -9903,7 +9878,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, aligned_height = intel_fb_align_height(dev, fb->height, fb->pixel_format, - fb->modifier[0]); + fb->modifier); plane_config->size = fb->pitches[0] * aligned_height; @@ -10661,8 +10636,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, static void haswell_get_ddi_port_state(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum port port; uint32_t tmp; @@ -10689,7 +10663,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, * DDI E. So just check whether this pipe is wired to DDI E and whether * the PCH transcoder is on. 
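In the plane readout paths just above, drm_framebuffer's per-plane modifier array has been collapsed to a single fb->modifier, so the PLANE_CTL tiling field now maps to one modifier for the whole framebuffer. A sketch of that mapping with illustrative local constants; the names mirror the diff, but the actual bit values come from the i915 register definitions:

#include <stdint.h>

/* Illustrative stand-ins for the PLANE_CTL tiling field. */
#define PLANE_CTL_TILED_MASK   (0x7 << 10)
#define PLANE_CTL_TILED_LINEAR (0 << 10)
#define PLANE_CTL_TILED_X      (1 << 10)
#define PLANE_CTL_TILED_Y      (4 << 10)
#define PLANE_CTL_TILED_YF     (5 << 10)

enum fb_modifier {
        MOD_NONE,
        MOD_X_TILED,
        MOD_Y_TILED,
        MOD_Yf_TILED,
        MOD_INVALID,
};

/* Map the hardware tiling bits to the framebuffer's single modifier. */
static enum fb_modifier skl_tiling_to_modifier(uint32_t plane_ctl)
{
        switch (plane_ctl & PLANE_CTL_TILED_MASK) {
        case PLANE_CTL_TILED_LINEAR: return MOD_NONE;
        case PLANE_CTL_TILED_X:      return MOD_X_TILED;
        case PLANE_CTL_TILED_Y:      return MOD_Y_TILED;
        case PLANE_CTL_TILED_YF:     return MOD_Yf_TILED;
        default:                     return MOD_INVALID; /* MISSING_CASE() upstream */
        }
}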
*/ - if (INTEL_INFO(dev)->gen < 9 && + if (INTEL_GEN(dev_priv) < 9 && (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { pipe_config->has_pch_encoder = true; @@ -10704,8 +10678,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, static bool haswell_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain power_domain; unsigned long power_domain_mask; bool active; @@ -10738,7 +10711,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; - if (INTEL_INFO(dev)->gen >= 9) { + if (INTEL_GEN(dev_priv) >= 9) { skl_init_scalers(dev_priv, crtc, pipe_config); pipe_config->scaler_state.scaler_id = -1; @@ -10748,7 +10721,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { power_domain_mask |= BIT(power_domain); - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) skylake_get_pfit_config(crtc, pipe_config); else ironlake_get_pfit_config(crtc, pipe_config); @@ -10842,16 +10815,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); - const struct skl_wm_values *wm = &dev_priv->wm.skl_results; - const struct skl_plane_wm *p_wm = - &cstate->wm.skl.optimal.planes[PLANE_CURSOR]; int pipe = intel_crtc->pipe; uint32_t cntl = 0; - if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc)) - skl_write_cursor_wm(intel_crtc, p_wm, &wm->ddb); - if (plane_state && plane_state->base.visible) { cntl = MCURSOR_GAMMA_ENABLE; switch (plane_state->base.crtc_w) { @@ -10873,7 +10839,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, if (HAS_DDI(dev_priv)) cntl |= CURSOR_PIPE_CSC_ENABLE; - if (plane_state->base.rotation == DRM_ROTATE_180) + if (plane_state->base.rotation & DRM_ROTATE_180) cntl |= CURSOR_ROTATE_180; } @@ -10919,7 +10885,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, /* ILK+ do this automagically */ if (HAS_GMCH_DISPLAY(dev_priv) && - plane_state->base.rotation == DRM_ROTATE_180) { + plane_state->base.rotation & DRM_ROTATE_180) { base += (plane_state->base.crtc_h * plane_state->base.crtc_w - 1) * 4; } @@ -11818,7 +11784,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0]); intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset | - intel_fb_modifier_to_tiling(fb->modifier[0])); + intel_fb_modifier_to_tiling(fb->modifier)); /* XXX Enabling the panel-fitter across page-flip is so far * untested on non-native modes, so ignore it for now. 
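The cursor hunks just above change rotation tests from equality to bit tests because plane rotation is a bitmask which, as of the primary-plane changes earlier in this diff, can also carry reflection bits such as DRM_REFLECT_X. A small standalone example of why the equality test stops matching once flags are combined; the bit values follow drm's BIT(n)-style rotation property:

#include <stdbool.h>
#include <stdio.h>

#define DRM_ROTATE_0   (1 << 0)
#define DRM_ROTATE_180 (1 << 2)
#define DRM_REFLECT_X  (1 << 4)

int main(void)
{
        unsigned int rotation = DRM_ROTATE_180 | DRM_REFLECT_X;

        /* Old test: fails as soon as any reflection bit is also set. */
        bool old_match = (rotation == DRM_ROTATE_180);
        /* New test: matches whenever the 180 degree bit is present. */
        bool new_match = (rotation & DRM_ROTATE_180) != 0;

        printf("old=%d new=%d\n", old_match, new_match); /* prints old=0 new=1 */
        return 0;
}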
@@ -11851,7 +11817,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0] | - intel_fb_modifier_to_tiling(fb->modifier[0])); + intel_fb_modifier_to_tiling(fb->modifier)); intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); /* Contrary to the suggestions in the documentation, @@ -11957,7 +11923,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); intel_ring_emit(ring, fb->pitches[0] | - intel_fb_modifier_to_tiling(fb->modifier[0])); + intel_fb_modifier_to_tiling(fb->modifier)); intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); intel_ring_emit(ring, (MI_NOOP)); @@ -12003,7 +11969,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, ctl = I915_READ(PLANE_CTL(pipe, 0)); ctl &= ~PLANE_CTL_TILED_MASK; - switch (fb->modifier[0]) { + switch (fb->modifier) { case DRM_FORMAT_MOD_NONE: break; case I915_FORMAT_MOD_X_TILED: @@ -12016,7 +11982,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, ctl |= PLANE_CTL_TILED_YF; break; default: - MISSING_CASE(fb->modifier[0]); + MISSING_CASE(fb->modifier); } /* @@ -12041,7 +12007,7 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, dspcntr = I915_READ(reg); - if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) + if (fb->modifier == I915_FORMAT_MOD_X_TILED) dspcntr |= DISPPLANE_TILED; else dspcntr &= ~DISPPLANE_TILED; @@ -12062,6 +12028,7 @@ static void intel_mmio_flip_work_func(struct work_struct *w) to_intel_framebuffer(crtc->base.primary->fb); struct drm_i915_gem_object *obj = intel_fb->obj; + i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY); WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0); intel_pipe_update_start(crtc); @@ -12186,7 +12153,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, * TILEOFF/LINOFF registers can't be changed via MI display flips. * Note that pitch changes could also affect these register. 
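intel_mmio_flip_work_func() above gains an i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY) call ahead of the blocking wait, so outstanding rendering on the framebuffer is reprioritized before the flip sleeps on it. A hedged standalone analogue of the pattern; the stub signatures below only mirror the call shape visible in the hunk, not the real i915 API:

#include <stddef.h>

struct gem_object; /* opaque stand-in for drm_i915_gem_object */

#define PRIORITY_DISPLAY      1
#define MAX_SCHEDULE_TIMEOUT  (~0UL >> 1)

/* Assume these behave like their i915 namesakes. */
extern void gem_object_wait_priority(struct gem_object *obj,
                                     unsigned int flags, int prio);
extern long gem_object_wait(struct gem_object *obj, unsigned int flags,
                            long timeout, void *rps);

static void flip_wait_for_render(struct gem_object *obj)
{
        /* First raise any requests still writing the fb to display priority, */
        gem_object_wait_priority(obj, 0, PRIORITY_DISPLAY);
        /* then block until rendering completes before arming the flip. */
        gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL);
}

The boost matters because the flip worker otherwise waits at whatever priority the render work was queued with, and a busy GPU can hold the flip past its vblank.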
*/ - if (INTEL_INFO(dev)->gen > 3 && + if (INTEL_GEN(dev_priv) > 3 && (fb->offsets[0] != crtc->primary->fb->offsets[0] || fb->pitches[0] != crtc->primary->fb->pitches[0])) return -EINVAL; @@ -12256,12 +12223,12 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { engine = dev_priv->engine[BCS]; - if (fb->modifier[0] != old_fb->modifier[0]) + if (fb->modifier != old_fb->modifier) /* vlv: DISPLAY_FLIP fails to change tiling */ engine = NULL; } else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) { engine = dev_priv->engine[BCS]; - } else if (INTEL_INFO(dev)->gen >= 7) { + } else if (INTEL_GEN(dev_priv) >= 7) { engine = i915_gem_object_last_write_engine(obj); if (engine == NULL || engine->id != RCS) engine = dev_priv->engine[BCS]; @@ -12412,7 +12379,7 @@ static bool intel_wm_need_update(struct drm_plane *plane, if (!cur->base.fb || !new->base.fb) return false; - if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] || + if (cur->base.fb->modifier != new->base.fb->modifier || cur->base.rotation != new->base.rotation || drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) || drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) || @@ -12518,7 +12485,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, /* Pre-gen9 platforms need two-step watermark updates */ if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) && - INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks) + INTEL_GEN(dev_priv) < 9 && dev_priv->display.optimize_watermarks) to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true; if (visible || was_visible) @@ -12623,7 +12590,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, * old state and the new state. We can program these * immediately. 
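The comment being reflowed above describes ILK-style two-step watermark programming: between the commit and the next vblank the hardware must run with intermediate watermarks that are safe for both the old and the new state, and only afterwards are the optimal values written. A toy sketch of the merge rule, assuming the usual convention that the per-level maximum is the safe choice (the real logic is in the driver's compute_intermediate_wm hook):

struct wm_level { unsigned int pri, spr, cur; };

#define WM_LEVELS 5

static unsigned int max_u(unsigned int a, unsigned int b)
{
        return a > b ? a : b;
}

/* Intermediate watermarks must satisfy both the outgoing and the
 * incoming state, so take the worst case of each level. */
static void compute_intermediate_wm(struct wm_level *inter,
                                    const struct wm_level *old_wm,
                                    const struct wm_level *new_wm)
{
        for (int i = 0; i < WM_LEVELS; i++) {
                inter[i].pri = max_u(old_wm[i].pri, new_wm[i].pri);
                inter[i].spr = max_u(old_wm[i].spr, new_wm[i].spr);
                inter[i].cur = max_u(old_wm[i].cur, new_wm[i].cur);
        }
}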
*/ - ret = dev_priv->display.compute_intermediate_wm(crtc->dev, + ret = dev_priv->display.compute_intermediate_wm(dev, intel_crtc, pipe_config); if (ret) { @@ -12635,7 +12602,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal; } - if (INTEL_INFO(dev)->gen >= 9) { + if (INTEL_GEN(dev_priv) >= 9) { if (mode_changed) ret = skl_update_scaler_crtc(pipe_config); @@ -12748,6 +12715,16 @@ static void intel_dump_crtc_timings(const struct drm_display_mode *mode) mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags); } +static inline void +intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id, + unsigned int lane_count, struct intel_link_m_n *m_n) +{ + DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", + id, lane_count, + m_n->gmch_m, m_n->gmch_n, + m_n->link_m, m_n->link_n, m_n->tu); +} + static void intel_dump_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config, const char *context) @@ -12759,61 +12736,58 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, struct intel_plane_state *state; struct drm_framebuffer *fb; - DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n", - crtc->base.base.id, crtc->base.name, - context, pipe_config, pipe_name(crtc->pipe)); + DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n", + crtc->base.base.id, crtc->base.name, context); - DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder)); - DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n", + DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", + transcoder_name(pipe_config->cpu_transcoder), pipe_config->pipe_bpp, pipe_config->dither); - DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", - pipe_config->has_pch_encoder, - pipe_config->fdi_lanes, - pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n, - pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n, - pipe_config->fdi_m_n.tu); - DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", - intel_crtc_has_dp_encoder(pipe_config), - pipe_config->lane_count, - pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n, - pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n, - pipe_config->dp_m_n.tu); - - DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n", - intel_crtc_has_dp_encoder(pipe_config), - pipe_config->lane_count, - pipe_config->dp_m2_n2.gmch_m, - pipe_config->dp_m2_n2.gmch_n, - pipe_config->dp_m2_n2.link_m, - pipe_config->dp_m2_n2.link_n, - pipe_config->dp_m2_n2.tu); + + if (pipe_config->has_pch_encoder) + intel_dump_m_n_config(pipe_config, "fdi", + pipe_config->fdi_lanes, + &pipe_config->fdi_m_n); + + if (intel_crtc_has_dp_encoder(pipe_config)) { + intel_dump_m_n_config(pipe_config, "dp m_n", + pipe_config->lane_count, &pipe_config->dp_m_n); + if (pipe_config->has_drrs) + intel_dump_m_n_config(pipe_config, "dp m2_n2", + pipe_config->lane_count, + &pipe_config->dp_m2_n2); + } DRM_DEBUG_KMS("audio: %i, infoframes: %i\n", - pipe_config->has_audio, - pipe_config->has_infoframe); + pipe_config->has_audio, pipe_config->has_infoframe); DRM_DEBUG_KMS("requested mode:\n"); drm_mode_debug_printmodeline(&pipe_config->base.mode); DRM_DEBUG_KMS("adjusted mode:\n"); drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode); intel_dump_crtc_timings(&pipe_config->base.adjusted_mode); - DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock); - 
DRM_DEBUG_KMS("pipe src size: %dx%d\n", + DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d\n", + pipe_config->port_clock, pipe_config->pipe_src_w, pipe_config->pipe_src_h); - DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", - crtc->num_scalers, - pipe_config->scaler_state.scaler_users, - pipe_config->scaler_state.scaler_id); - DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", - pipe_config->gmch_pfit.control, - pipe_config->gmch_pfit.pgm_ratios, - pipe_config->gmch_pfit.lvds_border_bits); - DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n", - pipe_config->pch_pfit.pos, - pipe_config->pch_pfit.size, - pipe_config->pch_pfit.enabled ? "enabled" : "disabled"); - DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled); - DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide); + + if (INTEL_GEN(dev_priv) >= 9) + DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", + crtc->num_scalers, + pipe_config->scaler_state.scaler_users, + pipe_config->scaler_state.scaler_id); + + if (HAS_GMCH_DISPLAY(dev_priv)) + DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", + pipe_config->gmch_pfit.control, + pipe_config->gmch_pfit.pgm_ratios, + pipe_config->gmch_pfit.lvds_border_bits); + else + DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n", + pipe_config->pch_pfit.pos, + pipe_config->pch_pfit.size, + enableddisabled(pipe_config->pch_pfit.enabled)); + + DRM_DEBUG_KMS("ips: %i, double wide: %i\n", + pipe_config->ips_enabled, pipe_config->double_wide); if (IS_BROXTON(dev_priv)) { DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x," @@ -12864,20 +12838,20 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, continue; } - DRM_DEBUG_KMS("[PLANE:%d:%s] enabled", - plane->base.id, plane->name); - DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s", + DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n", + plane->base.id, plane->name, fb->base.id, fb->width, fb->height, drm_get_format_name(fb->pixel_format, &format_name)); - DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n", - state->scaler_id, - state->base.src.x1 >> 16, - state->base.src.y1 >> 16, - drm_rect_width(&state->base.src) >> 16, - drm_rect_height(&state->base.src) >> 16, - state->base.dst.x1, state->base.dst.y1, - drm_rect_width(&state->base.dst), - drm_rect_height(&state->base.dst)); + if (INTEL_GEN(dev_priv) >= 9) + DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n", + state->scaler_id, + state->base.src.x1 >> 16, + state->base.src.y1 >> 16, + drm_rect_width(&state->base.src) >> 16, + drm_rect_height(&state->base.src) >> 16, + state->base.dst.x1, state->base.dst.y1, + drm_rect_width(&state->base.dst), + drm_rect_height(&state->base.dst)); } } @@ -13192,12 +13166,11 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n, } static bool -intel_pipe_config_compare(struct drm_device *dev, +intel_pipe_config_compare(struct drm_i915_private *dev_priv, struct intel_crtc_state *current_config, struct intel_crtc_state *pipe_config, bool adjust) { - struct drm_i915_private *dev_priv = to_i915(dev); bool ret = true; #define INTEL_ERR_OR_DBG_KMS(fmt, ...) 
\ @@ -13317,7 +13290,7 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(lane_count); PIPE_CONF_CHECK_X(lane_lat_optim_mask); - if (INTEL_INFO(dev)->gen < 8) { + if (INTEL_GEN(dev_priv) < 8) { PIPE_CONF_CHECK_M_N(dp_m_n); if (current_config->has_drrs) @@ -13366,7 +13339,7 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_X(gmch_pfit.control); /* pfit ratios are autocomputed by the hw on gen4+ */ - if (INTEL_INFO(dev)->gen < 4) + if (INTEL_GEN(dev_priv) < 4) PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); @@ -13441,8 +13414,7 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, static void verify_wm_state(struct drm_crtc *crtc, struct drm_crtc_state *new_state) { - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct skl_ddb_allocation hw_ddb, *sw_ddb; struct skl_pipe_wm hw_wm, *sw_wm; struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; @@ -13451,7 +13423,7 @@ static void verify_wm_state(struct drm_crtc *crtc, const enum pipe pipe = intel_crtc->pipe; int plane, level, max_level = ilk_wm_max_level(dev_priv); - if (INTEL_INFO(dev)->gen < 9 || !new_state->active) + if (INTEL_GEN(dev_priv) < 9 || !new_state->active) return; skl_pipe_wm_get_hw_state(crtc, &hw_wm); @@ -13557,11 +13529,15 @@ static void verify_wm_state(struct drm_crtc *crtc, } static void -verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc) +verify_connector_state(struct drm_device *dev, + struct drm_atomic_state *state, + struct drm_crtc *crtc) { struct drm_connector *connector; + struct drm_connector_state *old_conn_state; + int i; - drm_for_each_connector(connector, dev) { + for_each_connector_in_state(state, connector, old_conn_state, i) { struct drm_encoder *encoder = connector->encoder; struct drm_connector_state *state = connector->state; @@ -13676,7 +13652,7 @@ verify_crtc_state(struct drm_crtc *crtc, intel_pipe_config_sanity_check(dev_priv, pipe_config); sw_config = to_intel_crtc_state(crtc->state); - if (!intel_pipe_config_compare(dev, sw_config, + if (!intel_pipe_config_compare(dev_priv, sw_config, pipe_config, false)) { I915_STATE_WARN(1, "pipe state doesn't match!\n"); intel_dump_pipe_config(intel_crtc, pipe_config, @@ -13769,15 +13745,16 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc, static void intel_modeset_verify_crtc(struct drm_crtc *crtc, - struct drm_crtc_state *old_state, - struct drm_crtc_state *new_state) + struct drm_atomic_state *state, + struct drm_crtc_state *old_state, + struct drm_crtc_state *new_state) { if (!needs_modeset(new_state) && !to_intel_crtc_state(new_state)->update_pipe) return; verify_wm_state(crtc, new_state); - verify_connector_state(crtc->dev, crtc); + verify_connector_state(crtc->dev, state, crtc); verify_crtc_state(crtc, old_state, new_state); verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state); } @@ -13793,10 +13770,11 @@ verify_disabled_dpll_state(struct drm_device *dev) } static void -intel_modeset_verify_disabled(struct drm_device *dev) +intel_modeset_verify_disabled(struct drm_device *dev, + struct drm_atomic_state *state) { verify_encoder_state(dev); - verify_connector_state(dev, NULL); + verify_connector_state(dev, state, NULL); verify_disabled_dpll_state(dev); } @@ -14094,7 +14072,7 @@ static int intel_atomic_check(struct drm_device *dev, } if (i915.fastboot && - intel_pipe_config_compare(dev, + 
intel_pipe_config_compare(dev_priv, to_intel_crtc_state(crtc->state), pipe_config, true)) { crtc_state->mode_changed = false; @@ -14294,6 +14272,14 @@ static void skl_update_crtcs(struct drm_atomic_state *state, unsigned int updated = 0; bool progress; enum pipe pipe; + int i; + + const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {}; + + for_each_crtc_in_state(state, crtc, old_crtc_state, i) + /* ignore allocations for crtcs that have been turned off. */ + if (crtc->state->active) + entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb; /* * Whenever the number of active pipes changes, we need to make sure we * cause pipe underruns and other bad stuff. */ do { - int i; progress = false; for_each_crtc_in_state(state, crtc, old_crtc_state, i) { @@ -14313,12 +14298,14 @@ static void skl_update_crtcs(struct drm_atomic_state *state, cstate = to_intel_crtc_state(crtc->state); pipe = intel_crtc->pipe; - if (updated & cmask || !crtc->state->active) + if (updated & cmask || !cstate->base.active) continue; - if (skl_ddb_allocation_overlaps(state, intel_crtc)) + + if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i)) continue; updated |= cmask; + entries[i] = &cstate->wm.skl.ddb; /* * If this is an already active pipe, it's DDB changed, @@ -14327,7 +14314,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state, if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb, - &intel_crtc->hw_ddb) && + &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) && !crtc->state->active_changed && intel_state->wm_results.dirty_pipes != updated) vbl_wait = true; @@ -14358,14 +14345,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_wait_for_dependencies(state); - if (intel_state->modeset) { - memcpy(dev_priv->min_pixclk, intel_state->min_pixclk, - sizeof(intel_state->min_pixclk)); - dev_priv->active_crtcs = intel_state->active_crtcs; - dev_priv->atomic_cdclk_freq = intel_state->cdclk; - + if (intel_state->modeset) intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); - } for_each_crtc_in_state(state, crtc, old_crtc_state, i) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -14398,8 +14379,17 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_check_cpu_fifo_underruns(dev_priv); intel_check_pch_fifo_underruns(dev_priv); - if (!crtc->state->active) - intel_update_watermarks(intel_crtc); + if (!crtc->state->active) { + /* + * Make sure we don't call initial_watermarks + * for ILK-style watermark updates. 
+ */ + if (dev_priv->display.atomic_update_watermarks) + dev_priv->display.initial_watermarks(intel_state, + to_intel_crtc_state(crtc->state)); + else + intel_update_watermarks(intel_crtc); + } } } @@ -14422,7 +14412,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) if (!intel_can_enable_sagv(state)) intel_disable_sagv(dev_priv); - intel_modeset_verify_disabled(dev); + intel_modeset_verify_disabled(dev, state); } /* Complete the events for pipes that have now been disabled */ @@ -14465,7 +14455,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_cstate = to_intel_crtc_state(crtc->state); if (dev_priv->display.optimize_watermarks) - dev_priv->display.optimize_watermarks(intel_cstate); + dev_priv->display.optimize_watermarks(intel_state, + intel_cstate); } for_each_crtc_in_state(state, crtc, old_crtc_state, i) { @@ -14474,7 +14465,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) if (put_domains[i]) modeset_put_power_domains(dev_priv, put_domains[i]); - intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state); + intel_modeset_verify_crtc(crtc, state, old_crtc_state, crtc->state); } if (intel_state->modeset && intel_can_enable_sagv(state)) @@ -14557,10 +14548,6 @@ static void intel_atomic_track_fbs(struct drm_atomic_state *state) * This function commits a top-level state object that has been validated * with drm_atomic_helper_check(). * - * FIXME: Atomic modeset support for i915 is not yet complete. At the moment - * nonblocking commits are only safe for pure plane updates. Everything else - * should work though. - * * RETURNS * Zero for success or -errno. */ @@ -14572,11 +14559,6 @@ static int intel_atomic_commit(struct drm_device *dev, struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; - if (intel_state->modeset && nonblock) { - DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n"); - return -EINVAL; - } - ret = drm_atomic_helper_setup_commit(state, nonblock); if (ret) return ret; @@ -14594,10 +14576,16 @@ static int intel_atomic_commit(struct drm_device *dev, drm_atomic_helper_swap_state(state, true); dev_priv->wm.distrust_bios_wm = false; - dev_priv->wm.skl_results = intel_state->wm_results; intel_shared_dpll_commit(state); intel_atomic_track_fbs(state); + if (intel_state->modeset) { + memcpy(dev_priv->min_pixclk, intel_state->min_pixclk, + sizeof(intel_state->min_pixclk)); + dev_priv->active_crtcs = intel_state->active_crtcs; + dev_priv->atomic_cdclk_freq = intel_state->cdclk; + } + drm_atomic_state_get(state); INIT_WORK(&state->commit_work, nonblock ? intel_atomic_commit_work : NULL); @@ -14720,8 +14708,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, { struct intel_atomic_state *intel_state = to_intel_atomic_state(new_state->state); - struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(plane->dev); struct drm_framebuffer *fb = new_state->fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); @@ -14775,10 +14762,12 @@ intel_prepare_plane_fb(struct drm_plane *plane, GFP_KERNEL); if (ret < 0) return ret; + + i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY); } if (plane->type == DRM_PLANE_TYPE_CURSOR && - INTEL_INFO(dev)->cursor_needs_physical) { + INTEL_INFO(dev_priv)->cursor_needs_physical) { int align = IS_I830(dev_priv) ? 
16 * 1024 : 256; ret = i915_gem_object_attach_phys(obj, align); if (ret) { @@ -14811,7 +14800,7 @@ void intel_cleanup_plane_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { - struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = to_i915(plane->dev); struct intel_plane_state *old_intel_state; struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb); struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb); @@ -14822,7 +14811,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane, return; if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR || - !INTEL_INFO(dev)->cursor_needs_physical)) + !INTEL_INFO(dev_priv)->cursor_needs_physical)) intel_unpin_fb_obj(old_state->fb, old_state->rotation); } @@ -14900,30 +14889,32 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *intel_cstate = to_intel_crtc_state(crtc->state); - struct intel_crtc_state *old_intel_state = + struct intel_crtc_state *old_intel_cstate = to_intel_crtc_state(old_crtc_state); + struct intel_atomic_state *old_intel_state = + to_intel_atomic_state(old_crtc_state->state); bool modeset = needs_modeset(crtc->state); - enum pipe pipe = intel_crtc->pipe; /* Perform vblank evasion around commit operation */ intel_pipe_update_start(intel_crtc); if (modeset) - return; + goto out; if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) { intel_color_set_csc(crtc->state); intel_color_load_luts(crtc->state); } - if (intel_cstate->update_pipe) { - intel_update_pipe_config(intel_crtc, old_intel_state); - } else if (INTEL_GEN(dev_priv) >= 9) { + if (intel_cstate->update_pipe) + intel_update_pipe_config(intel_crtc, old_intel_cstate); + else if (INTEL_GEN(dev_priv) >= 9) skl_detach_scalers(intel_crtc); - I915_WRITE(PIPE_WM_LINETIME(pipe), - intel_cstate->wm.skl.optimal.linetime); - } +out: + if (dev_priv->display.atomic_update_watermarks) + dev_priv->display.atomic_update_watermarks(old_intel_state, + intel_cstate); } static void intel_finish_crtc_commit(struct drm_crtc *crtc, @@ -14989,11 +14980,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) state->scaler_id = -1; } primary->pipe = pipe; - primary->plane = pipe; + /* + * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS + * port are hooked to pipe B. Hence we want plane A feeding pipe B. 
+ */ + if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) + primary->plane = (enum plane) !pipe; + else + primary->plane = (enum plane) pipe; primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); primary->check_plane = intel_check_primary_plane; - if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) - primary->plane = !pipe; if (INTEL_GEN(dev_priv) >= 9) { intel_primary_formats = skl_primary_formats; @@ -15046,6 +15042,10 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) supported_rotations = DRM_ROTATE_0 | DRM_ROTATE_90 | DRM_ROTATE_180 | DRM_ROTATE_270; + } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { + supported_rotations = + DRM_ROTATE_0 | DRM_ROTATE_180 | + DRM_REFLECT_X; } else if (INTEL_GEN(dev_priv) >= 4) { supported_rotations = DRM_ROTATE_0 | DRM_ROTATE_180; @@ -15106,7 +15106,7 @@ intel_check_cursor_plane(struct drm_plane *plane, return -ENOMEM; } - if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) { + if (fb->modifier != DRM_FORMAT_MOD_NONE) { DRM_DEBUG_KMS("cursor cannot be tiled\n"); return -EINVAL; } @@ -15147,13 +15147,13 @@ intel_update_cursor_plane(struct drm_plane *plane, { struct drm_crtc *crtc = crtc_state->base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = to_i915(plane->dev); struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb); uint32_t addr; if (!obj) addr = 0; - else if (!INTEL_INFO(dev)->cursor_needs_physical) + else if (!INTEL_INFO(dev_priv)->cursor_needs_physical) addr = i915_gem_object_ggtt_offset(obj, NULL); else addr = obj->phys_handle->busaddr; @@ -15280,14 +15280,14 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) struct intel_plane *plane; plane = intel_sprite_plane_create(dev_priv, pipe, sprite); - if (!plane) { + if (IS_ERR(plane)) { ret = PTR_ERR(plane); goto fail; } } cursor = intel_cursor_plane_create(dev_priv, pipe); - if (!cursor) { + if (IS_ERR(cursor)) { ret = PTR_ERR(cursor); goto fail; } @@ -15299,16 +15299,8 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) if (ret) goto fail; - /* - * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port - * is hooked to pipe B. Hence we want plane A feeding pipe B. 
- */ intel_crtc->pipe = pipe; - intel_crtc->plane = (enum plane) pipe; - if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) { - DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); - intel_crtc->plane = !pipe; - } + intel_crtc->plane = primary->plane; intel_crtc->cursor_base = ~0; intel_crtc->cursor_cntl = ~0; @@ -15401,11 +15393,9 @@ static bool has_edp_a(struct drm_i915_private *dev_priv) return true; } -static bool intel_crt_present(struct drm_device *dev) +static bool intel_crt_present(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) return false; if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) @@ -15479,7 +15469,7 @@ static void intel_setup_outputs(struct drm_device *dev) */ intel_lvds_init(dev); - if (intel_crt_present(dev)) + if (intel_crt_present(dev_priv)) intel_crt_init(dev); if (IS_BROXTON(dev_priv)) { @@ -15527,7 +15517,7 @@ static void intel_setup_outputs(struct drm_device *dev) } else if (HAS_PCH_SPLIT(dev_priv)) { int found; - dpd_is_edp = intel_dp_is_edp(dev, PORT_D); + dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D); if (has_edp_a(dev_priv)) intel_dp_init(dev, DP_A, PORT_A); @@ -15570,14 +15560,14 @@ static void intel_setup_outputs(struct drm_device *dev) * trust the port type the VBT declares as we've seen at least * HDMI ports that the VBT claim are DP or eDP. */ - has_edp = intel_dp_is_edp(dev, PORT_B); + has_edp = intel_dp_is_edp(dev_priv, PORT_B); has_port = intel_bios_is_port_present(dev_priv, PORT_B); if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B); if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) intel_hdmi_init(dev, VLV_HDMIB, PORT_B); - has_edp = intel_dp_is_edp(dev, PORT_C); + has_edp = intel_dp_is_edp(dev_priv, PORT_C); has_port = intel_bios_is_port_present(dev_priv, PORT_C); if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C); @@ -15634,7 +15624,7 @@ static void intel_setup_outputs(struct drm_device *dev) } else if (IS_GEN2(dev_priv)) intel_dvo_init(dev); - if (SUPPORTS_TV(dev)) + if (SUPPORTS_TV(dev_priv)) intel_tv_init(dev); intel_psr_init(dev); @@ -15689,6 +15679,8 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, struct drm_i915_gem_object *obj = intel_fb->obj; mutex_lock(&dev->struct_mutex); + if (obj->pin_display && obj->cache_dirty) + i915_gem_clflush_object(obj, true); intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB); mutex_unlock(&dev->struct_mutex); @@ -15769,7 +15761,7 @@ static int intel_framebuffer_init(struct drm_device *dev, switch (mode_cmd->modifier[0]) { case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: - if (INTEL_INFO(dev)->gen < 9) { + if (INTEL_GEN(dev_priv) < 9) { DRM_DEBUG("Unsupported tiling 0x%llx!\n", mode_cmd->modifier[0]); return -EINVAL; @@ -15832,7 +15824,7 @@ static int intel_framebuffer_init(struct drm_device *dev, case DRM_FORMAT_ARGB8888: break; case DRM_FORMAT_XRGB1555: - if (INTEL_INFO(dev)->gen > 3) { + if (INTEL_GEN(dev_priv) > 3) { DRM_DEBUG("unsupported pixel format: %s\n", drm_get_format_name(mode_cmd->pixel_format, &format_name)); return -EINVAL; @@ -15840,7 +15832,7 @@ static int intel_framebuffer_init(struct drm_device *dev, break; case DRM_FORMAT_ABGR8888: if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && - INTEL_INFO(dev)->gen < 9) { + INTEL_GEN(dev_priv) < 9) { DRM_DEBUG("unsupported pixel format: %s\n", drm_get_format_name(mode_cmd->pixel_format, 
&format_name)); return -EINVAL; @@ -15849,7 +15841,7 @@ static int intel_framebuffer_init(struct drm_device *dev, case DRM_FORMAT_XBGR8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: - if (INTEL_INFO(dev)->gen < 4) { + if (INTEL_GEN(dev_priv) < 4) { DRM_DEBUG("unsupported pixel format: %s\n", drm_get_format_name(mode_cmd->pixel_format, &format_name)); return -EINVAL; @@ -15866,7 +15858,7 @@ static int intel_framebuffer_init(struct drm_device *dev, case DRM_FORMAT_UYVY: case DRM_FORMAT_YVYU: case DRM_FORMAT_VYUY: - if (INTEL_INFO(dev)->gen < 5) { + if (INTEL_GEN(dev_priv) < 5) { DRM_DEBUG("unsupported pixel format: %s\n", drm_get_format_name(mode_cmd->pixel_format, &format_name)); return -EINVAL; @@ -16295,9 +16287,8 @@ static void intel_init_quirks(struct drm_device *dev) } /* Disable the VGA plane that we never use */ -static void i915_disable_vga(struct drm_device *dev) +static void i915_disable_vga(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; u8 sr1; i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv); @@ -16339,6 +16330,7 @@ static void sanitize_watermarks(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct drm_atomic_state *state; + struct intel_atomic_state *intel_state; struct drm_crtc *crtc; struct drm_crtc_state *cstate; struct drm_modeset_acquire_ctx ctx; @@ -16367,12 +16359,14 @@ retry: if (WARN_ON(IS_ERR(state))) goto fail; + intel_state = to_intel_atomic_state(state); + /* * Hardware readout is the only time we don't want to calculate * intermediate watermarks (since we don't trust the current * watermarks). */ - to_intel_atomic_state(state)->skip_intermediate_wm = true; + intel_state->skip_intermediate_wm = true; ret = intel_atomic_check(dev, state); if (ret) { @@ -16396,7 +16390,7 @@ retry: struct intel_crtc_state *cs = to_intel_crtc_state(cstate); cs->wm.need_postvbl_update = true; - dev_priv->display.optimize_watermarks(cs); + dev_priv->display.optimize_watermarks(intel_state, cs); } put_state: @@ -16429,7 +16423,7 @@ int intel_modeset_init(struct drm_device *dev) intel_init_pm(dev_priv); - if (INTEL_INFO(dev)->num_pipes == 0) + if (INTEL_INFO(dev_priv)->num_pipes == 0) return 0; /* @@ -16475,8 +16469,8 @@ int intel_modeset_init(struct drm_device *dev) dev->mode_config.fb_base = ggtt->mappable_base; DRM_DEBUG_KMS("%d display pipe%s available.\n", - INTEL_INFO(dev)->num_pipes, - INTEL_INFO(dev)->num_pipes > 1 ? "s" : ""); + INTEL_INFO(dev_priv)->num_pipes, + INTEL_INFO(dev_priv)->num_pipes > 1 ? 
"s" : ""); for_each_pipe(dev_priv, pipe) { int ret; @@ -16497,7 +16491,7 @@ int intel_modeset_init(struct drm_device *dev) intel_update_max_cdclk(dev_priv); /* Just disable it once at startup */ - i915_disable_vga(dev); + i915_disable_vga(dev_priv); intel_setup_outputs(dev); drm_modeset_lock_all(dev); @@ -16564,11 +16558,10 @@ static void intel_enable_pipe_a(struct drm_device *dev) static bool intel_check_plane_mapping(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 val; - if (INTEL_INFO(dev)->num_pipes == 1) + if (INTEL_INFO(dev_priv)->num_pipes == 1) return true; val = I915_READ(DSPCNTR(!crtc->plane)); @@ -16642,7 +16635,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) /* We need to sanitize the plane -> pipe mapping first because this will * disable the crtc (and hence change the state) if it is wrong. Note * that gen4+ has a fixed plane -> pipe mapping. */ - if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { + if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) { bool plane; DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n", @@ -16744,21 +16737,18 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) * the crtc fixup. */ } -void i915_redisable_vga_power_on(struct drm_device *dev) +void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv); if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); - i915_disable_vga(dev); + i915_disable_vga(dev_priv); } } -void i915_redisable_vga(struct drm_device *dev) +void i915_redisable_vga(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - /* This function can be called both from intel_modeset_setup_hw_state or * at a very early point in our resume sequence, where the power well * structures are not yet restored. Since this function is at a very @@ -16769,7 +16759,7 @@ void i915_redisable_vga(struct drm_device *dev) if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA)) return; - i915_redisable_vga_power_on(dev); + i915_redisable_vga_power_on(dev_priv); intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); } @@ -16841,7 +16831,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", crtc->base.base.id, crtc->base.name, - crtc->active ? "enabled" : "disabled"); + enableddisabled(crtc->active)); } for (i = 0; i < dev_priv->num_shared_dpll; i++) { @@ -16874,9 +16864,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) } DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", - encoder->base.base.id, - encoder->base.name, - encoder->base.crtc ? "enabled" : "disabled", + encoder->base.base.id, encoder->base.name, + enableddisabled(encoder->base.crtc), pipe_name(pipe)); } @@ -16905,9 +16894,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) connector->base.encoder = NULL; } DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", - connector->base.base.id, - connector->base.name, - connector->base.encoder ? 
"enabled" : "disabled"); + connector->base.base.id, connector->base.name, + enableddisabled(connector->base.encoder)); } for_each_intel_crtc(dev, crtc) { @@ -17155,10 +17143,9 @@ void intel_connector_attach_encoder(struct intel_connector *connector, /* * set vga decode state - true == enable VGA decode */ -int intel_modeset_vga_set_state(struct drm_device *dev, bool state) +int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state) { - struct drm_i915_private *dev_priv = to_i915(dev); - unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; + unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; u16 gmch_ctrl; if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) { @@ -17312,16 +17299,15 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) void intel_display_print_error_state(struct drm_i915_error_state_buf *m, - struct drm_device *dev, + struct drm_i915_private *dev_priv, struct intel_display_error_state *error) { - struct drm_i915_private *dev_priv = to_i915(dev); int i; if (!error) return; - err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); + err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes); if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) err_printf(m, "PWR_WELL_CTL2: %08x\n", error->power_well_driver); @@ -17335,13 +17321,13 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, err_printf(m, "Plane [%d]:\n", i); err_printf(m, " CNTR: %08x\n", error->plane[i].control); err_printf(m, " STRIDE: %08x\n", error->plane[i].stride); - if (INTEL_INFO(dev)->gen <= 3) { + if (INTEL_GEN(dev_priv) <= 3) { err_printf(m, " SIZE: %08x\n", error->plane[i].size); err_printf(m, " POS: %08x\n", error->plane[i].pos); } if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) err_printf(m, " ADDR: %08x\n", error->plane[i].addr); - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { err_printf(m, " SURF: %08x\n", error->plane[i].surface); err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 9df331b3305b..90283edcafba 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -942,14 +942,14 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, uint8_t *recv, int recv_size) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = + to_i915(intel_dig_port->base.base.dev); i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg; uint32_t aux_clock_divider; int i, ret, recv_bytes; uint32_t status; int try, clock = 0; - bool has_aux_irq = HAS_AUX_IRQ(dev); + bool has_aux_irq = HAS_AUX_IRQ(dev_priv); bool vdd; pps_lock(intel_dp); @@ -1542,8 +1542,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = dp_to_dig_port(intel_dp)->port; @@ -1578,7 +1577,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_fixed_panel_mode(intel_connector->panel.fixed_mode, adjusted_mode); - if 
(INTEL_INFO(dev)->gen >= 9) { + if (INTEL_GEN(dev_priv) >= 9) { int ret; ret = skl_update_scaler_crtc(pipe_config); if (ret) @@ -1791,9 +1790,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder, trans_dp &= ~TRANS_DP_ENH_FRAMING; I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp); } else { - if (!HAS_PCH_SPLIT(dev_priv) && !IS_VALLEYVIEW(dev_priv) && - !IS_CHERRYVIEW(dev_priv) && - pipe_config->limited_color_range) + if (IS_G4X(dev_priv) && pipe_config->limited_color_range) intel_dp->DP |= DP_COLOR_RANGE_16_235; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) @@ -2515,8 +2512,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, pipe_config->base.adjusted_mode.flags |= flags; - if (!HAS_PCH_SPLIT(dev_priv) && !IS_VALLEYVIEW(dev_priv) && - !IS_CHERRYVIEW(dev_priv) && tmp & DP_COLOR_RANGE_16_235) + if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235) pipe_config->limited_color_range = true; pipe_config->lane_count = @@ -2735,7 +2731,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp, } static void intel_enable_dp(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct drm_device *dev = encoder->base.dev; @@ -2777,7 +2774,7 @@ static void intel_enable_dp(struct intel_encoder *encoder, if (pipe_config->has_audio) { DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", pipe_name(pipe)); - intel_audio_codec_enable(encoder); + intel_audio_codec_enable(encoder, pipe_config, conn_state); } } @@ -2787,7 +2784,7 @@ static void g4x_enable_dp(struct intel_encoder *encoder, { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - intel_enable_dp(encoder, pipe_config); + intel_enable_dp(encoder, pipe_config, conn_state); intel_edp_backlight_on(intel_dp); } @@ -2924,7 +2921,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder, { vlv_phy_pre_encoder_enable(encoder); - intel_enable_dp(encoder, pipe_config); + intel_enable_dp(encoder, pipe_config, conn_state); } static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder, @@ -2942,7 +2939,7 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder, { chv_phy_pre_encoder_enable(encoder); - intel_enable_dp(encoder, pipe_config); + intel_enable_dp(encoder, pipe_config, conn_state); /* Second common lane will stay alive on its own now */ chv_phy_release_cl2_override(encoder); @@ -2979,13 +2976,12 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ uint8_t intel_dp_voltage_max(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); enum port port = dp_to_dig_port(intel_dp)->port; if (IS_BROXTON(dev_priv)) return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; - else if (INTEL_INFO(dev)->gen >= 9) { + else if (INTEL_GEN(dev_priv) >= 9) { if (dev_priv->vbt.edp.low_vswing && port == PORT_A) return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; @@ -4873,15 +4869,13 @@ put_power: } /* check the VBT to see whether the eDP is on another port */ -bool intel_dp_is_edp(struct drm_device *dev, enum port port) +bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port) { - struct drm_i915_private *dev_priv = to_i915(dev); - /* * eDP not supported on g4x. so bail out early just * for a bit extra safety in case the VBT is bonkers. 
*/ - if (INTEL_INFO(dev)->gen < 5) + if (INTEL_GEN(dev_priv) < 5) return false; if (port == PORT_A) @@ -5483,7 +5477,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector, INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); mutex_init(&dev_priv->drrs.mutex); - if (INTEL_INFO(dev)->gen <= 6) { + if (INTEL_GEN(dev_priv) <= 6) { DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n"); return NULL; } @@ -5657,7 +5651,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_dp->pps_pipe = INVALID_PIPE; /* intel_dp vfuncs */ - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider; else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; @@ -5666,7 +5660,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, else intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider; - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl; else intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl; @@ -5678,7 +5672,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_dp->DP = I915_READ(intel_dp->output_reg); intel_dp->attached_connector = intel_connector; - if (intel_dp_is_edp(dev, port)) + if (intel_dp_is_edp(dev_priv, port)) type = DRM_MODE_CONNECTOR_eDP; else type = DRM_MODE_CONNECTOR_DisplayPort; @@ -5742,7 +5736,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, } /* init MST on ports that can support it */ - if (HAS_DP_MST(dev) && !is_edp(intel_dp) && + if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) && (port == PORT_B || port == PORT_C || port == PORT_D)) intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id); @@ -5816,7 +5810,7 @@ bool intel_dp_init(struct drm_device *dev, } else { intel_encoder->pre_enable = g4x_pre_enable_dp; intel_encoder->enable = g4x_enable_dp; - if (INTEL_INFO(dev)->gen >= 5) + if (INTEL_GEN(dev_priv) >= 5) intel_encoder->post_disable = ilk_post_disable_dp; } diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 3ffbd69e4551..b029d1026a28 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -43,7 +43,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; int mst_pbn; - pipe_config->dp_encoder_is_mst = true; pipe_config->has_pch_encoder = false; bpp = 24; /* diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 21853a17b6d9..58a756f2f224 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -188,13 +188,12 @@ out: void intel_disable_shared_dpll(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll = crtc->config->shared_dpll; unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base); /* PCH only available on ILK+ */ - if (INTEL_INFO(dev)->gen < 5) + if (INTEL_GEN(dev_priv) < 5) return; if (pll == NULL) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 398195bf6dd1..cd132c216a67 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -294,6 +294,9 @@ struct intel_connector { */ struct 
intel_encoder *encoder; + /* ACPI device id for ACPI and driver cooperation */ + u32 acpi_device_id; + /* Reads out the current hw, returning true if the connector is enabled * and active (i.e. dpms ON state). */ bool (*get_hw_state)(struct intel_connector *); @@ -652,7 +655,6 @@ struct intel_crtc_state { bool double_wide; - bool dp_encoder_is_mst; int pbn; struct intel_crtc_scaler_state scaler_state; @@ -728,9 +730,6 @@ struct intel_crtc { bool cxsr_allowed; } wm; - /* gen9+: ddb allocation currently being used */ - struct skl_ddb_entry hw_ddb; - int scanline_offset; struct { @@ -1187,7 +1186,9 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv, /* intel_audio.c */ void intel_init_audio_hooks(struct drm_i915_private *dev_priv); -void intel_audio_codec_enable(struct intel_encoder *encoder); +void intel_audio_codec_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); void intel_audio_codec_disable(struct intel_encoder *encoder); void i915_audio_component_init(struct drm_i915_private *dev_priv); void i915_audio_component_cleanup(struct drm_i915_private *dev_priv); @@ -1392,7 +1393,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); bool intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state); -bool intel_dp_is_edp(struct drm_device *dev, enum port port); +bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port); enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd); void intel_edp_backlight_on(struct intel_dp *intel_dp); @@ -1738,18 +1739,9 @@ int intel_enable_sagv(struct drm_i915_private *dev_priv); int intel_disable_sagv(struct drm_i915_private *dev_priv); bool skl_wm_level_equals(const struct skl_wm_level *l1, const struct skl_wm_level *l2); -bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old, - const struct skl_ddb_allocation *new, - enum pipe pipe); -bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state, - struct intel_crtc *intel_crtc); -void skl_write_cursor_wm(struct intel_crtc *intel_crtc, - const struct skl_plane_wm *wm, - const struct skl_ddb_allocation *ddb); -void skl_write_plane_wm(struct intel_crtc *intel_crtc, - const struct skl_plane_wm *wm, - const struct skl_ddb_allocation *ddb, - int plane); +bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries, + const struct skl_ddb_entry *ddb, + int ignore); uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); bool ilk_disable_lp_wm(struct drm_device *dev); int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6); diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index 9f279a3d0f74..0d8ff0034b88 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -774,9 +774,8 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) 8); intel_dsi->clk_hs_to_lp_count += extra_byte_count; - DRM_DEBUG_KMS("Eot %s\n", intel_dsi->eotp_pkt ? "enabled" : "disabled"); - DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ? - "disabled" : "enabled"); + DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt)); + DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop)); DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? 
"command" : "video"); if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n"); @@ -795,8 +794,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count); DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count); DRM_DEBUG_KMS("BTA %s\n", - intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA ? - "disabled" : "enabled"); + enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA))); /* delays in VBT are in unit of 100us, so need to convert * here in ms diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 841f8d1e1410..3da4d466e332 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -102,6 +102,9 @@ intel_engine_setup(struct drm_i915_private *dev_priv, engine->mmio_base = info->mmio_base; engine->irq_shift = info->irq_shift; + /* Nothing to do here, execute in order of dependencies */ + engine->schedule = NULL; + dev_priv->engine[id] = engine; return 0; } @@ -236,8 +239,8 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine) */ void intel_engine_setup_common(struct intel_engine_cs *engine) { - INIT_LIST_HEAD(&engine->execlist_queue); - spin_lock_init(&engine->execlist_lock); + engine->execlist_queue = RB_ROOT; + engine->execlist_first = NULL; intel_engine_init_timeline(engine); intel_engine_init_hangcheck(engine); diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index e230d480c5e6..62f215b12eb5 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -48,17 +48,17 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv) static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv) { - return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8; + return IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8; } static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv) { - return INTEL_INFO(dev_priv)->gen < 4; + return INTEL_GEN(dev_priv) < 4; } static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv) { - return INTEL_INFO(dev_priv)->gen <= 3; + return INTEL_GEN(dev_priv) <= 3; } /* @@ -351,7 +351,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv) { - if (INTEL_INFO(dev_priv)->gen >= 5) + if (INTEL_GEN(dev_priv) >= 5) return ilk_fbc_is_active(dev_priv); else if (IS_GM45(dev_priv)) return g4x_fbc_is_active(dev_priv); @@ -365,9 +365,9 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv) fbc->active = true; - if (INTEL_INFO(dev_priv)->gen >= 7) + if (INTEL_GEN(dev_priv) >= 7) gen7_fbc_activate(dev_priv); - else if (INTEL_INFO(dev_priv)->gen >= 5) + else if (INTEL_GEN(dev_priv) >= 5) ilk_fbc_activate(dev_priv); else if (IS_GM45(dev_priv)) g4x_fbc_activate(dev_priv); @@ -381,7 +381,7 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv) fbc->active = false; - if (INTEL_INFO(dev_priv)->gen >= 5) + if (INTEL_GEN(dev_priv) >= 5) ilk_fbc_deactivate(dev_priv); else if (IS_GM45(dev_priv)) g4x_fbc_deactivate(dev_priv); @@ -561,7 +561,7 @@ again: ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1, 4096, 0, end); - if (ret && INTEL_INFO(dev_priv)->gen <= 4) { + if (ret && INTEL_GEN(dev_priv) <= 4) { return 0; } else if (ret) { compression_threshold <<= 1; 
@@ -594,7 +594,7 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) fbc->threshold = ret; - if (INTEL_INFO(dev_priv)->gen >= 5) + if (INTEL_GEN(dev_priv) >= 5) I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start); else if (IS_GM45(dev_priv)) { I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start); @@ -708,10 +708,10 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) struct intel_fbc *fbc = &dev_priv->fbc; unsigned int effective_w, effective_h, max_w, max_h; - if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) { max_w = 4096; max_h = 4096; - } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { + } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { max_w = 4096; max_h = 2048; } else { @@ -812,7 +812,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) fbc->no_fbc_reason = "framebuffer not tiled or fenced"; return false; } - if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) && + if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) && cache->plane.rotation != DRM_ROTATE_0) { fbc->no_fbc_reason = "rotation unsupported"; return false; @@ -854,9 +854,8 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) return true; } -static bool intel_fbc_can_choose(struct intel_crtc *crtc) +static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; if (intel_vgpu_active(dev_priv)) { @@ -874,16 +873,6 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc) return false; } - if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) { - fbc->no_fbc_reason = "no enabled pipes can have FBC"; - return false; - } - - if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) { - fbc->no_fbc_reason = "no enabled planes can have FBC"; - return false; - } - return true; } @@ -1066,23 +1055,19 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, struct drm_atomic_state *state) { struct intel_fbc *fbc = &dev_priv->fbc; - struct drm_crtc *crtc; - struct drm_crtc_state *crtc_state; struct drm_plane *plane; struct drm_plane_state *plane_state; - bool fbc_crtc_present = false; - int i, j; + bool crtc_chosen = false; + int i; mutex_lock(&fbc->lock); - for_each_crtc_in_state(state, crtc, crtc_state, i) { - if (fbc->crtc == to_intel_crtc(crtc)) { - fbc_crtc_present = true; - break; - } - } - /* This atomic commit doesn't involve the CRTC currently tied to FBC. */ - if (!fbc_crtc_present && fbc->crtc != NULL) + /* Does this atomic commit involve the CRTC currently tied to FBC? 
*/ + if (fbc->crtc && + !drm_atomic_get_existing_crtc_state(state, &fbc->crtc->base)) + goto out; + + if (!intel_fbc_can_enable(dev_priv)) goto out; /* Simply choose the first CRTC that is compatible and has a visible @@ -1092,25 +1077,29 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, for_each_plane_in_state(state, plane, plane_state, i) { struct intel_plane_state *intel_plane_state = to_intel_plane_state(plane_state); + struct intel_crtc_state *intel_crtc_state; + struct intel_crtc *crtc = to_intel_crtc(plane_state->crtc); if (!intel_plane_state->base.visible) continue; - for_each_crtc_in_state(state, crtc, crtc_state, j) { - struct intel_crtc_state *intel_crtc_state = - to_intel_crtc_state(crtc_state); + if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) + continue; - if (plane_state->crtc != crtc) - continue; + if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) + continue; - if (!intel_fbc_can_choose(to_intel_crtc(crtc))) - break; + intel_crtc_state = to_intel_crtc_state( + drm_atomic_get_existing_crtc_state(state, &crtc->base)); - intel_crtc_state->enable_fbc = true; - goto out; - } + intel_crtc_state->enable_fbc = true; + crtc_chosen = true; + break; } + if (!crtc_chosen) + fbc->no_fbc_reason = "no suitable CRTC for FBC"; + out: mutex_unlock(&fbc->lock); } @@ -1386,7 +1375,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) } /* This value was pulled out of someone's hat */ - if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv)) + if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv)) I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); /* We still don't have any sort of hardware state readout for FBC, so diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index e87221bba475..beb08982dc0b 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -356,7 +356,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, struct drm_fb_offset *offsets, bool *enabled, int width, int height) { - struct drm_device *dev = fb_helper->dev; + struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); unsigned long conn_configured, mask; unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); int i, j; @@ -509,7 +509,7 @@ retry: * fbdev helper library. 
*/ if (num_connectors_enabled != num_connectors_detected && - num_connectors_enabled < INTEL_INFO(dev)->num_pipes) { + num_connectors_enabled < INTEL_INFO(dev_priv)->num_pipes) { DRM_DEBUG_KMS("fallback: Not all outputs enabled\n"); DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled, num_connectors_detected); @@ -633,7 +633,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay; cur_size = intel_fb_align_height(dev, cur_size, fb->base.pixel_format, - fb->base.modifier[0]); + fb->base.modifier); cur_size *= fb->base.pitches[0]; DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n", pipe_name(intel_crtc->pipe), @@ -697,11 +697,11 @@ static void intel_fbdev_suspend_worker(struct work_struct *work) int intel_fbdev_init(struct drm_device *dev) { - struct intel_fbdev *ifbdev; struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_fbdev *ifbdev; int ret; - if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0)) + if (WARN_ON(INTEL_INFO(dev_priv)->num_pipes == 0)) return -ENODEV; ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); @@ -714,7 +714,7 @@ int intel_fbdev_init(struct drm_device *dev) ifbdev->preferred_bpp = 32; ret = drm_fb_helper_init(dev, &ifbdev->helper, - INTEL_INFO(dev)->num_pipes, 4); + INTEL_INFO(dev_priv)->num_pipes, 4); if (ret) { kfree(ifbdev); return ret; diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h index 76ceb539f9f0..7bab41218cf7 100644 --- a/drivers/gpu/drm/i915/intel_frontbuffer.h +++ b/drivers/gpu/drm/i915/intel_frontbuffer.h @@ -53,16 +53,17 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj, * until the rendering completes or a flip on this frontbuffer plane is * scheduled. */ -static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, +static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, enum fb_op_origin origin) { unsigned int frontbuffer_bits; frontbuffer_bits = atomic_read(&obj->frontbuffer_bits); if (!frontbuffer_bits) - return; + return false; __intel_fb_obj_invalidate(obj, origin, frontbuffer_bits); + return true; } /** diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 1aa85236b788..34d6ad2cf7c1 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -566,7 +566,7 @@ fail: ret = 0; } - if (err == 0 && !HAS_GUC_UCODE(dev)) + if (err == 0 && !HAS_GUC_UCODE(dev_priv)) ; /* Don't mention the GuC! 
*/ else if (err == 0) DRM_INFO("GuC firmware load skipped\n"); @@ -725,18 +725,18 @@ void intel_guc_init(struct drm_device *dev) struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; const char *fw_path; - if (!HAS_GUC(dev)) { + if (!HAS_GUC(dev_priv)) { i915.enable_guc_loading = 0; i915.enable_guc_submission = 0; } else { /* A negative value means "use platform default" */ if (i915.enable_guc_loading < 0) - i915.enable_guc_loading = HAS_GUC_UCODE(dev); + i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv); if (i915.enable_guc_submission < 0) - i915.enable_guc_submission = HAS_GUC_SCHED(dev); + i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv); } - if (!HAS_GUC_UCODE(dev)) { + if (!HAS_GUC_UCODE(dev_priv)) { fw_path = NULL; } else if (IS_SKYLAKE(dev_priv)) { fw_path = I915_SKL_GUC_UCODE; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 35ada4e1c6cf..fb88e32e25a3 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -975,14 +975,16 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, pipe_config->lane_count = 4; } -static void intel_enable_hdmi_audio(struct intel_encoder *encoder) +static void intel_enable_hdmi_audio(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); WARN_ON(!crtc->config->has_hdmi_sink); DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", pipe_name(crtc->pipe)); - intel_audio_codec_enable(encoder); + intel_audio_codec_enable(encoder, pipe_config, conn_state); } static void g4x_enable_hdmi(struct intel_encoder *encoder, @@ -991,21 +993,20 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); u32 temp; temp = I915_READ(intel_hdmi->hdmi_reg); temp |= SDVO_ENABLE; - if (crtc->config->has_audio) + if (pipe_config->has_audio) temp |= SDVO_AUDIO_ENABLE; I915_WRITE(intel_hdmi->hdmi_reg, temp); POSTING_READ(intel_hdmi->hdmi_reg); - if (crtc->config->has_audio) - intel_enable_hdmi_audio(encoder); + if (pipe_config->has_audio) + intel_enable_hdmi_audio(encoder, pipe_config, conn_state); } static void ibx_enable_hdmi(struct intel_encoder *encoder, @@ -1040,8 +1041,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder, * FIXME: BSpec says this should be done at the end of * of the modeset sequence, so not sure if this isn't too soon. */ - if (crtc->config->pipe_bpp > 24 && - crtc->config->pixel_multiplier > 1) { + if (pipe_config->pipe_bpp > 24 && + pipe_config->pixel_multiplier > 1) { I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE); POSTING_READ(intel_hdmi->hdmi_reg); @@ -1055,8 +1056,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder, POSTING_READ(intel_hdmi->hdmi_reg); } - if (crtc->config->has_audio) - intel_enable_hdmi_audio(encoder); + if (pipe_config->has_audio) + intel_enable_hdmi_audio(encoder, pipe_config, conn_state); } static void cpt_enable_hdmi(struct intel_encoder *encoder, @@ -1073,7 +1074,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder, temp = I915_READ(intel_hdmi->hdmi_reg); temp |= SDVO_ENABLE; - if (crtc->config->has_audio) + if (pipe_config->has_audio) temp |= SDVO_AUDIO_ENABLE; /* @@ -1086,7 +1087,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder, * 4. 
enable HDMI clock gating */ - if (crtc->config->pipe_bpp > 24) { + if (pipe_config->pipe_bpp > 24) { I915_WRITE(TRANS_CHICKEN1(pipe), I915_READ(TRANS_CHICKEN1(pipe)) | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); @@ -1098,7 +1099,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder, I915_WRITE(intel_hdmi->hdmi_reg, temp); POSTING_READ(intel_hdmi->hdmi_reg); - if (crtc->config->pipe_bpp > 24) { + if (pipe_config->pipe_bpp > 24) { temp &= ~SDVO_COLOR_FORMAT_MASK; temp |= HDMI_COLOR_FORMAT_12bpc; @@ -1110,8 +1111,8 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder, ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); } - if (crtc->config->has_audio) - intel_enable_hdmi_audio(encoder); + if (pipe_config->has_audio) + intel_enable_hdmi_audio(encoder, pipe_config, conn_state); } static void vlv_enable_hdmi(struct intel_encoder *encoder, @@ -1178,9 +1179,7 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder, struct intel_crtc_state *old_crtc_state, struct drm_connector_state *old_conn_state) { - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); - - if (crtc->config->has_audio) + if (old_crtc_state->has_audio) intel_audio_codec_disable(encoder); intel_disable_hdmi(encoder, old_crtc_state, old_conn_state); @@ -1190,9 +1189,7 @@ static void pch_disable_hdmi(struct intel_encoder *encoder, struct intel_crtc_state *old_crtc_state, struct drm_connector_state *old_conn_state) { - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); - - if (crtc->config->has_audio) + if (old_crtc_state->has_audio) intel_audio_codec_disable(encoder); } @@ -1645,13 +1642,12 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder, struct drm_connector_state *conn_state) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); - const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; + const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; intel_hdmi_prepare(encoder); intel_hdmi->set_infoframes(&encoder->base, - intel_crtc->config->has_hdmi_sink, + pipe_config->has_hdmi_sink, adjusted_mode); } @@ -1663,9 +1659,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, struct intel_hdmi *intel_hdmi = &dport->hdmi; struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; + const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; vlv_phy_pre_encoder_enable(encoder); @@ -1674,7 +1668,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, 0x2b247878); intel_hdmi->set_infoframes(&encoder->base, - intel_crtc->config->has_hdmi_sink, + pipe_config->has_hdmi_sink, adjusted_mode); g4x_enable_hdmi(encoder, pipe_config, conn_state); diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 334d47b5811a..3d546c019de0 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -501,7 +501,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work) if (intel_connector->mst_port) continue; - if (!connector->polled && I915_HAS_HOTPLUG(dev) && + if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) && intel_connector->encoder->hpd_pin > HPD_NONE) { connector->polled = enabled ? 
DRM_CONNECTOR_POLL_CONNECT | diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index dde04b7643b1..0a09024d6ca3 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -432,8 +432,10 @@ static bool can_merge_ctx(const struct i915_gem_context *prev, static void execlists_dequeue(struct intel_engine_cs *engine) { - struct drm_i915_gem_request *cursor, *last; + struct drm_i915_gem_request *last; struct execlist_port *port = engine->execlist_port; + unsigned long flags; + struct rb_node *rb; bool submit = false; last = port->request; @@ -469,8 +471,12 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * and context switches) submission. */ - spin_lock(&engine->execlist_lock); - list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) { + spin_lock_irqsave(&engine->timeline->lock, flags); + rb = engine->execlist_first; + while (rb) { + struct drm_i915_gem_request *cursor = + rb_entry(rb, typeof(*cursor), priotree.node); + /* Can we combine this request with the current port? It has to * be the same context/ringbuffer and not have any exceptions * (e.g. GVT saying never to combine contexts). @@ -493,7 +499,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * context (even though a different request) to * the second port. */ - if (ctx_single_port_submission(cursor->ctx)) + if (ctx_single_port_submission(last->ctx) || + ctx_single_port_submission(cursor->ctx)) break; GEM_BUG_ON(last->ctx == cursor->ctx); @@ -501,17 +508,30 @@ static void execlists_dequeue(struct intel_engine_cs *engine) i915_gem_request_assign(&port->request, last); port++; } + + rb = rb_next(rb); + rb_erase(&cursor->priotree.node, &engine->execlist_queue); + RB_CLEAR_NODE(&cursor->priotree.node); + cursor->priotree.priority = INT_MAX; + + /* We keep the previous context alive until we retire the + * following request. This ensures that the context object + * is still pinned for any residual writes the HW makes into it + * on the context switch into the next object following the + * breadcrumb. Otherwise, we may retire the context too early. 
+ */ + cursor->previous_context = engine->last_context; + engine->last_context = cursor->ctx; + + __i915_gem_request_submit(cursor); last = cursor; submit = true; } if (submit) { - /* Decouple all the requests submitted from the queue */ - engine->execlist_queue.next = &cursor->execlist_link; - cursor->execlist_link.prev = &engine->execlist_queue; - i915_gem_request_assign(&port->request, last); + engine->execlist_first = rb; } - spin_unlock(&engine->execlist_lock); + spin_unlock_irqrestore(&engine->timeline->lock, flags); if (submit) execlists_submit_ports(engine); @@ -614,27 +634,147 @@ static void intel_lrc_irq_handler(unsigned long data) intel_uncore_forcewake_put(dev_priv, engine->fw_domains); } +static bool insert_request(struct i915_priotree *pt, struct rb_root *root) +{ + struct rb_node **p, *rb; + bool first = true; + + /* most positive priority is scheduled first, equal priorities fifo */ + rb = NULL; + p = &root->rb_node; + while (*p) { + struct i915_priotree *pos; + + rb = *p; + pos = rb_entry(rb, typeof(*pos), node); + if (pt->priority > pos->priority) { + p = &rb->rb_left; + } else { + p = &rb->rb_right; + first = false; + } + } + rb_link_node(&pt->node, rb, p); + rb_insert_color(&pt->node, root); + + return first; +} + static void execlists_submit_request(struct drm_i915_gem_request *request) { struct intel_engine_cs *engine = request->engine; unsigned long flags; - spin_lock_irqsave(&engine->execlist_lock, flags); + /* Will be called from irq-context when using foreign fences. */ + spin_lock_irqsave(&engine->timeline->lock, flags); - /* We keep the previous context alive until we retire the following - * request. This ensures that any the context object is still pinned - * for any residual writes the HW makes into it on the context switch - * into the next object following the breadcrumb. Otherwise, we may - * retire the context too early. - */ - request->previous_context = engine->last_context; - engine->last_context = request->ctx; - - list_add_tail(&request->execlist_link, &engine->execlist_queue); + if (insert_request(&request->priotree, &engine->execlist_queue)) + engine->execlist_first = &request->priotree.node; if (execlists_elsp_idle(engine)) tasklet_hi_schedule(&engine->irq_tasklet); - spin_unlock_irqrestore(&engine->execlist_lock, flags); + spin_unlock_irqrestore(&engine->timeline->lock, flags); +} + +static struct intel_engine_cs * +pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked) +{ + struct intel_engine_cs *engine; + + engine = container_of(pt, + struct drm_i915_gem_request, + priotree)->engine; + if (engine != locked) { + if (locked) + spin_unlock_irq(&locked->timeline->lock); + spin_lock_irq(&engine->timeline->lock); + } + + return engine; +} + +static void execlists_schedule(struct drm_i915_gem_request *request, int prio) +{ + static DEFINE_MUTEX(lock); + struct intel_engine_cs *engine = NULL; + struct i915_dependency *dep, *p; + struct i915_dependency stack; + LIST_HEAD(dfs); + + if (prio <= READ_ONCE(request->priotree.priority)) + return; + + /* Need global lock to use the temporary link inside i915_dependency */ + mutex_lock(&lock); + + stack.signaler = &request->priotree; + list_add(&stack.dfs_link, &dfs); + + /* Recursively bump all dependent priorities to match the new request. 
+ * + * A naive approach would be to use recursion: + * static void update_priorities(struct i915_priotree *pt, int prio) { + * list_for_each_entry(dep, &pt->signalers_list, signal_link) + * update_priorities(dep->signaler, prio); + * insert_request(pt); + * } + * but that may have unlimited recursion depth and so runs a very + * real risk of overrunning the kernel stack. Instead, we build + * a flat list of all dependencies starting with the current request. + * As we walk the list of dependencies, we add each element's dependencies + * to the end of the list (this may include an already visited + * request) and continue to walk onwards onto the new dependencies. The + * end result is a topological list of requests in reverse order; the + * last element in the list is the request we must execute first. + */ + list_for_each_entry_safe(dep, p, &dfs, dfs_link) { + struct i915_priotree *pt = dep->signaler; + + list_for_each_entry(p, &pt->signalers_list, signal_link) + if (prio > READ_ONCE(p->signaler->priority)) + list_move_tail(&p->dfs_link, &dfs); + + p = list_next_entry(dep, dfs_link); + if (!RB_EMPTY_NODE(&pt->node)) + continue; + + engine = pt_lock_engine(pt, engine); + + /* If it is not already in the rbtree, we can update the + * priority in place and skip over it (and its dependencies) + * if it is referenced *again* as we descend the dfs. + */ + if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) { + pt->priority = prio; + list_del_init(&dep->dfs_link); + } + } + + /* FIFO and depth-first replacement ensure our deps execute before us */ + list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) { + struct i915_priotree *pt = dep->signaler; + + INIT_LIST_HEAD(&dep->dfs_link); + + engine = pt_lock_engine(pt, engine); + + if (prio <= pt->priority) + continue; + + GEM_BUG_ON(RB_EMPTY_NODE(&pt->node)); + + pt->priority = prio; + rb_erase(&pt->node, &engine->execlist_queue); + if (insert_request(pt, &engine->execlist_queue)) + engine->execlist_first = &pt->node; + } + + if (engine) + spin_unlock_irq(&engine->timeline->lock); + + mutex_unlock(&lock); + + /* XXX Do we need to preempt to make room for us and our deps?
*/ } int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) @@ -1673,8 +1813,10 @@ void intel_execlists_enable_submission(struct drm_i915_private *dev_priv) struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, dev_priv, id) + for_each_engine(engine, dev_priv, id) { engine->submit_request = execlists_submit_request; + engine->schedule = execlists_schedule; + } } static void @@ -1687,6 +1829,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) engine->emit_breadcrumb = gen8_emit_breadcrumb; engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz; engine->submit_request = execlists_submit_request; + engine->schedule = execlists_schedule; engine->irq_enable = gen8_logical_ring_enable_irq; engine->irq_disable = gen8_logical_ring_disable_irq; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index de7b3e6ed477..d12ef0047d49 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -122,8 +122,7 @@ out: static void intel_lvds_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); u32 tmp, flags = 0; @@ -139,12 +138,12 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, pipe_config->base.adjusted_mode.flags |= flags; - if (INTEL_INFO(dev)->gen < 5) + if (INTEL_GEN(dev_priv) < 5) pipe_config->gmch_pfit.lvds_border_bits = tmp & LVDS_BORDER_ENABLE; /* gen2/3 store dither state in pfit control, needs to match */ - if (INTEL_INFO(dev)->gen < 4) { + if (INTEL_GEN(dev_priv) < 4) { tmp = I915_READ(PFIT_CONTROL); pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 7acbbbf97833..f4429f67a4e3 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -642,24 +642,6 @@ static struct notifier_block intel_opregion_notifier = { * (version 3) */ -static u32 get_did(struct intel_opregion *opregion, int i) -{ - u32 did; - - if (i < ARRAY_SIZE(opregion->acpi->didl)) { - did = opregion->acpi->didl[i]; - } else { - i -= ARRAY_SIZE(opregion->acpi->didl); - - if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2))) - return 0; - - did = opregion->acpi->did2[i]; - } - - return did; -} - static void set_did(struct intel_opregion *opregion, int i, u32 val) { if (i < ARRAY_SIZE(opregion->acpi->didl)) { @@ -674,11 +656,11 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val) } } -static u32 acpi_display_type(struct drm_connector *connector) +static u32 acpi_display_type(struct intel_connector *connector) { u32 display_type; - switch (connector->connector_type) { + switch (connector->base.connector_type) { case DRM_MODE_CONNECTOR_VGA: case DRM_MODE_CONNECTOR_DVIA: display_type = ACPI_DISPLAY_TYPE_VGA; @@ -707,7 +689,7 @@ static u32 acpi_display_type(struct drm_connector *connector) display_type = ACPI_DISPLAY_TYPE_OTHER; break; default: - MISSING_CASE(connector->connector_type); + MISSING_CASE(connector->base.connector_type); display_type = ACPI_DISPLAY_TYPE_OTHER; break; } @@ -718,34 +700,9 @@ static u32 acpi_display_type(struct drm_connector *connector) static void intel_didl_outputs(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = 
&dev_priv->opregion; - struct pci_dev *pdev = dev_priv->drm.pdev; - struct drm_connector *connector; - acpi_handle handle; - struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; - unsigned long long device_id; - acpi_status status; - u32 temp, max_outputs; - int i = 0; - - handle = ACPI_HANDLE(&pdev->dev); - if (!handle || acpi_bus_get_device(handle, &acpi_dev)) - return; - - if (acpi_is_video_device(handle)) - acpi_video_bus = acpi_dev; - else { - list_for_each_entry(acpi_cdev, &acpi_dev->children, node) { - if (acpi_is_video_device(acpi_cdev->handle)) { - acpi_video_bus = acpi_cdev; - break; - } - } - } - - if (!acpi_video_bus) { - DRM_DEBUG_KMS("No ACPI video bus found\n"); - return; - } + struct intel_connector *connector; + int i = 0, max_outputs; + int display_index[16] = {}; /* * In theory, did2, the extended didl, gets added at opregion version @@ -757,64 +714,58 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv) max_outputs = ARRAY_SIZE(opregion->acpi->didl) + ARRAY_SIZE(opregion->acpi->did2); - list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { - if (i >= max_outputs) { - DRM_DEBUG_KMS("More than %u outputs detected via ACPI\n", - max_outputs); - return; - } - status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR", - NULL, &device_id); - if (ACPI_SUCCESS(status)) { - if (!device_id) - goto blind_set; - set_did(opregion, i++, (u32)(device_id & 0x0f0f)); - } + for_each_intel_connector(&dev_priv->drm, connector) { + u32 device_id, type; + + device_id = acpi_display_type(connector); + + /* Use display type specific display index. */ + type = (device_id & ACPI_DISPLAY_TYPE_MASK) + >> ACPI_DISPLAY_TYPE_SHIFT; + device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT; + + connector->acpi_device_id = device_id; + if (i < max_outputs) + set_did(opregion, i, device_id); + i++; } -end: DRM_DEBUG_KMS("%d outputs detected\n", i); + if (i > max_outputs) + DRM_ERROR("More than %d outputs in connector list\n", + max_outputs); + /* If fewer than max outputs, the list must be null terminated */ if (i < max_outputs) set_did(opregion, i, 0); - return; - -blind_set: - i = 0; - list_for_each_entry(connector, - &dev_priv->drm.mode_config.connector_list, head) { - int display_type = acpi_display_type(connector); - - if (i >= max_outputs) { - DRM_DEBUG_KMS("More than %u outputs in connector list\n", - max_outputs); - return; - } - - temp = get_did(opregion, i); - set_did(opregion, i, temp | (1 << 31) | display_type | i); - i++; - } - goto end; } static void intel_setup_cadls(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = &dev_priv->opregion; + struct intel_connector *connector; int i = 0; - u32 disp_id; - - /* Initialize the CADL field by duplicating the DIDL values. - * Technically, this is not always correct as display outputs may exist, - * but not active. This initialization is necessary for some Clevo - * laptops that check this field before processing the brightness and - * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if - * there are less than eight devices. */ - do { - disp_id = get_did(opregion, i); - opregion->acpi->cadl[i] = disp_id; - } while (++i < 8 && disp_id != 0); + + /* + * Initialize the CADL field from the connector device ids. This is + * essentially the same as copying from the DIDL. Technically, this is + * not always correct as display outputs may exist, but not active. 
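+ * (The per-connector IDs copied here were assigned just above by + * intel_didl_outputs() into connector->acpi_device_id.)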
This + * initialization is necessary for some Clevo laptops that check this + * field before processing the brightness and display switching hotkeys. + * + * Note that internal panels should be at the front of the connector + * list already, ensuring they're not left out. + */ + for_each_intel_connector(&dev_priv->drm, connector) { + if (i >= ARRAY_SIZE(opregion->acpi->cadl)) + break; + opregion->acpi->cadl[i++] = connector->acpi_device_id; + } + + /* If fewer than 8 active devices, the list must be null terminated */ + if (i < ARRAY_SIZE(opregion->acpi->cadl)) + opregion->acpi->cadl[i] = 0; } void intel_opregion_register(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index be4b4d546fd9..08ab6d762ca4 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -304,7 +304,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, struct intel_crtc_state *pipe_config, int fitting_mode) { - struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; @@ -325,7 +325,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, break; case DRM_MODE_SCALE_ASPECT: /* Scale but preserve the aspect ratio */ - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) i965_scale_aspect(pipe_config, &pfit_control); else i9xx_scale_aspect(pipe_config, &pfit_control, @@ -339,7 +339,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay || pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) { pfit_control |= PFIT_ENABLE; - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) pfit_control |= PFIT_SCALING_AUTO; else pfit_control |= (VERT_AUTO_SCALE | @@ -355,7 +355,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, /* 965+ wants fuzzy fitting */ /* FIXME: handle multiple panels by failing gracefully */ - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | PFIT_FILTER_FUZZY); @@ -366,7 +366,7 @@ out: } /* Make sure pre-965 set dither correctly for 18bpp panels. */ - if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18) + if (INTEL_GEN(dev_priv) < 4 && pipe_config->pipe_bpp == 18) pfit_control |= PANEL_8TO6_DITHER_ENABLE; pipe_config->gmch_pfit.control = pfit_control; @@ -1722,7 +1722,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe) DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n", connector->name, - panel->backlight.enabled ? "enabled" : "disabled", + enableddisabled(panel->backlight.enabled), panel->backlight.level, panel->backlight.max); return 0; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index cc9e0c0f445f..bbb1eaf1e6db 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -347,8 +347,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) return; } - DRM_DEBUG_KMS("memory self-refresh is %s\n", - enable ? 
"enabled" : "disabled"); + DRM_DEBUG_KMS("memory self-refresh is %s\n", enableddisabled(enable)); } @@ -1061,7 +1060,8 @@ static void vlv_invert_wms(struct intel_crtc *crtc) for (level = 0; level < wm_state->num_levels; level++) { struct drm_device *dev = crtc->base.dev; - const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1; + const int sr_fifo_size = + INTEL_INFO(to_i915(dev))->num_pipes * 512 - 1; struct intel_plane *plane; wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane; @@ -1091,15 +1091,16 @@ static void vlv_invert_wms(struct intel_crtc *crtc) static void vlv_compute_wm(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); struct vlv_wm_state *wm_state = &crtc->wm_state; struct intel_plane *plane; - int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1; + int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1; int level; memset(wm_state, 0, sizeof(*wm_state)); wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed; - wm_state->num_levels = to_i915(dev)->wm.max_level + 1; + wm_state->num_levels = dev_priv->wm.max_level + 1; wm_state->num_active_planes = 0; @@ -1179,7 +1180,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc) } /* clear any (partially) filled invalid levels */ - for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) { + for (level = wm_state->num_levels; level < dev_priv->wm.max_level + 1; level++) { memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level])); memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level])); } @@ -1861,23 +1862,25 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp); } -static unsigned int ilk_display_fifo_size(const struct drm_device *dev) +static unsigned int +ilk_display_fifo_size(const struct drm_i915_private *dev_priv) { - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_GEN(dev_priv) >= 8) return 3072; - else if (INTEL_INFO(dev)->gen >= 7) + else if (INTEL_GEN(dev_priv) >= 7) return 768; else return 512; } -static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev, - int level, bool is_sprite) +static unsigned int +ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv, + int level, bool is_sprite) { - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_GEN(dev_priv) >= 8) /* BDW primary/sprite plane watermarks */ return level == 0 ? 255 : 2047; - else if (INTEL_INFO(dev)->gen >= 7) + else if (INTEL_GEN(dev_priv) >= 7) /* IVB/HSW primary/sprite plane watermarks */ return level == 0 ? 127 : 1023; else if (!is_sprite) @@ -1888,18 +1891,18 @@ static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev, return level == 0 ? 63 : 255; } -static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev, - int level) +static unsigned int +ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level) { - if (INTEL_INFO(dev)->gen >= 7) + if (INTEL_GEN(dev_priv) >= 7) return level == 0 ? 63 : 255; else return level == 0 ? 
31 : 63; } -static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev) +static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv) { - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_GEN(dev_priv) >= 8) return 31; else return 15; @@ -1912,7 +1915,8 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev, enum intel_ddb_partitioning ddb_partitioning, bool is_sprite) { - unsigned int fifo_size = ilk_display_fifo_size(dev); + struct drm_i915_private *dev_priv = to_i915(dev); + unsigned int fifo_size = ilk_display_fifo_size(dev_priv); /* if sprites aren't enabled, sprites get nothing */ if (is_sprite && !config->sprites_enabled) @@ -1920,14 +1924,14 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev, /* HSW allows LP1+ watermarks even with multiple pipes */ if (level == 0 || config->num_pipes_active > 1) { - fifo_size /= INTEL_INFO(dev)->num_pipes; + fifo_size /= INTEL_INFO(dev_priv)->num_pipes; /* * For some reason the non self refresh * FIFO size is only half of the self * refresh FIFO size on ILK/SNB. */ - if (INTEL_INFO(dev)->gen <= 6) + if (INTEL_GEN(dev_priv) <= 6) fifo_size /= 2; } @@ -1943,7 +1947,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev, } /* clamp to max that the registers can hold */ - return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite)); + return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite)); } /* Calculate the maximum cursor plane watermark */ @@ -1956,7 +1960,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev, return 64; /* otherwise just report max that registers can hold */ - return ilk_cursor_wm_reg_max(dev, level); + return ilk_cursor_wm_reg_max(to_i915(dev), level); } static void ilk_compute_wm_maximums(const struct drm_device *dev, @@ -1968,17 +1972,17 @@ static void ilk_compute_wm_maximums(const struct drm_device *dev, max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); max->cur = ilk_cursor_wm_max(dev, level, config); - max->fbc = ilk_fbc_wm_reg_max(dev); + max->fbc = ilk_fbc_wm_reg_max(to_i915(dev)); } -static void ilk_compute_wm_reg_maximums(struct drm_device *dev, +static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv, int level, struct ilk_wm_maximums *max) { - max->pri = ilk_plane_wm_reg_max(dev, level, false); - max->spr = ilk_plane_wm_reg_max(dev, level, true); - max->cur = ilk_cursor_wm_reg_max(dev, level); - max->fbc = ilk_fbc_wm_reg_max(dev); + max->pri = ilk_plane_wm_reg_max(dev_priv, level, false); + max->spr = ilk_plane_wm_reg_max(dev_priv, level, true); + max->cur = ilk_cursor_wm_reg_max(dev_priv, level); + max->fbc = ilk_fbc_wm_reg_max(dev_priv); } static bool ilk_validate_wm_level(int level, @@ -2382,7 +2386,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) usable_level = max_level; /* ILK/SNB: LP2+ watermarks only w/o sprites */ - if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled) + if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled) usable_level = 1; /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ @@ -2401,7 +2405,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) if (!ilk_validate_pipe_wm(dev, pipe_wm)) return -EINVAL; - ilk_compute_wm_reg_maximums(dev, 1, &max); + ilk_compute_wm_reg_maximums(dev_priv, 1, &max); for (level = 1; level <= max_level; level++) { struct intel_wm_level *wm = &pipe_wm->raw_wm[level]; @@ -2530,7 +2534,7 @@ static 
void ilk_wm_merge(struct drm_device *dev, last_enabled_level = 0; /* ILK: FBC WM must be disabled always */ - merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6; + merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6; /* merge each WM1+ level */ for (level = 1; level <= max_level; level++) { @@ -2593,6 +2597,7 @@ static void ilk_compute_wm_results(struct drm_device *dev, enum intel_ddb_partitioning partitioning, struct ilk_wm_values *results) { + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc; int level, wm_lp; @@ -2619,7 +2624,7 @@ static void ilk_compute_wm_results(struct drm_device *dev, if (r->enable) results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN; - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_GEN(dev_priv) >= 8) results->wm_lp[wm_lp - 1] |= r->fbc_val << WM1_LP_FBC_SHIFT_BDW; else @@ -2630,7 +2635,7 @@ static void ilk_compute_wm_results(struct drm_device *dev, * Always set WM1S_LP_EN when spr_val != 0, even if the * level is disabled. Doing otherwise could cause underruns. */ - if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) { + if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) { WARN_ON(wm_lp != 1); results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val; } else @@ -2780,7 +2785,6 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, static void ilk_write_wm_values(struct drm_i915_private *dev_priv, struct ilk_wm_values *results) { - struct drm_device *dev = &dev_priv->drm; struct ilk_wm_values *previous = &dev_priv->wm.hw; unsigned int dirty; uint32_t val; @@ -2836,7 +2840,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv, previous->wm_lp_spr[0] != results->wm_lp_spr[0]) I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); - if (INTEL_INFO(dev)->gen >= 7) { + if (INTEL_GEN(dev_priv) >= 7) { if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]); if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) @@ -3063,7 +3067,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) latency = dev_priv->wm.skl_latency[level]; if (skl_needs_memory_bw_wa(intel_state) && - plane->base.state->fb->modifier[0] == + plane->base.state->fb->modifier == I915_FORMAT_MOD_X_TILED) latency += 15; @@ -3118,7 +3122,11 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, * we currently hold. 
*/ if (!intel_state->active_pipe_changes) { - *alloc = to_intel_crtc(for_crtc)->hw_ddb; + /* + * alloc may be cleared by clear_intel_crtc_state, + * copy from old state to be sure + */ + *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb; return; } @@ -3319,8 +3327,8 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate, return 0; /* For Non Y-tile return 8-blocks */ - if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED && - fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED) + if (fb->modifier != I915_FORMAT_MOD_Y_TILED && + fb->modifier != I915_FORMAT_MOD_Yf_TILED) return 8; src_w = drm_rect_width(&intel_pstate->base.src) >> 16; @@ -3589,7 +3597,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, return 0; } - if (apply_memory_bw_wa && fb->modifier[0] == I915_FORMAT_MOD_X_TILED) + if (apply_memory_bw_wa && fb->modifier == I915_FORMAT_MOD_X_TILED) latency += 15; width = drm_rect_width(&intel_pstate->base.src) >> 16; @@ -3624,13 +3632,16 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, y_min_scanlines = 4; } + if (apply_memory_bw_wa) + y_min_scanlines *= 2; + plane_bytes_per_line = width * cpp; - if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || - fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { + if (fb->modifier == I915_FORMAT_MOD_Y_TILED || + fb->modifier == I915_FORMAT_MOD_Yf_TILED) { plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512); plane_blocks_per_line /= y_min_scanlines; - } else if (fb->modifier[0] == DRM_FORMAT_MOD_NONE) { + } else if (fb->modifier == DRM_FORMAT_MOD_NONE) { plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1; } else { @@ -3644,11 +3655,9 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, plane_blocks_per_line); y_tile_minimum = plane_blocks_per_line * y_min_scanlines; - if (apply_memory_bw_wa) - y_tile_minimum *= 2; - if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || - fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { + if (fb->modifier == I915_FORMAT_MOD_Y_TILED || + fb->modifier == I915_FORMAT_MOD_Yf_TILED) { selected_result = max(method2, y_tile_minimum); } else { if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && @@ -3664,8 +3673,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line); if (level >= 1 && level <= 7) { - if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || - fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { + if (fb->modifier == I915_FORMAT_MOD_Y_TILED || + fb->modifier == I915_FORMAT_MOD_Yf_TILED) { res_blocks += y_tile_minimum; res_lines += y_min_scanlines; } else { @@ -3906,25 +3915,16 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, return a->start < b->end && b->start < a->end; } -bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state, - struct intel_crtc *intel_crtc) +bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries, + const struct skl_ddb_entry *ddb, + int ignore) { - struct drm_crtc *other_crtc; - struct drm_crtc_state *other_cstate; - struct intel_crtc *other_intel_crtc; - const struct skl_ddb_entry *ddb = - &to_intel_crtc_state(intel_crtc->base.state)->wm.skl.ddb; int i; - for_each_crtc_in_state(state, other_crtc, other_cstate, i) { - other_intel_crtc = to_intel_crtc(other_crtc); - - if (other_intel_crtc == intel_crtc) - continue; - - if (skl_ddb_entries_overlap(ddb, &other_intel_crtc->hw_ddb)) + for (i = 0; i < I915_MAX_PIPES; i++) + if (i != ignore && entries[i] && 
+ skl_ddb_entries_overlap(ddb, entries[i])) return true; - } return false; } @@ -4193,14 +4193,35 @@ skl_compute_wm(struct drm_atomic_state *state) return 0; } -static void skl_update_wm(struct intel_crtc *intel_crtc) +static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, + struct intel_crtc_state *cstate) +{ + struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; + const struct skl_ddb_allocation *ddb = &state->wm_results.ddb; + enum pipe pipe = crtc->pipe; + int plane; + + if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base))) + return; + + I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime); + + for_each_universal_plane(dev_priv, pipe, plane) + skl_write_plane_wm(crtc, &pipe_wm->planes[plane], ddb, plane); + + skl_write_cursor_wm(crtc, &pipe_wm->planes[PLANE_CURSOR], ddb); +} + +static void skl_initial_wm(struct intel_atomic_state *state, + struct intel_crtc_state *cstate) { + struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct skl_wm_values *results = &dev_priv->wm.skl_results; + struct skl_wm_values *results = &state->wm_results; struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw; - struct intel_crtc_state *cstate = to_intel_crtc_state(intel_crtc->base.state); - struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; enum pipe pipe = intel_crtc->pipe; if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0) @@ -4208,27 +4229,11 @@ static void skl_update_wm(struct intel_crtc *intel_crtc) mutex_lock(&dev_priv->wm.wm_mutex); - /* - * If this pipe isn't active already, we're going to be enabling it - * very soon. Since it's safe to update a pipe's ddb allocation while - * the pipe's shut off, just do so here. Already active pipes will have - * their watermarks updated once we update their planes. 
- */ - if (intel_crtc->base.state->active_changed) { - int plane; - - for_each_universal_plane(dev_priv, pipe, plane) - skl_write_plane_wm(intel_crtc, &pipe_wm->planes[plane], - &results->ddb, plane); - - skl_write_cursor_wm(intel_crtc, &pipe_wm->planes[PLANE_CURSOR], - &results->ddb); - } + if (cstate->base.active_changed) + skl_atomic_update_crtc_wm(state, cstate); skl_copy_wm_for_pipe(hw_vals, results, pipe); - intel_crtc->hw_ddb = cstate->wm.skl.ddb; - mutex_unlock(&dev_priv->wm.wm_mutex); } @@ -4265,7 +4270,7 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv) ilk_wm_merge(dev, &config, &max, &lp_wm_1_2); /* 5/6 split only in single pipe config on IVB+ */ - if (INTEL_INFO(dev)->gen >= 7 && + if (INTEL_GEN(dev_priv) >= 7 && config.num_pipes_active == 1 && config.sprites_enabled) { ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); ilk_wm_merge(dev, &config, &max, &lp_wm_5_6); @@ -4283,7 +4288,8 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv) ilk_write_wm_values(dev_priv, &results); } -static void ilk_initial_watermarks(struct intel_crtc_state *cstate) +static void ilk_initial_watermarks(struct intel_atomic_state *state, + struct intel_crtc_state *cstate) { struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); @@ -4294,7 +4300,8 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate) mutex_unlock(&dev_priv->wm.wm_mutex); } -static void ilk_optimize_watermarks(struct intel_crtc_state *cstate) +static void ilk_optimize_watermarks(struct intel_atomic_state *state, + struct intel_crtc_state *cstate) { struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); @@ -4605,7 +4612,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev) hw->wm_lp[2] = I915_READ(WM3_LP_ILK); hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK); - if (INTEL_INFO(dev)->gen >= 7) { + if (INTEL_GEN(dev_priv) >= 7) { hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); } @@ -7690,7 +7697,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv) /* For FIFO watermark updates */ if (INTEL_GEN(dev_priv) >= 9) { skl_setup_wm_latency(dev_priv); - dev_priv->display.update_wm = skl_update_wm; + dev_priv->display.initial_watermarks = skl_initial_wm; + dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm; dev_priv->display.compute_global_watermarks = skl_compute_wm; } else if (HAS_PCH_SPLIT(dev_priv)) { ilk_setup_wm_latency(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 271a3e29ff23..7b488e2793d9 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -427,7 +427,7 @@ void intel_psr_enable(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); - if (!HAS_PSR(dev)) { + if (!HAS_PSR(dev_priv)) { DRM_DEBUG_KMS("PSR not supported on this platform\n"); return; } @@ -472,7 +472,7 @@ void intel_psr_enable(struct intel_dp *intel_dp) /* Enable PSR on the panel */ hsw_psr_enable_sink(intel_dp); - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) intel_psr_activate(intel_dp); } else { vlv_psr_setup_vsc(intel_dp); @@ -498,7 +498,7 @@ void intel_psr_enable(struct intel_dp *intel_dp) * - On HSW/BDW we get a recoverable frozen screen until next * exit-activate sequence. 
*/ - if (INTEL_INFO(dev)->gen < 9) + if (INTEL_GEN(dev_priv) < 9) schedule_delayed_work(&dev_priv->psr.work, msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 700e93d80616..aeb637dc1fdf 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1294,6 +1294,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request) { struct drm_i915_private *dev_priv = request->i915; + i915_gem_request_submit(request); + I915_WRITE_TAIL(request->engine, request->tail); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 642b54692d0d..3466b4e77e7c 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -267,6 +267,15 @@ struct intel_engine_cs { */ void (*submit_request)(struct drm_i915_gem_request *req); + /* Call when the priority on a request has changed and it and its + * dependencies may need rescheduling. Note the request itself may + * not be ready to run! + * + * Called under the struct_mutex. + */ + void (*schedule)(struct drm_i915_gem_request *request, + int priority); + /* Some chipsets are not quite as coherent as advertised and need * an expensive kick to force a true read of the up-to-date seqno. * However, the up-to-date seqno is not always required and the last @@ -335,12 +344,12 @@ struct intel_engine_cs { /* Execlists */ struct tasklet_struct irq_tasklet; - spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */ struct execlist_port { struct drm_i915_gem_request *request; unsigned int count; } execlist_port[2]; - struct list_head execlist_queue; + struct rb_root execlist_queue; + struct rb_node *execlist_first; unsigned int fw_domains; bool disable_lite_restore_wa; bool preempt_wa; @@ -578,7 +587,6 @@ static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine) void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine); void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); -unsigned int intel_kick_waiters(struct drm_i915_private *i915); -unsigned int intel_kick_signalers(struct drm_i915_private *i915); +unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915); #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 05994083e161..356c662ad453 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -1066,7 +1066,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) * * CHV DPLL B/C have some issues if VGA mode is enabled. 
*/ - for_each_pipe(&dev_priv->drm, pipe) { + for_each_pipe(dev_priv, pipe) { u32 val = I915_READ(DPLL(pipe)); val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; @@ -1097,7 +1097,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) intel_crt_reset(&encoder->base); } - i915_redisable_vga_power_on(&dev_priv->drm); + i915_redisable_vga_power_on(dev_priv); intel_pps_unlock_regs_wa(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 3990c805a5b5..27808e91cb5a 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1195,8 +1195,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - struct drm_device *dev = intel_encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; struct drm_display_mode *mode = &crtc_state->base.mode; @@ -1269,13 +1268,13 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, return; /* Set the SDVO control regs. */ - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { /* The real mode polarity is set by the SDVO commands, using * struct intel_sdvo_dtd. */ sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range) sdvox |= HDMI_COLOR_RANGE_16_235; - if (INTEL_INFO(dev)->gen < 5) + if (INTEL_GEN(dev_priv) < 5) sdvox |= SDVO_BORDER_ENABLE; } else { sdvox = I915_READ(intel_sdvo->sdvo_reg); @@ -1294,7 +1293,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, if (intel_sdvo->has_hdmi_audio) sdvox |= SDVO_AUDIO_ENABLE; - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { /* done in crtc_mode_set as the dpll_md reg must be written early */ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv)) { @@ -1305,7 +1304,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, } if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL && - INTEL_INFO(dev)->gen < 5) + INTEL_GEN(dev_priv) < 5) sdvox |= SDVO_STALL_SELECT; intel_sdvo_write_sdvox(intel_sdvo, sdvox); } diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 0e9aac2e3eae..8f131a08d440 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -203,13 +203,8 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(drm_plane); struct drm_framebuffer *fb = plane_state->base.fb; - const struct skl_wm_values *wm = &dev_priv->wm.skl_results; - struct drm_crtc *crtc = crtc_state->base.crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); const int pipe = intel_plane->pipe; const int plane = intel_plane->plane + 1; - const struct skl_plane_wm *p_wm = - &crtc_state->wm.skl.optimal.planes[plane]; u32 plane_ctl; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 surf_addr = plane_state->main.offset; @@ -229,13 +224,10 @@ skl_update_plane(struct drm_plane *drm_plane, PLANE_CTL_PIPE_CSC_ENABLE; plane_ctl |= skl_plane_ctl_format(fb->pixel_format); - plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]); + plane_ctl |= skl_plane_ctl_tiling(fb->modifier); 
plane_ctl |= skl_plane_ctl_rotation(rotation); - if (wm->dirty_pipes & drm_crtc_mask(crtc)) - skl_write_plane_wm(intel_crtc, p_wm, &wm->ddb, plane); - if (key->flags) { I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value); I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value); @@ -291,19 +283,9 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) struct drm_device *dev = dplane->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(dplane); - struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); const int pipe = intel_plane->pipe; const int plane = intel_plane->plane + 1; - /* - * We only populate skl_results on watermark updates, and if the - * plane's visiblity isn't actually changing neither is its watermarks. - */ - if (!dplane->state->visible) - skl_write_plane_wm(to_intel_crtc(crtc), - &cstate->wm.skl.optimal.planes[plane], - &dev_priv->wm.skl_results.ddb, plane); - I915_WRITE(PLANE_CTL(pipe, plane), 0); I915_WRITE(PLANE_SURF(pipe, plane), 0); @@ -362,7 +344,7 @@ vlv_update_plane(struct drm_plane *dplane, int plane = intel_plane->plane; u32 sprctl; u32 sprsurf_offset, linear_offset; - unsigned int rotation = dplane->state->rotation; + unsigned int rotation = plane_state->base.rotation; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->base.dst.x1; int crtc_y = plane_state->base.dst.y1; @@ -424,9 +406,15 @@ vlv_update_plane(struct drm_plane *dplane, */ sprctl |= SP_GAMMA_ENABLE; - if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) + if (fb->modifier == I915_FORMAT_MOD_X_TILED) sprctl |= SP_TILED; + if (rotation & DRM_ROTATE_180) + sprctl |= SP_ROTATE_180; + + if (rotation & DRM_REFLECT_X) + sprctl |= SP_MIRROR; + /* Sizes are 0 based */ src_w--; src_h--; @@ -436,11 +424,11 @@ vlv_update_plane(struct drm_plane *dplane, intel_add_fb_offsets(&x, &y, plane_state, 0); sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0); - if (rotation == DRM_ROTATE_180) { - sprctl |= SP_ROTATE_180; - + if (rotation & DRM_ROTATE_180) { x += src_w; y += src_h; + } else if (rotation & DRM_REFLECT_X) { + x += src_w; } linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); @@ -460,7 +448,7 @@ vlv_update_plane(struct drm_plane *dplane, I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); - if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) + if (fb->modifier == I915_FORMAT_MOD_X_TILED) I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x); else I915_WRITE(SPLINOFF(pipe, plane), linear_offset); @@ -543,9 +531,12 @@ ivb_update_plane(struct drm_plane *plane, */ sprctl |= SPRITE_GAMMA_ENABLE; - if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) + if (fb->modifier == I915_FORMAT_MOD_X_TILED) sprctl |= SPRITE_TILED; + if (rotation & DRM_ROTATE_180) + sprctl |= SPRITE_ROTATE_180; + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE; else @@ -566,14 +557,11 @@ ivb_update_plane(struct drm_plane *plane, intel_add_fb_offsets(&x, &y, plane_state, 0); sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0); - if (rotation == DRM_ROTATE_180) { - sprctl |= SPRITE_ROTATE_180; - - /* HSW and BDW does this automagically in hardware */ - if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { - x += src_w; - y += src_h; - } + /* HSW+ does this automagically in hardware */ + if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) && + rotation & DRM_ROTATE_180) { + x += src_w; + y += src_h; } 
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); @@ -596,7 +584,7 @@ ivb_update_plane(struct drm_plane *plane, * register */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) I915_WRITE(SPROFFSET(pipe), (y << 16) | x); - else if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) + else if (fb->modifier == I915_FORMAT_MOD_X_TILED) I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x); else I915_WRITE(SPRLINOFF(pipe), linear_offset); @@ -681,9 +669,12 @@ ilk_update_plane(struct drm_plane *plane, */ dvscntr |= DVS_GAMMA_ENABLE; - if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) + if (fb->modifier == I915_FORMAT_MOD_X_TILED) dvscntr |= DVS_TILED; + if (rotation & DRM_ROTATE_180) + dvscntr |= DVS_ROTATE_180; + if (IS_GEN6(dev_priv)) dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ @@ -700,9 +691,7 @@ ilk_update_plane(struct drm_plane *plane, intel_add_fb_offsets(&x, &y, plane_state, 0); dvssurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0); - if (rotation == DRM_ROTATE_180) { - dvscntr |= DVS_ROTATE_180; - + if (rotation & DRM_ROTATE_180) { x += src_w; y += src_h; } @@ -723,7 +712,7 @@ ilk_update_plane(struct drm_plane *plane, I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); - if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) + if (fb->modifier == I915_FORMAT_MOD_X_TILED) I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x); else I915_WRITE(DVSLINOFF(pipe), linear_offset); @@ -1112,6 +1101,10 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, supported_rotations = DRM_ROTATE_0 | DRM_ROTATE_90 | DRM_ROTATE_180 | DRM_ROTATE_270; + } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { + supported_rotations = + DRM_ROTATE_0 | DRM_ROTATE_180 | + DRM_REFLECT_X; } else { supported_rotations = DRM_ROTATE_0 | DRM_ROTATE_180; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 9212f00d5752..78cdfc6833d6 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1029,8 +1029,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); struct intel_tv *intel_tv = enc_to_tv(encoder); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); @@ -1116,7 +1115,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder, set_color_conversion(dev_priv, color_conversion); - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) I915_WRITE(TV_CLR_KNOBS, 0x00404000); else I915_WRITE(TV_CLR_KNOBS, 0x00606000); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index e2b188dcf908..d7be0d94ba4d 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -402,6 +402,8 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, bool restore_forcewake) { + struct intel_device_info *info = mkwrite_device_info(dev_priv); + /* clear out unclaimed reg detection bit */ if (check_for_unclaimed_mmio(dev_priv)) DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); @@ -419,6 +421,10 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, GT_FIFO_CTL_RC6_POLICY_STALL); } + /* Enable Decoupled 
MMIO only on BXT C stepping onwards */ + if (!IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) + info->has_decoupled_mmio = false; + intel_uncore_forcewake_reset(dev_priv, restore_forcewake); } @@ -641,6 +647,8 @@ intel_fw_table_check(struct drm_i915_private *dev_priv) num_ranges = dev_priv->uncore.fw_domains_table_entries; for (i = 0, prev = -1; i < num_ranges; i++, ranges++) { + WARN_ON_ONCE(IS_GEN9(dev_priv) && + (prev + 1) != (s32)ranges->start); WARN_ON_ONCE(prev >= (s32)ranges->start); prev = ranges->start; WARN_ON_ONCE(prev >= (s32)ranges->end); @@ -783,7 +791,7 @@ static const struct intel_forcewake_range __gen9_fw_ranges[] = { GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), - GEN_FW_RANGE(0xb480, 0xbfff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER), GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA), GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER), GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER), @@ -831,6 +839,66 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv, __unclaimed_reg_debug(dev_priv, reg, read, before); } +static const enum decoupled_power_domain fw2dpd_domain[] = { + GEN9_DECOUPLED_PD_RENDER, + GEN9_DECOUPLED_PD_BLITTER, + GEN9_DECOUPLED_PD_ALL, + GEN9_DECOUPLED_PD_MEDIA, + GEN9_DECOUPLED_PD_ALL, + GEN9_DECOUPLED_PD_ALL, + GEN9_DECOUPLED_PD_ALL +}; + +/* + * Decoupled MMIO access for only 1 DWORD + */ +static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv, + u32 reg, + enum forcewake_domains fw_domain, + enum decoupled_ops operation) +{ + enum decoupled_power_domain dp_domain; + u32 ctrl_reg_data = 0; + + dp_domain = fw2dpd_domain[fw_domain - 1]; + + ctrl_reg_data |= reg; + ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT); + ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT); + ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO; + __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data); + + if (wait_for_atomic((__raw_i915_read32(dev_priv, + GEN9_DECOUPLED_REG0_DW1) & + GEN9_DECOUPLED_DW1_GO) == 0, + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Decoupled MMIO wait timed out\n"); +} + +static inline u32 +__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv, + u32 reg, + enum forcewake_domains fw_domain) +{ + __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain, + GEN9_DECOUPLED_OP_READ); + + return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0); +} + +static inline void +__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv, + u32 reg, u32 data, + enum forcewake_domains fw_domain) +{ + + __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data); + + __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain, + GEN9_DECOUPLED_OP_WRITE); +} + + #define GEN2_READ_HEADER(x) \ u##x val = 0; \ assert_rpm_wakelock_held(dev_priv); @@ -935,6 +1003,28 @@ fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { GEN6_READ_FOOTER; \ } +#define __gen9_decoupled_read(x) \ +static u##x \ +gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \ + i915_reg_t reg, bool trace) { \ + enum forcewake_domains fw_engine; \ + GEN6_READ_HEADER(x); \ + fw_engine = __fwtable_reg_read_fw_domains(offset); \ + if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \ + unsigned i; \ + u32 *ptr_data = (u32 *) &val; \ + for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \ + *ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \ + offset, \ + fw_engine); \ + } else { \ + val = 
__raw_i915_read##x(dev_priv, reg); \ + } \ + GEN6_READ_FOOTER; \ +} + +__gen9_decoupled_read(32) +__gen9_decoupled_read(64) __fwtable_read(8) __fwtable_read(16) __fwtable_read(32) @@ -1064,6 +1154,25 @@ fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bo GEN6_WRITE_FOOTER; \ } +#define __gen9_decoupled_write(x) \ +static void \ +gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \ + i915_reg_t reg, u##x val, \ + bool trace) { \ + enum forcewake_domains fw_engine; \ + GEN6_WRITE_HEADER; \ + fw_engine = __fwtable_reg_write_fw_domains(offset); \ + if (fw_engine & ~dev_priv->uncore.fw_domains_active) \ + __gen9_decoupled_mmio_write(dev_priv, \ + offset, \ + val, \ + fw_engine); \ + else \ + __raw_i915_write##x(dev_priv, reg, val); \ + GEN6_WRITE_FOOTER; \ +} + +__gen9_decoupled_write(32) __fwtable_write(8) __fwtable_write(16) __fwtable_write(32) @@ -1287,6 +1396,14 @@ void intel_uncore_init(struct drm_i915_private *dev_priv) ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges); ASSIGN_WRITE_MMIO_VFUNCS(fwtable); ASSIGN_READ_MMIO_VFUNCS(fwtable); + if (HAS_DECOUPLED_MMIO(dev_priv)) { + dev_priv->uncore.funcs.mmio_readl = + gen9_decoupled_read32; + dev_priv->uncore.funcs.mmio_readq = + gen9_decoupled_read64; + dev_priv->uncore.funcs.mmio_writel = + gen9_decoupled_write32; + } break; case 8: if (IS_CHERRYVIEW(dev_priv)) { @@ -1368,7 +1485,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) && - (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask)) + (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask)) break; } diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index 68db9621f1f0..8886cab19f98 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -280,7 +280,8 @@ struct common_child_dev_config { u8 dp_support:1; u8 tmds_support:1; u8 support_reserved:5; - u8 not_common3[12]; + u8 aux_channel; + u8 not_common3[11]; u8 iboost_level; } __packed; diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 4e1ae3fc462d..6be515a9fb69 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -68,6 +68,12 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, ipu_dc_disable_channel(ipu_crtc->dc); ipu_di_disable(ipu_crtc->di); + /* + * Planes must be disabled before DC clock is removed, as otherwise the + * attached IDMACs will be left in undefined state, possibly hanging + * the IPU or even system. 
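+ * Hence, disable the planes first, while the DC clock is still + * running, before ipu_dc_disable() below.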
+ */ + drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false); ipu_dc_disable(ipu); spin_lock_irq(&crtc->dev->event_lock); @@ -77,9 +83,6 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, } spin_unlock_irq(&crtc->dev->event_lock); - /* always disable planes on the CRTC */ - drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true); - drm_crtc_vblank_off(crtc); } diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 4e2806cf778c..028c24df2291 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -6,6 +6,8 @@ msm-y := \ adreno/adreno_gpu.o \ adreno/a3xx_gpu.o \ adreno/a4xx_gpu.o \ + adreno/a5xx_gpu.o \ + adreno/a5xx_power.o \ hdmi/hdmi.o \ hdmi/hdmi_audio.o \ hdmi/hdmi_bridge.o \ @@ -37,6 +39,7 @@ msm-y := \ mdp/mdp5/mdp5_irq.o \ mdp/mdp5/mdp5_mdss.o \ mdp/mdp5/mdp5_kms.o \ + mdp/mdp5/mdp5_pipe.o \ mdp/mdp5/mdp5_plane.o \ mdp/mdp5/mdp5_smp.o \ msm_atomic.o \ @@ -48,6 +51,7 @@ msm-y := \ msm_gem_prime.o \ msm_gem_shrinker.o \ msm_gem_submit.o \ + msm_gem_vma.o \ msm_gpu.o \ msm_iommu.o \ msm_perf.o \ diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h index fee24297fb92..4be092f911f9 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h @@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31) +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05) - /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) -Copyright (C) 2013-2015 by the following authors: +Copyright (C) 2013-2016 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) @@ -206,12 +207,12 @@ enum a2xx_rb_copy_sample_select { }; enum a2xx_rb_blend_opcode { - BLEND_DST_PLUS_SRC = 0, - BLEND_SRC_MINUS_DST = 1, - BLEND_MIN_DST_SRC = 2, - BLEND_MAX_DST_SRC = 3, - BLEND_DST_MINUS_SRC = 4, - BLEND_DST_PLUS_SRC_BIAS = 5, + BLEND2_DST_PLUS_SRC = 0, + 
BLEND2_SRC_MINUS_DST = 1, + BLEND2_MIN_DST_SRC = 2, + BLEND2_MAX_DST_SRC = 3, + BLEND2_DST_MINUS_SRC = 4, + BLEND2_DST_PLUS_SRC_BIAS = 5, }; enum adreno_mmu_clnt_beh { diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h index 27dabd5e57fb..a066c8b9eccd 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h @@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31) +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05) - /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) Copyright (C) 2013-2016 by the following authors: @@ -129,10 +130,14 @@ enum a3xx_tex_fmt { TFMT_Z16_UNORM = 9, TFMT_X8Z24_UNORM = 10, TFMT_Z32_FLOAT = 11, - TFMT_NV12_UV_TILED = 17, - TFMT_NV12_Y_TILED = 19, - TFMT_NV12_UV = 21, - TFMT_NV12_Y = 23, + TFMT_UV_64X32 = 16, + TFMT_VU_64X32 = 17, + TFMT_Y_64X32 = 18, + TFMT_NV12_64X32 = 19, + TFMT_UV_LINEAR = 20, + TFMT_VU_LINEAR = 21, + TFMT_Y_LINEAR = 22, + TFMT_NV12_LINEAR = 23, TFMT_I420_Y = 24, TFMT_I420_U = 26, TFMT_I420_V = 27, @@ -525,14 +530,6 @@ enum a3xx_uche_perfcounter_select { UCHE_UCHEPERF_ACTIVE_CYCLES = 20, }; -enum a3xx_rb_blend_opcode { - BLEND_DST_PLUS_SRC = 0, - BLEND_SRC_MINUS_DST = 1, - BLEND_DST_MINUS_SRC = 2, - BLEND_MIN_DST_SRC = 3, - BLEND_MAX_DST_SRC = 4, -}; - enum a3xx_intp_mode { SMOOTH = 0, FLAT = 1, @@ -1393,13 +1390,14 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mod { return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK; } +#define A3XX_RB_COPY_CONTROL_MSAA_SRGB_DOWNSAMPLE 0x00000080 #define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00 #define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8 static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val) { return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK; } -#define A3XX_RB_COPY_CONTROL_UNK12 0x00001000 +#define 
A3XX_RB_COPY_CONTROL_DEPTH32_RESOLVE 0x00001000 #define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000 #define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14 static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val) @@ -1472,7 +1470,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val) { return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK; } -#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080 +#define A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080 #define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000 #define REG_A3XX_RB_DEPTH_CLEAR 0x00002101 diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index fd266ed963b6..b999349b7d2d 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -41,7 +41,7 @@ extern bool hang_debug; static void a3xx_dump(struct msm_gpu *gpu); -static void a3xx_me_init(struct msm_gpu *gpu) +static bool a3xx_me_init(struct msm_gpu *gpu) { struct msm_ringbuffer *ring = gpu->rb; @@ -65,7 +65,7 @@ static void a3xx_me_init(struct msm_gpu *gpu) OUT_RING(ring, 0x00000000); gpu->funcs->flush(gpu); - gpu->funcs->idle(gpu); + return gpu->funcs->idle(gpu); } static int a3xx_hw_init(struct msm_gpu *gpu) @@ -294,15 +294,20 @@ static int a3xx_hw_init(struct msm_gpu *gpu) /* clear ME_HALT to start micro engine */ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0); - a3xx_me_init(gpu); - - return 0; + return a3xx_me_init(gpu) ? 0 : -EINVAL; } static void a3xx_recover(struct msm_gpu *gpu) { + int i; + adreno_dump_info(gpu); + for (i = 0; i < 8; i++) { + printk("CP_SCRATCH_REG%d: %u\n", i, + gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i)); + } + /* dump registers before resetting gpu, if enabled: */ if (hang_debug) a3xx_dump(gpu); @@ -330,17 +335,22 @@ static void a3xx_destroy(struct msm_gpu *gpu) kfree(a3xx_gpu); } -static void a3xx_idle(struct msm_gpu *gpu) +static bool a3xx_idle(struct msm_gpu *gpu) { /* wait for ringbuffer to drain: */ - adreno_idle(gpu); + if (!adreno_idle(gpu)) + return false; /* then wait for GPU to finish: */ if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) & - A3XX_RBBM_STATUS_GPU_BUSY))) + A3XX_RBBM_STATUS_GPU_BUSY))) { DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name); - /* TODO maybe we need to reset GPU here to recover from hang? */ + /* TODO maybe we need to reset GPU here to recover from hang? 
*/ + return false; + } + + return true; } static irqreturn_t a3xx_irq(struct msm_gpu *gpu) @@ -419,91 +429,13 @@ static void a3xx_dump(struct msm_gpu *gpu) } /* Register offset defines for A3XX */ static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { - REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG), - REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA), - REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA, - REG_A3XX_CP_PFP_UCODE_DATA), - REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR, - REG_A3XX_CP_PFP_UCODE_ADDR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR), REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE), + REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI), REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR), + REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI), REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR), REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL), - REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL), REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL), - REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE), - REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ), - REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE), - REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ), - REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0), - REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR), - REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR), - REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK), - REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA), - REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA), - REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2), - REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA), - REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT), - REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS, - REG_A3XX_CP_PROTECT_STATUS), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL, - REG_A3XX_RBBM_PERFCTR_CTL), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0, - REG_A3XX_RBBM_PERFCTR_LOAD_CMD0), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1, - REG_A3XX_RBBM_PERFCTR_LOAD_CMD1), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO, - REG_A3XX_RBBM_PERFCTR_PWR_1_LO), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS, - REG_A3XX_RBBM_INT_0_STATUS), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS, - REG_A3XX_RBBM_AHB_ERROR_STATUS), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD, - REG_A3XX_RBBM_INT_CLEAR_CMD), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL), - REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL, - REG_A3XX_VPC_VPC_DEBUG_RAM_SEL), - REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ, - REG_A3XX_VPC_VPC_DEBUG_RAM_READ), - 
REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS, - REG_A3XX_VSC_SIZE_ADDRESS), - REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0), - REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX), - REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG, - REG_A3XX_SP_VS_PVT_MEM_ADDR_REG), - REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG, - REG_A3XX_SP_FS_PVT_MEM_ADDR_REG), - REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG, - REG_A3XX_SP_VS_OBJ_START_REG), - REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG, - REG_A3XX_SP_FS_OBJ_START_REG), - REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2, - REG_A3XX_RBBM_PM_OVERRIDE2), - REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2), - REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT, - REG_A3XX_SQ_GPR_MANAGEMENT), - REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT, - REG_A3XX_SQ_INST_STORE_MANAGMENT), - REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD, - REG_A3XX_RBBM_SW_RESET_CMD), - REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0, - REG_A3XX_UCHE_CACHE_INVALIDATE0_REG), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO, - REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI, - REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI), }; static const struct adreno_gpu_funcs funcs = { @@ -583,7 +515,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) #endif } - if (!gpu->mmu) { + if (!gpu->aspace) { /* TODO we think it is possible to configure the GPU to * restrict access to VRAM carveout. But the required * registers are unknown. 
For now just bail out and diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h index 3220b91f559a..4ce21b902779 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h @@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31) +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05) - /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) Copyright (C) 2013-2016 by the following authors: @@ -46,6 +47,9 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
enum a4xx_color_fmt { RB4_A8_UNORM = 1, RB4_R8_UNORM = 2, + RB4_R8_SNORM = 3, + RB4_R8_UINT = 4, + RB4_R8_SINT = 5, RB4_R4G4B4A4_UNORM = 8, RB4_R5G5B5A1_UNORM = 10, RB4_R5G6B5_UNORM = 14, @@ -89,17 +93,10 @@ enum a4xx_color_fmt { enum a4xx_tile_mode { TILE4_LINEAR = 0, + TILE4_2 = 2, TILE4_3 = 3, }; -enum a4xx_rb_blend_opcode { - BLEND_DST_PLUS_SRC = 0, - BLEND_SRC_MINUS_DST = 1, - BLEND_DST_MINUS_SRC = 2, - BLEND_MIN_DST_SRC = 3, - BLEND_MAX_DST_SRC = 4, -}; - enum a4xx_vtx_fmt { VFMT4_32_FLOAT = 1, VFMT4_32_32_FLOAT = 2, @@ -940,6 +937,7 @@ static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val) { return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK; } +#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM 0x00010000 #define REG_A4XX_RB_RENDER_CONTROL 0x000020a1 #define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001 @@ -1043,7 +1041,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_b } #define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0 #define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5 -static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a4xx_rb_blend_opcode val) +static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) { return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK; } @@ -1061,7 +1059,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb } #define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000 #define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21 -static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a4xx_rb_blend_opcode val) +static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) { return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK; } @@ -1073,12 +1071,18 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_r } #define REG_A4XX_RB_BLEND_RED 0x000020f0 -#define A4XX_RB_BLEND_RED_UINT__MASK 0x0000ffff +#define A4XX_RB_BLEND_RED_UINT__MASK 0x000000ff #define A4XX_RB_BLEND_RED_UINT__SHIFT 0 static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val) { return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK; } +#define A4XX_RB_BLEND_RED_SINT__MASK 0x0000ff00 +#define A4XX_RB_BLEND_RED_SINT__SHIFT 8 +static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_RED_SINT__SHIFT) & A4XX_RB_BLEND_RED_SINT__MASK; +} #define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000 #define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16 static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val) @@ -1095,12 +1099,18 @@ static inline uint32_t A4XX_RB_BLEND_RED_F32(float val) } #define REG_A4XX_RB_BLEND_GREEN 0x000020f2 -#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x0000ffff +#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff #define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0 static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val) { return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK; } +#define A4XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00 +#define A4XX_RB_BLEND_GREEN_SINT__SHIFT 8 +static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_GREEN_SINT__SHIFT) & A4XX_RB_BLEND_GREEN_SINT__MASK; +} #define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000 #define 
A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16 static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val) @@ -1117,12 +1127,18 @@ static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val) } #define REG_A4XX_RB_BLEND_BLUE 0x000020f4 -#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x0000ffff +#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff #define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0 static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val) { return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK; } +#define A4XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00 +#define A4XX_RB_BLEND_BLUE_SINT__SHIFT 8 +static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_BLUE_SINT__SHIFT) & A4XX_RB_BLEND_BLUE_SINT__MASK; +} #define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000 #define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16 static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val) @@ -1139,12 +1155,18 @@ static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val) } #define REG_A4XX_RB_BLEND_ALPHA 0x000020f6 -#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x0000ffff +#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff #define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0 static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val) { return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK; } +#define A4XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00 +#define A4XX_RB_BLEND_ALPHA_SINT__SHIFT 8 +static inline uint32_t A4XX_RB_BLEND_ALPHA_SINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_ALPHA_SINT__SHIFT) & A4XX_RB_BLEND_ALPHA_SINT__MASK; +} #define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000 #define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16 static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val) @@ -1348,7 +1370,7 @@ static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val) { return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK; } -#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080 +#define A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080 #define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000 #define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS 0x00020000 #define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000 @@ -2177,11 +2199,23 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) #define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232 -#define REG_A4XX_CP_PROTECT_REG_0 0x00000240 - static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; } static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; } +#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff +#define A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0 +static inline uint32_t A4XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val) +{ + return ((val) << A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A4XX_CP_PROTECT_REG_BASE_ADDR__MASK; +} +#define A4XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000 +#define A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24 +static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val) +{ + return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK; +} +#define A4XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000 +#define A4XX_CP_PROTECT_REG_TRAP_READ 0x40000000 #define REG_A4XX_CP_PROTECT_CTRL 0x00000250 @@ -2272,7 +2306,7 @@ static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) { return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK; } -#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00 
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 #define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) { @@ -2420,7 +2454,7 @@ static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) { return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK; } -#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00 +#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 #define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) { @@ -3117,6 +3151,8 @@ static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val) #define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000 #define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00008000 +#define A4XX_GRAS_CL_CLIP_CNTL_ZNEAR_CLIP_DISABLE 0x00010000 +#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000 #define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000 #define REG_A4XX_GRAS_CLEAR_CNTL 0x00002003 @@ -3253,6 +3289,7 @@ static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val) return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK; } #define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800 +#define A4XX_GRAS_SU_MODE_CONTROL_MSAA_ENABLE 0x00002000 #define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000 #define REG_A4XX_GRAS_SC_CONTROL 0x0000207b @@ -3670,6 +3707,8 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val) #define REG_A4XX_PC_BINNING_COMMAND 0x00000d00 #define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001 +#define REG_A4XX_PC_TESSFACTOR_ADDR 0x00000d08 + #define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c #define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10 @@ -3690,6 +3729,20 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val) #define REG_A4XX_PC_BIN_BASE 0x000021c0 +#define REG_A4XX_PC_VSTREAM_CONTROL 0x000021c2 +#define A4XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000 +#define A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16 +static inline uint32_t A4XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val) +{ + return ((val) << A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A4XX_PC_VSTREAM_CONTROL_SIZE__MASK; +} +#define A4XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000 +#define A4XX_PC_VSTREAM_CONTROL_N__SHIFT 22 +static inline uint32_t A4XX_PC_VSTREAM_CONTROL_N(uint32_t val) +{ + return ((val) << A4XX_PC_VSTREAM_CONTROL_N__SHIFT) & A4XX_PC_VSTREAM_CONTROL_N__MASK; +} + #define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4 #define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK 0x0000000f #define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT 0 @@ -3752,12 +3805,8 @@ static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val) { return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK; } -#define A4XX_PC_HS_PARAM_PRIMTYPE__MASK 0x01800000 -#define A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT 23 -static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val) -{ - return ((val) << A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_HS_PARAM_PRIMTYPE__MASK; -} +#define A4XX_PC_HS_PARAM_CW 0x00800000 +#define A4XX_PC_HS_PARAM_CONNECTED 0x01000000 #define REG_A4XX_VBIF_VERSION 0x00003000 diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index d0d3c7baa8fe..511bc855cc7f 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ 
-113,7 +113,7 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu) } -static void a4xx_me_init(struct msm_gpu *gpu) +static bool a4xx_me_init(struct msm_gpu *gpu) { struct msm_ringbuffer *ring = gpu->rb; @@ -137,7 +137,7 @@ static void a4xx_me_init(struct msm_gpu *gpu) OUT_RING(ring, 0x00000000); gpu->funcs->flush(gpu); - gpu->funcs->idle(gpu); + return gpu->funcs->idle(gpu); } static int a4xx_hw_init(struct msm_gpu *gpu) @@ -292,15 +292,20 @@ static int a4xx_hw_init(struct msm_gpu *gpu) /* clear ME_HALT to start micro engine */ gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0); - a4xx_me_init(gpu); - - return 0; + return a4xx_me_init(gpu) ? 0 : -EINVAL; } static void a4xx_recover(struct msm_gpu *gpu) { + int i; + adreno_dump_info(gpu); + for (i = 0; i < 8; i++) { + printk("CP_SCRATCH_REG%d: %u\n", i, + gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i)); + } + /* dump registers before resetting gpu, if enabled: */ if (hang_debug) a4xx_dump(gpu); @@ -328,17 +333,21 @@ static void a4xx_destroy(struct msm_gpu *gpu) kfree(a4xx_gpu); } -static void a4xx_idle(struct msm_gpu *gpu) +static bool a4xx_idle(struct msm_gpu *gpu) { /* wait for ringbuffer to drain: */ - adreno_idle(gpu); + if (!adreno_idle(gpu)) + return false; /* then wait for GPU to finish: */ if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) & - A4XX_RBBM_STATUS_GPU_BUSY))) + A4XX_RBBM_STATUS_GPU_BUSY))) { DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name); + /* TODO maybe we need to reset GPU here to recover from hang? */ + return false; + } - /* TODO maybe we need to reset GPU here to recover from hang? */ + return true; } static irqreturn_t a4xx_irq(struct msm_gpu *gpu) @@ -460,87 +469,13 @@ static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m) /* Register offset defines for A4XX, in order of enum adreno_regs */ static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { - REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG), - REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA), - REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA, - REG_A4XX_CP_PFP_UCODE_DATA), - REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR, - REG_A4XX_CP_PFP_UCODE_ADDR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR), REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE), + REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI), REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR), + REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI), REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR), REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL), - REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL), REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL), - REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE), - REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ), - REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE), - REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ), - REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0), - REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA), - REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR), - 
REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA), - REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2), - REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA), - REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT), - REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS, - REG_A4XX_CP_PROTECT_STATUS), - REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR), - REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL, - REG_A4XX_RBBM_PERFCTR_CTL), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0, - REG_A4XX_RBBM_PERFCTR_LOAD_CMD0), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1, - REG_A4XX_RBBM_PERFCTR_LOAD_CMD1), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2, - REG_A4XX_RBBM_PERFCTR_LOAD_CMD2), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO, - REG_A4XX_RBBM_PERFCTR_PWR_1_LO), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS, - REG_A4XX_RBBM_INT_0_STATUS), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS, - REG_A4XX_RBBM_AHB_ERROR_STATUS), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS, - REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS, - REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS), - REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL, - REG_A4XX_VPC_DEBUG_RAM_SEL), - REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ, - REG_A4XX_VPC_DEBUG_RAM_READ), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD, - REG_A4XX_RBBM_INT_CLEAR_CMD), - REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS, - REG_A4XX_VSC_SIZE_ADDRESS), - REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0), - REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG, - REG_A4XX_SP_VS_PVT_MEM_ADDR), - REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG, - REG_A4XX_SP_FS_PVT_MEM_ADDR), - REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG, - REG_A4XX_SP_VS_OBJ_START), - REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG, - REG_A4XX_SP_FS_OBJ_START), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD, - REG_A4XX_RBBM_SW_RESET_CMD), - REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0, - REG_A4XX_UCHE_INVALIDATE0), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO, - REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO), - REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI, - REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI), }; static void a4xx_dump(struct msm_gpu *gpu) @@ -587,16 +522,8 @@ static int a4xx_pm_suspend(struct msm_gpu *gpu) { static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) { - uint32_t hi, lo, tmp; - - tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI); - do { - hi = tmp; - lo = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO); - tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI); - } while (tmp != hi); - - *value = (((uint64_t)hi) << 32) | lo; + *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO, + REG_A4XX_RBBM_PERFCTR_CP_0_HI); return 0; } @@ -672,7 +599,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) #endif } - if (!gpu->mmu) { + if (!gpu->aspace) { /* TODO we think it is possible to configure 
the GPU to * restrict access to VRAM carveout. But the required * registers are unknown. For now just bail out and diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h new file mode 100644 index 000000000000..b6fe763ddf34 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h @@ -0,0 +1,3757 @@ +#ifndef A5XX_XML +#define A5XX_XML + +/* Autogenerated file, DO NOT EDIT manually! + +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00) + +Copyright (C) 2013-2016 by the following authors: +- Rob Clark <robdclark@gmail.com> (robclark) +- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum a5xx_color_fmt { + RB5_R8_UNORM = 3, + RB5_R4G4B4A4_UNORM = 8, + RB5_R5G5B5A1_UNORM = 10, + RB5_R5G6B5_UNORM = 14, + RB5_R16_FLOAT = 23, + RB5_R8G8B8A8_UNORM = 48, + RB5_R8G8B8_UNORM = 49, + RB5_R8G8B8A8_UINT = 51, + RB5_R10G10B10A2_UINT = 58, + RB5_R16G16_FLOAT = 69, + RB5_R32_FLOAT = 74, + RB5_R16G16B16A16_FLOAT = 98, + RB5_R32G32_FLOAT = 103, + RB5_R32G32B32A32_FLOAT = 130, +}; + +enum a5xx_tile_mode { + TILE5_LINEAR = 0, + TILE5_2 = 2, + TILE5_3 = 3, +}; + +enum a5xx_vtx_fmt { + VFMT5_8_UNORM = 3, + VFMT5_8_SNORM = 4, + VFMT5_8_UINT = 5, + VFMT5_8_SINT = 6, + VFMT5_8_8_UNORM = 15, + VFMT5_8_8_SNORM = 16, + VFMT5_8_8_UINT = 17, + VFMT5_8_8_SINT = 18, + VFMT5_16_UNORM = 21, + VFMT5_16_SNORM = 22, + VFMT5_16_FLOAT = 23, + VFMT5_16_UINT = 24, + VFMT5_16_SINT = 25, + VFMT5_8_8_8_UNORM = 33, + VFMT5_8_8_8_SNORM = 34, + VFMT5_8_8_8_UINT = 35, + VFMT5_8_8_8_SINT = 36, + VFMT5_8_8_8_8_UNORM = 48, + VFMT5_8_8_8_8_SNORM = 50, + VFMT5_8_8_8_8_UINT = 51, + VFMT5_8_8_8_8_SINT = 52, + VFMT5_16_16_UNORM = 67, + VFMT5_16_16_SNORM = 68, + VFMT5_16_16_FLOAT = 69, + VFMT5_16_16_UINT = 70, + VFMT5_16_16_SINT = 71, + VFMT5_32_UNORM = 72, + VFMT5_32_SNORM = 73, + VFMT5_32_FLOAT = 74, + VFMT5_32_UINT = 75, + VFMT5_32_SINT = 76, + VFMT5_32_FIXED = 77, + VFMT5_16_16_16_UNORM = 88, + VFMT5_16_16_16_SNORM = 89, + VFMT5_16_16_16_FLOAT = 90, + VFMT5_16_16_16_UINT = 91, + VFMT5_16_16_16_SINT = 92, + VFMT5_16_16_16_16_UNORM = 96, + VFMT5_16_16_16_16_SNORM = 97, + VFMT5_16_16_16_16_FLOAT = 98, + VFMT5_16_16_16_16_UINT = 99, + VFMT5_16_16_16_16_SINT = 100, + VFMT5_32_32_UNORM = 101, + VFMT5_32_32_SNORM = 102, + VFMT5_32_32_FLOAT = 103, + VFMT5_32_32_UINT = 104, + VFMT5_32_32_SINT = 105, + VFMT5_32_32_FIXED = 106, + VFMT5_32_32_32_UNORM = 112, + VFMT5_32_32_32_SNORM = 113, + VFMT5_32_32_32_UINT = 114, + VFMT5_32_32_32_SINT = 115, + VFMT5_32_32_32_FLOAT = 116, + VFMT5_32_32_32_FIXED = 117, + VFMT5_32_32_32_32_UNORM = 128, + VFMT5_32_32_32_32_SNORM = 129, + VFMT5_32_32_32_32_FLOAT = 130, + VFMT5_32_32_32_32_UINT = 131, + VFMT5_32_32_32_32_SINT = 132, + VFMT5_32_32_32_32_FIXED = 133, +}; + +enum a5xx_tex_fmt { + TFMT5_A8_UNORM = 2, + TFMT5_8_UNORM = 3, + TFMT5_4_4_4_4_UNORM = 8, + TFMT5_5_5_5_1_UNORM = 10, + TFMT5_5_6_5_UNORM = 14, + TFMT5_8_8_UNORM = 15, + TFMT5_8_8_SNORM = 16, + TFMT5_L8_A8_UNORM = 19, + TFMT5_16_FLOAT = 23, + TFMT5_8_8_8_8_UNORM = 48, + TFMT5_8_8_8_UNORM = 49, + TFMT5_8_8_8_SNORM = 50, + TFMT5_9_9_9_E5_FLOAT = 53, + TFMT5_10_10_10_2_UNORM = 54, + TFMT5_11_11_10_FLOAT = 66, + TFMT5_16_16_FLOAT = 69, + TFMT5_32_FLOAT = 74, + TFMT5_16_16_16_16_FLOAT = 98, + TFMT5_32_32_FLOAT = 103, + TFMT5_32_32_32_32_FLOAT = 130, + TFMT5_X8Z24_UNORM = 160, +}; + +enum a5xx_tex_fetchsize { + TFETCH5_1_BYTE = 0, + TFETCH5_2_BYTE = 1, + TFETCH5_4_BYTE = 2, + TFETCH5_8_BYTE = 3, + TFETCH5_16_BYTE = 4, +}; + +enum a5xx_depth_format { + DEPTH5_NONE = 0, + DEPTH5_16 = 1, + DEPTH5_24_8 = 2, + DEPTH5_32 = 4, +}; + +enum a5xx_blit_buf { + BLIT_MRT0 = 0, + BLIT_MRT1 = 1, + BLIT_MRT2 = 2, + BLIT_MRT3 = 3, + BLIT_MRT4 = 4, + BLIT_MRT5 = 5, + BLIT_MRT6 = 6, + BLIT_MRT7 = 7, + BLIT_ZS = 8, + BLIT_Z32 = 9, +}; + +enum a5xx_tex_filter { + A5XX_TEX_NEAREST = 0, + A5XX_TEX_LINEAR = 1, + A5XX_TEX_ANISO = 2, +}; + +enum a5xx_tex_clamp { + A5XX_TEX_REPEAT = 0, + A5XX_TEX_CLAMP_TO_EDGE = 1, + A5XX_TEX_MIRROR_REPEAT = 2, + A5XX_TEX_CLAMP_TO_BORDER = 3, + A5XX_TEX_MIRROR_CLAMP = 4, +}; + +enum a5xx_tex_aniso { + A5XX_TEX_ANISO_1 = 0, + A5XX_TEX_ANISO_2 = 1, + A5XX_TEX_ANISO_4 = 2, + A5XX_TEX_ANISO_8 = 3, + A5XX_TEX_ANISO_16 = 4, +}; 
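The A5XX_TEX_ANISO_1..A5XX_TEX_ANISO_16 values above encode the maximum anisotropy as its base-2 logarithm (1, 2, 4, 8, 16 samples map to 0..4), so a driver can derive the field directly from a requested sample count. A hedged illustration, not part of the generated header:

	#include <stdint.h>

	/* hypothetical helper: sample count (1..16) -> a5xx_tex_aniso value */
	static inline uint32_t demo_tex_aniso(uint32_t samples)
	{
		uint32_t v = 0;

		while (samples > 1 && v < 4) {	/* 4 == A5XX_TEX_ANISO_16 */
			samples >>= 1;
			v++;
		}
		return v;	/* 1->0, 2->1, 4->2, 8->3, 16->4 */
	}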
+ +enum a5xx_tex_swiz { + A5XX_TEX_X = 0, + A5XX_TEX_Y = 1, + A5XX_TEX_Z = 2, + A5XX_TEX_W = 3, + A5XX_TEX_ZERO = 4, + A5XX_TEX_ONE = 5, +}; + +enum a5xx_tex_type { + A5XX_TEX_1D = 0, + A5XX_TEX_2D = 1, + A5XX_TEX_CUBE = 2, + A5XX_TEX_3D = 3, +}; + +#define A5XX_INT0_RBBM_GPU_IDLE 0x00000001 +#define A5XX_INT0_RBBM_AHB_ERROR 0x00000002 +#define A5XX_INT0_RBBM_TRANSFER_TIMEOUT 0x00000004 +#define A5XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008 +#define A5XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010 +#define A5XX_INT0_RBBM_ETS_MS_TIMEOUT 0x00000020 +#define A5XX_INT0_RBBM_ATB_ASYNC_OVERFLOW 0x00000040 +#define A5XX_INT0_RBBM_GPC_ERROR 0x00000080 +#define A5XX_INT0_CP_SW 0x00000100 +#define A5XX_INT0_CP_HW_ERROR 0x00000200 +#define A5XX_INT0_CP_CCU_FLUSH_DEPTH_TS 0x00000400 +#define A5XX_INT0_CP_CCU_FLUSH_COLOR_TS 0x00000800 +#define A5XX_INT0_CP_CCU_RESOLVE_TS 0x00001000 +#define A5XX_INT0_CP_IB2 0x00002000 +#define A5XX_INT0_CP_IB1 0x00004000 +#define A5XX_INT0_CP_RB 0x00008000 +#define A5XX_INT0_CP_UNUSED_1 0x00010000 +#define A5XX_INT0_CP_RB_DONE_TS 0x00020000 +#define A5XX_INT0_CP_WT_DONE_TS 0x00040000 +#define A5XX_INT0_UNKNOWN_1 0x00080000 +#define A5XX_INT0_CP_CACHE_FLUSH_TS 0x00100000 +#define A5XX_INT0_UNUSED_2 0x00200000 +#define A5XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00400000 +#define A5XX_INT0_MISC_HANG_DETECT 0x00800000 +#define A5XX_INT0_UCHE_OOB_ACCESS 0x01000000 +#define A5XX_INT0_UCHE_TRAP_INTR 0x02000000 +#define A5XX_INT0_DEBBUS_INTR_0 0x04000000 +#define A5XX_INT0_DEBBUS_INTR_1 0x08000000 +#define A5XX_INT0_GPMU_VOLTAGE_DROOP 0x10000000 +#define A5XX_INT0_GPMU_FIRMWARE 0x20000000 +#define A5XX_INT0_ISDB_CPU_IRQ 0x40000000 +#define A5XX_INT0_ISDB_UNDER_DEBUG 0x80000000 +#define A5XX_CP_INT_CP_OPCODE_ERROR 0x00000001 +#define A5XX_CP_INT_CP_RESERVED_BIT_ERROR 0x00000002 +#define A5XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004 +#define A5XX_CP_INT_CP_DMA_ERROR 0x00000008 +#define A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010 +#define A5XX_CP_INT_CP_AHB_ERROR 0x00000020 +#define REG_A5XX_CP_RB_BASE 0x00000800 + +#define REG_A5XX_CP_RB_BASE_HI 0x00000801 + +#define REG_A5XX_CP_RB_CNTL 0x00000802 + +#define REG_A5XX_CP_RB_RPTR_ADDR 0x00000804 + +#define REG_A5XX_CP_RB_RPTR_ADDR_HI 0x00000805 + +#define REG_A5XX_CP_RB_RPTR 0x00000806 + +#define REG_A5XX_CP_RB_WPTR 0x00000807 + +#define REG_A5XX_CP_PFP_STAT_ADDR 0x00000808 + +#define REG_A5XX_CP_PFP_STAT_DATA 0x00000809 + +#define REG_A5XX_CP_DRAW_STATE_ADDR 0x0000080b + +#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c + +#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817 + +#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818 + +#define REG_A5XX_CP_CRASH_DUMP_CNTL 0x00000819 + +#define REG_A5XX_CP_ME_STAT_ADDR 0x0000081a + +#define REG_A5XX_CP_ROQ_THRESHOLDS_1 0x0000081f + +#define REG_A5XX_CP_ROQ_THRESHOLDS_2 0x00000820 + +#define REG_A5XX_CP_ROQ_DBG_ADDR 0x00000821 + +#define REG_A5XX_CP_ROQ_DBG_DATA 0x00000822 + +#define REG_A5XX_CP_MEQ_DBG_ADDR 0x00000823 + +#define REG_A5XX_CP_MEQ_DBG_DATA 0x00000824 + +#define REG_A5XX_CP_MEQ_THRESHOLDS 0x00000825 + +#define REG_A5XX_CP_MERCIU_SIZE 0x00000826 + +#define REG_A5XX_CP_MERCIU_DBG_ADDR 0x00000827 + +#define REG_A5XX_CP_MERCIU_DBG_DATA_1 0x00000828 + +#define REG_A5XX_CP_MERCIU_DBG_DATA_2 0x00000829 + +#define REG_A5XX_CP_PFP_UCODE_DBG_ADDR 0x0000082a + +#define REG_A5XX_CP_PFP_UCODE_DBG_DATA 0x0000082b + +#define REG_A5XX_CP_ME_UCODE_DBG_ADDR 0x0000082f + +#define REG_A5XX_CP_ME_UCODE_DBG_DATA 0x00000830 + +#define REG_A5XX_CP_CNTL 0x00000831 + +#define REG_A5XX_CP_PFP_ME_CNTL 0x00000832 + +#define 
REG_A5XX_CP_CHICKEN_DBG 0x00000833 + +#define REG_A5XX_CP_PFP_INSTR_BASE_LO 0x00000835 + +#define REG_A5XX_CP_PFP_INSTR_BASE_HI 0x00000836 + +#define REG_A5XX_CP_ME_INSTR_BASE_LO 0x00000838 + +#define REG_A5XX_CP_ME_INSTR_BASE_HI 0x00000839 + +#define REG_A5XX_CP_CONTEXT_SWITCH_CNTL 0x0000083b + +#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO 0x0000083c + +#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI 0x0000083d + +#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO 0x0000083e + +#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI 0x0000083f + +#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x00000840 + +#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x00000841 + +#define REG_A5XX_CP_ADDR_MODE_CNTL 0x00000860 + +#define REG_A5XX_CP_ME_STAT_DATA 0x00000b14 + +#define REG_A5XX_CP_WFI_PEND_CTR 0x00000b15 + +#define REG_A5XX_CP_INTERRUPT_STATUS 0x00000b18 + +#define REG_A5XX_CP_HW_FAULT 0x00000b1a + +#define REG_A5XX_CP_PROTECT_STATUS 0x00000b1c + +#define REG_A5XX_CP_IB1_BASE 0x00000b1f + +#define REG_A5XX_CP_IB1_BASE_HI 0x00000b20 + +#define REG_A5XX_CP_IB1_BUFSZ 0x00000b21 + +#define REG_A5XX_CP_IB2_BASE 0x00000b22 + +#define REG_A5XX_CP_IB2_BASE_HI 0x00000b23 + +#define REG_A5XX_CP_IB2_BUFSZ 0x00000b24 + +static inline uint32_t REG_A5XX_CP_SCRATCH(uint32_t i0) { return 0x00000b78 + 0x1*i0; } + +static inline uint32_t REG_A5XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000b78 + 0x1*i0; } + +static inline uint32_t REG_A5XX_CP_PROTECT(uint32_t i0) { return 0x00000880 + 0x1*i0; } + +static inline uint32_t REG_A5XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000880 + 0x1*i0; } +#define A5XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff +#define A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0 +static inline uint32_t A5XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val) +{ + return ((val) << A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A5XX_CP_PROTECT_REG_BASE_ADDR__MASK; +} +#define A5XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000 +#define A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24 +static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val) +{ + return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK; +} +#define A5XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000 +#define A5XX_CP_PROTECT_REG_TRAP_READ 0x40000000 + +#define REG_A5XX_CP_PROTECT_CNTL 0x000008a0 + +#define REG_A5XX_CP_AHB_FAULT 0x00000b1b + +#define REG_A5XX_CP_PERFCTR_CP_SEL_0 0x00000bb0 + +#define REG_A5XX_CP_PERFCTR_CP_SEL_1 0x00000bb1 + +#define REG_A5XX_CP_PERFCTR_CP_SEL_2 0x00000bb2 + +#define REG_A5XX_CP_PERFCTR_CP_SEL_3 0x00000bb3 + +#define REG_A5XX_CP_PERFCTR_CP_SEL_4 0x00000bb4 + +#define REG_A5XX_CP_PERFCTR_CP_SEL_5 0x00000bb5 + +#define REG_A5XX_CP_PERFCTR_CP_SEL_6 0x00000bb6 + +#define REG_A5XX_CP_PERFCTR_CP_SEL_7 0x00000bb7 + +#define REG_A5XX_VSC_ADDR_MODE_CNTL 0x00000bc1 + +#define REG_A5XX_CP_POWERCTR_CP_SEL_0 0x00000bba + +#define REG_A5XX_CP_POWERCTR_CP_SEL_1 0x00000bbb + +#define REG_A5XX_CP_POWERCTR_CP_SEL_2 0x00000bbc + +#define REG_A5XX_CP_POWERCTR_CP_SEL_3 0x00000bbd + +#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_A 0x00000004 + +#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_B 0x00000005 + +#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_C 0x00000006 + +#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_D 0x00000007 + +#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLT 0x00000008 + +#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLM 0x00000009 + +#define REG_A5XX_RBBM_CFG_DEBBUS_CTLTM_ENABLE_SHIFT 0x00000018 + +#define REG_A5XX_RBBM_CFG_DBGBUS_OPL 0x0000000a + +#define REG_A5XX_RBBM_CFG_DBGBUS_OPE 0x0000000b + +#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_0 0x0000000c + +#define 
REG_A5XX_RBBM_CFG_DBGBUS_IVTL_1 0x0000000d + +#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_2 0x0000000e + +#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_3 0x0000000f + +#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_0 0x00000010 + +#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_1 0x00000011 + +#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_2 0x00000012 + +#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_3 0x00000013 + +#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_0 0x00000014 + +#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_1 0x00000015 + +#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_0 0x00000016 + +#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_1 0x00000017 + +#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_2 0x00000018 + +#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_3 0x00000019 + +#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_0 0x0000001a + +#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_1 0x0000001b + +#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_2 0x0000001c + +#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_3 0x0000001d + +#define REG_A5XX_RBBM_CFG_DBGBUS_NIBBLEE 0x0000001e + +#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC0 0x0000001f + +#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC1 0x00000020 + +#define REG_A5XX_RBBM_CFG_DBGBUS_LOADREG 0x00000021 + +#define REG_A5XX_RBBM_CFG_DBGBUS_IDX 0x00000022 + +#define REG_A5XX_RBBM_CFG_DBGBUS_CLRC 0x00000023 + +#define REG_A5XX_RBBM_CFG_DBGBUS_LOADIVT 0x00000024 + +#define REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000002f + +#define REG_A5XX_RBBM_INT_CLEAR_CMD 0x00000037 + +#define REG_A5XX_RBBM_INT_0_MASK 0x00000038 +#define A5XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001 +#define A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR 0x00000002 +#define A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT 0x00000004 +#define A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT 0x00000008 +#define A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT 0x00000010 +#define A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT 0x00000020 +#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW 0x00000040 +#define A5XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080 +#define A5XX_RBBM_INT_0_MASK_CP_SW 0x00000100 +#define A5XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200 +#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400 +#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800 +#define A5XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000 +#define A5XX_RBBM_INT_0_MASK_CP_IB2 0x00002000 +#define A5XX_RBBM_INT_0_MASK_CP_IB1 0x00004000 +#define A5XX_RBBM_INT_0_MASK_CP_RB 0x00008000 +#define A5XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000 +#define A5XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000 +#define A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000 +#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000 +#define A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT 0x00800000 +#define A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000 +#define A5XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000 +#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000 +#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000 +#define A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP 0x10000000 +#define A5XX_RBBM_INT_0_MASK_GPMU_FIRMWARE 0x20000000 +#define A5XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000 +#define A5XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000 + +#define REG_A5XX_RBBM_AHB_DBG_CNTL 0x0000003f + +#define REG_A5XX_RBBM_EXT_VBIF_DBG_CNTL 0x00000041 + +#define REG_A5XX_RBBM_SW_RESET_CMD 0x00000043 + +#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045 + +#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046 + +#define REG_A5XX_RBBM_DBG_LO_HI_GPIO 0x00000048 + +#define REG_A5XX_RBBM_EXT_TRACE_BUS_CNTL 0x00000049 + +#define REG_A5XX_RBBM_CLOCK_CNTL_TP0 0x0000004a + +#define REG_A5XX_RBBM_CLOCK_CNTL_TP1 
0x0000004b + +#define REG_A5XX_RBBM_CLOCK_CNTL_TP2 0x0000004c + +#define REG_A5XX_RBBM_CLOCK_CNTL_TP3 0x0000004d + +#define REG_A5XX_RBBM_CLOCK_CNTL2_TP0 0x0000004e + +#define REG_A5XX_RBBM_CLOCK_CNTL2_TP1 0x0000004f + +#define REG_A5XX_RBBM_CLOCK_CNTL2_TP2 0x00000050 + +#define REG_A5XX_RBBM_CLOCK_CNTL2_TP3 0x00000051 + +#define REG_A5XX_RBBM_CLOCK_CNTL3_TP0 0x00000052 + +#define REG_A5XX_RBBM_CLOCK_CNTL3_TP1 0x00000053 + +#define REG_A5XX_RBBM_CLOCK_CNTL3_TP2 0x00000054 + +#define REG_A5XX_RBBM_CLOCK_CNTL3_TP3 0x00000055 + +#define REG_A5XX_RBBM_READ_AHB_THROUGH_DBG 0x00000059 + +#define REG_A5XX_RBBM_CLOCK_CNTL_UCHE 0x0000005a + +#define REG_A5XX_RBBM_CLOCK_CNTL2_UCHE 0x0000005b + +#define REG_A5XX_RBBM_CLOCK_CNTL3_UCHE 0x0000005c + +#define REG_A5XX_RBBM_CLOCK_CNTL4_UCHE 0x0000005d + +#define REG_A5XX_RBBM_CLOCK_HYST_UCHE 0x0000005e + +#define REG_A5XX_RBBM_CLOCK_DELAY_UCHE 0x0000005f + +#define REG_A5XX_RBBM_CLOCK_MODE_GPC 0x00000060 + +#define REG_A5XX_RBBM_CLOCK_DELAY_GPC 0x00000061 + +#define REG_A5XX_RBBM_CLOCK_HYST_GPC 0x00000062 + +#define REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000063 + +#define REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x00000064 + +#define REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000065 + +#define REG_A5XX_RBBM_CLOCK_DELAY_HLSQ 0x00000066 + +#define REG_A5XX_RBBM_CLOCK_CNTL 0x00000067 + +#define REG_A5XX_RBBM_CLOCK_CNTL_SP0 0x00000068 + +#define REG_A5XX_RBBM_CLOCK_CNTL_SP1 0x00000069 + +#define REG_A5XX_RBBM_CLOCK_CNTL_SP2 0x0000006a + +#define REG_A5XX_RBBM_CLOCK_CNTL_SP3 0x0000006b + +#define REG_A5XX_RBBM_CLOCK_CNTL2_SP0 0x0000006c + +#define REG_A5XX_RBBM_CLOCK_CNTL2_SP1 0x0000006d + +#define REG_A5XX_RBBM_CLOCK_CNTL2_SP2 0x0000006e + +#define REG_A5XX_RBBM_CLOCK_CNTL2_SP3 0x0000006f + +#define REG_A5XX_RBBM_CLOCK_HYST_SP0 0x00000070 + +#define REG_A5XX_RBBM_CLOCK_HYST_SP1 0x00000071 + +#define REG_A5XX_RBBM_CLOCK_HYST_SP2 0x00000072 + +#define REG_A5XX_RBBM_CLOCK_HYST_SP3 0x00000073 + +#define REG_A5XX_RBBM_CLOCK_DELAY_SP0 0x00000074 + +#define REG_A5XX_RBBM_CLOCK_DELAY_SP1 0x00000075 + +#define REG_A5XX_RBBM_CLOCK_DELAY_SP2 0x00000076 + +#define REG_A5XX_RBBM_CLOCK_DELAY_SP3 0x00000077 + +#define REG_A5XX_RBBM_CLOCK_CNTL_RB0 0x00000078 + +#define REG_A5XX_RBBM_CLOCK_CNTL_RB1 0x00000079 + +#define REG_A5XX_RBBM_CLOCK_CNTL_RB2 0x0000007a + +#define REG_A5XX_RBBM_CLOCK_CNTL_RB3 0x0000007b + +#define REG_A5XX_RBBM_CLOCK_CNTL2_RB0 0x0000007c + +#define REG_A5XX_RBBM_CLOCK_CNTL2_RB1 0x0000007d + +#define REG_A5XX_RBBM_CLOCK_CNTL2_RB2 0x0000007e + +#define REG_A5XX_RBBM_CLOCK_CNTL2_RB3 0x0000007f + +#define REG_A5XX_RBBM_CLOCK_HYST_RAC 0x00000080 + +#define REG_A5XX_RBBM_CLOCK_DELAY_RAC 0x00000081 + +#define REG_A5XX_RBBM_CLOCK_CNTL_CCU0 0x00000082 + +#define REG_A5XX_RBBM_CLOCK_CNTL_CCU1 0x00000083 + +#define REG_A5XX_RBBM_CLOCK_CNTL_CCU2 0x00000084 + +#define REG_A5XX_RBBM_CLOCK_CNTL_CCU3 0x00000085 + +#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000086 + +#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000087 + +#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000088 + +#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000089 + +#define REG_A5XX_RBBM_CLOCK_CNTL_RAC 0x0000008a + +#define REG_A5XX_RBBM_CLOCK_CNTL2_RAC 0x0000008b + +#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0 0x0000008c + +#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1 0x0000008d + +#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2 0x0000008e + +#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3 0x0000008f + +#define REG_A5XX_RBBM_CLOCK_HYST_VFD 0x00000090 + +#define REG_A5XX_RBBM_CLOCK_MODE_VFD 0x00000091 + 
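The a5xx performance counters listed below are, like their a4xx counterparts, exposed as _LO/_HI 32-bit halves. The a4xx_get_timestamp() hunk earlier in this patch replaces an open-coded tear-free read of such a pair with the gpu_read64() helper; for reference, the retry loop being factored out has the shape of this sketch, where reg_read() is a hypothetical 32-bit register accessor:

	#include <stdint.h>

	extern uint32_t reg_read(unsigned int offset);	/* hypothetical */

	/*
	 * Tear-free read of a 64-bit counter split across LO/HI registers:
	 * if HI changed while LO was sampled, LO may belong to either
	 * epoch, so retry until HI reads the same before and after LO.
	 */
	static uint64_t demo_read64(unsigned int lo, unsigned int hi)
	{
		uint32_t h0, l, h1;

		h1 = reg_read(hi);
		do {
			h0 = h1;
			l = reg_read(lo);
			h1 = reg_read(hi);
		} while (h1 != h0);

		return ((uint64_t)h1 << 32) | l;
	}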
+#define REG_A5XX_RBBM_CLOCK_DELAY_VFD 0x00000092 + +#define REG_A5XX_RBBM_AHB_CNTL0 0x00000093 + +#define REG_A5XX_RBBM_AHB_CNTL1 0x00000094 + +#define REG_A5XX_RBBM_AHB_CNTL2 0x00000095 + +#define REG_A5XX_RBBM_AHB_CMD 0x00000096 + +#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11 0x0000009c + +#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12 0x0000009d + +#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13 0x0000009e + +#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14 0x0000009f + +#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15 0x000000a0 + +#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16 0x000000a1 + +#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17 0x000000a2 + +#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18 0x000000a3 + +#define REG_A5XX_RBBM_CLOCK_DELAY_TP0 0x000000a4 + +#define REG_A5XX_RBBM_CLOCK_DELAY_TP1 0x000000a5 + +#define REG_A5XX_RBBM_CLOCK_DELAY_TP2 0x000000a6 + +#define REG_A5XX_RBBM_CLOCK_DELAY_TP3 0x000000a7 + +#define REG_A5XX_RBBM_CLOCK_DELAY2_TP0 0x000000a8 + +#define REG_A5XX_RBBM_CLOCK_DELAY2_TP1 0x000000a9 + +#define REG_A5XX_RBBM_CLOCK_DELAY2_TP2 0x000000aa + +#define REG_A5XX_RBBM_CLOCK_DELAY2_TP3 0x000000ab + +#define REG_A5XX_RBBM_CLOCK_DELAY3_TP0 0x000000ac + +#define REG_A5XX_RBBM_CLOCK_DELAY3_TP1 0x000000ad + +#define REG_A5XX_RBBM_CLOCK_DELAY3_TP2 0x000000ae + +#define REG_A5XX_RBBM_CLOCK_DELAY3_TP3 0x000000af + +#define REG_A5XX_RBBM_CLOCK_HYST_TP0 0x000000b0 + +#define REG_A5XX_RBBM_CLOCK_HYST_TP1 0x000000b1 + +#define REG_A5XX_RBBM_CLOCK_HYST_TP2 0x000000b2 + +#define REG_A5XX_RBBM_CLOCK_HYST_TP3 0x000000b3 + +#define REG_A5XX_RBBM_CLOCK_HYST2_TP0 0x000000b4 + +#define REG_A5XX_RBBM_CLOCK_HYST2_TP1 0x000000b5 + +#define REG_A5XX_RBBM_CLOCK_HYST2_TP2 0x000000b6 + +#define REG_A5XX_RBBM_CLOCK_HYST2_TP3 0x000000b7 + +#define REG_A5XX_RBBM_CLOCK_HYST3_TP0 0x000000b8 + +#define REG_A5XX_RBBM_CLOCK_HYST3_TP1 0x000000b9 + +#define REG_A5XX_RBBM_CLOCK_HYST3_TP2 0x000000ba + +#define REG_A5XX_RBBM_CLOCK_HYST3_TP3 0x000000bb + +#define REG_A5XX_RBBM_CLOCK_CNTL_GPMU 0x000000c8 + +#define REG_A5XX_RBBM_CLOCK_DELAY_GPMU 0x000000c9 + +#define REG_A5XX_RBBM_CLOCK_HYST_GPMU 0x000000ca + +#define REG_A5XX_RBBM_PERFCTR_CP_0_LO 0x000003a0 + +#define REG_A5XX_RBBM_PERFCTR_CP_0_HI 0x000003a1 + +#define REG_A5XX_RBBM_PERFCTR_CP_1_LO 0x000003a2 + +#define REG_A5XX_RBBM_PERFCTR_CP_1_HI 0x000003a3 + +#define REG_A5XX_RBBM_PERFCTR_CP_2_LO 0x000003a4 + +#define REG_A5XX_RBBM_PERFCTR_CP_2_HI 0x000003a5 + +#define REG_A5XX_RBBM_PERFCTR_CP_3_LO 0x000003a6 + +#define REG_A5XX_RBBM_PERFCTR_CP_3_HI 0x000003a7 + +#define REG_A5XX_RBBM_PERFCTR_CP_4_LO 0x000003a8 + +#define REG_A5XX_RBBM_PERFCTR_CP_4_HI 0x000003a9 + +#define REG_A5XX_RBBM_PERFCTR_CP_5_LO 0x000003aa + +#define REG_A5XX_RBBM_PERFCTR_CP_5_HI 0x000003ab + +#define REG_A5XX_RBBM_PERFCTR_CP_6_LO 0x000003ac + +#define REG_A5XX_RBBM_PERFCTR_CP_6_HI 0x000003ad + +#define REG_A5XX_RBBM_PERFCTR_CP_7_LO 0x000003ae + +#define REG_A5XX_RBBM_PERFCTR_CP_7_HI 0x000003af + +#define REG_A5XX_RBBM_PERFCTR_RBBM_0_LO 0x000003b0 + +#define REG_A5XX_RBBM_PERFCTR_RBBM_0_HI 0x000003b1 + +#define REG_A5XX_RBBM_PERFCTR_RBBM_1_LO 0x000003b2 + +#define REG_A5XX_RBBM_PERFCTR_RBBM_1_HI 0x000003b3 + +#define REG_A5XX_RBBM_PERFCTR_RBBM_2_LO 0x000003b4 + +#define REG_A5XX_RBBM_PERFCTR_RBBM_2_HI 0x000003b5 + +#define REG_A5XX_RBBM_PERFCTR_RBBM_3_LO 0x000003b6 + +#define REG_A5XX_RBBM_PERFCTR_RBBM_3_HI 0x000003b7 + +#define REG_A5XX_RBBM_PERFCTR_PC_0_LO 0x000003b8 + +#define REG_A5XX_RBBM_PERFCTR_PC_0_HI 0x000003b9 + +#define REG_A5XX_RBBM_PERFCTR_PC_1_LO 
0x000003ba
+#define REG_A5XX_RBBM_PERFCTR_PC_1_HI 0x000003bb
+#define REG_A5XX_RBBM_PERFCTR_PC_2_LO 0x000003bc
+#define REG_A5XX_RBBM_PERFCTR_PC_2_HI 0x000003bd
+#define REG_A5XX_RBBM_PERFCTR_PC_3_LO 0x000003be
+#define REG_A5XX_RBBM_PERFCTR_PC_3_HI 0x000003bf
+#define REG_A5XX_RBBM_PERFCTR_PC_4_LO 0x000003c0
+#define REG_A5XX_RBBM_PERFCTR_PC_4_HI 0x000003c1
+#define REG_A5XX_RBBM_PERFCTR_PC_5_LO 0x000003c2
+#define REG_A5XX_RBBM_PERFCTR_PC_5_HI 0x000003c3
+#define REG_A5XX_RBBM_PERFCTR_PC_6_LO 0x000003c4
+#define REG_A5XX_RBBM_PERFCTR_PC_6_HI 0x000003c5
+#define REG_A5XX_RBBM_PERFCTR_PC_7_LO 0x000003c6
+#define REG_A5XX_RBBM_PERFCTR_PC_7_HI 0x000003c7
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_LO 0x000003c8
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_HI 0x000003c9
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_LO 0x000003ca
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_HI 0x000003cb
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_LO 0x000003cc
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_HI 0x000003cd
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_LO 0x000003ce
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_HI 0x000003cf
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_LO 0x000003d0
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_HI 0x000003d1
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_LO 0x000003d2
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_HI 0x000003d3
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_LO 0x000003d4
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_HI 0x000003d5
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_LO 0x000003d6
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_HI 0x000003d7
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO 0x000003d8
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI 0x000003d9
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO 0x000003da
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI 0x000003db
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO 0x000003dc
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI 0x000003dd
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO 0x000003de
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI 0x000003df
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO 0x000003e0
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI 0x000003e1
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO 0x000003e2
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI 0x000003e3
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO 0x000003e4
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI 0x000003e5
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO 0x000003e6
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI 0x000003e7
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_LO 0x000003e8
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_HI 0x000003e9
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_LO 0x000003ea
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_HI 0x000003eb
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_LO 0x000003ec
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_HI 0x000003ed
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_LO 0x000003ee
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_HI 0x000003ef
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_LO 0x000003f0
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_HI 0x000003f1
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_LO 0x000003f2
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_HI 0x000003f3
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_LO 0x000003f4
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_HI 0x000003f5
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_LO 0x000003f6
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_HI 0x000003f7
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_LO 0x000003f8
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_HI 0x000003f9
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_LO 0x000003fa
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_HI 0x000003fb
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_LO 0x000003fc
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_HI 0x000003fd
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_LO 0x000003fe
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_HI 0x000003ff
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_LO 0x00000400
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_HI 0x00000401
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_LO 0x00000402
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_HI 0x00000403
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_LO 0x00000404
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_HI 0x00000405
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_LO 0x00000406
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_HI 0x00000407
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_LO 0x00000408
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_HI 0x00000409
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_LO 0x0000040a
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_HI 0x0000040b
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_LO 0x0000040c
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_HI 0x0000040d
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_LO 0x0000040e
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_HI 0x0000040f
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_LO 0x00000410
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_HI 0x00000411
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_LO 0x00000412
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_HI 0x00000413
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_LO 0x00000414
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_HI 0x00000415
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_LO 0x00000416
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_HI 0x00000417
+#define REG_A5XX_RBBM_PERFCTR_TP_0_LO 0x00000418
+#define REG_A5XX_RBBM_PERFCTR_TP_0_HI 0x00000419
+#define REG_A5XX_RBBM_PERFCTR_TP_1_LO 0x0000041a
+#define REG_A5XX_RBBM_PERFCTR_TP_1_HI 0x0000041b
+#define REG_A5XX_RBBM_PERFCTR_TP_2_LO 0x0000041c
+#define REG_A5XX_RBBM_PERFCTR_TP_2_HI 0x0000041d
+#define REG_A5XX_RBBM_PERFCTR_TP_3_LO 0x0000041e
+#define REG_A5XX_RBBM_PERFCTR_TP_3_HI 0x0000041f
+#define REG_A5XX_RBBM_PERFCTR_TP_4_LO 0x00000420
+#define REG_A5XX_RBBM_PERFCTR_TP_4_HI 0x00000421
+#define REG_A5XX_RBBM_PERFCTR_TP_5_LO 0x00000422
+#define REG_A5XX_RBBM_PERFCTR_TP_5_HI 0x00000423
+#define REG_A5XX_RBBM_PERFCTR_TP_6_LO 0x00000424
+#define REG_A5XX_RBBM_PERFCTR_TP_6_HI 0x00000425
+#define REG_A5XX_RBBM_PERFCTR_TP_7_LO 0x00000426
+#define REG_A5XX_RBBM_PERFCTR_TP_7_HI 0x00000427
+#define REG_A5XX_RBBM_PERFCTR_SP_0_LO 0x00000428
+#define REG_A5XX_RBBM_PERFCTR_SP_0_HI 0x00000429
+#define REG_A5XX_RBBM_PERFCTR_SP_1_LO 0x0000042a
+#define REG_A5XX_RBBM_PERFCTR_SP_1_HI 0x0000042b
+#define REG_A5XX_RBBM_PERFCTR_SP_2_LO 0x0000042c
+#define REG_A5XX_RBBM_PERFCTR_SP_2_HI 0x0000042d
+#define REG_A5XX_RBBM_PERFCTR_SP_3_LO 0x0000042e
+#define REG_A5XX_RBBM_PERFCTR_SP_3_HI 0x0000042f
+#define REG_A5XX_RBBM_PERFCTR_SP_4_LO 0x00000430
+#define REG_A5XX_RBBM_PERFCTR_SP_4_HI 0x00000431
+#define REG_A5XX_RBBM_PERFCTR_SP_5_LO 0x00000432
+#define REG_A5XX_RBBM_PERFCTR_SP_5_HI 0x00000433
+#define REG_A5XX_RBBM_PERFCTR_SP_6_LO 0x00000434
+#define REG_A5XX_RBBM_PERFCTR_SP_6_HI 0x00000435
+#define REG_A5XX_RBBM_PERFCTR_SP_7_LO 0x00000436
+#define REG_A5XX_RBBM_PERFCTR_SP_7_HI 0x00000437
+#define REG_A5XX_RBBM_PERFCTR_SP_8_LO 0x00000438
+#define REG_A5XX_RBBM_PERFCTR_SP_8_HI 0x00000439
+#define REG_A5XX_RBBM_PERFCTR_SP_9_LO 0x0000043a
+#define REG_A5XX_RBBM_PERFCTR_SP_9_HI 0x0000043b
+#define REG_A5XX_RBBM_PERFCTR_SP_10_LO 0x0000043c
+#define REG_A5XX_RBBM_PERFCTR_SP_10_HI 0x0000043d
+#define REG_A5XX_RBBM_PERFCTR_SP_11_LO 0x0000043e
+#define REG_A5XX_RBBM_PERFCTR_SP_11_HI 0x0000043f
+#define REG_A5XX_RBBM_PERFCTR_RB_0_LO 0x00000440
+#define REG_A5XX_RBBM_PERFCTR_RB_0_HI 0x00000441
+#define REG_A5XX_RBBM_PERFCTR_RB_1_LO 0x00000442
+#define REG_A5XX_RBBM_PERFCTR_RB_1_HI 0x00000443
+#define REG_A5XX_RBBM_PERFCTR_RB_2_LO 0x00000444
+#define REG_A5XX_RBBM_PERFCTR_RB_2_HI 0x00000445
+#define REG_A5XX_RBBM_PERFCTR_RB_3_LO 0x00000446
+#define REG_A5XX_RBBM_PERFCTR_RB_3_HI 0x00000447
+#define REG_A5XX_RBBM_PERFCTR_RB_4_LO 0x00000448
+#define REG_A5XX_RBBM_PERFCTR_RB_4_HI 0x00000449
+#define REG_A5XX_RBBM_PERFCTR_RB_5_LO 0x0000044a
+#define REG_A5XX_RBBM_PERFCTR_RB_5_HI 0x0000044b
+#define REG_A5XX_RBBM_PERFCTR_RB_6_LO 0x0000044c
+#define REG_A5XX_RBBM_PERFCTR_RB_6_HI 0x0000044d
+#define REG_A5XX_RBBM_PERFCTR_RB_7_LO 0x0000044e
+#define REG_A5XX_RBBM_PERFCTR_RB_7_HI 0x0000044f
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_LO 0x00000450
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_HI 0x00000451
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_LO 0x00000452
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_HI 0x00000453
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_LO 0x00000454
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_HI 0x00000455
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_LO 0x00000456
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_HI 0x00000457
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_LO 0x00000458
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_HI 0x00000459
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_LO 0x0000045a
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_HI 0x0000045b
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_LO 0x0000045c
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_HI 0x0000045d
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_LO 0x0000045e
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_HI 0x0000045f
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_LO 0x00000460
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_HI 0x00000461
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_LO 0x00000462
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_HI 0x00000463
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_LO 0x000004d2
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI 0x000004d3
+#define REG_A5XX_RBBM_STATUS 0x000004f5
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x80000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x40000000
+#define A5XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A5XX_RBBM_STATUS_VSC_BUSY 0x10000000
+#define A5XX_RBBM_STATUS_TPL1_BUSY 0x08000000
+#define A5XX_RBBM_STATUS_SP_BUSY 0x04000000
+#define A5XX_RBBM_STATUS_UCHE_BUSY 0x02000000
+#define A5XX_RBBM_STATUS_VPC_BUSY 0x01000000
+#define A5XX_RBBM_STATUS_VFDP_BUSY 0x00800000
+#define A5XX_RBBM_STATUS_VFD_BUSY 0x00400000
+#define A5XX_RBBM_STATUS_TESS_BUSY 0x00200000
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY 0x00040000
+#define A5XX_RBBM_STATUS_DCOM_BUSY 0x00020000
+#define A5XX_RBBM_STATUS_COM_BUSY 0x00010000
+#define A5XX_RBBM_STATUS_LRZ_BUZY 0x00008000
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY 0x00004000
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY 0x00002000
+#define A5XX_RBBM_STATUS_RB_BUSY 0x00001000
+#define A5XX_RBBM_STATUS_RAS_BUSY 0x00000800
+#define A5XX_RBBM_STATUS_TSE_BUSY 0x00000400
+#define A5XX_RBBM_STATUS_VBIF_BUSY 0x00000200
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST 0x00000100
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST 0x00000080
+#define A5XX_RBBM_STATUS_CP_BUSY 0x00000040
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY 0x00000020
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY 0x00000010
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY 0x00000008
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A5XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A5XX_RBBM_STATUS_HI_BUSY 0x00000001
+#define REG_A5XX_RBBM_STATUS3 0x00000530
+#define REG_A5XX_RBBM_INT_0_STATUS 0x000004e1
+#define REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS 0x000004f0
+#define REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS 0x000004f1
+#define REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS 0x000004f3
+#define REG_A5XX_RBBM_AHB_ERROR_STATUS 0x000004f4
+#define REG_A5XX_RBBM_PERFCTR_CNTL 0x00000464
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD0 0x00000465
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD1 0x00000466
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD2 0x00000467
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD3 0x00000468
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000469
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x0000046a
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+#define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000046f
+#define REG_A5XX_RBBM_AHB_ERROR 0x000004ed
+#define REG_A5XX_RBBM_CFG_DBGBUS_EVENT_LOGIC 0x00000504
+#define REG_A5XX_RBBM_CFG_DBGBUS_OVER 0x00000505
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT0 0x00000506
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT1 0x00000507
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT2 0x00000508
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT3 0x00000509
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT4 0x0000050a
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT5 0x0000050b
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_ADDR 0x0000050c
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF0 0x0000050d
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1 0x0000050e
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2 0x0000050f
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF3 0x00000510
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF4 0x00000511
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR0 0x00000512
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR1 0x00000513
+#define REG_A5XX_RBBM_ISDB_CNT 0x00000533
+#define REG_A5XX_RBBM_SECVID_TRUST_CONFIG 0x0000f000
+#define REG_A5XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+#define REG_A5XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_LO 0x0000f804
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_HI 0x0000f805
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_LO 0x0000f806
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_HI 0x0000f807
+#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+#define REG_A5XX_VSC_PIPE_DATA_LENGTH_0 0x00000c00
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0 0x00000c60
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1 0x00000c61
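/*
 * Usage sketch (illustrative, not part of the patch): the 64-bit RBBM
 * counters above are split across _LO/_HI register pairs. Assuming the
 * msm driver's gpu_read() helper, a wrap-safe 64-bit sample reads HI,
 * then LO, then re-reads HI to detect a carry between the two reads:
 */
static u64 a5xx_read_alwayson(struct msm_gpu *gpu) /* hypothetical helper */
{
	u32 hi, lo;

	do {
		hi = gpu_read(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
		lo = gpu_read(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO);
	} while (hi != gpu_read(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_HI));

	return ((u64)hi << 32) | lo;
}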
+#define REG_A5XX_VSC_BIN_SIZE 0x00000cdd
+#define A5XX_VSC_BIN_SIZE_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_VSC_BIN_SIZE_X__MASK 0x00007fff
+#define A5XX_VSC_BIN_SIZE_X__SHIFT 0
+static inline uint32_t A5XX_VSC_BIN_SIZE_X(uint32_t val)
+{
+	return ((val) << A5XX_VSC_BIN_SIZE_X__SHIFT) & A5XX_VSC_BIN_SIZE_X__MASK;
+}
+#define A5XX_VSC_BIN_SIZE_Y__MASK 0x7fff0000
+#define A5XX_VSC_BIN_SIZE_Y__SHIFT 16
+static inline uint32_t A5XX_VSC_BIN_SIZE_Y(uint32_t val)
+{
+	return ((val) << A5XX_VSC_BIN_SIZE_Y__SHIFT) & A5XX_VSC_BIN_SIZE_Y__MASK;
+}
+#define REG_A5XX_GRAS_ADDR_MODE_CNTL 0x00000c81
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c90
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c91
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c92
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c93
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c94
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c95
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c96
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c97
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0 0x00000c98
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1 0x00000c99
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2 0x00000c9a
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3 0x00000c9b
+#define REG_A5XX_RB_DBG_ECO_CNTL 0x00000cc4
+#define REG_A5XX_RB_ADDR_MODE_CNTL 0x00000cc5
+#define REG_A5XX_RB_MODE_CNTL 0x00000cc6
+#define REG_A5XX_RB_CCU_CNTL 0x00000cc7
+#define REG_A5XX_RB_PERFCTR_RB_SEL_0 0x00000cd0
+#define REG_A5XX_RB_PERFCTR_RB_SEL_1 0x00000cd1
+#define REG_A5XX_RB_PERFCTR_RB_SEL_2 0x00000cd2
+#define REG_A5XX_RB_PERFCTR_RB_SEL_3 0x00000cd3
+#define REG_A5XX_RB_PERFCTR_RB_SEL_4 0x00000cd4
+#define REG_A5XX_RB_PERFCTR_RB_SEL_5 0x00000cd5
+#define REG_A5XX_RB_PERFCTR_RB_SEL_6 0x00000cd6
+#define REG_A5XX_RB_PERFCTR_RB_SEL_7 0x00000cd7
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_0 0x00000cd8
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_1 0x00000cd9
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_2 0x00000cda
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_3 0x00000cdb
+#define REG_A5XX_RB_POWERCTR_RB_SEL_0 0x00000ce0
+#define REG_A5XX_RB_POWERCTR_RB_SEL_1 0x00000ce1
+#define REG_A5XX_RB_POWERCTR_RB_SEL_2 0x00000ce2
+#define REG_A5XX_RB_POWERCTR_RB_SEL_3 0x00000ce3
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_0 0x00000ce4
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_1 0x00000ce5
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_0 0x00000cec
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_1 0x00000ced
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_2 0x00000cee
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_3 0x00000cef
+#define REG_A5XX_PC_DBG_ECO_CNTL 0x00000d00
+#define A5XX_PC_DBG_ECO_CNTL_TWOPASSUSEWFI 0x00000100
+#define REG_A5XX_PC_ADDR_MODE_CNTL 0x00000d01
+#define REG_A5XX_PC_MODE_CNTL 0x00000d02
+#define REG_A5XX_UNKNOWN_0D08 0x00000d08
+#define REG_A5XX_UNKNOWN_0D09 0x00000d09
+#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+#define REG_A5XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+#define REG_A5XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+#define REG_A5XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+#define REG_A5XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+#define REG_A5XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+#define REG_A5XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+#define REG_A5XX_PC_PERFCTR_PC_SEL_7 0x00000d17
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0 0x00000e00
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01
+#define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05
+#define REG_A5XX_HLSQ_MODE_CNTL 0x00000e06
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e10
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e11
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e12
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e13
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e14
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e15
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e16
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e17
+#define REG_A5XX_HLSQ_SPTP_RDSEL 0x00000f08
+#define REG_A5XX_HLSQ_DBG_READ_SEL 0x0000bc00
+#define REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000a000
+#define REG_A5XX_VFD_ADDR_MODE_CNTL 0x00000e41
+#define REG_A5XX_VFD_MODE_CNTL 0x00000e42
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_0 0x00000e50
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_1 0x00000e51
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_2 0x00000e52
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_3 0x00000e53
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_4 0x00000e54
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_5 0x00000e55
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_6 0x00000e56
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_7 0x00000e57
+#define REG_A5XX_VPC_DBG_ECO_CNTL 0x00000e60
+#define REG_A5XX_VPC_ADDR_MODE_CNTL 0x00000e61
+#define REG_A5XX_VPC_MODE_CNTL 0x00000e62
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0 0x00000e64
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_1 0x00000e65
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_2 0x00000e66
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_3 0x00000e67
+#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80
+#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_HI 0x00000e88
+#define REG_A5XX_UCHE_TRAP_BASE_LO 0x00000e89
+#define REG_A5XX_UCHE_TRAP_BASE_HI 0x00000e8a
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e8b
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e8c
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e8d
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e8e
+#define REG_A5XX_UCHE_DBG_ECO_CNTL_2 0x00000e8f
+#define REG_A5XX_UCHE_DBG_ECO_CNTL 0x00000e90
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO 0x00000e91
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_HI 0x00000e92
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_LO 0x00000e93
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_HI 0x00000e94
+#define REG_A5XX_UCHE_CACHE_INVALIDATE 0x00000e95
+#define REG_A5XX_UCHE_CACHE_WAYS 0x00000e96
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000ea0
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000ea1
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000ea2
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000ea3
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000ea4
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000ea5
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000ea6
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000ea7
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0 0x00000ea8
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1 0x00000ea9
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2 0x00000eaa
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3 0x00000eab
+#define REG_A5XX_UCHE_TRAP_LOG_LO 0x00000eb1
+#define REG_A5XX_UCHE_TRAP_LOG_HI 0x00000eb2
+#define REG_A5XX_SP_DBG_ECO_CNTL 0x00000ec0
+#define REG_A5XX_SP_ADDR_MODE_CNTL 0x00000ec1
+#define REG_A5XX_SP_MODE_CNTL 0x00000ec2
+#define REG_A5XX_SP_PERFCTR_SP_SEL_0 0x00000ed0
+#define REG_A5XX_SP_PERFCTR_SP_SEL_1 0x00000ed1
+#define REG_A5XX_SP_PERFCTR_SP_SEL_2 0x00000ed2
+#define REG_A5XX_SP_PERFCTR_SP_SEL_3 0x00000ed3
+#define REG_A5XX_SP_PERFCTR_SP_SEL_4 0x00000ed4
+#define REG_A5XX_SP_PERFCTR_SP_SEL_5 0x00000ed5
+#define REG_A5XX_SP_PERFCTR_SP_SEL_6 0x00000ed6
+#define REG_A5XX_SP_PERFCTR_SP_SEL_7 0x00000ed7
+#define REG_A5XX_SP_PERFCTR_SP_SEL_8 0x00000ed8
+#define REG_A5XX_SP_PERFCTR_SP_SEL_9 0x00000ed9
+#define REG_A5XX_SP_PERFCTR_SP_SEL_10 0x00000eda
+#define REG_A5XX_SP_PERFCTR_SP_SEL_11 0x00000edb
+#define REG_A5XX_SP_POWERCTR_SP_SEL_0 0x00000edc
+#define REG_A5XX_SP_POWERCTR_SP_SEL_1 0x00000edd
+#define REG_A5XX_SP_POWERCTR_SP_SEL_2 0x00000ede
+#define REG_A5XX_SP_POWERCTR_SP_SEL_3 0x00000edf
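/*
 * Usage sketch (illustrative, not part of the patch): 64-bit addresses
 * are programmed as consecutive _LO/_HI writes. Assuming the msm
 * driver's gpu_write() helper and the kernel's lower/upper_32_bits():
 */
static void a5xx_set_gmem_range(struct msm_gpu *gpu, u64 base, u32 size) /* hypothetical */
{
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, lower_32_bits(base));
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, upper_32_bits(base));
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO, lower_32_bits(base + size - 1));
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, upper_32_bits(base + size - 1));
}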
+#define REG_A5XX_TPL1_ADDR_MODE_CNTL 0x00000f01
+#define REG_A5XX_TPL1_MODE_CNTL 0x00000f02
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_0 0x00000f10
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_1 0x00000f11
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_2 0x00000f12
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_3 0x00000f13
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_4 0x00000f14
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_5 0x00000f15
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_6 0x00000f16
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_7 0x00000f17
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_0 0x00000f18
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_1 0x00000f19
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_2 0x00000f1a
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_3 0x00000f1b
+#define REG_A5XX_VBIF_VERSION 0x00003000
+#define REG_A5XX_VBIF_CLKON 0x00003001
+#define REG_A5XX_VBIF_ABIT_SORT 0x00003028
+#define REG_A5XX_VBIF_ABIT_SORT_CONF 0x00003029
+#define REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+#define REG_A5XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+#define REG_A5XX_VBIF_XIN_HALT_CTRL0 0x00003080
+#define REG_A5XX_VBIF_XIN_HALT_CTRL1 0x00003081
+#define REG_A5XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+#define REG_A5XX_VBIF_TEST_BUS_OUT 0x0000308c
+#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
+#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
+#define REG_A5XX_VBIF_PERF_CNT_SEL2 0x000030d2
+#define REG_A5XX_VBIF_PERF_CNT_SEL3 0x000030d3
+#define REG_A5XX_VBIF_PERF_CNT_LOW0 0x000030d8
+#define REG_A5XX_VBIF_PERF_CNT_LOW1 0x000030d9
+#define REG_A5XX_VBIF_PERF_CNT_LOW2 0x000030da
+#define REG_A5XX_VBIF_PERF_CNT_LOW3 0x000030db
+#define REG_A5XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+#define REG_A5XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+#define REG_A5XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+#define REG_A5XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+#define REG_A5XX_GPMU_INST_RAM_BASE 0x00008800
+#define REG_A5XX_GPMU_DATA_RAM_BASE 0x00009800
+#define REG_A5XX_GPMU_SP_POWER_CNTL 0x0000a881
+#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL 0x0000a886
+#define REG_A5XX_GPMU_RBCCU_POWER_CNTL 0x0000a887
+#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS 0x0000a88b
+#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON 0x00100000
+#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS 0x0000a88d
+#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON 0x00100000
+#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY 0x0000a891
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL 0x0000a892
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST 0x0000a893
+#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL 0x0000a894
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+#define REG_A5XX_GPMU_WFI_CONFIG 0x0000a8c1
+#define REG_A5XX_GPMU_RBBM_INTR_INFO 0x0000a8d6
+#define REG_A5XX_GPMU_CM3_SYSRESET 0x0000a8d8
+#define REG_A5XX_GPMU_GENERAL_0 0x0000a8e0
+#define REG_A5XX_GPMU_GENERAL_1 0x0000a8e1
+#define REG_A5XX_SP_POWER_COUNTER_0_LO 0x0000a840
+#define REG_A5XX_SP_POWER_COUNTER_0_HI 0x0000a841
+#define REG_A5XX_SP_POWER_COUNTER_1_LO 0x0000a842
+#define REG_A5XX_SP_POWER_COUNTER_1_HI 0x0000a843
+#define REG_A5XX_SP_POWER_COUNTER_2_LO 0x0000a844
+#define REG_A5XX_SP_POWER_COUNTER_2_HI 0x0000a845
+#define REG_A5XX_SP_POWER_COUNTER_3_LO 0x0000a846
+#define REG_A5XX_SP_POWER_COUNTER_3_HI 0x0000a847
+#define REG_A5XX_TP_POWER_COUNTER_0_LO 0x0000a848
+#define REG_A5XX_TP_POWER_COUNTER_0_HI 0x0000a849
+#define REG_A5XX_TP_POWER_COUNTER_1_LO 0x0000a84a
+#define REG_A5XX_TP_POWER_COUNTER_1_HI 0x0000a84b
+#define REG_A5XX_TP_POWER_COUNTER_2_LO 0x0000a84c
+#define REG_A5XX_TP_POWER_COUNTER_2_HI 0x0000a84d
+#define REG_A5XX_TP_POWER_COUNTER_3_LO 0x0000a84e
+#define REG_A5XX_TP_POWER_COUNTER_3_HI 0x0000a84f
+#define REG_A5XX_RB_POWER_COUNTER_0_LO 0x0000a850
+#define REG_A5XX_RB_POWER_COUNTER_0_HI 0x0000a851
+#define REG_A5XX_RB_POWER_COUNTER_1_LO 0x0000a852
+#define REG_A5XX_RB_POWER_COUNTER_1_HI 0x0000a853
+#define REG_A5XX_RB_POWER_COUNTER_2_LO 0x0000a854
+#define REG_A5XX_RB_POWER_COUNTER_2_HI 0x0000a855
+#define REG_A5XX_RB_POWER_COUNTER_3_LO 0x0000a856
+#define REG_A5XX_RB_POWER_COUNTER_3_HI 0x0000a857
+#define REG_A5XX_CCU_POWER_COUNTER_0_LO 0x0000a858
+#define REG_A5XX_CCU_POWER_COUNTER_0_HI 0x0000a859
+#define REG_A5XX_CCU_POWER_COUNTER_1_LO 0x0000a85a
+#define REG_A5XX_CCU_POWER_COUNTER_1_HI 0x0000a85b
+#define REG_A5XX_UCHE_POWER_COUNTER_0_LO 0x0000a85c
+#define REG_A5XX_UCHE_POWER_COUNTER_0_HI 0x0000a85d
+#define REG_A5XX_UCHE_POWER_COUNTER_1_LO 0x0000a85e
+#define REG_A5XX_UCHE_POWER_COUNTER_1_HI 0x0000a85f
+#define REG_A5XX_UCHE_POWER_COUNTER_2_LO 0x0000a860
+#define REG_A5XX_UCHE_POWER_COUNTER_2_HI 0x0000a861
+#define REG_A5XX_UCHE_POWER_COUNTER_3_LO 0x0000a862
+#define REG_A5XX_UCHE_POWER_COUNTER_3_HI 0x0000a863
+#define REG_A5XX_CP_POWER_COUNTER_0_LO 0x0000a864
+#define REG_A5XX_CP_POWER_COUNTER_0_HI 0x0000a865
+#define REG_A5XX_CP_POWER_COUNTER_1_LO 0x0000a866
+#define REG_A5XX_CP_POWER_COUNTER_1_HI 0x0000a867
+#define REG_A5XX_CP_POWER_COUNTER_2_LO 0x0000a868
+#define REG_A5XX_CP_POWER_COUNTER_2_HI 0x0000a869
+#define REG_A5XX_CP_POWER_COUNTER_3_LO 0x0000a86a
+#define REG_A5XX_CP_POWER_COUNTER_3_HI 0x0000a86b
+#define REG_A5XX_GPMU_POWER_COUNTER_0_LO 0x0000a86c
+#define REG_A5XX_GPMU_POWER_COUNTER_0_HI 0x0000a86d
+#define REG_A5XX_GPMU_POWER_COUNTER_1_LO 0x0000a86e
+#define REG_A5XX_GPMU_POWER_COUNTER_1_HI 0x0000a86f
+#define REG_A5XX_GPMU_POWER_COUNTER_2_LO 0x0000a870
+#define REG_A5XX_GPMU_POWER_COUNTER_2_HI 0x0000a871
+#define REG_A5XX_GPMU_POWER_COUNTER_3_LO 0x0000a872
+#define REG_A5XX_GPMU_POWER_COUNTER_3_HI 0x0000a873
+#define REG_A5XX_GPMU_POWER_COUNTER_4_LO 0x0000a874
+#define REG_A5XX_GPMU_POWER_COUNTER_4_HI 0x0000a875
+#define REG_A5XX_GPMU_POWER_COUNTER_5_LO 0x0000a876
+#define REG_A5XX_GPMU_POWER_COUNTER_5_HI 0x0000a877
+#define REG_A5XX_GPMU_POWER_COUNTER_ENABLE 0x0000a878
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO 0x0000a879
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI 0x0000a87a
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET 0x0000a87b
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_0 0x0000a87c
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_1 0x0000a87d
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+#define REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL 0x0000a8a8
+#define REG_A5XX_GPMU_TEMP_SENSOR_ID 0x0000ac00
+#define REG_A5XX_GPMU_TEMP_SENSOR_CONFIG 0x0000ac01
+#define REG_A5XX_GPMU_TEMP_VAL 0x0000ac02
+#define REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD 0x0000ac03
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_STATUS 0x0000ac05
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK 0x0000ac06
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_0_1 0x0000ac40
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_2_3 0x0000ac41
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_0_1 0x0000ac42
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_2_3 0x0000ac43
+#define REG_A5XX_GPMU_BASE_LEAKAGE 0x0000ac46
+#define REG_A5XX_GPMU_GPMU_VOLTAGE 0x0000ac60
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_STATUS 0x0000ac61
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK 0x0000ac62
+#define REG_A5XX_GPMU_GPMU_PWR_THRESHOLD 0x0000ac80
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL 0x0000acc4
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS 0x0000acc5
+#define REG_A5XX_GDPM_CONFIG1 0x0000b80c
+#define REG_A5XX_GDPM_CONFIG2 0x0000b80d
+#define REG_A5XX_GDPM_INT_EN 0x0000b80f
+#define REG_A5XX_GDPM_INT_MASK 0x0000b811
+#define REG_A5XX_GPMU_BEC_ENABLE 0x0000b9a0
+#define REG_A5XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000c41a
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000c41d
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000c41f
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x0000c421
+#define REG_A5XX_GPU_CS_ENABLE_REG 0x0000c520
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557
+#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
+#define REG_A5XX_UNKNOWN_E001 0x0000e001
+#define REG_A5XX_UNKNOWN_E004 0x0000e004
+#define REG_A5XX_GRAS_CNTL 0x0000e005
+#define A5XX_GRAS_CNTL_VARYING 0x00000001
+#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x0000e006
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+#define REG_A5XX_GRAS_CL_VPORT_XOFFSET_0 0x0000e010
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+#define REG_A5XX_GRAS_CL_VPORT_XSCALE_0 0x0000e011
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+#define REG_A5XX_GRAS_CL_VPORT_YOFFSET_0 0x0000e012
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+#define REG_A5XX_GRAS_CL_VPORT_YSCALE_0 0x0000e013
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+#define REG_A5XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000e014
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+#define REG_A5XX_GRAS_CL_VPORT_ZSCALE_0 0x0000e015
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+#define REG_A5XX_GRAS_SU_CNTL 0x0000e090
+#define A5XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
+#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A5XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
+{
+	return ((((int32_t)(val * 4.0))) << A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+}
+#define A5XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+#define A5XX_GRAS_SU_CNTL_MSAA_ENABLE 0x00002000
+#define REG_A5XX_GRAS_SU_POINT_MINMAX 0x0000e091
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+	return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+	return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+#define REG_A5XX_GRAS_SU_POINT_SIZE 0x0000e092
+#define A5XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
+{
+	return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
+}
+#define REG_A5XX_UNKNOWN_E093 0x0000e093
+#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
+#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_ALPHA_TEST_ENABLE 0x00000001
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000e095
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000e096
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
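/*
 * Usage sketch (illustrative, not part of the patch): float-valued
 * fields are packed either by bit-casting with fui() (full 32-bit
 * registers such as the viewport scale/offset above) or by fixed-point
 * scaling (LINEHALFWIDTH stores the line half-width in 1/4-pixel
 * units). Composing values from the helpers above:
 */
u32 su_cntl = A5XX_GRAS_SU_CNTL_LINEHALFWIDTH(0.5f) |	/* 0.5px half-width */
	      A5XX_GRAS_SU_CNTL_MSAA_ENABLE;
u32 xscale = A5XX_GRAS_CL_VPORT_XSCALE_0(960.0f);	/* fui(960.0) bit pattern */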
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x0000e097
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+#define REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO 0x0000e098
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+	return ((val) << A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x0000e099
+#define REG_A5XX_GRAS_SC_CNTL 0x0000e0a0
+#define A5XX_GRAS_SC_CNTL_SAMPLES_PASSED 0x00008000
+#define REG_A5XX_GRAS_SC_BIN_CNTL 0x0000e0a1
+#define REG_A5XX_GRAS_SC_RAS_MSAA_CNTL 0x0000e0a2
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+#define REG_A5XX_GRAS_SC_DEST_MSAA_CNTL 0x0000e0a3
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL 0x0000e0a4
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x0000e0aa
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x0000e0ab
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x0000e0ca
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x0000e0cb
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000e0ea
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000e0eb
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+#define REG_A5XX_GRAS_LRZ_CNTL 0x0000e100
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO 0x0000e101
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI 0x0000e102
+#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH 0x0000e103
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x0000e104
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x0000e105
+#define REG_A5XX_RB_CNTL 0x0000e140
+#define A5XX_RB_CNTL_WIDTH__MASK 0x000000ff
+#define A5XX_RB_CNTL_WIDTH__SHIFT 0
+static inline uint32_t A5XX_RB_CNTL_WIDTH(uint32_t val)
+{
+	return ((val >> 5) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
+}
+#define A5XX_RB_CNTL_HEIGHT__MASK 0x0001fe00
+#define A5XX_RB_CNTL_HEIGHT__SHIFT 9
+static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
+{
+	return ((val >> 5) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
+}
+#define A5XX_RB_CNTL_BYPASS 0x00020000
+#define REG_A5XX_RB_RENDER_CNTL 0x0000e141
+#define A5XX_RB_RENDER_CNTL_SAMPLES_PASSED 0x00000040
+#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000
+#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH2 0x00008000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK;
+}
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK 0xff000000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS2(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK;
+}
+#define REG_A5XX_RB_RAS_MSAA_CNTL 0x0000e142
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+#define REG_A5XX_RB_DEST_MSAA_CNTL 0x0000e143
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+#define REG_A5XX_RB_RENDER_CONTROL0 0x0000e144
+#define A5XX_RB_RENDER_CONTROL0_VARYING 0x00000001
+#define A5XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
+#define A5XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
+#define A5XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
+#define A5XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
+#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+#define REG_A5XX_RB_FS_OUTPUT_CNTL 0x0000e146
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_RB_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+	return ((val) << A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z 0x00000020
+#define REG_A5XX_RB_RENDER_COMPONENTS 0x0000e147
+#define A5XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT7__MASK;
+}
+static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+	return ((val) << A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+static inline uint32_t REG_A5XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x0000e151 + 0x7*i0; }
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
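/*
 * Usage sketch (illustrative, not part of the patch): the
 * REG_A5XX_RB_MRT_*(i0) helpers above return per-render-target register
 * offsets (a stride of 0x7 registers per MRT), so MRT state can be
 * programmed in a loop. gpu_write() from the msm driver is assumed:
 */
static void a5xx_enable_mrt_writes(struct msm_gpu *gpu, unsigned nr_mrts) /* hypothetical */
{
	unsigned i;

	for (i = 0; i < nr_mrts; i++)
		gpu_write(gpu, REG_A5XX_RB_MRT_CONTROL(i),
			  A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(0xf)); /* RGBA */
}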
+static inline uint32_t REG_A5XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x0000e152 + 0x7*i0; }
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+	return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode val)
+{
+	return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+	return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
+static inline uint32_t REG_A5XX_RB_MRT_PITCH(uint32_t i0) { return 0x0000e153 + 0x7*i0; }
+#define A5XX_RB_MRT_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_MRT_PITCH__SHIFT) & A5XX_RB_MRT_PITCH__MASK;
+}
+static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e154 + 0x7*i0; }
+#define A5XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_ARRAY_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_MRT_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH__MASK;
+}
+static inline uint32_t REG_A5XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x0000e155 + 0x7*i0; }
+static inline uint32_t REG_A5XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x0000e156 + 0x7*i0; }
+#define REG_A5XX_RB_BLEND_RED 0x0000e1a0
+#define A5XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_RED_UINT__SHIFT) & A5XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_RED_SINT__SHIFT) & A5XX_RB_BLEND_RED_SINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_RED_FLOAT(float val)
+{
+	return ((util_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK;
+}
+#define REG_A5XX_RB_BLEND_RED_F32 0x0000e1a1
+#define A5XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_F32(float val)
+{
+	return ((fui(val)) << A5XX_RB_BLEND_RED_F32__SHIFT) & A5XX_RB_BLEND_RED_F32__MASK;
+}
+#define REG_A5XX_RB_BLEND_GREEN 0x0000e1a2
+#define A5XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_GREEN_UINT__SHIFT) & A5XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_GREEN_SINT__SHIFT) & A5XX_RB_BLEND_GREEN_SINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+	return ((util_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+#define REG_A5XX_RB_BLEND_GREEN_F32 0x0000e1a3
+#define A5XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_F32(float val)
+{
+	return ((fui(val)) << A5XX_RB_BLEND_GREEN_F32__SHIFT) & A5XX_RB_BLEND_GREEN_F32__MASK;
+}
+#define REG_A5XX_RB_BLEND_BLUE 0x0000e1a4
+#define A5XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_BLUE_UINT__SHIFT) & A5XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_BLUE_SINT__SHIFT) & A5XX_RB_BLEND_BLUE_SINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+	return ((util_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+#define REG_A5XX_RB_BLEND_BLUE_F32 0x0000e1a5
+#define A5XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_F32(float val)
+{
+	return ((fui(val)) << A5XX_RB_BLEND_BLUE_F32__SHIFT) & A5XX_RB_BLEND_BLUE_F32__MASK;
+}
+#define REG_A5XX_RB_BLEND_ALPHA 0x0000e1a6
+#define A5XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_ALPHA_UINT__SHIFT) & A5XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_ALPHA_SINT__SHIFT) & A5XX_RB_BLEND_ALPHA_SINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+	return ((util_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+#define REG_A5XX_RB_BLEND_ALPHA_F32 0x0000e1a7
+#define A5XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_F32(float val)
+{
+	return ((fui(val)) << A5XX_RB_BLEND_ALPHA_F32__SHIFT) & A5XX_RB_BLEND_ALPHA_F32__MASK;
+}
+#define REG_A5XX_RB_ALPHA_CONTROL 0x0000e1a8
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+	return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+	return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+#define REG_A5XX_RB_BLEND_CNTL 0x0000e1a9
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+#define REG_A5XX_RB_DEPTH_PLANE_CNTL 0x0000e1b0
+#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+#define REG_A5XX_RB_DEPTH_CNTL 0x0000e1b1
+#define A5XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
+#define A5XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A5XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+	return ((val) << A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A5XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A5XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000040
+#define REG_A5XX_RB_DEPTH_BUFFER_INFO 0x0000e1b2
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+	return ((val) << A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_LO 0x0000e1b3
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_HI 0x0000e1b4
+#define REG_A5XX_RB_DEPTH_BUFFER_PITCH 0x0000e1b5
+#define A5XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+	return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x0000e1b6
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+	return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+#define REG_A5XX_RB_STENCIL_CONTROL 0x0000e1c0
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A5XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+#define REG_A5XX_RB_STENCIL_INFO 0x0000e1c1
+#define A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+#define REG_A5XX_RB_STENCIL_BASE_LO 0x0000e1c2
+#define REG_A5XX_RB_STENCIL_BASE_HI 0x0000e1c3
+#define REG_A5XX_RB_STENCIL_PITCH 0x0000e1c4
+#define A5XX_RB_STENCIL_PITCH__MASK 0xffffffff
+#define A5XX_RB_STENCIL_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_STENCIL_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_STENCIL_PITCH__SHIFT) & A5XX_RB_STENCIL_PITCH__MASK;
+}
+#define REG_A5XX_RB_STENCIL_ARRAY_PITCH 0x0000e1c5
+#define A5XX_RB_STENCIL_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_STENCIL_ARRAY_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT) & A5XX_RB_STENCIL_ARRAY_PITCH__MASK;
+}
+#define REG_A5XX_RB_STENCILREFMASK 0x0000e1c6
+#define A5XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+	return ((val) << A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+	return ((val) << A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+	return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+#define REG_A5XX_UNKNOWN_E1C7 0x0000e1c7
+#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0
+#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A5XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+	return ((val) << A5XX_RB_WINDOW_OFFSET_X__SHIFT) & A5XX_RB_WINDOW_OFFSET_X__MASK;
+}
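/*
 * Usage sketch (illustrative, not part of the patch): the full
 * front/back stencil state packs into the single RB_STENCIL_CONTROL
 * word. FUNC_ALWAYS/STENCIL_KEEP are assumed to come from the shared
 * adreno enums (adreno_compare_func/adreno_stencil_op):
 */
u32 stencil = A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
	      A5XX_RB_STENCIL_CONTROL_FUNC(FUNC_ALWAYS) |
	      A5XX_RB_STENCIL_CONTROL_FAIL(STENCIL_KEEP) |
	      A5XX_RB_STENCIL_CONTROL_ZPASS(STENCIL_KEEP) |
	      A5XX_RB_STENCIL_CONTROL_ZFAIL(STENCIL_KEEP);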
+#define A5XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A5XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+	return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+#define REG_A5XX_RB_BLIT_CNTL 0x0000e210
+#define A5XX_RB_BLIT_CNTL_BUF__MASK 0x0000003f
+#define A5XX_RB_BLIT_CNTL_BUF__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_CNTL_BUF(enum a5xx_blit_buf val)
+{
+	return ((val) << A5XX_RB_BLIT_CNTL_BUF__SHIFT) & A5XX_RB_BLIT_CNTL_BUF__MASK;
+}
+#define REG_A5XX_RB_RESOLVE_CNTL_1 0x0000e211
+#define A5XX_RB_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_1_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_X(uint32_t val)
+{
+	return ((val) << A5XX_RB_RESOLVE_CNTL_1_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+	return ((val) << A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_Y__MASK;
+}
+#define REG_A5XX_RB_RESOLVE_CNTL_2 0x0000e212
+#define A5XX_RB_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_2_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_X(uint32_t val)
+{
+	return ((val) << A5XX_RB_RESOLVE_CNTL_2_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+	return ((val) << A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_Y__MASK;
+}
+#define REG_A5XX_RB_RESOLVE_CNTL_3 0x0000e213
+#define REG_A5XX_RB_BLIT_DST_LO 0x0000e214
+#define REG_A5XX_RB_BLIT_DST_HI 0x0000e215
+#define REG_A5XX_RB_BLIT_DST_PITCH 0x0000e216
+#define A5XX_RB_BLIT_DST_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_DST_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_DST_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_BLIT_DST_PITCH__SHIFT) & A5XX_RB_BLIT_DST_PITCH__MASK;
+}
+#define REG_A5XX_RB_BLIT_DST_ARRAY_PITCH 0x0000e217
+#define A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
+}
+#define REG_A5XX_RB_CLEAR_COLOR_DW0 0x0000e218
+#define REG_A5XX_RB_CLEAR_COLOR_DW1 0x0000e219
+#define REG_A5XX_RB_CLEAR_COLOR_DW2 0x0000e21a
+#define REG_A5XX_RB_CLEAR_COLOR_DW3 0x0000e21b
+#define REG_A5XX_RB_CLEAR_CNTL 0x0000e21c
+#define A5XX_RB_CLEAR_CNTL_FAST_CLEAR 0x00000002
+#define A5XX_RB_CLEAR_CNTL_MASK__MASK 0x000000f0
+#define A5XX_RB_CLEAR_CNTL_MASK__SHIFT 4
+static inline uint32_t A5XX_RB_CLEAR_CNTL_MASK(uint32_t val)
+{
+	return ((val) << A5XX_RB_CLEAR_CNTL_MASK__SHIFT) & A5XX_RB_CLEAR_CNTL_MASK__MASK;
+}
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x0000e240
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x0000e241
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x0000e242
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_HI(uint32_t i0) { return 0x0000e244 + 0x4*i0; }
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x0000e245 + 0x4*i0; }
+#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK;
+}
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t i0) { return 0x0000e246 + 0x4*i0; }
+#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK;
+}
+#define REG_A5XX_RB_BLIT_FLAG_DST_LO 0x0000e263
+#define REG_A5XX_RB_BLIT_FLAG_DST_HI 0x0000e264
+#define REG_A5XX_RB_BLIT_FLAG_DST_PITCH 0x0000e265
+#define A5XX_RB_BLIT_FLAG_DST_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_FLAG_DST_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_PITCH__MASK;
+}
+#define REG_A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH 0x0000e266
+#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK;
+}
+#define REG_A5XX_VPC_CNTL_0 0x0000e280
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_VPC_CNTL_0_STRIDE_IN_VPC(uint32_t val)
+{
+	return ((val) << A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT) & A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK;
+}
+#define A5XX_VPC_CNTL_0_VARYING 0x00000800
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+#define REG_A5XX_UNKNOWN_E292 0x0000e292
+#define REG_A5XX_UNKNOWN_E293 0x0000e293
+static inline uint32_t REG_A5XX_VPC_VAR(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+#define REG_A5XX_VPC_GS_SIV_CNTL 0x0000e298
+#define REG_A5XX_UNKNOWN_E29A 0x0000e29a
+#define REG_A5XX_VPC_PACK 0x0000e29d
+#define A5XX_VPC_PACK_NUMNONPOSVAR__MASK 0x000000ff
+#define A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A5XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+	return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL 0x0000e2a0
+#define REG_A5XX_UNKNOWN_E2A1 0x0000e2a1
+#define REG_A5XX_VPC_SO_OVERRIDE 0x0000e2a2
+#define REG_A5XX_VPC_SO_BUFFER_BASE_LO_0 0x0000e2a7
+#define REG_A5XX_VPC_SO_BUFFER_BASE_HI_0 0x0000e2a8
+#define REG_A5XX_VPC_SO_BUFFER_SIZE_0 0x0000e2a9
+#define REG_A5XX_UNKNOWN_E2AB 0x0000e2ab
+#define REG_A5XX_VPC_SO_FLUSH_BASE_LO_0 0x0000e2ac
+#define REG_A5XX_VPC_SO_FLUSH_BASE_HI_0 0x0000e2ad
+#define REG_A5XX_UNKNOWN_E2AE 0x0000e2ae
+#define REG_A5XX_UNKNOWN_E2B2 0x0000e2b2
+#define REG_A5XX_UNKNOWN_E2B9 0x0000e2b9
+#define REG_A5XX_UNKNOWN_E2C0 0x0000e2c0
+#define REG_A5XX_PC_PRIMITIVE_CNTL 0x0000e384
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+	return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define REG_A5XX_PC_PRIM_VTX_CNTL 0x0000e385
+#define A5XX_PC_PRIM_VTX_CNTL_PSIZE 0x00000800
+#define REG_A5XX_PC_RASTER_CNTL 0x0000e388
+#define REG_A5XX_UNKNOWN_E389 0x0000e389
+#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c
+#define REG_A5XX_UNKNOWN_E38D 0x0000e38d
+#define REG_A5XX_PC_GS_PARAM 0x0000e38e
+#define REG_A5XX_PC_HS_PARAM 0x0000e38f
+#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0
+#define REG_A5XX_VFD_CONTROL_0 0x0000e400
+#define A5XX_VFD_CONTROL_0_VTXCNT__MASK 0x0000003f
+#define A5XX_VFD_CONTROL_0_VTXCNT__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_0_VTXCNT(uint32_t val)
+{
+	return ((val) << A5XX_VFD_CONTROL_0_VTXCNT__SHIFT) & A5XX_VFD_CONTROL_0_VTXCNT__MASK;
+}
+#define REG_A5XX_VFD_CONTROL_1 0x0000e401
+#define A5XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
+#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+	return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+	return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define REG_A5XX_VFD_CONTROL_2 0x0000e402
+#define REG_A5XX_VFD_CONTROL_3 0x0000e403
+#define REG_A5XX_VFD_CONTROL_4 0x0000e404
+#define REG_A5XX_VFD_CONTROL_5 0x0000e405
+#define REG_A5XX_VFD_INDEX_OFFSET 0x0000e408
+#define REG_A5XX_VFD_INSTANCE_START_OFFSET 0x0000e409
+static inline uint32_t REG_A5XX_VFD_FETCH(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000e40b + 0x4*i0; }
+static inline uint32_t REG_A5XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000e40c + 0x4*i0; }
+static inline uint32_t REG_A5XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000e40d + 0x4*i0; }
+static inline uint32_t REG_A5XX_VFD_DECODE(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+static inline uint32_t REG_A5XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+#define A5XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
+#define A5XX_VFD_DECODE_INSTR_IDX__SHIFT 0
+static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+	return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x3ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
+static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
+{
+	return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_SWAP__MASK 0xc0000000
+#define A5XX_VFD_DECODE_INSTR_SWAP__SHIFT 30
+static inline uint32_t A5XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+	return ((val) << A5XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A5XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t
i0) { return 0x0000e48b + 0x2*i0; } + +static inline uint32_t REG_A5XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000e4ca + 0x1*i0; } + +static inline uint32_t REG_A5XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000e4ca + 0x1*i0; } +#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f +#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0 +static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val) +{ + return ((val) << A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK; +} +#define A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0 +#define A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4 +static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val) +{ + return ((val) << A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK; +} + +#define REG_A5XX_VFD_POWER_CNTL 0x0000e4f0 + +#define REG_A5XX_SP_SP_CNTL 0x0000e580 + +#define REG_A5XX_SP_VS_CONTROL_REG 0x0000e584 +#define A5XX_SP_VS_CONTROL_REG_ENABLED 0x00000001 +#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_SP_FS_CONTROL_REG 0x0000e585 +#define A5XX_SP_FS_CONTROL_REG_ENABLED 0x00000001 +#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_SP_HS_CONTROL_REG 0x0000e586 +#define A5XX_SP_HS_CONTROL_REG_ENABLED 0x00000001 +#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_SP_DS_CONTROL_REG 0x0000e587 +#define A5XX_SP_DS_CONTROL_REG_ENABLED 0x00000001 +#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & 
A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_SP_GS_CONTROL_REG 0x0000e588 +#define A5XX_SP_GS_CONTROL_REG_ENABLED 0x00000001 +#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_SP_CS_CONFIG 0x0000e589 + +#define REG_A5XX_SP_VS_CONFIG_MAX_CONST 0x0000e58a + +#define REG_A5XX_SP_FS_CONFIG_MAX_CONST 0x0000e58b + +#define REG_A5XX_SP_VS_CTRL_REG0 0x0000e590 +#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 +#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 +static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) +{ + return ((val) << A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK; +} +#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 +#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 +static inline uint32_t A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) +{ + return ((val) << A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK; +} +#define A5XX_SP_VS_CTRL_REG0_VARYING 0x00010000 +#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00100000 + +#define REG_A5XX_SP_PRIMITIVE_CNTL 0x0000e592 +#define A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000001f +#define A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0 +static inline uint32_t A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val) +{ + return ((val >> 2) << A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_SP_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK; +} + +static inline uint32_t REG_A5XX_SP_VS_OUT(uint32_t i0) { return 0x0000e593 + 0x1*i0; } + +static inline uint32_t REG_A5XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000e593 + 0x1*i0; } +#define A5XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff +#define A5XX_SP_VS_OUT_REG_A_REGID__SHIFT 0 +static inline uint32_t A5XX_SP_VS_OUT_REG_A_REGID(uint32_t val) +{ + return ((val) << A5XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_A_REGID__MASK; +} +#define A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00 +#define A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8 +static inline uint32_t A5XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val) +{ + return ((val) << A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK; +} +#define A5XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000 +#define A5XX_SP_VS_OUT_REG_B_REGID__SHIFT 16 +static inline uint32_t A5XX_SP_VS_OUT_REG_B_REGID(uint32_t val) +{ + return ((val) << A5XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_B_REGID__MASK; +} +#define A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000 +#define A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24 +static inline uint32_t 
A5XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val) +{ + return ((val) << A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK; +} + +static inline uint32_t REG_A5XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; } + +static inline uint32_t REG_A5XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; } +#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff +#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0 +static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val) +{ + return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK; +} +#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00 +#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8 +static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val) +{ + return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK; +} +#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000 +#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16 +static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val) +{ + return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK; +} +#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000 +#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24 +static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val) +{ + return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK; +} + +#define REG_A5XX_UNKNOWN_E5AB 0x0000e5ab + +#define REG_A5XX_SP_VS_OBJ_START_LO 0x0000e5ac + +#define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad + +#define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0 +#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 +#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 +static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) +{ + return ((val) << A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK; +} +#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 +#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 +static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) +{ + return ((val) << A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK; +} +#define A5XX_SP_FS_CTRL_REG0_VARYING 0x00010000 +#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00100000 + +#define REG_A5XX_UNKNOWN_E5C2 0x0000e5c2 + +#define REG_A5XX_SP_FS_OBJ_START_LO 0x0000e5c3 + +#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4 + +#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9 + +#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca +#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f +#define A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT 0 +static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_MRT(uint32_t val) +{ + return ((val) << A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK; +} +#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK 0x00001fe0 +#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT 5 +static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(uint32_t val) +{ + return ((val) << A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK; +} +#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK 0x001fe000 +#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT 13 +static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(uint32_t val) +{ + return ((val) << A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK; +} + +static inline uint32_t 
REG_A5XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000e5cb + 0x1*i0; } + +static inline uint32_t REG_A5XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000e5cb + 0x1*i0; } +#define A5XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff +#define A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0 +static inline uint32_t A5XX_SP_FS_OUTPUT_REG_REGID(uint32_t val) +{ + return ((val) << A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_REG_REGID__MASK; +} +#define A5XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100 + +static inline uint32_t REG_A5XX_SP_FS_MRT(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; } + +static inline uint32_t REG_A5XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; } +#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff +#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0 +static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val) +{ + return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK; +} + +#define REG_A5XX_UNKNOWN_E5DB 0x0000e5db + +#define REG_A5XX_SP_CS_CNTL_0 0x0000e5f0 + +#define REG_A5XX_UNKNOWN_E600 0x0000e600 + +#define REG_A5XX_UNKNOWN_E640 0x0000e640 + +#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL 0x0000e704 +#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003 +#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0 +static inline uint32_t A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) +{ + return ((val) << A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK; +} + +#define REG_A5XX_TPL1_TP_DEST_MSAA_CNTL 0x0000e705 +#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003 +#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0 +static inline uint32_t A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) +{ + return ((val) << A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK; +} +#define A5XX_TPL1_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004 + +#define REG_A5XX_TPL1_VS_TEX_COUNT 0x0000e700 + +#define REG_A5XX_TPL1_VS_TEX_SAMP_LO 0x0000e722 + +#define REG_A5XX_TPL1_VS_TEX_SAMP_HI 0x0000e723 + +#define REG_A5XX_TPL1_VS_TEX_CONST_LO 0x0000e72a + +#define REG_A5XX_TPL1_VS_TEX_CONST_HI 0x0000e72b + +#define REG_A5XX_TPL1_FS_TEX_COUNT 0x0000e750 + +#define REG_A5XX_TPL1_FS_TEX_SAMP_LO 0x0000e75a + +#define REG_A5XX_TPL1_FS_TEX_SAMP_HI 0x0000e75b + +#define REG_A5XX_TPL1_FS_TEX_CONST_LO 0x0000e75e + +#define REG_A5XX_TPL1_FS_TEX_CONST_HI 0x0000e75f + +#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL 0x0000e764 + +#define REG_A5XX_HLSQ_CONTROL_0_REG 0x0000e784 + +#define REG_A5XX_HLSQ_CONTROL_1_REG 0x0000e785 +#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x0000003f +#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT 0 +static inline uint32_t A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT) & A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK; +} + +#define REG_A5XX_HLSQ_CONTROL_2_REG 0x0000e786 +#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff +#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0 +static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK; +} + +#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787 +#define A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK 0x000000ff +#define A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT 0 +static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID(uint32_t val) +{ + return 
((val) << A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK; +} + +#define REG_A5XX_HLSQ_CONTROL_4_REG 0x0000e788 +#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000 +#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16 +static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK; +} +#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000 +#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24 +static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK; +} + +#define REG_A5XX_HLSQ_UPDATE_CNTL 0x0000e78a + +#define REG_A5XX_HLSQ_VS_CONTROL_REG 0x0000e78b +#define A5XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00000001 +#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_HLSQ_FS_CONTROL_REG 0x0000e78c +#define A5XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00000001 +#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_HLSQ_HS_CONTROL_REG 0x0000e78d +#define A5XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00000001 +#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_HLSQ_DS_CONTROL_REG 0x0000e78e +#define A5XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00000001 +#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & 
A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_HLSQ_GS_CONTROL_REG 0x0000e78f +#define A5XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00000001 +#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_HLSQ_CS_CONFIG 0x0000e790 + +#define REG_A5XX_HLSQ_VS_CNTL 0x0000e791 +#define A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK 0xfffffffe +#define A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT 1 +static inline uint32_t A5XX_HLSQ_VS_CNTL_INSTRLEN(uint32_t val) +{ + return ((val) << A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK; +} + +#define REG_A5XX_HLSQ_FS_CNTL 0x0000e792 +#define A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK 0xfffffffe +#define A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT 1 +static inline uint32_t A5XX_HLSQ_FS_CNTL_INSTRLEN(uint32_t val) +{ + return ((val) << A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK; +} + +#define REG_A5XX_HLSQ_HS_CNTL 0x0000e793 +#define A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK 0xfffffffe +#define A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT 1 +static inline uint32_t A5XX_HLSQ_HS_CNTL_INSTRLEN(uint32_t val) +{ + return ((val) << A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK; +} + +#define REG_A5XX_HLSQ_DS_CNTL 0x0000e794 +#define A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK 0xfffffffe +#define A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT 1 +static inline uint32_t A5XX_HLSQ_DS_CNTL_INSTRLEN(uint32_t val) +{ + return ((val) << A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK; +} + +#define REG_A5XX_HLSQ_GS_CNTL 0x0000e795 +#define A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK 0xfffffffe +#define A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT 1 +static inline uint32_t A5XX_HLSQ_GS_CNTL_INSTRLEN(uint32_t val) +{ + return ((val) << A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK; +} + +#define REG_A5XX_HLSQ_CS_CNTL 0x0000e796 +#define A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK 0xfffffffe +#define A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT 1 +static inline uint32_t A5XX_HLSQ_CS_CNTL_INSTRLEN(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK; +} + +#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_X 0x0000e7b9 + +#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000e7ba + +#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000e7bb + +#define REG_A5XX_HLSQ_CS_NDRANGE_0 0x0000e7b0 + +#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1 + +#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2 + +#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3 + +#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4 + +#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5 + +#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6 + +#define 
REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7 + +#define REG_A5XX_HLSQ_CS_CNTL_1 0x0000e7b8 + +#define REG_A5XX_UNKNOWN_E7C0 0x0000e7c0 + +#define REG_A5XX_HLSQ_VS_CONSTLEN 0x0000e7c3 + +#define REG_A5XX_HLSQ_VS_INSTRLEN 0x0000e7c4 + +#define REG_A5XX_UNKNOWN_E7C5 0x0000e7c5 + +#define REG_A5XX_UNKNOWN_E7CA 0x0000e7ca + +#define REG_A5XX_HLSQ_FS_CONSTLEN 0x0000e7d7 + +#define REG_A5XX_HLSQ_FS_INSTRLEN 0x0000e7d8 + +#define REG_A5XX_HLSQ_HS_CONSTLEN 0x0000e7c8 + +#define REG_A5XX_HLSQ_HS_INSTRLEN 0x0000e7c9 + +#define REG_A5XX_HLSQ_DS_CONSTLEN 0x0000e7cd + +#define REG_A5XX_HLSQ_DS_INSTRLEN 0x0000e7ce + +#define REG_A5XX_UNKNOWN_E7CF 0x0000e7cf + +#define REG_A5XX_HLSQ_GS_CONSTLEN 0x0000e7d2 + +#define REG_A5XX_HLSQ_GS_INSTRLEN 0x0000e7d3 + +#define REG_A5XX_UNKNOWN_E7D4 0x0000e7d4 + +#define REG_A5XX_UNKNOWN_E7D9 0x0000e7d9 + +#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_3 0x0000e7dc + +#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_4 0x0000e7dd + +#define REG_A5XX_RB_2D_DST_FILL 0x00002101 + +#define REG_A5XX_RB_2D_SRC_INFO 0x00002107 +#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff +#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0 +static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val) +{ + return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK; +} +#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00 +#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT 10 +static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK; +} + +#define REG_A5XX_RB_2D_SRC_LO 0x00002108 + +#define REG_A5XX_RB_2D_SRC_HI 0x00002109 + +#define REG_A5XX_RB_2D_DST_INFO 0x00002110 +#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff +#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0 +static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val) +{ + return ((val) << A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK; +} +#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00 +#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10 +static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK; +} + +#define REG_A5XX_RB_2D_SRC_FLAGS_LO 0x00002140 + +#define REG_A5XX_RB_2D_SRC_FLAGS_HI 0x00002141 + +#define REG_A5XX_RB_2D_DST_LO 0x00002111 + +#define REG_A5XX_RB_2D_DST_HI 0x00002112 + +#define REG_A5XX_RB_2D_DST_FLAGS_LO 0x00002143 + +#define REG_A5XX_RB_2D_DST_FLAGS_HI 0x00002144 + +#define REG_A5XX_GRAS_2D_SRC_INFO 0x00002181 +#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff +#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0 +static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val) +{ + return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK; +} +#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00 +#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10 +static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK; +} + +#define REG_A5XX_GRAS_2D_DST_INFO 0x00002182 +#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff +#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT 0 +static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val) +{ + 
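/*
 * All of the generated field helpers in this header share this shape: shift
 * the value to the field's bit offset, then clamp it with the field mask, so
 * individual fields can be OR'd together into a register word. For example
 * (values illustrative only), a stencil ref/mask word could be built as:
 *   A5XX_RB_STENCILREFMASK_STENCILREF(ref) |
 *   A5XX_RB_STENCILREFMASK_STENCILMASK(0xff) |
 *   A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(0xff)
 * Helpers that pre-shift the argument right - e.g. the "(val >> 6)" pitch
 * builders - take a byte pitch and encode it in 64-byte hardware units.
 */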
return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK; +} +#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00 +#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT 10 +static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK; +} + +#define REG_A5XX_TEX_SAMP_0 0x00000000 +#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001 +#define A5XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006 +#define A5XX_TEX_SAMP_0_XY_MAG__SHIFT 1 +static inline uint32_t A5XX_TEX_SAMP_0_XY_MAG(enum a5xx_tex_filter val) +{ + return ((val) << A5XX_TEX_SAMP_0_XY_MAG__SHIFT) & A5XX_TEX_SAMP_0_XY_MAG__MASK; +} +#define A5XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018 +#define A5XX_TEX_SAMP_0_XY_MIN__SHIFT 3 +static inline uint32_t A5XX_TEX_SAMP_0_XY_MIN(enum a5xx_tex_filter val) +{ + return ((val) << A5XX_TEX_SAMP_0_XY_MIN__SHIFT) & A5XX_TEX_SAMP_0_XY_MIN__MASK; +} +#define A5XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0 +#define A5XX_TEX_SAMP_0_WRAP_S__SHIFT 5 +static inline uint32_t A5XX_TEX_SAMP_0_WRAP_S(enum a5xx_tex_clamp val) +{ + return ((val) << A5XX_TEX_SAMP_0_WRAP_S__SHIFT) & A5XX_TEX_SAMP_0_WRAP_S__MASK; +} +#define A5XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700 +#define A5XX_TEX_SAMP_0_WRAP_T__SHIFT 8 +static inline uint32_t A5XX_TEX_SAMP_0_WRAP_T(enum a5xx_tex_clamp val) +{ + return ((val) << A5XX_TEX_SAMP_0_WRAP_T__SHIFT) & A5XX_TEX_SAMP_0_WRAP_T__MASK; +} +#define A5XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800 +#define A5XX_TEX_SAMP_0_WRAP_R__SHIFT 11 +static inline uint32_t A5XX_TEX_SAMP_0_WRAP_R(enum a5xx_tex_clamp val) +{ + return ((val) << A5XX_TEX_SAMP_0_WRAP_R__SHIFT) & A5XX_TEX_SAMP_0_WRAP_R__MASK; +} +#define A5XX_TEX_SAMP_0_ANISO__MASK 0x0001c000 +#define A5XX_TEX_SAMP_0_ANISO__SHIFT 14 +static inline uint32_t A5XX_TEX_SAMP_0_ANISO(enum a5xx_tex_aniso val) +{ + return ((val) << A5XX_TEX_SAMP_0_ANISO__SHIFT) & A5XX_TEX_SAMP_0_ANISO__MASK; +} +#define A5XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000 +#define A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19 +static inline uint32_t A5XX_TEX_SAMP_0_LOD_BIAS(float val) +{ + return ((((int32_t)(val * 256.0))) << A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A5XX_TEX_SAMP_0_LOD_BIAS__MASK; +} + +#define REG_A5XX_TEX_SAMP_1 0x00000001 +#define A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e +#define A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1 +static inline uint32_t A5XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val) +{ + return ((val) << A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK; +} +#define A5XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010 +#define A5XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020 +#define A5XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040 +#define A5XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00 +#define A5XX_TEX_SAMP_1_MAX_LOD__SHIFT 8 +static inline uint32_t A5XX_TEX_SAMP_1_MAX_LOD(float val) +{ + return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A5XX_TEX_SAMP_1_MAX_LOD__MASK; +} +#define A5XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000 +#define A5XX_TEX_SAMP_1_MIN_LOD__SHIFT 20 +static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val) +{ + return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A5XX_TEX_SAMP_1_MIN_LOD__MASK; +} + +#define REG_A5XX_TEX_SAMP_2 0x00000002 + +#define REG_A5XX_TEX_SAMP_3 0x00000003 + +#define REG_A5XX_TEX_CONST_0 0x00000000 +#define A5XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003 +#define A5XX_TEX_CONST_0_TILE_MODE__SHIFT 0 +static inline uint32_t 
A5XX_TEX_CONST_0_TILE_MODE(enum a5xx_tile_mode val) +{ + return ((val) << A5XX_TEX_CONST_0_TILE_MODE__SHIFT) & A5XX_TEX_CONST_0_TILE_MODE__MASK; +} +#define A5XX_TEX_CONST_0_SRGB 0x00000004 +#define A5XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070 +#define A5XX_TEX_CONST_0_SWIZ_X__SHIFT 4 +static inline uint32_t A5XX_TEX_CONST_0_SWIZ_X(enum a5xx_tex_swiz val) +{ + return ((val) << A5XX_TEX_CONST_0_SWIZ_X__SHIFT) & A5XX_TEX_CONST_0_SWIZ_X__MASK; +} +#define A5XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380 +#define A5XX_TEX_CONST_0_SWIZ_Y__SHIFT 7 +static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Y(enum a5xx_tex_swiz val) +{ + return ((val) << A5XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Y__MASK; +} +#define A5XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00 +#define A5XX_TEX_CONST_0_SWIZ_Z__SHIFT 10 +static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Z(enum a5xx_tex_swiz val) +{ + return ((val) << A5XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Z__MASK; +} +#define A5XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000 +#define A5XX_TEX_CONST_0_SWIZ_W__SHIFT 13 +static inline uint32_t A5XX_TEX_CONST_0_SWIZ_W(enum a5xx_tex_swiz val) +{ + return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK; +} +#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000 +#define A5XX_TEX_CONST_0_FMT__SHIFT 22 +static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val) +{ + return ((val) << A5XX_TEX_CONST_0_FMT__SHIFT) & A5XX_TEX_CONST_0_FMT__MASK; +} +#define A5XX_TEX_CONST_0_SWAP__MASK 0xc0000000 +#define A5XX_TEX_CONST_0_SWAP__SHIFT 30 +static inline uint32_t A5XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A5XX_TEX_CONST_0_SWAP__SHIFT) & A5XX_TEX_CONST_0_SWAP__MASK; +} + +#define REG_A5XX_TEX_CONST_1 0x00000001 +#define A5XX_TEX_CONST_1_WIDTH__MASK 0x00007fff +#define A5XX_TEX_CONST_1_WIDTH__SHIFT 0 +static inline uint32_t A5XX_TEX_CONST_1_WIDTH(uint32_t val) +{ + return ((val) << A5XX_TEX_CONST_1_WIDTH__SHIFT) & A5XX_TEX_CONST_1_WIDTH__MASK; +} +#define A5XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000 +#define A5XX_TEX_CONST_1_HEIGHT__SHIFT 15 +static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val) +{ + return ((val) << A5XX_TEX_CONST_1_HEIGHT__SHIFT) & A5XX_TEX_CONST_1_HEIGHT__MASK; +} + +#define REG_A5XX_TEX_CONST_2 0x00000002 +#define A5XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f +#define A5XX_TEX_CONST_2_FETCHSIZE__SHIFT 0 +static inline uint32_t A5XX_TEX_CONST_2_FETCHSIZE(enum a5xx_tex_fetchsize val) +{ + return ((val) << A5XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A5XX_TEX_CONST_2_FETCHSIZE__MASK; +} +#define A5XX_TEX_CONST_2_PITCH__MASK 0x1fffff80 +#define A5XX_TEX_CONST_2_PITCH__SHIFT 7 +static inline uint32_t A5XX_TEX_CONST_2_PITCH(uint32_t val) +{ + return ((val) << A5XX_TEX_CONST_2_PITCH__SHIFT) & A5XX_TEX_CONST_2_PITCH__MASK; +} +#define A5XX_TEX_CONST_2_TYPE__MASK 0x60000000 +#define A5XX_TEX_CONST_2_TYPE__SHIFT 29 +static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val) +{ + return ((val) << A5XX_TEX_CONST_2_TYPE__SHIFT) & A5XX_TEX_CONST_2_TYPE__MASK; +} + +#define REG_A5XX_TEX_CONST_3 0x00000003 +#define A5XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff +#define A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0 +static inline uint32_t A5XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val) +{ + return ((val >> 12) << A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A5XX_TEX_CONST_3_ARRAY_PITCH__MASK; +} +#define A5XX_TEX_CONST_3_FLAG 0x10000000 + +#define REG_A5XX_TEX_CONST_4 0x00000004 +#define A5XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0 +#define A5XX_TEX_CONST_4_BASE_LO__SHIFT 5 +static inline uint32_t 
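/*
 * The helper below keeps only bits [31:5] of the texture base address: the
 * base must be 32-byte aligned, and the low five bits are dropped before
 * being packed into TEX_CONST_4.
 */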
A5XX_TEX_CONST_4_BASE_LO(uint32_t val) +{ + return ((val >> 5) << A5XX_TEX_CONST_4_BASE_LO__SHIFT) & A5XX_TEX_CONST_4_BASE_LO__MASK; +} + +#define REG_A5XX_TEX_CONST_5 0x00000005 +#define A5XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff +#define A5XX_TEX_CONST_5_BASE_HI__SHIFT 0 +static inline uint32_t A5XX_TEX_CONST_5_BASE_HI(uint32_t val) +{ + return ((val) << A5XX_TEX_CONST_5_BASE_HI__SHIFT) & A5XX_TEX_CONST_5_BASE_HI__MASK; +} +#define A5XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000 +#define A5XX_TEX_CONST_5_DEPTH__SHIFT 17 +static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val) +{ + return ((val) << A5XX_TEX_CONST_5_DEPTH__SHIFT) & A5XX_TEX_CONST_5_DEPTH__MASK; +} + +#define REG_A5XX_TEX_CONST_6 0x00000006 + +#define REG_A5XX_TEX_CONST_7 0x00000007 + +#define REG_A5XX_TEX_CONST_8 0x00000008 + +#define REG_A5XX_TEX_CONST_9 0x00000009 + +#define REG_A5XX_TEX_CONST_10 0x0000000a + +#define REG_A5XX_TEX_CONST_11 0x0000000b + + +#endif /* A5XX_XML */ diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c new file mode 100644 index 000000000000..b8647198c11c --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -0,0 +1,888 @@ +/* Copyright (c) 2016 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "msm_gem.h" +#include "a5xx_gpu.h" + +extern bool hang_debug; +static void a5xx_dump(struct msm_gpu *gpu); + +static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, + struct msm_file_private *ctx) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct msm_drm_private *priv = gpu->dev->dev_private; + struct msm_ringbuffer *ring = gpu->rb; + unsigned int i, ibs = 0; + + for (i = 0; i < submit->nr_cmds; i++) { + switch (submit->cmd[i].type) { + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + break; + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + if (priv->lastctx == ctx) + break; + case MSM_SUBMIT_CMD_BUF: + OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); + OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); + OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); + OUT_RING(ring, submit->cmd[i].size); + ibs++; + break; + } + } + + OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1); + OUT_RING(ring, submit->fence->seqno); + + OUT_PKT7(ring, CP_EVENT_WRITE, 4); + OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31)); + OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence))); + OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence))); + OUT_RING(ring, submit->fence->seqno); + + gpu->funcs->flush(gpu); +} + +struct a5xx_hwcg { + u32 offset; + u32 value; +}; + +static const struct a5xx_hwcg a530_hwcg[] = { + {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF}, 
+ {REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444}, + {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011}, + {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222}, + {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222}, + 
{REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004}, + {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, + {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, + {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222} +}; + +static const struct { + int (*test)(struct adreno_gpu *gpu); + const struct a5xx_hwcg *regs; + unsigned int count; +} a5xx_hwcg_regs[] = { + { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), }, +}; + +static void _a5xx_enable_hwcg(struct msm_gpu *gpu, + const struct a5xx_hwcg *regs, unsigned int count) +{ + unsigned int i; + + for (i = 0; i < count; i++) + gpu_write(gpu, regs[i].offset, regs[i].value); + + gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00); + gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182); +} + +static void a5xx_enable_hwcg(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) { + if (a5xx_hwcg_regs[i].test(adreno_gpu)) { + _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs, + a5xx_hwcg_regs[i].count); + return; + } + } +} + +static int a5xx_me_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct msm_ringbuffer *ring = gpu->rb; + + OUT_PKT7(ring, CP_ME_INIT, 8); + + OUT_RING(ring, 0x0000002F); + + /* Enable multiple hardware contexts */ + OUT_RING(ring, 0x00000003); + + /* Enable error detection */ + OUT_RING(ring, 0x20000000); + + /* Don't enable header dump */ + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + + /* Specify workarounds for various microcode issues */ + if (adreno_is_a530(adreno_gpu)) { + /* Workaround for token end syncs + * Force a WFI after every direct-render 3D mode draw and every + * 2D mode 3 draw + */ + OUT_RING(ring, 0x0000000B); + } else { + /* No workarounds enabled */ + OUT_RING(ring, 0x00000000); + } + + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + + gpu->funcs->flush(gpu); + + return gpu->funcs->idle(gpu) ? 
0 : -EINVAL; +} + +static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu, + const struct firmware *fw, u64 *iova) +{ + struct drm_device *drm = gpu->dev; + struct drm_gem_object *bo; + void *ptr; + + mutex_lock(&drm->struct_mutex); + bo = msm_gem_new(drm, fw->size - 4, MSM_BO_UNCACHED); + mutex_unlock(&drm->struct_mutex); + + if (IS_ERR(bo)) + return bo; + + ptr = msm_gem_get_vaddr(bo); + if (!ptr) { + drm_gem_object_unreference_unlocked(bo); + return ERR_PTR(-ENOMEM); + } + + if (iova) { + int ret = msm_gem_get_iova(bo, gpu->id, iova); + + if (ret) { + drm_gem_object_unreference_unlocked(bo); + return ERR_PTR(ret); + } + } + + memcpy(ptr, &fw->data[4], fw->size - 4); + + msm_gem_put_vaddr(bo); + return bo; +} + +static int a5xx_ucode_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + int ret; + + if (!a5xx_gpu->pm4_bo) { + a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4, + &a5xx_gpu->pm4_iova); + + if (IS_ERR(a5xx_gpu->pm4_bo)) { + ret = PTR_ERR(a5xx_gpu->pm4_bo); + a5xx_gpu->pm4_bo = NULL; + dev_err(gpu->dev->dev, "could not allocate PM4: %d\n", + ret); + return ret; + } + } + + if (!a5xx_gpu->pfp_bo) { + a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp, + &a5xx_gpu->pfp_iova); + + if (IS_ERR(a5xx_gpu->pfp_bo)) { + ret = PTR_ERR(a5xx_gpu->pfp_bo); + a5xx_gpu->pfp_bo = NULL; + dev_err(gpu->dev->dev, "could not allocate PFP: %d\n", + ret); + return ret; + } + } + + gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, + REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova); + + gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, + REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova); + + return 0; +} + +#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \ + A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \ + A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \ + A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \ + A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \ + A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP) + +static int a5xx_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int ret; + + gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); + + /* Make all blocks contribute to the GPU BUSY perf counter */ + gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF); + + /* Enable RBBM error reporting bits */ + gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001); + + if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) { + /* + * Mask out the activity signals from RB1-3 to avoid false + * positives + */ + + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11, + 0xF0000000); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18, + 0xFFFFFFFF); + } + + /* Enable fault detection */ + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL, + (1 << 30) | 0xFFFF); + + /* Turn on performance counters */ + gpu_write(gpu, 
REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
+
+	/* Increase VFD cache access so LRZ and other data gets evicted less */
+	gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
+
+	/* Disable L2 bypass in the UCHE */
+	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
+	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
+	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
+	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+
+	/* Set the GMEM VA range (0 to gpu->gmem) */
+	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
+	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
+	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
+		0x00100000 + adreno_gpu->gmem - 1);
+	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
+
+	gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
+	gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
+	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+
+	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
+
+	if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+		gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
+
+	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
+
+	/* Enable USE_RETENTION_FLOPS */
+	gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
+
+	/* Enable ME/PFP split notification */
+	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
+
+	/* Enable HWCG */
+	a5xx_enable_hwcg(gpu);
+
+	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
+
+	/* Set the highest bank bit */
+	gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
+	gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);
+
+	/* Protect registers from the CP */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
+
+	/* RBBM */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
+
+	/* Content protect */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
+		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+			16));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
+		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));
+
+	/* CP */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));
+
+	/* RB */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));
+
+	/* VPC */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));
+
+	/* UCHE */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
+
+	if (adreno_is_a530(adreno_gpu))
+		gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
+			ADRENO_PROTECT_RW(0x10000, 0x8000));
+
+	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
+	/*
+	 * Disable the trusted memory range - we don't actually support secure
+	 * memory rendering at this point in time and we don't want to block off
+	 * part of the virtual memory space.
+	 */
+	gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+		REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+	/* Load the GPMU firmware before starting the HW init */
+	a5xx_gpmu_ucode_init(gpu);
+
+	ret = adreno_hw_init(gpu);
+	if (ret)
+		return ret;
+
+	ret = a5xx_ucode_init(gpu);
+	if (ret)
+		return ret;
+
+	/* Disable the interrupts through the initial bringup stage */
+	gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
+
+	/* Clear ME_HALT to start the micro engine */
+	gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
+	ret = a5xx_me_init(gpu);
+	if (ret)
+		return ret;
+
+	ret = a5xx_power_init(gpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Send a pipeline event stat to get misbehaving counters to start
+	 * ticking correctly
+	 */
+	if (adreno_is_a530(adreno_gpu)) {
+		OUT_PKT7(gpu->rb, CP_EVENT_WRITE, 1);
+		OUT_RING(gpu->rb, 0x0F);
+
+		gpu->funcs->flush(gpu);
+		if (!gpu->funcs->idle(gpu))
+			return -EINVAL;
+	}
+
+	/* Put the GPU into unsecure mode */
+	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+
+	return 0;
+}
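/*
 * A minimal sketch, not part of this patch: the gpu_write64() calls above
 * program the _LO/_HI register pairs the A5XX uses for 64-bit values (ucode
 * instruction bases, the trusted-base pair, etc). Assuming the msm driver's
 * helper behaves as its name suggests, it reduces to two 32-bit writes; the
 * name "gpu_write64_sketch" below is illustrative only.
 */
static inline void gpu_write64_sketch(struct msm_gpu *gpu, u32 lo, u32 hi,
		u64 val)
{
	/* split the 64-bit value across the low and high registers */
	gpu_write(gpu, lo, lower_32_bits(val));
	gpu_write(gpu, hi, upper_32_bits(val));
}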
+
+static void a5xx_recover(struct msm_gpu *gpu)
+{
+	int i;
+
+	adreno_dump_info(gpu);
+
+	for (i = 0; i < 8; i++) {
+		printk("CP_SCRATCH_REG%d: %u\n", i,
+			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
+	}
+
+	if (hang_debug)
+		a5xx_dump(gpu);
+
+	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
+	gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
+	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
+	adreno_recover(gpu);
+}
+
+static void a5xx_destroy(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+	DBG("%s", gpu->name);
+
+	if (a5xx_gpu->pm4_bo) {
+		if (a5xx_gpu->pm4_iova)
+			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->id);
+		drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
+	}
+
+	if (a5xx_gpu->pfp_bo) {
+		if (a5xx_gpu->pfp_iova)
+			msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->id);
+		drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
+	}
+
+	if (a5xx_gpu->gpmu_bo) {
+		if (a5xx_gpu->gpmu_iova)
+			msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
+		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+	}
+
+	adreno_gpu_cleanup(adreno_gpu);
+	kfree(a5xx_gpu);
+}
+
+static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
+{
+	if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
+		return false;
+
+	/*
+	 * Nearly every abnormality ends up pausing the GPU and triggering a
+	 * fault so we can safely just watch for this one interrupt to fire
+	 */
+	return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
+		A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
+}
+
+static bool a5xx_idle(struct msm_gpu *gpu)
+{
+	/* wait for CP to drain ringbuffer: */
+	if (!adreno_idle(gpu))
+		return false;
+
+	if (spin_until(_a5xx_check_idle(gpu))) {
+		DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X\n",
+			gpu->name, __builtin_return_address(0),
+			gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+			gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));
+
+		return false;
+	}
+
+	return true;
+}
+
+static void a5xx_cp_err_irq(struct msm_gpu *gpu)
+{
+	u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
+
+	if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
+		u32 val;
+
+		gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
+
+		/*
+		 * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
+		 * read it twice
+		 */
+
+		gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+		val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+
+		dev_err_ratelimited(gpu->dev->dev,
"CP | opcode error | possible opcode=0x%8.8X\n", + val); + } + + if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR) + dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n", + gpu_read(gpu, REG_A5XX_CP_HW_FAULT)); + + if (status & A5XX_CP_INT_CP_DMA_ERROR) + dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n"); + + if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) { + u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS); + + dev_err_ratelimited(gpu->dev->dev, + "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n", + val & (1 << 24) ? "WRITE" : "READ", + (val & 0xFFFFF) >> 2, val); + } + + if (status & A5XX_CP_INT_CP_AHB_ERROR) { + u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT); + const char *access[16] = { "reserved", "reserved", + "timestamp lo", "timestamp hi", "pfp read", "pfp write", + "", "", "me read", "me write", "", "", "crashdump read", + "crashdump write" }; + + dev_err_ratelimited(gpu->dev->dev, + "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n", + status & 0xFFFFF, access[(status >> 24) & 0xF], + (status & (1 << 31)), status); + } +} + +static void a5xx_rbbm_err_irq(struct msm_gpu *gpu) +{ + u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) { + u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS); + + dev_err_ratelimited(gpu->dev->dev, + "RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n", + val & (1 << 28) ? "WRITE" : "READ", + (val & 0xFFFFF) >> 2, (val >> 20) & 0x3, + (val >> 24) & 0xF); + + /* Clear the error */ + gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4)); + } + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT) + dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n"); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT) + dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n", + gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS)); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT) + dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n", + gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS)); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT) + dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n", + gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS)); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW) + dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n"); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW) + dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n"); +} + +static void a5xx_uche_err_irq(struct msm_gpu *gpu) +{ + uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI); + + addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO); + + dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n", + addr); +} + +static void a5xx_gpmu_err_irq(struct msm_gpu *gpu) +{ + dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n"); +} + +#define RBBM_ERROR_MASK \ + (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \ + A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW) + +static irqreturn_t a5xx_irq(struct msm_gpu *gpu) +{ + u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS); + + gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, status); + + if (status & RBBM_ERROR_MASK) + a5xx_rbbm_err_irq(gpu); + + if (status 
+		a5xx_cp_err_irq(gpu);
+
+	if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+		a5xx_uche_err_irq(gpu);
+
+	if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+		a5xx_gpmu_err_irq(gpu);
+
+	if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+		msm_gpu_retire(gpu);
+
+	return IRQ_HANDLED;
+}
+
+static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+		REG_A5XX_CP_RB_RPTR_ADDR_HI),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
+};
+
+static const u32 a5xx_registers[] = {
+	0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
+	0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
+	0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
+	0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
+	0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
+	0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
+	0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
+	0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
+	0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
+	0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
+	0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
+	0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
+	0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
+	0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
+	0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
+	0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
+	0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
+	0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
+	0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
+	0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
+	0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
+	0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
+	0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
+	0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
+	0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
+	0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
+	0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
+	~0
+};
+
+static void a5xx_dump(struct msm_gpu *gpu)
+{
+	dev_info(gpu->dev->dev, "status: %08x\n",
+		gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+	adreno_dump(gpu);
+}
+
+static int a5xx_pm_resume(struct msm_gpu *gpu)
+{
+	int ret;
+
+	/* Turn on the core power */
+	ret = msm_gpu_pm_resume(gpu);
+	if (ret)
+		return ret;
+
+	/* Turn on the RBCCU domain first to limit the chances of voltage droop */
+	gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
+
+	/* Wait 3 usecs before polling */
+	udelay(3);
+
+	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
+		(1 << 20), (1 << 20));
+	if (ret) {
+		DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
+			gpu->name,
+			gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
+		return ret;
+	}
+
+	/* Turn on the SP domain */
+	gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
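+
+	/*
+	 * As with the RBCCU domain above, poll the GDSC power-on status
+	 * bit (bit 20) for up to 20 usecs; if it never sets, the SP domain
+	 * did not come up and resume fails with -ETIMEDOUT.
+	 */
+	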
ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS, + (1 << 20), (1 << 20)); + if (ret) + DRM_ERROR("%s: timeout waiting for SP GDSC enable\n", + gpu->name); + + return ret; +} + +static int a5xx_pm_suspend(struct msm_gpu *gpu) +{ + /* Clear the VBIF pipe before shutting down */ + gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF); + spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF); + + gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0); + + /* + * Reset the VBIF before power collapse to avoid issue with FIFO + * entries + */ + gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000); + gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000); + + return msm_gpu_pm_suspend(gpu); +} + +static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) +{ + *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO, + REG_A5XX_RBBM_PERFCTR_CP_0_HI); + + return 0; +} + +#ifdef CONFIG_DEBUG_FS +static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m) +{ + gpu->funcs->pm_resume(gpu); + + seq_printf(m, "status: %08x\n", + gpu_read(gpu, REG_A5XX_RBBM_STATUS)); + gpu->funcs->pm_suspend(gpu); + + adreno_show(gpu, m); +} +#endif + +static const struct adreno_gpu_funcs funcs = { + .base = { + .get_param = adreno_get_param, + .hw_init = a5xx_hw_init, + .pm_suspend = a5xx_pm_suspend, + .pm_resume = a5xx_pm_resume, + .recover = a5xx_recover, + .last_fence = adreno_last_fence, + .submit = a5xx_submit, + .flush = adreno_flush, + .idle = a5xx_idle, + .irq = a5xx_irq, + .destroy = a5xx_destroy, + .show = a5xx_show, + }, + .get_timestamp = a5xx_get_timestamp, +}; + +struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev = priv->gpu_pdev; + struct a5xx_gpu *a5xx_gpu = NULL; + struct adreno_gpu *adreno_gpu; + struct msm_gpu *gpu; + int ret; + + if (!pdev) { + dev_err(dev->dev, "No A5XX device is defined\n"); + return ERR_PTR(-ENXIO); + } + + a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL); + if (!a5xx_gpu) + return ERR_PTR(-ENOMEM); + + adreno_gpu = &a5xx_gpu->base; + gpu = &adreno_gpu->base; + + a5xx_gpu->pdev = pdev; + adreno_gpu->registers = a5xx_registers; + adreno_gpu->reg_offsets = a5xx_register_offsets; + + a5xx_gpu->lm_leakage = 0x4E001A; + + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs); + if (ret) { + a5xx_destroy(&(a5xx_gpu->base.base)); + return ERR_PTR(ret); + } + + return gpu; +} diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h new file mode 100644 index 000000000000..1590f845d554 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -0,0 +1,60 @@ +/* Copyright (c) 2016 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef __A5XX_GPU_H__ +#define __A5XX_GPU_H__ + +#include "adreno_gpu.h" + +/* Bringing over the hack from the previous targets */ +#undef ROP_COPY +#undef ROP_XOR + +#include "a5xx.xml.h" + +struct a5xx_gpu { + struct adreno_gpu base; + struct platform_device *pdev; + + struct drm_gem_object *pm4_bo; + uint64_t pm4_iova; + + struct drm_gem_object *pfp_bo; + uint64_t pfp_iova; + + struct drm_gem_object *gpmu_bo; + uint64_t gpmu_iova; + uint32_t gpmu_dwords; + + uint32_t lm_leakage; +}; + +#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) + +int a5xx_power_init(struct msm_gpu *gpu); +void a5xx_gpmu_ucode_init(struct msm_gpu *gpu); + +static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs, + uint32_t reg, uint32_t mask, uint32_t value) +{ + while (usecs--) { + udelay(1); + if ((gpu_read(gpu, reg) & mask) == value) + return 0; + cpu_relax(); + } + + return -ETIMEDOUT; +} + + +#endif /* __A5XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c new file mode 100644 index 000000000000..72d52c71f769 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -0,0 +1,344 @@ +/* Copyright (c) 2016 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/pm_opp.h> +#include "a5xx_gpu.h" + +/* + * The GPMU data block is a block of shared registers that can be used to + * communicate back and forth. 
These "registers" are by convention with the GPMU + * firwmare and not bound to any specific hardware design + */ + +#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE +#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5) +#define AGC_MSG_BASE (AGC_INIT_BASE + 7) + +#define AGC_MSG_STATE (AGC_MSG_BASE + 0) +#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1) +#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3) +#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o)) + +#define AGC_POWER_CONFIG_PRODUCTION_ID 1 +#define AGC_INIT_MSG_VALUE 0xBABEFACE + +static struct { + uint32_t reg; + uint32_t value; +} a5xx_sequence_regs[] = { + { 0xB9A1, 0x00010303 }, + { 0xB9A2, 0x13000000 }, + { 0xB9A3, 0x00460020 }, + { 0xB9A4, 0x10000000 }, + { 0xB9A5, 0x040A1707 }, + { 0xB9A6, 0x00010000 }, + { 0xB9A7, 0x0E000904 }, + { 0xB9A8, 0x10000000 }, + { 0xB9A9, 0x01165000 }, + { 0xB9AA, 0x000E0002 }, + { 0xB9AB, 0x03884141 }, + { 0xB9AC, 0x10000840 }, + { 0xB9AD, 0x572A5000 }, + { 0xB9AE, 0x00000003 }, + { 0xB9AF, 0x00000000 }, + { 0xB9B0, 0x10000000 }, + { 0xB828, 0x6C204010 }, + { 0xB829, 0x6C204011 }, + { 0xB82A, 0x6C204012 }, + { 0xB82B, 0x6C204013 }, + { 0xB82C, 0x6C204014 }, + { 0xB90F, 0x00000004 }, + { 0xB910, 0x00000002 }, + { 0xB911, 0x00000002 }, + { 0xB912, 0x00000002 }, + { 0xB913, 0x00000002 }, + { 0xB92F, 0x00000004 }, + { 0xB930, 0x00000005 }, + { 0xB931, 0x00000005 }, + { 0xB932, 0x00000005 }, + { 0xB933, 0x00000005 }, + { 0xB96F, 0x00000001 }, + { 0xB970, 0x00000003 }, + { 0xB94F, 0x00000004 }, + { 0xB950, 0x0000000B }, + { 0xB951, 0x0000000B }, + { 0xB952, 0x0000000B }, + { 0xB953, 0x0000000B }, + { 0xB907, 0x00000019 }, + { 0xB927, 0x00000019 }, + { 0xB947, 0x00000019 }, + { 0xB967, 0x00000019 }, + { 0xB987, 0x00000019 }, + { 0xB906, 0x00220001 }, + { 0xB926, 0x00220001 }, + { 0xB946, 0x00220001 }, + { 0xB966, 0x00220001 }, + { 0xB986, 0x00300000 }, + { 0xAC40, 0x0340FF41 }, + { 0xAC41, 0x03BEFED0 }, + { 0xAC42, 0x00331FED }, + { 0xAC43, 0x021FFDD3 }, + { 0xAC44, 0x5555AAAA }, + { 0xAC45, 0x5555AAAA }, + { 0xB9BA, 0x00000008 }, +}; + +/* + * Get the actual voltage value for the operating point at the specified + * frequency + */ +static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq) +{ + struct drm_device *dev = gpu->dev; + struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev = priv->gpu_pdev; + struct dev_pm_opp *opp; + + opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true); + + return (!IS_ERR(opp)) ? 
+		dev_pm_opp_get_voltage(opp) / 1000 : 0;
+}
+
+/* Set up thermal limit management */
+static void a5xx_lm_setup(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	unsigned int i;
+
+	/* Write the block of sequence registers */
+	for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
+		gpu_write(gpu, a5xx_sequence_regs[i].reg,
+			a5xx_sequence_regs[i].value);
+
+	/* Hard code the A530 GPU thermal sensor ID for the GPMU */
+	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007);
+	gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
+	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);
+
+	/* Until we get clock scaling, 0 is always the active power level */
+	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
+
+	gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);
+
+	/* The threshold is fixed at 6000 for A530 */
+	gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);
+
+	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);
+
+	/* Write the voltage table */
+	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);
+
+	gpu_write(gpu, AGC_MSG_STATE, 1);
+	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+	/* Write the max power - hard coded to 5448 for A530 */
+	gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
+	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+	/*
+	 * For now just write the one voltage level - we will do more when we
+	 * can do scaling
+	 */
+	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
+	gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);
+
+	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
+	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Enable SP/TP power collapse */
+static void a5xx_pc_init(struct msm_gpu *gpu)
+{
+	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
+	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
+	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
+	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
+}
+
+/* Enable the GPMU microcontroller */
+static int a5xx_gpmu_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct msm_ringbuffer *ring = gpu->rb;
+
+	if (!a5xx_gpu->gpmu_dwords)
+		return 0;
+
+	/* Turn off protected mode for this operation */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 0);
+
+	/* Kick off the IB to load the GPMU microcode */
+	OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+	OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
+	OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
+	OUT_RING(ring, a5xx_gpu->gpmu_dwords);
+
+	/* Turn back on protected mode */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 1);
+
+	gpu->funcs->flush(gpu);
+
+	if (!gpu->funcs->idle(gpu)) {
+		DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
+			gpu->name);
+		return -EINVAL;
+	}
+
+	gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
+
+	/* Kick off the GPMU */
+	gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
+
+	/*
+	 * Wait for the GPMU to respond. It isn't fatal if it doesn't, we just
+	 * won't have advanced power collapse.
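+	 * The GPMU signals that it is alive by writing the magic value
+	 * 0xBABEFACE into GPMU_GENERAL_0, which is what the poll below
+	 * waits for.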
+	 */
+	if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
+		0xBABEFACE))
+		DRM_ERROR("%s: GPMU firmware initialization timed out\n",
+			gpu->name);
+
+	return 0;
+}
+
+/* Enable limits management */
+static void a5xx_lm_enable(struct msm_gpu *gpu)
+{
+	gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
+	gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
+	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
+	gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
+	gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);
+
+	gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
+}
+
+int a5xx_power_init(struct msm_gpu *gpu)
+{
+	int ret;
+
+	/* Set up the limits management */
+	a5xx_lm_setup(gpu);
+
+	/* Set up SP/TP power collapse */
+	a5xx_pc_init(gpu);
+
+	/* Start the GPMU */
+	ret = a5xx_gpmu_init(gpu);
+	if (ret)
+		return ret;
+
+	/* Start the limits management */
+	a5xx_lm_enable(gpu);
+
+	return 0;
+}
+
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct drm_device *drm = gpu->dev;
+	const struct firmware *fw;
+	uint32_t dwords = 0, offset = 0, bosize;
+	unsigned int *data, *ptr, *cmds;
+	unsigned int cmds_size;
+
+	if (a5xx_gpu->gpmu_bo)
+		return;
+
+	/* Get the firmware */
+	if (request_firmware(&fw, adreno_gpu->info->gpmufw, drm->dev)) {
+		DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
+			gpu->name);
+		return;
+	}
+
+	data = (unsigned int *) fw->data;
+
+	/*
+	 * The first dword is the size of the remaining data in dwords. Use it
+	 * as a checksum of sorts and make sure it matches the actual size of
+	 * the firmware that we read
+	 */
+
+	if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2)))
+		goto out;
+
+	/* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
+	if (data[1] != 2)
+		goto out;
+
+	cmds = data + data[2] + 3;
+	cmds_size = data[0] - data[2] - 2;
+
+	/*
+	 * A single type4 opcode can only have so many values attached so
+	 * add enough opcodes to load all the commands
+	 */
+	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
+
+	mutex_lock(&drm->struct_mutex);
+	a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize, MSM_BO_UNCACHED);
+	mutex_unlock(&drm->struct_mutex);
+
+	if (IS_ERR(a5xx_gpu->gpmu_bo)) {
+		a5xx_gpu->gpmu_bo = NULL;
+		goto err;
+	}
+
+	if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->id, &a5xx_gpu->gpmu_iova))
+		goto err;
+
+	ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
+	if (!ptr)
+		goto err;
+
+	while (cmds_size > 0) {
+		int i;
+		uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
+			TYPE4_MAX_PAYLOAD : cmds_size;
+
+		ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
+			_size);
+
+		for (i = 0; i < _size; i++)
+			ptr[dwords++] = *cmds++;
+
+		offset += _size;
+		cmds_size -= _size;
+	}
+
+	msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
+	a5xx_gpu->gpmu_dwords = dwords;
+
+	goto out;
+
+err:
+	if (a5xx_gpu->gpmu_iova)
+		msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
+	if (a5xx_gpu->gpmu_bo)
+		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+
+	a5xx_gpu->gpmu_bo = NULL;
+	a5xx_gpu->gpmu_iova = 0;
+	a5xx_gpu->gpmu_dwords = 0;
+
+out:
+	/* No need to keep that firmware lying around anymore */
+	release_firmware(fw);
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index e81481d1b7df..4a33ba6f1244 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git

 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)

 Copyright (C) 2013-2016 by the following authors:
@@ -172,6 +173,14 @@ enum a3xx_color_swap {
 	XYZW = 3,
 };

+enum a3xx_rb_blend_opcode {
+	BLEND_DST_PLUS_SRC = 0,
+	BLEND_SRC_MINUS_DST = 1,
+	BLEND_DST_MINUS_SRC = 2,
+	BLEND_MIN_DST_SRC = 3,
+	BLEND_MAX_DST_SRC = 4,
+};
+
 #define REG_AXXX_CP_RB_BASE 0x000001c0

 #define REG_AXXX_CP_RB_CNTL 0x000001c1
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 7250ffc6322f..893eb2b2531b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -74,6 +74,15 @@ static const struct adreno_info gpulist[] = {
 		.pfpfw = "a420_pfp.fw",
 		.gmem = (SZ_1M + SZ_512K),
 		.init = a4xx_gpu_init,
+	}, {
+		.rev = ADRENO_REV(5, 3, 0, ANY_ID),
+		.revn = 530,
+		.name = "A530",
+		.pm4fw = "a530_pm4.fw",
+		.pfpfw = "a530_pfp.fw",
+		.gmem = SZ_1M,
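+		/* unlike earlier targets, the A530 also needs a GPMU
+		 * firmware for power management */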
+		.init = a5xx_gpu_init,
+		.gpmufw = "a530v3_gpmu.fw2",
 	},
 };

@@ -83,6 +92,8 @@ MODULE_FIRMWARE("a330_pm4.fw");
 MODULE_FIRMWARE("a330_pfp.fw");
 MODULE_FIRMWARE("a420_pm4.fw");
 MODULE_FIRMWARE("a420_pfp.fw");
+MODULE_FIRMWARE("a530_pm4.fw");
+MODULE_FIRMWARE("a530_pfp.fw");

 static inline bool _rev_match(uint8_t entry, uint8_t id)
 {
@@ -145,12 +156,16 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
 		mutex_lock(&dev->struct_mutex);
 		gpu->funcs->pm_resume(gpu);
 		mutex_unlock(&dev->struct_mutex);
+
+		disable_irq(gpu->irq);
+
 		ret = gpu->funcs->hw_init(gpu);
 		if (ret) {
 			dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
 			gpu->funcs->destroy(gpu);
 			gpu = NULL;
 		} else {
+			enable_irq(gpu->irq);
 			/* give inactive pm a chance to kick in: */
 			msm_gpu_retire(gpu);
 		}
@@ -166,12 +181,20 @@ static void set_gpu_pdev(struct drm_device *dev,
 	priv->gpu_pdev = pdev;
 }

+static const struct {
+	const char *str;
+	uint32_t flag;
+} quirks[] = {
+	{ "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
+	{ "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
+};
+
 static int adreno_bind(struct device *dev, struct device *master, void *data)
 {
 	static struct adreno_platform_config config = {};
 	struct device_node *child, *node = dev->of_node;
 	u32 val;
-	int ret;
+	int ret, i;

 	ret = of_property_read_u32(node, "qcom,chipid", &val);
 	if (ret) {
@@ -205,6 +228,10 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
 		return -ENXIO;
 	}

+	for (i = 0; i < ARRAY_SIZE(quirks); i++)
+		if (of_property_read_bool(node, quirks[i].str))
+			config.quirks |= quirks[i].flag;
+
 	dev->platform_data = &config;
 	set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
 	return 0;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f386f463278d..a18126150e11 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -22,7 +22,7 @@
 #include "msm_mmu.h"

 #define RB_SIZE SZ_32K
-#define RB_BLKSIZE 16
+#define RB_BLKSIZE 32

 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 {
@@ -54,9 +54,6 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 	}
 }

-#define rbmemptr(adreno_gpu, member) \
-	((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-
 int adreno_hw_init(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -79,11 +76,14 @@ int adreno_hw_init(struct msm_gpu *gpu)
 			(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));

 	/* Setup ringbuffer address: */
-	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
+	adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
+		REG_ADRENO_CP_RB_BASE_HI, gpu->rb_iova);

-	if (!adreno_is_a430(adreno_gpu))
-		adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
-			rbmemptr(adreno_gpu, rptr));
+	if (!adreno_is_a430(adreno_gpu)) {
+		adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
+			REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+			rbmemptr(adreno_gpu, rptr));
+	}

 	return 0;
 }
@@ -126,11 +126,14 @@ void adreno_recover(struct msm_gpu *gpu)
 	adreno_gpu->memptrs->wptr = 0;

 	gpu->funcs->pm_resume(gpu);
+
+	disable_irq(gpu->irq);
 	ret = gpu->funcs->hw_init(gpu);
 	if (ret) {
 		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
 		/* hmm, oh well?
*/ } + enable_irq(gpu->irq); } void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, @@ -218,19 +221,18 @@ void adreno_flush(struct msm_gpu *gpu) adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr); } -void adreno_idle(struct msm_gpu *gpu) +bool adreno_idle(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); uint32_t wptr = get_wptr(gpu->rb); - int ret; /* wait for CP to drain ringbuffer: */ - ret = spin_until(get_rptr(adreno_gpu) == wptr); - - if (ret) - DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name); + if (!spin_until(get_rptr(adreno_gpu) == wptr)) + return true; /* TODO maybe we need to reset GPU here to recover from hang? */ + DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name); + return false; } #ifdef CONFIG_DEBUG_FS @@ -278,7 +280,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m) void adreno_dump_info(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - int i; printk("revision: %d (%d.%d.%d.%d)\n", adreno_gpu->info->revn, adreno_gpu->rev.core, @@ -290,11 +291,6 @@ void adreno_dump_info(struct msm_gpu *gpu) printk("rptr: %d\n", get_rptr(adreno_gpu)); printk("wptr: %d\n", adreno_gpu->memptrs->wptr); printk("rb wptr: %d\n", get_wptr(gpu->rb)); - - for (i = 0; i < 8; i++) { - printk("CP_SCRATCH_REG%d: %u\n", i, - gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i)); - } } /* would be nice to not have to duplicate the _show() stuff with printk(): */ @@ -350,6 +346,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, adreno_gpu->gmem = adreno_gpu->info->gmem; adreno_gpu->revn = adreno_gpu->info->revn; adreno_gpu->rev = config->rev; + adreno_gpu->quirks = config->quirks; gpu->fast_rate = config->fast_rate; gpu->slow_rate = config->slow_rate; @@ -381,7 +378,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, return ret; } - mmu = gpu->mmu; + mmu = gpu->aspace->mmu; if (mmu) { ret = mmu->funcs->attach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 07d99bdf7c99..e8d55b0306ed 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -28,6 +28,9 @@ #include "adreno_pm4.xml.h" #define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1 +#define REG_SKIP ~0 +#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP + /** * adreno_regs: List of registers that are used in across all * 3D devices. 
Each device type has different offset value for the same @@ -35,73 +38,21 @@ * and are indexed by the enumeration values defined in this enum */ enum adreno_regs { - REG_ADRENO_CP_DEBUG, - REG_ADRENO_CP_ME_RAM_WADDR, - REG_ADRENO_CP_ME_RAM_DATA, - REG_ADRENO_CP_PFP_UCODE_DATA, - REG_ADRENO_CP_PFP_UCODE_ADDR, - REG_ADRENO_CP_WFI_PEND_CTR, REG_ADRENO_CP_RB_BASE, + REG_ADRENO_CP_RB_BASE_HI, REG_ADRENO_CP_RB_RPTR_ADDR, + REG_ADRENO_CP_RB_RPTR_ADDR_HI, REG_ADRENO_CP_RB_RPTR, REG_ADRENO_CP_RB_WPTR, - REG_ADRENO_CP_PROTECT_CTRL, - REG_ADRENO_CP_ME_CNTL, REG_ADRENO_CP_RB_CNTL, - REG_ADRENO_CP_IB1_BASE, - REG_ADRENO_CP_IB1_BUFSZ, - REG_ADRENO_CP_IB2_BASE, - REG_ADRENO_CP_IB2_BUFSZ, - REG_ADRENO_CP_TIMESTAMP, - REG_ADRENO_CP_ME_RAM_RADDR, - REG_ADRENO_CP_ROQ_ADDR, - REG_ADRENO_CP_ROQ_DATA, - REG_ADRENO_CP_MERCIU_ADDR, - REG_ADRENO_CP_MERCIU_DATA, - REG_ADRENO_CP_MERCIU_DATA2, - REG_ADRENO_CP_MEQ_ADDR, - REG_ADRENO_CP_MEQ_DATA, - REG_ADRENO_CP_HW_FAULT, - REG_ADRENO_CP_PROTECT_STATUS, - REG_ADRENO_SCRATCH_ADDR, - REG_ADRENO_SCRATCH_UMSK, - REG_ADRENO_SCRATCH_REG2, - REG_ADRENO_RBBM_STATUS, - REG_ADRENO_RBBM_PERFCTR_CTL, - REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0, - REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1, - REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2, - REG_ADRENO_RBBM_PERFCTR_PWR_1_LO, - REG_ADRENO_RBBM_INT_0_MASK, - REG_ADRENO_RBBM_INT_0_STATUS, - REG_ADRENO_RBBM_AHB_ERROR_STATUS, - REG_ADRENO_RBBM_PM_OVERRIDE2, - REG_ADRENO_RBBM_AHB_CMD, - REG_ADRENO_RBBM_INT_CLEAR_CMD, - REG_ADRENO_RBBM_SW_RESET_CMD, - REG_ADRENO_RBBM_CLOCK_CTL, - REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS, - REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS, - REG_ADRENO_VPC_DEBUG_RAM_SEL, - REG_ADRENO_VPC_DEBUG_RAM_READ, - REG_ADRENO_VSC_SIZE_ADDRESS, - REG_ADRENO_VFD_CONTROL_0, - REG_ADRENO_VFD_INDEX_MAX, - REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG, - REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG, - REG_ADRENO_SP_VS_OBJ_START_REG, - REG_ADRENO_SP_FS_OBJ_START_REG, - REG_ADRENO_PA_SC_AA_CONFIG, - REG_ADRENO_SQ_GPR_MANAGEMENT, - REG_ADRENO_SQ_INST_STORE_MANAGMENT, - REG_ADRENO_TP0_CHICKEN, - REG_ADRENO_RBBM_RBBM_CTL, - REG_ADRENO_UCHE_INVALIDATE0, - REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO, - REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI, REG_ADRENO_REGISTER_MAX, }; +enum adreno_quirks { + ADRENO_QUIRK_TWO_PASS_USE_WFI = 1, + ADRENO_QUIRK_FAULT_DETECT_MASK = 2, +}; + struct adreno_rev { uint8_t core; uint8_t major; @@ -122,12 +73,16 @@ struct adreno_info { uint32_t revn; const char *name; const char *pm4fw, *pfpfw; + const char *gpmufw; uint32_t gmem; struct msm_gpu *(*init)(struct drm_device *dev); }; const struct adreno_info *adreno_info(struct adreno_rev rev); +#define rbmemptr(adreno_gpu, member) \ + ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member)) + struct adreno_rbmemptrs { volatile uint32_t rptr; volatile uint32_t wptr; @@ -153,7 +108,7 @@ struct adreno_gpu { // different for z180.. struct adreno_rbmemptrs *memptrs; struct drm_gem_object *memptrs_bo; - uint32_t memptrs_iova; + uint64_t memptrs_iova; /* * Register offsets are different between some GPUs. @@ -161,6 +116,8 @@ struct adreno_gpu { * code (a3xx_gpu.c) and stored in this common location. 
 	 */
 	const unsigned int *reg_offsets;
+
+	uint32_t quirks;
 };

 #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
@@ -171,6 +128,7 @@ struct adreno_platform_config {
 #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 	struct msm_bus_scale_pdata *bus_scale_table;
 #endif
+	uint32_t quirks;
 };

 #define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -234,6 +192,11 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
 	return gpu->revn == 430;
 }

+static inline int adreno_is_a530(struct adreno_gpu *gpu)
+{
+	return gpu->revn == 530;
+}
+
 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
 int adreno_hw_init(struct msm_gpu *gpu);
 uint32_t adreno_last_fence(struct msm_gpu *gpu);
@@ -241,7 +204,7 @@ void adreno_recover(struct msm_gpu *gpu);
 void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		struct msm_file_private *ctx);
 void adreno_flush(struct msm_gpu *gpu);
-void adreno_idle(struct msm_gpu *gpu);
+bool adreno_idle(struct msm_gpu *gpu);
 #ifdef CONFIG_DEBUG_FS
 void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
 #endif
@@ -278,8 +241,38 @@ OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
 	OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
 }

+static inline u32 PM4_PARITY(u32 val)
+{
+	return (0x9669 >> (0xF & (val ^
+		(val >> 4) ^ (val >> 8) ^ (val >> 12) ^
+		(val >> 16) ^ ((val) >> 20) ^ (val >> 24) ^
+		(val >> 28)))) & 1;
+}
+
+/* Maximum number of values that can be executed for one opcode */
+#define TYPE4_MAX_PAYLOAD 127
+
+#define PKT4(_reg, _cnt) \
+	(CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
+	(((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))
+
+static inline void
+OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+	adreno_wait_ring(ring->gpu, cnt + 1);
+	OUT_RING(ring, PKT4(regindx, cnt));
+}
+
+static inline void
+OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+	adreno_wait_ring(ring->gpu, cnt + 1);
+	OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
+		((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
+}
+
 /*
- * adreno_checkreg_off() - Checks the validity of a register enum
+ * adreno_reg_check() - Checks the validity of a register enum
  * @gpu:		Pointer to struct adreno_gpu
  * @offset_name:	The register enum that is checked
  */
@@ -290,6 +283,16 @@ static inline bool adreno_reg_check(struct adreno_gpu *gpu,
 		!gpu->reg_offsets[offset_name]) {
 		BUG();
 	}
+
+	/*
+	 * REG_SKIP is a special value that tells us that the register in
+	 * question isn't implemented on the target, but shouldn't trigger a
+	 * BUG(). This is used to cleanly implement adreno_gpu_write64() and
+	 * adreno_gpu_read64() in a generic fashion
+	 */
+	if (gpu->reg_offsets[offset_name] == REG_SKIP)
+		return false;
+
 	return true;
 }

@@ -313,5 +316,37 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,

 struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
+
+static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
+		enum adreno_regs lo, enum adreno_regs hi, u64 data)
+{
+	adreno_gpu_write(gpu, lo, lower_32_bits(data));
+	adreno_gpu_write(gpu, hi, upper_32_bits(data));
+}
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ *
+ * The register base needs to be a multiple of the length.
+ * If it is not, the hardware will quietly mask off the bits for you and
+ * shift the size. For example, if you intend the protection to start at
+ * 0x07 for a length of 4 (0x07-0x0A) the hardware will actually protect
+ * (0x04-0x07) which might expose registers you intended to protect!
+ */
+#define ADRENO_PROTECT_RW(_reg, _len) \
+	((1 << 30) | (1 << 29) | \
+	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with
+ * a single register
+ */
+#define ADRENO_PROTECT_RDONLY(_reg, _len) \
+	((1 << 29) | \
+	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))

 #endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index d7477ff867c9..6a2930e75503 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,13 +8,14 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git

 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2016-04-26 17:56:44)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32907 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 22544 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-11-26 23:01:08)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 110765 bytes, from 2016-11-26 23:01:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 90321 bytes, from 2016-11-28 16:50:05)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)

 Copyright (C) 2013-2016 by the following authors:
@@ -58,6 +59,7 @@ enum vgt_event_type {
 	RST_PIX_CNT = 13,
 	RST_VTX_CNT = 14,
 	TILE_FLUSH = 15,
+	STAT_EVENT = 16,
 	CACHE_FLUSH_AND_INV_TS_EVENT = 20,
 	ZPASS_DONE = 21,
 	CACHE_FLUSH_AND_INV_EVENT = 22,
@@ -65,6 +67,10 @@
 	PERFCOUNTER_STOP = 24,
 	VS_FETCH_DONE = 27,
 	FACENESS_FLUSH = 28,
+	UNK_1C = 28,
+	UNK_1D = 29,
+	BLIT = 30,
+	UNK_26 = 38,
 };

 enum pc_di_primtype {
@@ -82,7 +88,6 @@
 	DI_PT_LINESTRIP_ADJ = 11,
 	DI_PT_TRI_ADJ = 12,
 	DI_PT_TRISTRIP_ADJ = 13,
-	DI_PT_PATCHES = 34,
 };

 enum pc_di_src_sel {
@@ -110,11 +115,15 @@ enum adreno_pm4_packet_type {
 	CP_TYPE1_PKT = 0x40000000,
 	CP_TYPE2_PKT = 0x80000000,
 	CP_TYPE3_PKT = 0xc0000000,
+	CP_TYPE4_PKT = 0x40000000,
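+	/* a5xx uses parity-protected type-4 (register write) and type-7
+	 * (opcode) packets - see PM4_PARITY/PKT4/OUT_PKT7 in adreno_gpu.h */
+	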
CP_TYPE7_PKT = 0x70000000, }; enum adreno_pm4_type3_packets { CP_ME_INIT = 72, CP_NOP = 16, + CP_PREEMPT_ENABLE = 28, + CP_PREEMPT_TOKEN = 30, CP_INDIRECT_BUFFER = 63, CP_INDIRECT_BUFFER_PFD = 55, CP_WAIT_FOR_IDLE = 38, @@ -163,6 +172,7 @@ enum adreno_pm4_type3_packets { CP_TEST_TWO_MEMS = 113, CP_REG_WR_NO_CTXT = 120, CP_RECORD_PFP_TIMESTAMP = 17, + CP_SET_SECURE_MODE = 102, CP_WAIT_FOR_ME = 19, CP_SET_DRAW_STATE = 67, CP_DRAW_INDX_OFFSET = 56, @@ -178,6 +188,22 @@ enum adreno_pm4_type3_packets { CP_WAIT_MEM_WRITES = 18, CP_COND_REG_EXEC = 71, CP_MEM_TO_REG = 66, + CP_EXEC_CS = 51, + CP_PERFCOUNTER_ACTION = 80, + CP_SMMU_TABLE_UPDATE = 83, + CP_CONTEXT_REG_BUNCH = 92, + CP_YIELD_ENABLE = 28, + CP_SKIP_IB2_ENABLE_GLOBAL = 29, + CP_SKIP_IB2_ENABLE_LOCAL = 35, + CP_SET_SUBDRAW_SIZE = 53, + CP_SET_VISIBILITY_OVERRIDE = 100, + CP_PREEMPT_ENABLE_GLOBAL = 105, + CP_PREEMPT_ENABLE_LOCAL = 106, + CP_CONTEXT_SWITCH_YIELD = 107, + CP_SET_RENDER_MODE = 108, + CP_COMPUTE_CHECKPOINT = 110, + CP_MEM_TO_MEM = 115, + CP_BLIT = 44, IN_IB_PREFETCH_END = 23, IN_SUBBLK_PREFETCH = 31, IN_INSTR_PREFETCH = 32, @@ -196,6 +222,7 @@ enum adreno_state_block { SB_VERT_SHADER = 4, SB_GEOM_SHADER = 5, SB_FRAG_SHADER = 6, + SB_COMPUTE_SHADER = 7, }; enum adreno_state_type { @@ -218,6 +245,17 @@ enum a4xx_index_size { INDEX4_SIZE_32_BIT = 2, }; +enum render_mode_cmd { + BYPASS = 1, + GMEM = 3, + BLIT2D = 5, +}; + +enum cp_blit_cmd { + BLIT_OP_FILL = 0, + BLIT_OP_BLIT = 1, +}; + #define REG_CP_LOAD_STATE_0 0x00000000 #define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff #define CP_LOAD_STATE_0_DST_OFF__SHIFT 0 @@ -258,6 +296,14 @@ static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val) return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK; } +#define REG_CP_LOAD_STATE_2 0x00000002 +#define CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__MASK 0xffffffff +#define CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__SHIFT 0 +static inline uint32_t CP_LOAD_STATE_2_EXT_SRC_ADDR_HI(uint32_t val) +{ + return ((val) << CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE_2_EXT_SRC_ADDR_HI__MASK; +} + #define REG_CP_DRAW_INDX_0 0x00000000 #define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff #define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0 @@ -389,7 +435,12 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel va { return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK; } -#define CP_DRAW_INDX_OFFSET_0_TESSELLATE 0x00000100 +#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000300 +#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8 +static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK; +} #define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00 #define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10 static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val) @@ -437,30 +488,40 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val) return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK; } -#define REG_CP_SET_DRAW_STATE_0 0x00000000 -#define CP_SET_DRAW_STATE_0_COUNT__MASK 0x0000ffff -#define CP_SET_DRAW_STATE_0_COUNT__SHIFT 0 -static inline uint32_t CP_SET_DRAW_STATE_0_COUNT(uint32_t val) +static inline uint32_t REG_CP_SET_DRAW_STATE_(uint32_t i0) { return 0x00000000 + 0x3*i0; } + +static inline uint32_t REG_CP_SET_DRAW_STATE__0(uint32_t i0) { return 
0x00000000 + 0x3*i0; } +#define CP_SET_DRAW_STATE__0_COUNT__MASK 0x0000ffff +#define CP_SET_DRAW_STATE__0_COUNT__SHIFT 0 +static inline uint32_t CP_SET_DRAW_STATE__0_COUNT(uint32_t val) { - return ((val) << CP_SET_DRAW_STATE_0_COUNT__SHIFT) & CP_SET_DRAW_STATE_0_COUNT__MASK; + return ((val) << CP_SET_DRAW_STATE__0_COUNT__SHIFT) & CP_SET_DRAW_STATE__0_COUNT__MASK; } -#define CP_SET_DRAW_STATE_0_DIRTY 0x00010000 -#define CP_SET_DRAW_STATE_0_DISABLE 0x00020000 -#define CP_SET_DRAW_STATE_0_DISABLE_ALL_GROUPS 0x00040000 -#define CP_SET_DRAW_STATE_0_LOAD_IMMED 0x00080000 -#define CP_SET_DRAW_STATE_0_GROUP_ID__MASK 0x1f000000 -#define CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT 24 -static inline uint32_t CP_SET_DRAW_STATE_0_GROUP_ID(uint32_t val) +#define CP_SET_DRAW_STATE__0_DIRTY 0x00010000 +#define CP_SET_DRAW_STATE__0_DISABLE 0x00020000 +#define CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS 0x00040000 +#define CP_SET_DRAW_STATE__0_LOAD_IMMED 0x00080000 +#define CP_SET_DRAW_STATE__0_GROUP_ID__MASK 0x1f000000 +#define CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT 24 +static inline uint32_t CP_SET_DRAW_STATE__0_GROUP_ID(uint32_t val) { - return ((val) << CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE_0_GROUP_ID__MASK; + return ((val) << CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE__0_GROUP_ID__MASK; } -#define REG_CP_SET_DRAW_STATE_1 0x00000001 -#define CP_SET_DRAW_STATE_1_ADDR__MASK 0xffffffff -#define CP_SET_DRAW_STATE_1_ADDR__SHIFT 0 -static inline uint32_t CP_SET_DRAW_STATE_1_ADDR(uint32_t val) +static inline uint32_t REG_CP_SET_DRAW_STATE__1(uint32_t i0) { return 0x00000001 + 0x3*i0; } +#define CP_SET_DRAW_STATE__1_ADDR_LO__MASK 0xffffffff +#define CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT 0 +static inline uint32_t CP_SET_DRAW_STATE__1_ADDR_LO(uint32_t val) { - return ((val) << CP_SET_DRAW_STATE_1_ADDR__SHIFT) & CP_SET_DRAW_STATE_1_ADDR__MASK; + return ((val) << CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT) & CP_SET_DRAW_STATE__1_ADDR_LO__MASK; +} + +static inline uint32_t REG_CP_SET_DRAW_STATE__2(uint32_t i0) { return 0x00000002 + 0x3*i0; } +#define CP_SET_DRAW_STATE__2_ADDR_HI__MASK 0xffffffff +#define CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT 0 +static inline uint32_t CP_SET_DRAW_STATE__2_ADDR_HI(uint32_t val) +{ + return ((val) << CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT) & CP_SET_DRAW_STATE__2_ADDR_HI__MASK; } #define REG_CP_SET_BIN_0 0x00000000 @@ -533,5 +594,192 @@ static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val) return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK; } +#define REG_CP_DISPATCH_COMPUTE_0 0x00000000 + +#define REG_CP_DISPATCH_COMPUTE_1 0x00000001 +#define CP_DISPATCH_COMPUTE_1_X__MASK 0xffffffff +#define CP_DISPATCH_COMPUTE_1_X__SHIFT 0 +static inline uint32_t CP_DISPATCH_COMPUTE_1_X(uint32_t val) +{ + return ((val) << CP_DISPATCH_COMPUTE_1_X__SHIFT) & CP_DISPATCH_COMPUTE_1_X__MASK; +} + +#define REG_CP_DISPATCH_COMPUTE_2 0x00000002 +#define CP_DISPATCH_COMPUTE_2_Y__MASK 0xffffffff +#define CP_DISPATCH_COMPUTE_2_Y__SHIFT 0 +static inline uint32_t CP_DISPATCH_COMPUTE_2_Y(uint32_t val) +{ + return ((val) << CP_DISPATCH_COMPUTE_2_Y__SHIFT) & CP_DISPATCH_COMPUTE_2_Y__MASK; +} + +#define REG_CP_DISPATCH_COMPUTE_3 0x00000003 +#define CP_DISPATCH_COMPUTE_3_Z__MASK 0xffffffff +#define CP_DISPATCH_COMPUTE_3_Z__SHIFT 0 +static inline uint32_t CP_DISPATCH_COMPUTE_3_Z(uint32_t val) +{ + return ((val) << CP_DISPATCH_COMPUTE_3_Z__SHIFT) & CP_DISPATCH_COMPUTE_3_Z__MASK; +} + +#define REG_CP_SET_RENDER_MODE_0 0x00000000 +#define CP_SET_RENDER_MODE_0_MODE__MASK 0x000001ff +#define 
CP_SET_RENDER_MODE_0_MODE__SHIFT 0 +static inline uint32_t CP_SET_RENDER_MODE_0_MODE(enum render_mode_cmd val) +{ + return ((val) << CP_SET_RENDER_MODE_0_MODE__SHIFT) & CP_SET_RENDER_MODE_0_MODE__MASK; +} + +#define REG_CP_SET_RENDER_MODE_1 0x00000001 +#define CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK 0xffffffff +#define CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT 0 +static inline uint32_t CP_SET_RENDER_MODE_1_ADDR_0_LO(uint32_t val) +{ + return ((val) << CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT) & CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK; +} + +#define REG_CP_SET_RENDER_MODE_2 0x00000002 +#define CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK 0xffffffff +#define CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT 0 +static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val) +{ + return ((val) << CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT) & CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK; +} + +#define REG_CP_SET_RENDER_MODE_3 0x00000003 +#define CP_SET_RENDER_MODE_3_GMEM_ENABLE 0x00000010 + +#define REG_CP_SET_RENDER_MODE_4 0x00000004 + +#define REG_CP_SET_RENDER_MODE_5 0x00000005 +#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK 0xffffffff +#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT 0 +static inline uint32_t CP_SET_RENDER_MODE_5_ADDR_1_LEN(uint32_t val) +{ + return ((val) << CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT) & CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK; +} + +#define REG_CP_SET_RENDER_MODE_6 0x00000006 +#define CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK 0xffffffff +#define CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT 0 +static inline uint32_t CP_SET_RENDER_MODE_6_ADDR_1_LO(uint32_t val) +{ + return ((val) << CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT) & CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK; +} + +#define REG_CP_SET_RENDER_MODE_7 0x00000007 +#define CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK 0xffffffff +#define CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT 0 +static inline uint32_t CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val) +{ + return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK; +} + +#define REG_CP_PERFCOUNTER_ACTION_0 0x00000000 + +#define REG_CP_PERFCOUNTER_ACTION_1 0x00000001 +#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK 0xffffffff +#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT 0 +static inline uint32_t CP_PERFCOUNTER_ACTION_1_ADDR_0_LO(uint32_t val) +{ + return ((val) << CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT) & CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK; +} + +#define REG_CP_PERFCOUNTER_ACTION_2 0x00000002 +#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK 0xffffffff +#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT 0 +static inline uint32_t CP_PERFCOUNTER_ACTION_2_ADDR_0_HI(uint32_t val) +{ + return ((val) << CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT) & CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK; +} + +#define REG_CP_EVENT_WRITE_0 0x00000000 +#define CP_EVENT_WRITE_0_EVENT__MASK 0x000000ff +#define CP_EVENT_WRITE_0_EVENT__SHIFT 0 +static inline uint32_t CP_EVENT_WRITE_0_EVENT(enum vgt_event_type val) +{ + return ((val) << CP_EVENT_WRITE_0_EVENT__SHIFT) & CP_EVENT_WRITE_0_EVENT__MASK; +} + +#define REG_CP_EVENT_WRITE_1 0x00000001 +#define CP_EVENT_WRITE_1_ADDR_0_LO__MASK 0xffffffff +#define CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT 0 +static inline uint32_t CP_EVENT_WRITE_1_ADDR_0_LO(uint32_t val) +{ + return ((val) << CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT) & CP_EVENT_WRITE_1_ADDR_0_LO__MASK; +} + +#define REG_CP_EVENT_WRITE_2 0x00000002 +#define CP_EVENT_WRITE_2_ADDR_0_HI__MASK 0xffffffff +#define CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT 0 +static inline uint32_t CP_EVENT_WRITE_2_ADDR_0_HI(uint32_t val) +{ + return ((val) << 
CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT) & CP_EVENT_WRITE_2_ADDR_0_HI__MASK; +} + +#define REG_CP_EVENT_WRITE_3 0x00000003 + +#define REG_CP_BLIT_0 0x00000000 +#define CP_BLIT_0_OP__MASK 0x0000000f +#define CP_BLIT_0_OP__SHIFT 0 +static inline uint32_t CP_BLIT_0_OP(enum cp_blit_cmd val) +{ + return ((val) << CP_BLIT_0_OP__SHIFT) & CP_BLIT_0_OP__MASK; +} + +#define REG_CP_BLIT_1 0x00000001 +#define CP_BLIT_1_SRC_X1__MASK 0x0000ffff +#define CP_BLIT_1_SRC_X1__SHIFT 0 +static inline uint32_t CP_BLIT_1_SRC_X1(uint32_t val) +{ + return ((val) << CP_BLIT_1_SRC_X1__SHIFT) & CP_BLIT_1_SRC_X1__MASK; +} +#define CP_BLIT_1_SRC_Y1__MASK 0xffff0000 +#define CP_BLIT_1_SRC_Y1__SHIFT 16 +static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val) +{ + return ((val) << CP_BLIT_1_SRC_Y1__SHIFT) & CP_BLIT_1_SRC_Y1__MASK; +} + +#define REG_CP_BLIT_2 0x00000002 +#define CP_BLIT_2_SRC_X2__MASK 0x0000ffff +#define CP_BLIT_2_SRC_X2__SHIFT 0 +static inline uint32_t CP_BLIT_2_SRC_X2(uint32_t val) +{ + return ((val) << CP_BLIT_2_SRC_X2__SHIFT) & CP_BLIT_2_SRC_X2__MASK; +} +#define CP_BLIT_2_SRC_Y2__MASK 0xffff0000 +#define CP_BLIT_2_SRC_Y2__SHIFT 16 +static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val) +{ + return ((val) << CP_BLIT_2_SRC_Y2__SHIFT) & CP_BLIT_2_SRC_Y2__MASK; +} + +#define REG_CP_BLIT_3 0x00000003 +#define CP_BLIT_3_DST_X1__MASK 0x0000ffff +#define CP_BLIT_3_DST_X1__SHIFT 0 +static inline uint32_t CP_BLIT_3_DST_X1(uint32_t val) +{ + return ((val) << CP_BLIT_3_DST_X1__SHIFT) & CP_BLIT_3_DST_X1__MASK; +} +#define CP_BLIT_3_DST_Y1__MASK 0xffff0000 +#define CP_BLIT_3_DST_Y1__SHIFT 16 +static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val) +{ + return ((val) << CP_BLIT_3_DST_Y1__SHIFT) & CP_BLIT_3_DST_Y1__MASK; +} + +#define REG_CP_BLIT_4 0x00000004 +#define CP_BLIT_4_DST_X2__MASK 0x0000ffff +#define CP_BLIT_4_DST_X2__SHIFT 0 +static inline uint32_t CP_BLIT_4_DST_X2(uint32_t val) +{ + return ((val) << CP_BLIT_4_DST_X2__SHIFT) & CP_BLIT_4_DST_X2__MASK; +} +#define CP_BLIT_4_DST_Y2__MASK 0xffff0000 +#define CP_BLIT_4_DST_Y2__SHIFT 16 +static inline uint32_t CP_BLIT_4_DST_Y2(uint32_t val) +{ + return ((val) << CP_BLIT_4_DST_Y2__SHIFT) & CP_BLIT_4_DST_Y2__MASK; +} + #endif /* ADRENO_PM4_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index 4958594d5266..39dff7d5e89b 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index f05ed0e1f3d6..3819fdefcae2 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -139,6 +139,7 @@ struct 
msm_dsi_host { u32 err_work_state; struct work_struct err_work; + struct work_struct hpd_work; struct workqueue_struct *workqueue; /* DSI 6G TX buffer*/ @@ -981,7 +982,7 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size) struct drm_device *dev = msm_host->dev; const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; int ret; - u32 iova; + uint64_t iova; if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { mutex_lock(&dev->struct_mutex); @@ -1146,7 +1147,7 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len) { const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; int ret; - u32 dma_base; + uint64_t dma_base; bool triggered; if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { @@ -1294,6 +1295,14 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host) wmb(); /* make sure dsi controller enabled again */ } +static void dsi_hpd_worker(struct work_struct *work) +{ + struct msm_dsi_host *msm_host = + container_of(work, struct msm_dsi_host, hpd_work); + + drm_helper_hpd_irq_event(msm_host->dev); +} + static void dsi_err_worker(struct work_struct *work) { struct msm_dsi_host *msm_host = @@ -1480,7 +1489,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host, DBG("id=%d", msm_host->id); if (msm_host->dev) - drm_helper_hpd_irq_event(msm_host->dev); + queue_work(msm_host->workqueue, &msm_host->hpd_work); return 0; } @@ -1494,7 +1503,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host, DBG("id=%d", msm_host->id); if (msm_host->dev) - drm_helper_hpd_irq_event(msm_host->dev); + queue_work(msm_host->workqueue, &msm_host->hpd_work); return 0; } @@ -1748,6 +1757,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) /* setup workqueue */ msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0); INIT_WORK(&msm_host->err_work, dsi_err_worker); + INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker); msm_dsi->host = &msm_host->base; msm_dsi->id = msm_host->id; diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h index 2d999494cdea..8b9f3ebaeba7 100644 --- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h +++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h @@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c index 598fdaff0a41..26e3a01a99c2 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c @@ -521,6 +521,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm) .parent_names = (const char *[]){ "xo" }, .num_parents = 1, .name = vco_name, + .flags = CLK_IGNORE_UNUSED, .ops = &clk_ops_dsi_pll_28nm_vco, }; struct device *dev = 
&pll_28nm->pdev->dev; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c index 38c90e1eb002..49008451085b 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c @@ -412,6 +412,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm) struct clk_init_data vco_init = { .parent_names = (const char *[]){ "pxo" }, .num_parents = 1, + .flags = CLK_IGNORE_UNUSED, .ops = &clk_ops_dsi_pll_28nm_vco, }; struct device *dev = &pll_28nm->pdev->dev; diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h index 506434fac993..3fcbb30dc241 100644 --- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h +++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h @@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h index f1072c18c81e..d7bf3232dc88 100644 --- a/drivers/gpu/drm/msm/edp/edp.xml.h +++ b/drivers/gpu/drm/msm/edp/edp.xml.h @@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h index 34c7df6549c1..0a97ff75ed6f 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h @@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) +- 
/home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c index aa94a553794f..143eab46ba68 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c @@ -702,6 +702,7 @@ static struct clk_init_data pll_init = { .ops = &hdmi_8996_pll_ops, .parent_names = hdmi_pll_parents, .num_parents = ARRAY_SIZE(hdmi_pll_parents), + .flags = CLK_IGNORE_UNUSED, }; int msm_hdmi_pll_8996_init(struct platform_device *pdev) diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c index 92da69aa6187..99590758c68b 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c @@ -424,6 +424,7 @@ static struct clk_init_data pll_init = { .ops = &hdmi_pll_ops, .parent_names = hdmi_pll_parents, .num_parents = ARRAY_SIZE(hdmi_pll_parents), + .flags = CLK_IGNORE_UNUSED, }; int msm_hdmi_pll_8960_init(struct platform_device *pdev) diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h index 6eab7d0cf6b5..1b996ede7a65 100644 --- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h +++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h @@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h index 6688e79cc88e..88037889589b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h @@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - 
/home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 9527dafc3e69..1c29618f4ddb 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -373,7 +373,7 @@ static void update_cursor(struct drm_crtc *crtc) if (mdp4_crtc->cursor.stale) { struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo; struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo; - uint32_t iova = mdp4_crtc->cursor.next_iova; + uint64_t iova = mdp4_crtc->cursor.next_iova; if (next_bo) { /* take a obj ref + iova ref when we start scanning out: */ @@ -418,7 +418,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_gem_object *cursor_bo, *old_bo; unsigned long flags; - uint32_t iova; + uint64_t iova; int ret; if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 571a91ee9607..b782efd4b95f 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -17,6 +17,7 @@ #include "msm_drv.h" +#include "msm_gem.h" #include "msm_mmu.h" #include "mdp4_kms.h" @@ -159,17 +160,18 @@ static void mdp4_destroy(struct msm_kms *kms) { struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); struct device *dev = mdp4_kms->dev->dev; - struct msm_mmu *mmu = mdp4_kms->mmu; - - if (mmu) { - mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); - mmu->funcs->destroy(mmu); - } + struct msm_gem_address_space *aspace = mdp4_kms->aspace; if (mdp4_kms->blank_cursor_iova) msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id); drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo); + if (aspace) { + aspace->mmu->funcs->detach(aspace->mmu, + iommu_ports, ARRAY_SIZE(iommu_ports)); + msm_gem_address_space_destroy(aspace); + } + if (mdp4_kms->rpm_enabled) pm_runtime_disable(dev); @@ -440,7 +442,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) struct mdp4_platform_config *config = mdp4_get_config(pdev); struct mdp4_kms *mdp4_kms; struct msm_kms *kms = NULL; - struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; int irq, ret; mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); @@ -531,24 +533,26 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) mdelay(16); if (config->iommu) { - mmu = msm_iommu_new(&pdev->dev, config->iommu); - if (IS_ERR(mmu)) { - ret = PTR_ERR(mmu); + aspace = msm_gem_address_space_create(&pdev->dev, + config->iommu, "mdp4"); + if (IS_ERR(aspace)) { + ret = PTR_ERR(aspace); goto fail; } - ret = mmu->funcs->attach(mmu, iommu_ports, + + mdp4_kms->aspace = aspace; + + ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); if (ret) goto fail; - - mdp4_kms->mmu = mmu; } else { dev_info(dev->dev, "no iommu, fallback to phys " "contig buffers for scanout\n"); - mmu = NULL; + aspace = NULL; } - mdp4_kms->id = msm_register_mmu(dev, mmu); + mdp4_kms->id = msm_register_address_space(dev, aspace); if (mdp4_kms->id < 0) { ret = mdp4_kms->id; dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret); @@ -598,6 +602,10 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev) /* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */ config.max_clk = 266667000; config.iommu = iommu_domain_alloc(&platform_bus_type); + if (config.iommu) { + 
config.iommu->geometry.aperture_start = 0x1000; + config.iommu->geometry.aperture_end = 0xffffffff; + } return &config; } diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index 25fb83997119..62712ca164ee 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h @@ -43,7 +43,7 @@ struct mdp4_kms { struct clk *pclk; struct clk *lut_clk; struct clk *axi_clk; - struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; struct mdp_irq error_handler; @@ -51,7 +51,7 @@ struct mdp4_kms { /* empty/blank cursor bo to use when cursor is "disabled" */ struct drm_gem_object *blank_cursor_bo; - uint32_t blank_cursor_iova; + uint64_t blank_cursor_iova; }; #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 3903dbcda763..911e4690d36a 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c @@ -40,7 +40,7 @@ enum mdp4_frame_format mdp4_get_frame_format(struct drm_framebuffer *fb) { bool is_tile = false; - if (fb->modifier[1] == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE) + if (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE) is_tile = true; if (fb->pixel_format == DRM_FORMAT_NV12 && is_tile) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h index ca6ca30650a0..27d5371acee0 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h @@ -8,9 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-05-10 05:06:30) -- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54) -- /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2016-01-07 08:45:55) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) Copyright (C) 2013-2016 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index ac9e4cde1380..618b2ffed9b4 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c @@ -272,7 +272,7 @@ 
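An editor's note on the iommu_domain geometry hunks (mdp4_get_config() just above, with an identical addition in mdp5_get_config() later in this file's diff): starting the aperture at 0x1000 keeps page 0 out of the allocatable IOVA range, so iova 0 stays usable as a "not mapped" sentinel and NULL-like DMA addresses fault instead of landing on a live mapping. A minimal standalone sketch of the same pattern, with a function name invented for illustration and assuming <linux/iommu.h> plus the platform bus type:

static struct iommu_domain *scanout_domain_alloc(void)
{
	struct iommu_domain *domain;

	/* one domain for display scanout on the platform bus */
	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return NULL;	/* caller falls back to phys-contig buffers */

	/* carve out page 0; 32-bit display hardware bounds the top end */
	domain->geometry.aperture_start = 0x1000;
	domain->geometry.aperture_end = 0xffffffff;

	return domain;
}

The iova allocator behind msm_gem_address_space_create() then only hands out addresses inside that window.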
const struct mdp5_cfg_hw msm8x16_config = { .count = 2, .base = { 0x14000, 0x16000 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, + MDP_PIPE_CAP_DECIMATION, }, .pipe_dma = { .count = 1, @@ -282,7 +282,7 @@ const struct mdp5_cfg_hw msm8x16_config = { .lm = { .count = 2, /* LM0 and LM3 */ .base = { 0x44000, 0x47000 }, - .nb_stages = 5, + .nb_stages = 8, .max_width = 2048, .max_height = 0xFFFF, }, @@ -550,6 +550,10 @@ static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev) static struct mdp5_cfg_platform config = {}; config.iommu = iommu_domain_alloc(&platform_bus_type); + if (config.iommu) { + config.iommu->geometry.aperture_start = 0x1000; + config.iommu->geometry.aperture_end = 0xffffffff; + } return &config; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index fa2be7ce9468..1ce8a01a5a28 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -27,11 +27,8 @@ #define CURSOR_WIDTH 64 #define CURSOR_HEIGHT 64 -#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */ - struct mdp5_crtc { struct drm_crtc base; - char name[8]; int id; bool enabled; @@ -102,7 +99,7 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask); + DBG("%s: flush=%08x", crtc->name, flush_mask); return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask); } @@ -136,7 +133,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_pending_vblank_event *event; - struct drm_plane *plane; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); @@ -148,16 +144,12 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) */ if (!file || (event->base.file_priv == file)) { mdp5_crtc->event = NULL; - DBG("%s: send event: %p", mdp5_crtc->name, event); + DBG("%s: send event: %p", crtc->name, event); drm_crtc_send_vblank_event(crtc, event); } } spin_unlock_irqrestore(&dev->event_lock, flags); - drm_atomic_crtc_for_each_plane(plane, crtc) { - mdp5_plane_complete_flip(plane); - } - if (mdp5_crtc->ctl && !crtc->state->enable) { /* set STAGE_UNUSED for all layers */ mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0); @@ -223,12 +215,7 @@ static void blend_setup(struct drm_crtc *crtc) plane_cnt++; } - /* - * If there is no base layer, enable border color. - * Although it's not possbile in current blend logic, - * put it here as a reminder. 
- */ - if (!pstates[STAGE_BASE] && plane_cnt) { + if (!pstates[STAGE_BASE]) { ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; DBG("Border Color is enabled"); } @@ -300,7 +287,7 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc) mode = &crtc->state->adjusted_mode; DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", - mdp5_crtc->name, mode->base.id, mode->name, + crtc->name, mode->base.id, mode->name, mode->vrefresh, mode->clock, mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal, @@ -320,7 +307,7 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc) struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_kms *mdp5_kms = get_kms(crtc); - DBG("%s", mdp5_crtc->name); + DBG("%s", crtc->name); if (WARN_ON(!mdp5_crtc->enabled)) return; @@ -339,7 +326,7 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc) struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_kms *mdp5_kms = get_kms(crtc); - DBG("%s", mdp5_crtc->name); + DBG("%s", crtc->name); if (WARN_ON(mdp5_crtc->enabled)) return; @@ -365,31 +352,29 @@ static int pstate_cmp(const void *a, const void *b) return pa->state->zpos - pb->state->zpos; } +/* is there a helper for this? */ +static bool is_fullscreen(struct drm_crtc_state *cstate, + struct drm_plane_state *pstate) +{ + return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) && + ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) && + ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay); +} + static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_kms *mdp5_kms = get_kms(crtc); struct drm_plane *plane; struct drm_device *dev = crtc->dev; struct plane_state pstates[STAGE_MAX + 1]; const struct mdp5_cfg_hw *hw_cfg; const struct drm_plane_state *pstate; - int cnt = 0, i; + int cnt = 0, base = 0, i; - DBG("%s: check", mdp5_crtc->name); + DBG("%s: check", crtc->name); - /* verify that there are not too many planes attached to crtc - * and that we don't have conflicting mixer stages: - */ - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { - if (cnt >= (hw_cfg->lm.nb_stages)) { - dev_err(dev->dev, "too many planes!\n"); - return -EINVAL; - } - - pstates[cnt].plane = plane; pstates[cnt].state = to_mdp5_plane_state(pstate); @@ -399,10 +384,26 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, /* assign a stage based on sorted zpos property */ sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); + /* if the bottom-most layer is not fullscreen, we need to use + * it for solid-color: + */ + if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base)) + base++; + + /* verify that there are not too many planes attached to crtc + * and that we don't have conflicting mixer stages: + */ + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + + if ((cnt + base) >= hw_cfg->lm.nb_stages) { + dev_err(dev->dev, "too many planes! 
cnt=%d, base=%d\n", cnt, base); + return -EINVAL; + } + for (i = 0; i < cnt; i++) { - pstates[i].state->stage = STAGE_BASE + i; - DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name, - pipe2name(mdp5_plane_pipe(pstates[i].plane)), + pstates[i].state->stage = STAGE_BASE + i + base; + DBG("%s: assign pipe %s on stage=%d", crtc->name, + pstates[i].plane->name, pstates[i].state->stage); } @@ -412,8 +413,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - DBG("%s: begin", mdp5_crtc->name); + DBG("%s: begin", crtc->name); } static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc, @@ -423,7 +423,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; unsigned long flags; - DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event); + DBG("%s: event: %p", crtc->name, crtc->state->event); WARN_ON(mdp5_crtc->event); @@ -489,7 +489,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct mdp5_kms *mdp5_kms = get_kms(crtc); struct drm_gem_object *cursor_bo, *old_bo = NULL; - uint32_t blendcfg, cursor_addr, stride; + uint32_t blendcfg, stride; + uint64_t cursor_addr; int ret, lm; enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); @@ -643,7 +644,7 @@ static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) { struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err); - DBG("%s: error: %08x", mdp5_crtc->name, irqstatus); + DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus); } static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus) @@ -765,9 +766,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; mdp5_crtc->err.irq = mdp5_crtc_err_irq; - snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d", - pipe2name(mdp5_plane_pipe(plane)), id); - drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs, NULL); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c index 5c5940db898e..3ce8b9dec9c1 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c @@ -41,6 +41,8 @@ static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) if (dumpstate && __ratelimit(&rs)) { struct drm_printer p = drm_info_printer(mdp5_kms->dev->dev); drm_state_dump(mdp5_kms->dev, &p); + if (mdp5_kms->smp) + mdp5_smp_dump(mdp5_kms->smp, &p); } } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index ed7143d35b25..5f6cd8745dbc 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -19,6 +19,7 @@ #include <linux/of_irq.h> #include "msm_drv.h" +#include "msm_gem.h" #include "msm_mmu.h" #include "mdp5_kms.h" @@ -71,10 +72,49 @@ static int mdp5_hw_init(struct msm_kms *kms) return 0; } +struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct msm_kms_state *state = to_kms_state(s); + struct mdp5_state *new_state; + int ret; + + if (state->state) + return state->state; + + ret = drm_modeset_lock(&mdp5_kms->state_lock, s->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + new_state = 
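/* editor's annotation: each atomic commit duplicates the global state under state_lock, so concurrent commits serialize; mdp5_swap_state() below installs the copy as the new canonical mdp5_kms->state at swap time */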
kmalloc(sizeof(*mdp5_kms->state), GFP_KERNEL); + if (!new_state) + return ERR_PTR(-ENOMEM); + + /* Copy state: */ + new_state->hwpipe = mdp5_kms->state->hwpipe; + if (mdp5_kms->smp) + new_state->smp = mdp5_kms->state->smp; + + state->state = new_state; + + return new_state; +} + +static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + swap(to_kms_state(state)->state, mdp5_kms->state); +} + static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + mdp5_enable(mdp5_kms); + + if (mdp5_kms->smp) + mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp); } static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) @@ -87,6 +127,9 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s for_each_plane_in_state(state, plane, plane_state, i) mdp5_plane_complete_commit(plane, plane_state); + if (mdp5_kms->smp) + mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp); + mdp5_disable(mdp5_kms); } @@ -117,14 +160,66 @@ static int mdp5_set_split_display(struct msm_kms *kms, static void mdp5_kms_destroy(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct msm_mmu *mmu = mdp5_kms->mmu; + struct msm_gem_address_space *aspace = mdp5_kms->aspace; + int i; + + for (i = 0; i < mdp5_kms->num_hwpipes; i++) + mdp5_pipe_destroy(mdp5_kms->hwpipes[i]); - if (mmu) { - mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); - mmu->funcs->destroy(mmu); + if (aspace) { + aspace->mmu->funcs->detach(aspace->mmu, + iommu_ports, ARRAY_SIZE(iommu_ports)); + msm_gem_address_space_destroy(aspace); } } +#ifdef CONFIG_DEBUG_FS +static int smp_show(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct msm_drm_private *priv = dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct drm_printer p = drm_seq_file_printer(m); + + if (!mdp5_kms->smp) { + drm_printf(&p, "no SMP pool\n"); + return 0; + } + + mdp5_smp_dump(mdp5_kms->smp, &p); + + return 0; +} + +static struct drm_info_list mdp5_debugfs_list[] = { + {"smp", smp_show }, +}; + +static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + int ret; + + ret = drm_debugfs_create_files(mdp5_debugfs_list, + ARRAY_SIZE(mdp5_debugfs_list), + minor->debugfs_root, minor); + + if (ret) { + dev_err(dev->dev, "could not install mdp5_debugfs_list\n"); + return ret; + } + + return 0; +} + +static void mdp5_kms_debugfs_cleanup(struct msm_kms *kms, struct drm_minor *minor) +{ + drm_debugfs_remove_files(mdp5_debugfs_list, + ARRAY_SIZE(mdp5_debugfs_list), minor); +} +#endif + static const struct mdp_kms_funcs kms_funcs = { .base = { .hw_init = mdp5_hw_init, @@ -134,6 +229,7 @@ static const struct mdp_kms_funcs kms_funcs = { .irq = mdp5_irq, .enable_vblank = mdp5_enable_vblank, .disable_vblank = mdp5_disable_vblank, + .swap_state = mdp5_swap_state, .prepare_commit = mdp5_prepare_commit, .complete_commit = mdp5_complete_commit, .wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done, @@ -141,6 +237,10 @@ static const struct mdp_kms_funcs kms_funcs = { .round_pixclk = mdp5_round_pixclk, .set_split_display = mdp5_set_split_display, .destroy = mdp5_kms_destroy, +#ifdef CONFIG_DEBUG_FS + .debugfs_init = 
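/* editor's note: CONFIG_DEBUG_FS only; this registers the "smp" entry under <debugfs>/dri/<minor>/ so the same mdp5_smp_dump() output used by the error-irq path above can be read on demand */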
mdp5_kms_debugfs_init, + .debugfs_cleanup = mdp5_kms_debugfs_cleanup, +#endif }, .set_irqmask = mdp5_set_irqmask, }; @@ -321,15 +421,6 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) static int modeset_init(struct mdp5_kms *mdp5_kms) { - static const enum mdp5_pipe crtcs[] = { - SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, - }; - static const enum mdp5_pipe vig_planes[] = { - SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3, - }; - static const enum mdp5_pipe dma_planes[] = { - SSPP_DMA0, SSPP_DMA1, - }; struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; const struct mdp5_cfg_hw *hw_cfg; @@ -337,58 +428,35 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - /* construct CRTCs and their private planes: */ - for (i = 0; i < hw_cfg->pipe_rgb.count; i++) { + /* Construct planes equaling the number of hw pipes, and CRTCs + * for the N layer-mixers (LM). The first N planes become primary + * planes for the CRTCs, with the remainder as overlay planes: + */ + for (i = 0; i < mdp5_kms->num_hwpipes; i++) { + bool primary = i < mdp5_cfg->lm.count; struct drm_plane *plane; struct drm_crtc *crtc; - plane = mdp5_plane_init(dev, crtcs[i], true, - hw_cfg->pipe_rgb.base[i], hw_cfg->pipe_rgb.caps); + plane = mdp5_plane_init(dev, primary); if (IS_ERR(plane)) { ret = PTR_ERR(plane); - dev_err(dev->dev, "failed to construct plane for %s (%d)\n", - pipe2name(crtcs[i]), ret); + dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret); goto fail; } + priv->planes[priv->num_planes++] = plane; + + if (!primary) + continue; crtc = mdp5_crtc_init(dev, plane, i); if (IS_ERR(crtc)) { ret = PTR_ERR(crtc); - dev_err(dev->dev, "failed to construct crtc for %s (%d)\n", - pipe2name(crtcs[i]), ret); + dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret); goto fail; } priv->crtcs[priv->num_crtcs++] = crtc; } - /* Construct video planes: */ - for (i = 0; i < hw_cfg->pipe_vig.count; i++) { - struct drm_plane *plane; - - plane = mdp5_plane_init(dev, vig_planes[i], false, - hw_cfg->pipe_vig.base[i], hw_cfg->pipe_vig.caps); - if (IS_ERR(plane)) { - ret = PTR_ERR(plane); - dev_err(dev->dev, "failed to construct %s plane: %d\n", - pipe2name(vig_planes[i]), ret); - goto fail; - } - } - - /* DMA planes */ - for (i = 0; i < hw_cfg->pipe_dma.count; i++) { - struct drm_plane *plane; - - plane = mdp5_plane_init(dev, dma_planes[i], false, - hw_cfg->pipe_dma.base[i], hw_cfg->pipe_dma.caps); - if (IS_ERR(plane)) { - ret = PTR_ERR(plane); - dev_err(dev->dev, "failed to construct %s plane: %d\n", - pipe2name(dma_planes[i]), ret); - goto fail; - } - } - /* Construct encoders and modeset initialize connector devices * for each external display interface. 
*/ @@ -564,7 +632,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) struct mdp5_kms *mdp5_kms; struct mdp5_cfg *config; struct msm_kms *kms; - struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; int irq, i, ret; /* priv->kms would have been populated by the MDP5 driver */ @@ -606,30 +674,29 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) mdelay(16); if (config->platform.iommu) { - mmu = msm_iommu_new(&pdev->dev, config->platform.iommu); - if (IS_ERR(mmu)) { - ret = PTR_ERR(mmu); - dev_err(&pdev->dev, "failed to init iommu: %d\n", ret); - iommu_domain_free(config->platform.iommu); + aspace = msm_gem_address_space_create(&pdev->dev, + config->platform.iommu, "mdp5"); + if (IS_ERR(aspace)) { + ret = PTR_ERR(aspace); goto fail; } - ret = mmu->funcs->attach(mmu, iommu_ports, + mdp5_kms->aspace = aspace; + + ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); if (ret) { dev_err(&pdev->dev, "failed to attach iommu: %d\n", ret); - mmu->funcs->destroy(mmu); goto fail; } } else { dev_info(&pdev->dev, "no iommu, fallback to phys contig buffers for scanout\n"); - mmu = NULL; + aspace = NULL; } - mdp5_kms->mmu = mmu; - mdp5_kms->id = msm_register_mmu(dev, mmu); + mdp5_kms->id = msm_register_address_space(dev, aspace); if (mdp5_kms->id < 0) { ret = mdp5_kms->id; dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret); @@ -644,8 +711,8 @@ dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; - dev->mode_config.max_width = config->hw->lm.max_width; - dev->mode_config.max_height = config->hw->lm.max_height; + dev->mode_config.max_width = 0xffff; + dev->mode_config.max_height = 0xffff; dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp; dev->driver->get_scanout_position = mdp5_get_scanoutpos; @@ -673,6 +740,69 @@ static void mdp5_destroy(struct platform_device *pdev) if (mdp5_kms->rpm_enabled) pm_runtime_disable(&pdev->dev); + + kfree(mdp5_kms->state); +} + +static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt, + const enum mdp5_pipe *pipes, const uint32_t *offsets, + uint32_t caps) +{ + struct drm_device *dev = mdp5_kms->dev; + int i, ret; + + for (i = 0; i < cnt; i++) { + struct mdp5_hw_pipe *hwpipe; + + hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps); + if (IS_ERR(hwpipe)) { + ret = PTR_ERR(hwpipe); + dev_err(dev->dev, "failed to construct pipe for %s (%d)\n", + pipe2name(pipes[i]), ret); + return ret; + } + hwpipe->idx = mdp5_kms->num_hwpipes; + mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe; + } + + return 0; +} + +static int hwpipe_init(struct mdp5_kms *mdp5_kms) +{ + static const enum mdp5_pipe rgb_planes[] = { + SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, + }; + static const enum mdp5_pipe vig_planes[] = { + SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3, + }; + static const enum mdp5_pipe dma_planes[] = { + SSPP_DMA0, SSPP_DMA1, + }; + const struct mdp5_cfg_hw *hw_cfg; + int ret; + + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + + /* Construct RGB pipes: */ + ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes, + hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps); + if (ret) + return ret; + + /* Construct video (VIG) pipes: */ + ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes, + hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps); + if (ret) + return ret; + + /* Construct DMA pipes: */ + ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes, + hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps); + if (ret) + 
return ret; + + return 0; } static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) @@ -696,6 +826,13 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) mdp5_kms->dev = dev; mdp5_kms->pdev = pdev; + drm_modeset_lock_init(&mdp5_kms->state_lock); + mdp5_kms->state = kzalloc(sizeof(*mdp5_kms->state), GFP_KERNEL); + if (!mdp5_kms->state) { + ret = -ENOMEM; + goto fail; + } + mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); if (IS_ERR(mdp5_kms->mmio)) { ret = PTR_ERR(mdp5_kms->mmio); @@ -749,7 +886,7 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) * this section initializes the SMP: */ if (mdp5_kms->caps & MDP_CAP_SMP) { - mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp); + mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp); if (IS_ERR(mdp5_kms->smp)) { ret = PTR_ERR(mdp5_kms->smp); mdp5_kms->smp = NULL; @@ -764,6 +901,10 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) goto fail; } + ret = hwpipe_init(mdp5_kms); + if (ret) + goto fail; + /* set uninit-ed kms */ priv->kms = &mdp5_kms->base.base; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index c6fbcfad2d59..17b0cc101171 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -24,8 +24,11 @@ #include "mdp5_cfg.h" /* must be included before mdp5.xml.h */ #include "mdp5.xml.h" #include "mdp5_ctl.h" +#include "mdp5_pipe.h" #include "mdp5_smp.h" +struct mdp5_state; + struct mdp5_kms { struct mdp_kms base; @@ -33,13 +36,21 @@ struct mdp5_kms { struct platform_device *pdev; + unsigned num_hwpipes; + struct mdp5_hw_pipe *hwpipes[SSPP_MAX]; + struct mdp5_cfg_handler *cfg; uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */ + /** + * Global atomic state. Do not access directly, use mdp5_get_state() + */ + struct mdp5_state *state; + struct drm_modeset_lock state_lock; /* mapper-id used to request GEM buffer mapped for scanout: */ int id; - struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; struct mdp5_smp *smp; struct mdp5_ctl_manager *ctlm; @@ -65,9 +76,27 @@ struct mdp5_kms { }; #define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base) +/* Global atomic state for tracking resources that are shared across + * multiple kms objects (planes/crtcs/etc). + * + * For atomic updates which require modifying global state, + */ +struct mdp5_state { + struct mdp5_hw_pipe_state hwpipe; + struct mdp5_smp_state smp; +}; + +struct mdp5_state *__must_check +mdp5_get_state(struct drm_atomic_state *s); + +/* Atomic plane state. Subclasses the base drm_plane_state in order to + * track assigned hwpipe and hw specific state. 
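+ * (Editor's note: because the hwpipe pointer lives in duplicated plane state, the old and new state of one plane can reference different hw pipes within a commit, which is what lets mdp5_plane_atomic_check() swap pipes through mdp5_pipe_assign()/mdp5_pipe_release().)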
+ */ struct mdp5_plane_state { struct drm_plane_state base; + struct mdp5_hw_pipe *hwpipe; + /* aligned with property */ uint8_t premultiplied; uint8_t zpos; @@ -76,11 +105,6 @@ struct mdp5_plane_state { /* assigned by crtc blender */ enum mdp_mixer_stage_id stage; - /* some additional transactional status to help us know in the - * apply path whether we need to update SMP allocation, and - * whether current update is still pending: - */ - bool mode_changed : 1; bool pending : 1; }; #define to_mdp5_plane_state(x) \ @@ -208,13 +232,10 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms); void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); uint32_t mdp5_plane_get_flush(struct drm_plane *plane); -void mdp5_plane_complete_flip(struct drm_plane *plane); void mdp5_plane_complete_commit(struct drm_plane *plane, struct drm_plane_state *state); enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); -struct drm_plane *mdp5_plane_init(struct drm_device *dev, - enum mdp5_pipe pipe, bool private_plane, - uint32_t reg_offset, uint32_t caps); +struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c new file mode 100644 index 000000000000..1ae9dc8d260d --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c @@ -0,0 +1,133 @@ +/* + * Copyright (C) 2016 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "mdp5_kms.h" + +struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s, + struct drm_plane *plane, uint32_t caps, uint32_t blkcfg) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct mdp5_state *state; + struct mdp5_hw_pipe_state *old_state, *new_state; + struct mdp5_hw_pipe *hwpipe = NULL; + int i; + + state = mdp5_get_state(s); + if (IS_ERR(state)) + return ERR_CAST(state); + + /* grab old_state after mdp5_get_state(), since now we hold lock: */ + old_state = &mdp5_kms->state->hwpipe; + new_state = &state->hwpipe; + + for (i = 0; i < mdp5_kms->num_hwpipes; i++) { + struct mdp5_hw_pipe *cur = mdp5_kms->hwpipes[i]; + + /* skip if already in-use.. 
check both new and old state, + * since we cannot immediately re-use a pipe that is + * released in the current update in some cases: + * (1) mdp5 can have SMP (non-double-buffered) + * (2) hw pipe previously assigned to different CRTC + * (vblanks might not be aligned) + */ + if (new_state->hwpipe_to_plane[cur->idx] || + old_state->hwpipe_to_plane[cur->idx]) + continue; + + /* skip if doesn't support some required caps: */ + if (caps & ~cur->caps) + continue; + + /* possible candidate, take the one with the + * fewest unneeded caps bits set: + */ + if (!hwpipe || (hweight_long(cur->caps & ~caps) < + hweight_long(hwpipe->caps & ~caps))) + hwpipe = cur; + } + + if (!hwpipe) + return ERR_PTR(-ENOMEM); + + if (mdp5_kms->smp) { + int ret; + + DBG("%s: alloc SMP blocks", hwpipe->name); + ret = mdp5_smp_assign(mdp5_kms->smp, &state->smp, + hwpipe->pipe, blkcfg); + if (ret) + return ERR_PTR(-ENOMEM); + + hwpipe->blkcfg = blkcfg; + } + + DBG("%s: assign to plane %s for caps %x", + hwpipe->name, plane->name, caps); + new_state->hwpipe_to_plane[hwpipe->idx] = plane; + + return hwpipe; +} + +void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct mdp5_state *state = mdp5_get_state(s); + struct mdp5_hw_pipe_state *new_state = &state->hwpipe; + + if (!hwpipe) + return; + + if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx])) + return; + + DBG("%s: release from plane %s", hwpipe->name, + new_state->hwpipe_to_plane[hwpipe->idx]->name); + + if (mdp5_kms->smp) { + DBG("%s: free SMP blocks", hwpipe->name); + mdp5_smp_release(mdp5_kms->smp, &state->smp, hwpipe->pipe); + } + + new_state->hwpipe_to_plane[hwpipe->idx] = NULL; +} + +void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe) +{ + kfree(hwpipe); +} + +struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe, + uint32_t reg_offset, uint32_t caps) +{ + struct mdp5_hw_pipe *hwpipe; + + hwpipe = kzalloc(sizeof(*hwpipe), GFP_KERNEL); + if (!hwpipe) + return ERR_PTR(-ENOMEM); + + hwpipe->name = pipe2name(pipe); + hwpipe->pipe = pipe; + hwpipe->reg_offset = reg_offset; + hwpipe->caps = caps; + hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe); + + spin_lock_init(&hwpipe->pipe_lock); + + return hwpipe; +} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h new file mode 100644 index 000000000000..611da7a660c9 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2016 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __MDP5_PIPE_H__ +#define __MDP5_PIPE_H__ + +#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */ + +/* represents a hw pipe, which is dynamically assigned to a plane */ +struct mdp5_hw_pipe { + int idx; + + const char *name; + enum mdp5_pipe pipe; + + spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */ + uint32_t reg_offset; + uint32_t caps; + + uint32_t flush_mask; /* used to commit pipe registers */ + + /* number of smp blocks per plane, ie: + * nblks_y | (nblks_u << 8) | (nblks_v << 16) + */ + uint32_t blkcfg; +}; + +/* global atomic state of assignment between pipes and planes: */ +struct mdp5_hw_pipe_state { + struct drm_plane *hwpipe_to_plane[SSPP_MAX]; +}; + +struct mdp5_hw_pipe *__must_check +mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane, + uint32_t caps, uint32_t blkcfg); +void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe); + +struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe, + uint32_t reg_offset, uint32_t caps); +void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe); + +#endif /* __MDP5_PIPE_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 8bf55e3450c5..c099da7bc212 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -21,15 +21,6 @@ struct mdp5_plane { struct drm_plane base; - const char *name; - - enum mdp5_pipe pipe; - - spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */ - uint32_t reg_offset; - uint32_t caps; - - uint32_t flush_mask; /* used to commit pipe registers */ uint32_t nformats; uint32_t formats[32]; @@ -70,12 +61,6 @@ static void mdp5_plane_destroy(struct drm_plane *plane) static void mdp5_plane_install_rotation_property(struct drm_device *dev, struct drm_plane *plane) { - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); - - if (!(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP) && - !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) - return; - drm_plane_create_rotation_property(plane, DRM_ROTATE_0, DRM_ROTATE_0 | @@ -188,11 +173,12 @@ mdp5_plane_atomic_print_state(struct drm_printer *p, { struct mdp5_plane_state *pstate = to_mdp5_plane_state(state); + drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ? 
+ pstate->hwpipe->name : "(null)"); drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied); drm_printf(p, "\tzpos=%u\n", pstate->zpos); drm_printf(p, "\talpha=%u\n", pstate->alpha); drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); - drm_printf(p, "\tmode_changed=%u\n", pstate->mode_changed); drm_printf(p, "\tpending=%u\n", pstate->pending); } @@ -234,7 +220,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane) if (mdp5_state && mdp5_state->base.fb) drm_framebuffer_reference(mdp5_state->base.fb); - mdp5_state->mode_changed = false; mdp5_state->pending = false; return &mdp5_state->base; @@ -243,10 +228,12 @@ mdp5_plane_duplicate_state(struct drm_plane *plane) static void mdp5_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) { + struct mdp5_plane_state *pstate = to_mdp5_plane_state(state); + if (state->fb) drm_framebuffer_unreference(state->fb); - kfree(to_mdp5_plane_state(state)); + kfree(pstate); } static const struct drm_plane_funcs mdp5_plane_funcs = { @@ -265,104 +252,115 @@ static const struct drm_plane_funcs mdp5_plane_funcs = { static int mdp5_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct mdp5_kms *mdp5_kms = get_kms(plane); struct drm_framebuffer *fb = new_state->fb; if (!new_state->fb) return 0; - DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id); + DBG("%s: prepare: FB[%u]", plane->name, fb->base.id); return msm_framebuffer_prepare(fb, mdp5_kms->id); } static void mdp5_plane_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct mdp5_kms *mdp5_kms = get_kms(plane); struct drm_framebuffer *fb = old_state->fb; if (!fb) return; - DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id); + DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id); msm_framebuffer_cleanup(fb, mdp5_kms->id); } static int mdp5_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); + struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); struct drm_plane_state *old_state = plane->state; - const struct mdp_format *format; - bool vflip, hflip; + struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg); + bool new_hwpipe = false; + uint32_t max_width, max_height; + uint32_t caps = 0; - DBG("%s: check (%d -> %d)", mdp5_plane->name, + DBG("%s: check (%d -> %d)", plane->name, plane_enabled(old_state), plane_enabled(state)); + /* We don't allow faster-than-vblank updates.. if we did add this + * some day, we would need to disallow in cases where hwpipe + * changes + */ + if (WARN_ON(to_mdp5_plane_state(old_state)->pending)) + return -EBUSY; + + max_width = config->hw->lm.max_width << 16; + max_height = config->hw->lm.max_height << 16; + + /* Make sure source dimensions are within bounds. 
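(Editor's note: src_w/src_h are 16.16 fixed point here, which is why lm.max_width/max_height are shifted left by 16 just above before this comparison.)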
*/ + if ((state->src_w > max_width) || (state->src_h > max_height)) { + struct drm_rect src = drm_plane_state_src(state); + DBG("Invalid source size "DRM_RECT_FP_FMT, + DRM_RECT_FP_ARG(&src)); + return -ERANGE; + } + if (plane_enabled(state)) { unsigned int rotation; + const struct mdp_format *format; + struct mdp5_kms *mdp5_kms = get_kms(plane); + uint32_t blkcfg = 0; format = to_mdp_format(msm_framebuffer_format(state->fb)); - if (MDP_FORMAT_IS_YUV(format) && - !pipe_supports_yuv(mdp5_plane->caps)) { - dev_err(plane->dev->dev, - "Pipe doesn't support YUV\n"); - - return -EINVAL; - } - - if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) && - (((state->src_w >> 16) != state->crtc_w) || - ((state->src_h >> 16) != state->crtc_h))) { - dev_err(plane->dev->dev, - "Pipe doesn't support scaling (%dx%d -> %dx%d)\n", - state->src_w >> 16, state->src_h >> 16, - state->crtc_w, state->crtc_h); + if (MDP_FORMAT_IS_YUV(format)) + caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC; - return -EINVAL; - } + if (((state->src_w >> 16) != state->crtc_w) || + ((state->src_h >> 16) != state->crtc_h)) + caps |= MDP_PIPE_CAP_SCALE; rotation = drm_rotation_simplify(state->rotation, DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y); - hflip = !!(rotation & DRM_REFLECT_X); - vflip = !!(rotation & DRM_REFLECT_Y); - if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || - (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { - dev_err(plane->dev->dev, - "Pipe doesn't support flip\n"); + if (rotation & DRM_REFLECT_X) + caps |= MDP_PIPE_CAP_HFLIP; - return -EINVAL; - } - } + if (rotation & DRM_REFLECT_Y) + caps |= MDP_PIPE_CAP_VFLIP; - if (plane_enabled(state) && plane_enabled(old_state)) { - /* we cannot change SMP block configuration during scanout: */ - bool full_modeset = false; - if (state->fb->pixel_format != old_state->fb->pixel_format) { - DBG("%s: pixel_format change!", mdp5_plane->name); - full_modeset = true; - } - if (state->src_w != old_state->src_w) { - DBG("%s: src_w change!", mdp5_plane->name); - full_modeset = true; - } - if (to_mdp5_plane_state(old_state)->pending) { - DBG("%s: still pending!", mdp5_plane->name); - full_modeset = true; + /* (re)allocate hw pipe if we don't have one or caps-mismatch: */ + if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps)) + new_hwpipe = true; + + if (mdp5_kms->smp) { + const struct mdp_format *format = + to_mdp_format(msm_framebuffer_format(state->fb)); + + blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format, + state->src_w >> 16, false); + + if (mdp5_state->hwpipe && (mdp5_state->hwpipe->blkcfg != blkcfg)) + new_hwpipe = true; } - if (full_modeset) { - struct drm_crtc_state *crtc_state = - drm_atomic_get_crtc_state(state->state, state->crtc); - crtc_state->mode_changed = true; - to_mdp5_plane_state(state)->mode_changed = true; + + /* (re)assign hwpipe if needed, otherwise keep old one: */ + if (new_hwpipe) { + /* TODO maybe we want to re-assign hwpipe sometimes + * in cases when we no-longer need some caps to make + * it available for other planes? 
+ */ + struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe; + mdp5_state->hwpipe = mdp5_pipe_assign(state->state, + plane, caps, blkcfg); + if (IS_ERR(mdp5_state->hwpipe)) { + DBG("%s: failed to assign hwpipe!", plane->name); + return PTR_ERR(mdp5_state->hwpipe); + } + mdp5_pipe_release(state->state, old_hwpipe); } - } else { - to_mdp5_plane_state(state)->mode_changed = true; } return 0; @@ -371,16 +369,16 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, static void mdp5_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct drm_plane_state *state = plane->state; + struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); + + DBG("%s: update", plane->name); - DBG("%s: update", mdp5_plane->name); + mdp5_state->pending = true; - if (!plane_enabled(state)) { - to_mdp5_plane_state(state)->pending = true; - } else if (to_mdp5_plane_state(state)->mode_changed) { + if (plane_enabled(state)) { int ret; - to_mdp5_plane_state(state)->pending = true; + ret = mdp5_plane_mode_set(plane, state->crtc, state->fb, state->crtc_x, state->crtc_y, @@ -389,11 +387,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane, state->src_w, state->src_h); /* atomic_check should have ensured that this doesn't fail */ WARN_ON(ret < 0); - } else { - unsigned long flags; - spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); - set_scanout_locked(plane, state->fb); - spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags); } } @@ -407,9 +400,9 @@ static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = { static void set_scanout_locked(struct drm_plane *plane, struct drm_framebuffer *fb) { - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct mdp5_kms *mdp5_kms = get_kms(plane); - enum mdp5_pipe pipe = mdp5_plane->pipe; + struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(plane->state)->hwpipe; + enum mdp5_pipe pipe = hwpipe->pipe; mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe), MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | @@ -689,14 +682,14 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct drm_plane_state *pstate = plane->state; + struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe; struct mdp5_kms *mdp5_kms = get_kms(plane); - enum mdp5_pipe pipe = mdp5_plane->pipe; + enum mdp5_pipe pipe = hwpipe->pipe; const struct mdp_format *format; uint32_t nplanes, config = 0; uint32_t phasex_step[COMP_MAX] = {0,}, phasey_step[COMP_MAX] = {0,}; - bool pe = mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT; + bool pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT; int pe_left[COMP_MAX], pe_right[COMP_MAX]; int pe_top[COMP_MAX], pe_bottom[COMP_MAX]; uint32_t hdecm = 0, vdecm = 0; @@ -721,27 +714,10 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, src_w = src_w >> 16; src_h = src_h >> 16; - DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp5_plane->name, + DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name, fb->base.id, src_x, src_y, src_w, src_h, crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); - /* Request some memory from the SMP: */ - if (mdp5_kms->smp) { - ret = mdp5_smp_request(mdp5_kms->smp, - mdp5_plane->pipe, format, src_w, false); - if (ret) - return ret; - } - - /* - * Currently we update the hw for allocations/requests immediately, - * but once atomic modeset/pageflip is in place, the allocation - * would move into 
atomic->check_plane_state(), while updating the - * hw would remain here: - */ - if (mdp5_kms->smp) - mdp5_smp_configure(mdp5_kms->smp, pipe); - ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step); if (ret) return ret; @@ -750,7 +726,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, if (ret) return ret; - if (mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT) { + if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) { calc_pixel_ext(format, src_w, crtc_w, phasex_step, pe_left, pe_right, true); calc_pixel_ext(format, src_h, crtc_h, phasey_step, @@ -771,11 +747,11 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, hflip = !!(rotation & DRM_REFLECT_X); vflip = !!(rotation & DRM_REFLECT_Y); - spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); + spin_lock_irqsave(&hwpipe->pipe_lock, flags); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe), - MDP5_PIPE_SRC_IMG_SIZE_WIDTH(fb->width) | - MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(fb->height)); + MDP5_PIPE_SRC_IMG_SIZE_WIDTH(min(fb->width, src_w)) | + MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(min(fb->height, src_h))); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe), MDP5_PIPE_SRC_SIZE_WIDTH(src_w) | @@ -820,12 +796,12 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, /* not using secure mode: */ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0); - if (mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT) + if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) mdp5_write_pixel_ext(mdp5_kms, pipe, format, src_w, pe_left, pe_right, src_h, pe_top, pe_bottom); - if (mdp5_plane->caps & MDP_PIPE_CAP_SCALE) { + if (hwpipe->caps & MDP_PIPE_CAP_SCALE) { mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), phasex_step[COMP_0]); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), @@ -840,7 +816,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config); } - if (mdp5_plane->caps & MDP_PIPE_CAP_CSC) { + if (hwpipe->caps & MDP_PIPE_CAP_CSC) { if (MDP_FORMAT_IS_YUV(format)) csc_enable(mdp5_kms, pipe, mdp_get_default_csc_cfg(CSC_YUV2RGB)); @@ -850,56 +826,42 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, set_scanout_locked(plane, fb); - spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags); + spin_unlock_irqrestore(&hwpipe->pipe_lock, flags); return ret; } -void mdp5_plane_complete_flip(struct drm_plane *plane) +enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) { - struct mdp5_kms *mdp5_kms = get_kms(plane); - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); - enum mdp5_pipe pipe = mdp5_plane->pipe; - - DBG("%s: complete flip", mdp5_plane->name); + struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); - if (mdp5_kms->smp) - mdp5_smp_commit(mdp5_kms->smp, pipe); + if (WARN_ON(!pstate->hwpipe)) + return 0; - to_mdp5_plane_state(plane->state)->pending = false; -} - -enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) -{ - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); - return mdp5_plane->pipe; + return pstate->hwpipe->pipe; } uint32_t mdp5_plane_get_flush(struct drm_plane *plane) { - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); + struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); - return mdp5_plane->flush_mask; + if (WARN_ON(!pstate->hwpipe)) + return 0; + + return pstate->hwpipe->flush_mask; } /* called after vsync in thread context */ void mdp5_plane_complete_commit(struct drm_plane *plane, struct drm_plane_state *state) { - struct mdp5_kms *mdp5_kms = get_kms(plane); - struct mdp5_plane *mdp5_plane = 
to_mdp5_plane(plane); - enum mdp5_pipe pipe = mdp5_plane->pipe; + struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); - if (!plane_enabled(plane->state) && mdp5_kms->smp) { - DBG("%s: free SMP", mdp5_plane->name); - mdp5_smp_release(mdp5_kms->smp, pipe); - } + pstate->pending = false; } /* initialize plane */ -struct drm_plane *mdp5_plane_init(struct drm_device *dev, - enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset, - uint32_t caps) +struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) { struct drm_plane *plane = NULL; struct mdp5_plane *mdp5_plane; @@ -914,22 +876,13 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev, plane = &mdp5_plane->base; - mdp5_plane->pipe = pipe; - mdp5_plane->name = pipe2name(pipe); - mdp5_plane->caps = caps; - mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, - ARRAY_SIZE(mdp5_plane->formats), - !pipe_supports_yuv(mdp5_plane->caps)); - - mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe); - mdp5_plane->reg_offset = reg_offset; - spin_lock_init(&mdp5_plane->pipe_lock); + ARRAY_SIZE(mdp5_plane->formats), false); - type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; + type = primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, mdp5_plane->formats, mdp5_plane->nformats, - type, "%s", mdp5_plane->name); + type, NULL); if (ret) goto fail; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c index 27d7b55b52c9..58f712d37e7f 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c @@ -21,72 +21,6 @@ #include "mdp5_smp.h" -/* SMP - Shared Memory Pool - * - * These are shared between all the clients, where each plane in a - * scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on - * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR. - * - * Based on the size of the attached scanout buffer, a certain # of - * blocks must be allocated to that client out of the shared pool. - * - * In some hw, some blocks are statically allocated for certain pipes - * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). - * - * For each block that can be dynamically allocated, it can be either - * free: - * The block is free. - * - * pending: - * The block is allocated to some client and not free. - * - * configured: - * The block is allocated to some client, and assigned to that - * client in MDP5_SMP_ALLOC registers. - * - * inuse: - * The block is being actively used by a client. - * - * The updates happen in the following steps: - * - * 1) mdp5_smp_request(): - * When plane scanout is setup, calculate required number of - * blocks needed per client, and request. Blocks neither inuse nor - * configured nor pending by any other client are added to client's - * pending set. - * For shrinking, blocks in pending but not in configured can be freed - * directly, but those already in configured will be freed later by - * mdp5_smp_commit. - * - * 2) mdp5_smp_configure(): - * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers - * are configured for the union(pending, inuse) - * Current pending is copied to configured. - * It is assumed that mdp5_smp_request and mdp5_smp_configure not run - * concurrently for the same pipe. - * - * 3) mdp5_smp_commit(): - * After next vblank, copy configured -> inuse. 
Optionally update - * MDP5_SMP_ALLOC registers if there are newly unused blocks - * - * 4) mdp5_smp_release(): - * Must be called after the pipe is disabled and no longer uses any SMB - * - * On the next vblank after changes have been committed to hw, the - * client's pending blocks become it's in-use blocks (and no-longer - * in-use blocks become available to other clients). - * - * btw, hurray for confusing overloaded acronyms! :-/ - * - * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1 - * should happen at (or before)? atomic->check(). And we'd need - * an API to discard previous requests if update is aborted or - * (test-only). - * - * TODO would perhaps be nice to have debugfs to dump out kernel - * inuse and pending state of all clients.. - */ - struct mdp5_smp { struct drm_device *dev; @@ -94,16 +28,8 @@ struct mdp5_smp { int blk_cnt; int blk_size; - - spinlock_t state_lock; - mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */ - - struct mdp5_client_smp_state client_state[MAX_CLIENTS]; }; -static void update_smp_state(struct mdp5_smp *smp, - u32 cid, mdp5_smp_state_t *assigned); - static inline struct mdp5_kms *get_kms(struct mdp5_smp *smp) { @@ -134,57 +60,38 @@ static inline u32 pipe2client(enum mdp5_pipe pipe, int plane) return mdp5_cfg->smp.clients[pipe] + plane; } -/* step #1: update # of blocks pending for the client: */ +/* allocate blocks for the specified request: */ static int smp_request_block(struct mdp5_smp *smp, + struct mdp5_smp_state *state, u32 cid, int nblks) { - struct mdp5_kms *mdp5_kms = get_kms(smp); - struct mdp5_client_smp_state *ps = &smp->client_state[cid]; - int i, ret, avail, cur_nblks, cnt = smp->blk_cnt; + void *cs = state->client_state[cid]; + int i, avail, cnt = smp->blk_cnt; uint8_t reserved; - unsigned long flags; - reserved = smp->reserved[cid]; + /* we shouldn't be requesting blocks for an in-use client: */ + WARN_ON(bitmap_weight(cs, cnt) > 0); - spin_lock_irqsave(&smp->state_lock, flags); + reserved = smp->reserved[cid]; if (reserved) { nblks = max(0, nblks - reserved); DBG("%d MMBs allocated (%d reserved)", nblks, reserved); } - avail = cnt - bitmap_weight(smp->state, cnt); + avail = cnt - bitmap_weight(state->state, cnt); if (nblks > avail) { - dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n", + dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n", nblks, avail); - ret = -ENOSPC; - goto fail; + return -ENOSPC; } - cur_nblks = bitmap_weight(ps->pending, cnt); - if (nblks > cur_nblks) { - /* grow the existing pending reservation: */ - for (i = cur_nblks; i < nblks; i++) { - int blk = find_first_zero_bit(smp->state, cnt); - set_bit(blk, ps->pending); - set_bit(blk, smp->state); - } - } else { - /* shrink the existing pending reservation: */ - for (i = cur_nblks; i > nblks; i--) { - int blk = find_first_bit(ps->pending, cnt); - clear_bit(blk, ps->pending); - - /* clear in global smp_state if not in configured - * otherwise until _commit() - */ - if (!test_bit(blk, ps->configured)) - clear_bit(blk, smp->state); - } + for (i = 0; i < nblks; i++) { + int blk = find_first_zero_bit(state->state, cnt); + set_bit(blk, cs); + set_bit(blk, state->state); } -fail: - spin_unlock_irqrestore(&smp->state_lock, flags); return 0; } @@ -209,14 +116,15 @@ static void set_fifo_thresholds(struct mdp5_smp *smp, * decimated width. Ie. SMP buffering sits downstream of decimation (which * presumably happens during the dma from scanout buffer). 
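With the spinlock and the pending/configured/inuse triple gone, smp_request_block() above boils the bookkeeping down to two bitmaps that must stay in sync: state->state marks which blocks are used globally, and state->client_state[cid] marks which of those belong to each client. Allocation sets a block in both maps; mdp5_smp_release() later clears the client map back out of the global one with bitmap_andnot(). A minimal sketch of the resulting invariant, using the kernel bitmap API (the smp_state_consistent() helper and the MAX_SMP_BLOCKS bound are illustrative assumptions, not part of the patch):

static bool smp_state_consistent(struct mdp5_smp *smp,
				 struct mdp5_smp_state *state)
{
	DECLARE_BITMAP(owned, MAX_SMP_BLOCKS);	/* assumed bitmap bound */
	u32 cid;

	/* union of every client's allocation... */
	bitmap_zero(owned, smp->blk_cnt);
	for (cid = 0; cid < MAX_CLIENTS; cid++)
		bitmap_or(owned, owned, state->client_state[cid],
			  smp->blk_cnt);

	/* ...must be a subset of the globally-used blocks */
	return bitmap_subset(owned, state->state, smp->blk_cnt);
}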
*/ -int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, - const struct mdp_format *format, u32 width, bool hdecim) +uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, + const struct mdp_format *format, + u32 width, bool hdecim) { struct mdp5_kms *mdp5_kms = get_kms(smp); - struct drm_device *dev = mdp5_kms->dev; int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg); - int i, hsub, nplanes, nlines, nblks, ret; + int i, hsub, nplanes, nlines; u32 fmt = format->base.pixel_format; + uint32_t blkcfg = 0; nplanes = drm_format_num_planes(fmt); hsub = drm_format_horz_chroma_subsampling(fmt); @@ -239,7 +147,7 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, hsub = 1; } - for (i = 0, nblks = 0; i < nplanes; i++) { + for (i = 0; i < nplanes; i++) { int n, fetch_stride, cpp; cpp = drm_format_plane_cpp(fmt, i); @@ -251,60 +159,72 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, if (rev == 0) n = roundup_pow_of_two(n); + blkcfg |= (n << (8 * i)); + } + + return blkcfg; +} + +int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state, + enum mdp5_pipe pipe, uint32_t blkcfg) +{ + struct mdp5_kms *mdp5_kms = get_kms(smp); + struct drm_device *dev = mdp5_kms->dev; + int i, ret; + + for (i = 0; i < pipe2nclients(pipe); i++) { + u32 cid = pipe2client(pipe, i); + int n = blkcfg & 0xff; + + if (!n) + continue; + DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n); - ret = smp_request_block(smp, pipe2client(pipe, i), n); + ret = smp_request_block(smp, state, cid, n); if (ret) { dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n", n, ret); return ret; } - nblks += n; + blkcfg >>= 8; } - set_fifo_thresholds(smp, pipe, nblks); + state->assigned |= (1 << pipe); return 0; } /* Release SMP blocks for all clients of the pipe */ -void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe) +void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state, + enum mdp5_pipe pipe) { int i; - unsigned long flags; int cnt = smp->blk_cnt; for (i = 0; i < pipe2nclients(pipe); i++) { - mdp5_smp_state_t assigned; u32 cid = pipe2client(pipe, i); - struct mdp5_client_smp_state *ps = &smp->client_state[cid]; - - spin_lock_irqsave(&smp->state_lock, flags); - - /* clear hw assignment */ - bitmap_or(assigned, ps->inuse, ps->configured, cnt); - update_smp_state(smp, CID_UNUSED, &assigned); - - /* free to global pool */ - bitmap_andnot(smp->state, smp->state, ps->pending, cnt); - bitmap_andnot(smp->state, smp->state, assigned, cnt); + void *cs = state->client_state[cid]; - /* clear client's infor */ - bitmap_zero(ps->pending, cnt); - bitmap_zero(ps->configured, cnt); - bitmap_zero(ps->inuse, cnt); + /* update global state: */ + bitmap_andnot(state->state, state->state, cs, cnt); - spin_unlock_irqrestore(&smp->state_lock, flags); + /* clear client's state */ + bitmap_zero(cs, cnt); } - set_fifo_thresholds(smp, pipe, 0); + state->released |= (1 << pipe); } -static void update_smp_state(struct mdp5_smp *smp, +/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to + * happen after scanout completes. 
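That constraint is exactly what splits the hardware programming across the two commit hooks below: mdp5_smp_prepare_commit() writes SMP_ALLOC and the FIFO thresholds for newly assigned pipes (safe, since their blocks came out of the free pool), while mdp5_smp_complete_commit() waits until after vblank to zero the thresholds of released pipes, once the old configuration has stopped scanning out. A plausible shape for the callers, assuming the mdp5 kms prepare/complete hooks drive this (surrounding plumbing elided):

static void mdp5_prepare_commit(struct msm_kms *kms,
				struct drm_atomic_state *state)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	if (mdp5_kms->smp)
		mdp5_smp_prepare_commit(mdp5_kms->smp,
					&mdp5_kms->state->smp);
}

/* ... FLUSH registers written, vblank passes ... */

static void mdp5_complete_commit(struct msm_kms *kms,
				 struct drm_atomic_state *state)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	if (mdp5_kms->smp)
		mdp5_smp_complete_commit(mdp5_kms->smp,
					 &mdp5_kms->state->smp);
}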
+ */ +static unsigned update_smp_state(struct mdp5_smp *smp, u32 cid, mdp5_smp_state_t *assigned) { struct mdp5_kms *mdp5_kms = get_kms(smp); int cnt = smp->blk_cnt; + unsigned nblks = 0; u32 blk, val; for_each_set_bit(blk, *assigned, cnt) { @@ -330,62 +250,88 @@ static void update_smp_state(struct mdp5_smp *smp, mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val); mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val); + + nblks++; } + + return nblks; } -/* step #2: configure hw for union(pending, inuse): */ -void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe) +void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state) { - int cnt = smp->blk_cnt; - mdp5_smp_state_t assigned; - int i; + enum mdp5_pipe pipe; - for (i = 0; i < pipe2nclients(pipe); i++) { - u32 cid = pipe2client(pipe, i); - struct mdp5_client_smp_state *ps = &smp->client_state[cid]; + for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) { + unsigned i, nblks = 0; - /* - * if vblank has not happened since last smp_configure - * skip the configure for now - */ - if (!bitmap_equal(ps->inuse, ps->configured, cnt)) - continue; + for (i = 0; i < pipe2nclients(pipe); i++) { + u32 cid = pipe2client(pipe, i); + void *cs = state->client_state[cid]; - bitmap_copy(ps->configured, ps->pending, cnt); - bitmap_or(assigned, ps->inuse, ps->configured, cnt); - update_smp_state(smp, cid, &assigned); + nblks += update_smp_state(smp, cid, cs); + + DBG("assign %s:%u, %u blks", + pipe2name(pipe), i, nblks); + } + + set_fifo_thresholds(smp, pipe, nblks); } + + state->assigned = 0; } -/* step #3: after vblank, copy configured -> inuse: */ -void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) +void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state) { - int cnt = smp->blk_cnt; - mdp5_smp_state_t released; - int i; - - for (i = 0; i < pipe2nclients(pipe); i++) { - u32 cid = pipe2client(pipe, i); - struct mdp5_client_smp_state *ps = &smp->client_state[cid]; + enum mdp5_pipe pipe; - /* - * Figure out if there are any blocks we where previously - * using, which can be released and made available to other - * clients: - */ - if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) { - unsigned long flags; + for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) { + DBG("release %s", pipe2name(pipe)); + set_fifo_thresholds(smp, pipe, 0); + } - spin_lock_irqsave(&smp->state_lock, flags); - /* clear released blocks: */ - bitmap_andnot(smp->state, smp->state, released, cnt); - spin_unlock_irqrestore(&smp->state_lock, flags); + state->released = 0; +} - update_smp_state(smp, CID_UNUSED, &released); +void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p) +{ + struct mdp5_kms *mdp5_kms = get_kms(smp); + struct mdp5_hw_pipe_state *hwpstate; + struct mdp5_smp_state *state; + int total = 0, i, j; + + drm_printf(p, "name\tinuse\tplane\n"); + drm_printf(p, "----\t-----\t-----\n"); + + if (drm_can_sleep()) + drm_modeset_lock(&mdp5_kms->state_lock, NULL); + + /* grab these *after* we hold the state_lock */ + hwpstate = &mdp5_kms->state->hwpipe; + state = &mdp5_kms->state->smp; + + for (i = 0; i < mdp5_kms->num_hwpipes; i++) { + struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; + struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx]; + enum mdp5_pipe pipe = hwpipe->pipe; + for (j = 0; j < pipe2nclients(pipe); j++) { + u32 cid = pipe2client(pipe, j); + void *cs = state->client_state[cid]; + int inuse = bitmap_weight(cs, smp->blk_cnt); + 
+ drm_printf(p, "%s:%d\t%d\t%s\n", + pipe2name(pipe), j, inuse, + plane ? plane->name : NULL); + + total += inuse; } - - bitmap_copy(ps->inuse, ps->configured, cnt); } + + drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt); + drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt - + bitmap_weight(state->state, smp->blk_cnt)); + + if (drm_can_sleep()) + drm_modeset_unlock(&mdp5_kms->state_lock); } void mdp5_smp_destroy(struct mdp5_smp *smp) @@ -393,8 +339,9 @@ void mdp5_smp_destroy(struct mdp5_smp *smp) kfree(smp); } -struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg) +struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg) { + struct mdp5_smp_state *state = &mdp5_kms->state->smp; struct mdp5_smp *smp = NULL; int ret; @@ -404,14 +351,13 @@ struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_blo goto fail; } - smp->dev = dev; + smp->dev = mdp5_kms->dev; smp->blk_cnt = cfg->mmb_count; smp->blk_size = cfg->mmb_size; /* statically tied MMBs cannot be re-allocated: */ - bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt); + bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt); memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved)); - spin_lock_init(&smp->state_lock); return smp; fail: diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h index 20b87e800ea3..b41d0448fbe8 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h @@ -19,12 +19,53 @@ #ifndef __MDP5_SMP_H__ #define __MDP5_SMP_H__ +#include <drm/drm_print.h> + #include "msm_drv.h" -struct mdp5_client_smp_state { - mdp5_smp_state_t inuse; - mdp5_smp_state_t configured; - mdp5_smp_state_t pending; +/* + * SMP - Shared Memory Pool: + * + * SMP blocks are shared between all the clients, where each plane in + * a scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on + * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR. + * + * Based on the size of the attached scanout buffer, a certain # of + * blocks must be allocated to that client out of the shared pool. + * + * In some hw, some blocks are statically allocated for certain pipes + * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). + * + * + * Atomic SMP State: + * + * On atomic updates that modify SMP configuration, the state is cloned + * (copied) and modified. For test-only, or in cases where atomic + * update fails (or if we hit ww_mutex deadlock/backoff condition) the + * new state is simply thrown away. + * + * Because the SMP registers are not double buffered, updates are a + * two step process: + * + * 1) in _prepare_commit() we configure things (via read-modify-write) + * for the newly assigned pipes, so we don't take away blocks + * assigned to pipes that are still scanning out + * 2) in _complete_commit(), after vblank/etc, we clear things for the + * released clients, since at that point old pipes are no longer + * scanning out. + */ +struct mdp5_smp_state { + /* global state of what blocks are in use: */ + mdp5_smp_state_t state; + + /* per client state of what blocks they are using: */ + mdp5_smp_state_t client_state[MAX_CLIENTS]; + + /* assigned pipes (hw updated at _prepare_commit()): */ + unsigned long assigned; + + /* released pipes (hw updated at _complete_commit()): */ + unsigned long released; }; struct mdp5_kms; @@ -36,13 +77,22 @@ struct mdp5_smp; * which is then used to call the other mdp5_smp_*(handler, ...) functions. 
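Under the clone-and-modify scheme described above, a plane's atomic_check can speculatively reserve blocks against the duplicated state and let the atomic core throw the copy away on failure or for test-only commits. A sketch of the check-time flow with this header's API (mdp5_get_state() returning the per-transaction duplicate is an assumption; in the plane code earlier in this patch the reservation is actually driven through mdp5_pipe_assign()):

static int example_smp_check(struct drm_atomic_state *s,
			     struct mdp5_smp *smp, enum mdp5_pipe pipe,
			     const struct mdp_format *format,
			     u32 width, bool hdecim)
{
	/* assumed helper returning this transaction's duplicated state */
	struct mdp5_smp_state *state = &mdp5_get_state(s)->smp;
	uint32_t blkcfg = mdp5_smp_calculate(smp, format, width, hdecim);

	/*
	 * Reserve against the duplicate only; if this (or anything later
	 * in the commit) fails, the clone is discarded and the hardware
	 * was never touched.
	 */
	return mdp5_smp_assign(smp, state, pipe, blkcfg);
}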
*/ -struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg); +struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, + const struct mdp5_smp_block *cfg); void mdp5_smp_destroy(struct mdp5_smp *smp); -int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, - const struct mdp_format *format, u32 width, bool hdecim); -void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe); -void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe); -void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe); +void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p); + +uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, + const struct mdp_format *format, + u32 width, bool hdecim); + +int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state, + enum mdp5_pipe pipe, uint32_t blkcfg); +void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state, + enum mdp5_pipe pipe); + +void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state); +void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state); #endif /* __MDP5_SMP_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h index 452e3518f98b..8994c365e218 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h @@ -12,7 +12,7 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 4e21e1d72378..30b5d23e53b4 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -241,6 +241,10 @@ int msm_atomic_commit(struct drm_device *dev, drm_atomic_helper_swap_state(state, true); + /* swap driver private state while still holding state_lock */ + if (to_kms_state(state)->state) + priv->kms->funcs->swap_state(priv->kms, state); + /* * Everything below can be run asynchronously without the need to grab * any modeset locks at all under one conditions: It must be guaranteed @@ -271,3 +275,30 @@ error: drm_atomic_helper_cleanup_planes(dev, state); return ret; } + +struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev) +{ + struct msm_kms_state *state = kzalloc(sizeof(*state), GFP_KERNEL); + + if (!state || drm_atomic_state_init(dev, &state->base) < 0) { + kfree(state); + return NULL; + } + + return &state->base; +} + +void msm_atomic_state_clear(struct drm_atomic_state *s) +{ + struct msm_kms_state *state = to_kms_state(s); + drm_atomic_state_default_clear(&state->base); + kfree(state->state); + state->state = NULL; +} + +void msm_atomic_state_free(struct drm_atomic_state *state) +{ + 
kfree(to_kms_state(state)->state); + drm_atomic_state_default_release(state); + kfree(state); +} diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 3c853733c99a..c1b40f5adb60 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -18,6 +18,7 @@ #ifdef CONFIG_DEBUG_FS #include "msm_drv.h" #include "msm_gpu.h" +#include "msm_kms.h" #include "msm_debugfs.h" static int msm_gpu_show(struct drm_device *dev, struct seq_file *m) @@ -142,6 +143,7 @@ int msm_debugfs_late_init(struct drm_device *dev) int msm_debugfs_init(struct drm_minor *minor) { struct drm_device *dev = minor->dev; + struct msm_drm_private *priv = dev->dev_private; int ret; ret = drm_debugfs_create_files(msm_debugfs_list, @@ -153,15 +155,25 @@ int msm_debugfs_init(struct drm_minor *minor) return ret; } - return 0; + if (priv->kms->funcs->debugfs_init) + ret = priv->kms->funcs->debugfs_init(priv->kms, minor); + + return ret; } void msm_debugfs_cleanup(struct drm_minor *minor) { + struct drm_device *dev = minor->dev; + struct msm_drm_private *priv = dev->dev_private; + drm_debugfs_remove_files(msm_debugfs_list, ARRAY_SIZE(msm_debugfs_list), minor); - if (!minor->dev->dev_private) + if (!priv) return; + + if (priv->kms->funcs->debugfs_cleanup) + priv->kms->funcs->debugfs_cleanup(priv->kms, minor); + msm_rd_debugfs_cleanup(minor); msm_perf_debugfs_cleanup(minor); } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 8d21fb27a401..e29bb66f55b1 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -46,17 +46,21 @@ static const struct drm_mode_config_funcs mode_config_funcs = { .output_poll_changed = msm_fb_output_poll_changed, .atomic_check = msm_atomic_check, .atomic_commit = msm_atomic_commit, + .atomic_state_alloc = msm_atomic_state_alloc, + .atomic_state_clear = msm_atomic_state_clear, + .atomic_state_free = msm_atomic_state_free, }; -int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu) +int msm_register_address_space(struct drm_device *dev, + struct msm_gem_address_space *aspace) { struct msm_drm_private *priv = dev->dev_private; - int idx = priv->num_mmus++; + int idx = priv->num_aspaces++; - if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus))) + if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace))) return -EINVAL; - priv->mmus[idx] = mmu; + priv->aspace[idx] = aspace; return idx; } @@ -234,7 +238,7 @@ static int msm_drm_uninit(struct device *dev) flush_workqueue(priv->atomic_wq); destroy_workqueue(priv->atomic_wq); - if (kms) + if (kms && kms->funcs) kms->funcs->destroy(kms); if (gpu) { @@ -907,10 +911,8 @@ static int add_components_mdp(struct device *mdp_dev, * remote-endpoint isn't a component that we need to add */ if (of_device_is_compatible(np, "qcom,mdp4") && - ep.port == 0) { - of_node_put(ep_node); + ep.port == 0) continue; - } /* * It's okay if some of the ports don't have a remote endpoint @@ -918,15 +920,12 @@ static int add_components_mdp(struct device *mdp_dev, * any external interface. 
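Stepping back to the msm_atomic_state_alloc()/_clear()/_free() hooks above: together with the ->swap_state() call added to msm_atomic_commit(), they are the standard recipe for hanging driver-private global state off a subclassed drm_atomic_state (see msm_kms_state and to_kms_state() in the msm_kms.h hunk further down). A backend clones its canonical state lazily on first touch during check; commit then swaps the clone in while state_lock is still held. A hypothetical backend illustrating the pattern (struct my_kms, struct my_global_state and both helpers are invented for illustration):

struct my_kms {
	struct msm_kms base;
	struct my_global_state *cur;	/* canonical copy */
};
#define to_my_kms(x) container_of(x, struct my_kms, base)

static struct my_global_state *my_get_state(struct msm_kms *kms,
					    struct drm_atomic_state *s)
{
	struct msm_kms_state *state = to_kms_state(s);

	if (!state->state)	/* first touch: clone the canonical copy */
		state->state = kmemdup(to_my_kms(kms)->cur,
				       sizeof(struct my_global_state),
				       GFP_KERNEL);
	return state->state;
}

static void my_swap_state(struct msm_kms *kms, struct drm_atomic_state *s)
{
	struct msm_kms_state *state = to_kms_state(s);
	void *old = to_my_kms(kms)->cur;

	to_my_kms(kms)->cur = state->state;
	state->state = old;	/* freed later by _clear()/_free() */
}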
*/ intf = of_graph_get_remote_port_parent(ep_node); - if (!intf) { - of_node_put(ep_node); + if (!intf) continue; - } drm_of_component_match_add(master_dev, matchptr, compare_of, intf); of_node_put(intf); - of_node_put(ep_node); } return 0; @@ -1039,7 +1038,13 @@ static int msm_pdev_probe(struct platform_device *pdev) if (ret) return ret; - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + /* on all devices that I am aware of, iommu's which can map + * any address the cpu can see are used: + */ + ret = dma_set_mask_and_coherent(&pdev->dev, ~0); + if (ret) + return ret; + return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match); } diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 940bf4992fe2..ed4dad3ca133 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -52,6 +52,8 @@ struct msm_perf_state; struct msm_gem_submit; struct msm_fence_context; struct msm_fence_cb; +struct msm_gem_address_space; +struct msm_gem_vma; #define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */ @@ -121,12 +123,16 @@ struct msm_drm_private { uint32_t pending_crtcs; wait_queue_head_t pending_crtcs_event; - /* registered MMUs: */ - unsigned int num_mmus; - struct msm_mmu *mmus[NUM_DOMAINS]; + /* Registered address spaces.. currently this is fixed per # of + * iommu's. Ie. one for display block and one for gpu block. + * Eventually, to do per-process gpu pagetables, we'll want one + * of these per-process. + */ + unsigned int num_aspaces; + struct msm_gem_address_space *aspace[NUM_DOMAINS]; unsigned int num_planes; - struct drm_plane *planes[8]; + struct drm_plane *planes[16]; unsigned int num_crtcs; struct drm_crtc *crtcs[8]; @@ -173,8 +179,22 @@ int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state); int msm_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock); +struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev); +void msm_atomic_state_clear(struct drm_atomic_state *state); +void msm_atomic_state_free(struct drm_atomic_state *state); + +int msm_register_address_space(struct drm_device *dev, + struct msm_gem_address_space *aspace); + +void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt); +int msm_gem_map_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, int npages); -int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); +void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace); +struct msm_gem_address_space * +msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, + const char *name); void msm_gem_submit_free(struct msm_gem_submit *submit); int msm_ioctl_gem_submit(struct drm_device *dev, void *data, @@ -189,9 +209,9 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, - uint32_t *iova); -int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova); -uint32_t msm_gem_iova(struct drm_gem_object *obj, int id); + uint64_t *iova); +int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova); +uint64_t msm_gem_iova(struct drm_gem_object *obj, int id); struct page **msm_gem_get_pages(struct drm_gem_object *obj); void msm_gem_put_pages(struct drm_gem_object *obj); void msm_gem_put_iova(struct 
drm_gem_object *obj, int id); @@ -303,8 +323,8 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, void msm_writel(u32 data, void __iomem *addr); u32 msm_readl(const void __iomem *addr); -#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) -#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) +#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) +#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) static inline int align_pitch(int width, int bpp) { diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 95cf8fe72ee5..9acf544e7a8f 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -88,11 +88,11 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id) { struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); int ret, i, n = drm_format_num_planes(fb->pixel_format); - uint32_t iova; + uint64_t iova; for (i = 0; i < n; i++) { ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova); - DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret); + DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret); if (ret) return ret; } diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index d29f5e82a410..bffe93498512 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -76,7 +76,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, struct drm_framebuffer *fb = NULL; struct fb_info *fbi = NULL; struct drm_mode_fb_cmd2 mode_cmd = {0}; - uint32_t paddr; + uint64_t paddr; int ret, size; DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 57db7dbbb618..cd06cfd94687 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -296,12 +296,8 @@ put_iova(struct drm_gem_object *obj) WARN_ON(!mutex_is_locked(&dev->struct_mutex)); for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { - struct msm_mmu *mmu = priv->mmus[id]; - if (mmu && msm_obj->domain[id].iova) { - uint32_t offset = msm_obj->domain[id].iova; - mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size); - msm_obj->domain[id].iova = 0; - } + msm_gem_unmap_vma(priv->aspace[id], + &msm_obj->domain[id], msm_obj->sgt); } } @@ -313,7 +309,7 @@ put_iova(struct drm_gem_object *obj) * the refcnt counter needs to be atomic_t. */ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, - uint32_t *iova) + uint64_t *iova) { struct msm_gem_object *msm_obj = to_msm_bo(obj); int ret = 0; @@ -326,16 +322,8 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, return PTR_ERR(pages); if (iommu_present(&platform_bus_type)) { - struct msm_mmu *mmu = priv->mmus[id]; - uint32_t offset; - - if (WARN_ON(!mmu)) - return -EINVAL; - - offset = (uint32_t)mmap_offset(obj); - ret = mmu->funcs->map(mmu, offset, msm_obj->sgt, - obj->size, IOMMU_READ | IOMMU_WRITE); - msm_obj->domain[id].iova = offset; + ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id], + msm_obj->sgt, obj->size >> PAGE_SHIFT); } else { msm_obj->domain[id].iova = physaddr(obj); } @@ -348,7 +336,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, } /* get iova, taking a reference. 
Should have a matching put */ -int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova) +int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova) { struct msm_gem_object *msm_obj = to_msm_bo(obj); int ret; @@ -370,7 +358,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova) /* get iova without taking a reference, used in places where you have * already done a 'msm_gem_get_iova()'. */ -uint32_t msm_gem_iova(struct drm_gem_object *obj, int id) +uint64_t msm_gem_iova(struct drm_gem_object *obj, int id) { struct msm_gem_object *msm_obj = to_msm_bo(obj); WARN_ON(!msm_obj->domain[id].iova); @@ -631,9 +619,11 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) struct msm_gem_object *msm_obj = to_msm_bo(obj); struct reservation_object *robj = msm_obj->resv; struct reservation_object_list *fobj; + struct msm_drm_private *priv = obj->dev->dev_private; struct dma_fence *fence; uint64_t off = drm_vma_node_start(&obj->vma_node); const char *madv; + unsigned id; WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); @@ -650,10 +640,15 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) break; } - seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n", + seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t", msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', obj->name, obj->refcount.refcount.counter, - off, msm_obj->vaddr, obj->size, madv); + off, msm_obj->vaddr); + + for (id = 0; id < priv->num_aspaces; id++) + seq_printf(m, " %08llx", msm_obj->domain[id].iova); + + seq_printf(m, " %zu%s\n", obj->size, madv); rcu_read_lock(); fobj = rcu_dereference(robj->fence); @@ -761,7 +756,6 @@ static int msm_gem_new_impl(struct drm_device *dev, { struct msm_drm_private *priv = dev->dev_private; struct msm_gem_object *msm_obj; - unsigned sz; bool use_vram = false; switch (flags & MSM_BO_CACHE_MASK) { @@ -783,16 +777,12 @@ static int msm_gem_new_impl(struct drm_device *dev, if (WARN_ON(use_vram && !priv->vram.size)) return -EINVAL; - sz = sizeof(*msm_obj); - if (use_vram) - sz += sizeof(struct drm_mm_node); - - msm_obj = kzalloc(sz, GFP_KERNEL); + msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL); if (!msm_obj) return -ENOMEM; if (use_vram) - msm_obj->vram_node = (void *)&msm_obj[1]; + msm_obj->vram_node = &msm_obj->domain[0].node; msm_obj->flags = flags; msm_obj->madv = MSM_MADV_WILLNEED; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 2cb8551fda70..7d529516b332 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -24,6 +24,20 @@ /* Additional internal-use only BO flags: */ #define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */ +struct msm_gem_address_space { + const char *name; + /* NOTE: mm managed at the page level, size is in # of pages + * and position mm_node->start is in # of pages: + */ + struct drm_mm mm; + struct msm_mmu *mmu; +}; + +struct msm_gem_vma { + struct drm_mm_node node; + uint64_t iova; +}; + struct msm_gem_object { struct drm_gem_object base; @@ -61,10 +75,7 @@ struct msm_gem_object { struct sg_table *sgt; void *vaddr; - struct { - // XXX - uint32_t iova; - } domain[NUM_DOMAINS]; + struct msm_gem_vma domain[NUM_DOMAINS]; /* normally (resv == &_resv) except for imported bo's */ struct reservation_object *resv; @@ -112,13 +123,13 @@ struct msm_gem_submit { struct { uint32_t type; uint32_t size; /* in dwords */ - uint32_t iova; + uint64_t iova; uint32_t idx; /* cmdstream buffer idx in bos[] */ } *cmd; /* array of size nr_cmds */ struct { 
uint32_t flags; struct msm_gem_object *obj; - uint32_t iova; + uint64_t iova; } bos[0]; }; diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 283d2841ba58..192b2d3a79cb 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -163,6 +163,9 @@ void msm_gem_shrinker_init(struct drm_device *dev) void msm_gem_shrinker_cleanup(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; - WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier)); - unregister_shrinker(&priv->shrinker); + + if (priv->shrinker.nr_deferred) { + WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier)); + unregister_shrinker(&priv->shrinker); + } } diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 25e8786fa4ca..166e84e4f0d4 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -241,7 +241,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit) for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; - uint32_t iova; + uint64_t iova; /* if locking succeeded, pin bo: */ ret = msm_gem_get_iova_locked(&msm_obj->base, @@ -266,7 +266,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit) } static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, - struct msm_gem_object **obj, uint32_t *iova, bool *valid) + struct msm_gem_object **obj, uint64_t *iova, bool *valid) { if (idx >= submit->nr_bos) { DRM_ERROR("invalid buffer index: %u (out of %u)\n", @@ -312,7 +312,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob struct drm_msm_gem_submit_reloc submit_reloc; void __user *userptr = u64_to_user_ptr(relocs + (i * sizeof(submit_reloc))); - uint32_t iova, off; + uint32_t off; + uint64_t iova; bool valid; ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc)); @@ -461,7 +462,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, void __user *userptr = u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); struct msm_gem_object *msm_obj; - uint32_t iova; + uint64_t iova; ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd)); if (ret) { diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c new file mode 100644 index 000000000000..a311d26ccb21 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2016 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
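The body of this new file (below) centralizes iova management that msm_gem.c used to open-code against struct msm_mmu: a drm_mm hands out page-granular ranges inside the IOMMU aperture, and the optional msm_mmu maps the backing sg_table at the chosen iova. Typical use, following the prototypes added to msm_drv.h above (the wrapper function and its pdev/iommu/sgt/npages arguments are assumed caller context):

static int example_setup_aspace(struct platform_device *pdev,
				struct iommu_domain *iommu,
				struct sg_table *sgt, int npages,
				struct msm_gem_vma *vma)
{
	struct msm_gem_address_space *aspace;
	int ret;

	aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
	if (IS_ERR(aspace))
		return PTR_ERR(aspace);

	/* carve a page-granular iova range out of the drm_mm and map it */
	ret = msm_gem_map_vma(aspace, vma, sgt, npages);
	if (ret) {
		msm_gem_address_space_destroy(aspace);
		return ret;
	}

	/* vma->iova now holds the device address of the buffer */
	return 0;
}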
+ */ + +#include "msm_drv.h" +#include "msm_gem.h" +#include "msm_mmu.h" + +void +msm_gem_unmap_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt) +{ + if (!vma->iova) + return; + + if (aspace->mmu) { + unsigned size = vma->node.size << PAGE_SHIFT; + aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size); + } + + drm_mm_remove_node(&vma->node); + + vma->iova = 0; +} + +int +msm_gem_map_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, int npages) +{ + int ret; + + if (WARN_ON(drm_mm_node_allocated(&vma->node))) + return 0; + + ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages, + 0, DRM_MM_SEARCH_DEFAULT); + if (ret) + return ret; + + vma->iova = vma->node.start << PAGE_SHIFT; + + if (aspace->mmu) { + unsigned size = npages << PAGE_SHIFT; + ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, + size, IOMMU_READ | IOMMU_WRITE); + } + + return ret; +} + +void +msm_gem_address_space_destroy(struct msm_gem_address_space *aspace) +{ + drm_mm_takedown(&aspace->mm); + if (aspace->mmu) + aspace->mmu->funcs->destroy(aspace->mmu); + kfree(aspace); +} + +struct msm_gem_address_space * +msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, + const char *name) +{ + struct msm_gem_address_space *aspace; + + aspace = kzalloc(sizeof(*aspace), GFP_KERNEL); + if (!aspace) + return ERR_PTR(-ENOMEM); + + aspace->name = name; + aspace->mmu = msm_iommu_new(dev, domain); + + drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT), + (domain->geometry.aperture_end >> PAGE_SHIFT) - 1); + + return aspace; +} diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 3249707e6834..b28527a65d09 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -91,21 +91,20 @@ static int disable_pwrrail(struct msm_gpu *gpu) static int enable_clk(struct msm_gpu *gpu) { - struct clk *rate_clk = NULL; int i; - /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */ - for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) { - if (gpu->grp_clks[i]) { - clk_prepare(gpu->grp_clks[i]); - rate_clk = gpu->grp_clks[i]; - } - } + if (gpu->grp_clks[0] && gpu->fast_rate) + clk_set_rate(gpu->grp_clks[0], gpu->fast_rate); - if (rate_clk && gpu->fast_rate) - clk_set_rate(rate_clk, gpu->fast_rate); + /* Set the RBBM timer rate to 19.2Mhz */ + if (gpu->grp_clks[2]) + clk_set_rate(gpu->grp_clks[2], 19200000); - for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) + for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) + if (gpu->grp_clks[i]) + clk_prepare(gpu->grp_clks[i]); + + for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) if (gpu->grp_clks[i]) clk_enable(gpu->grp_clks[i]); @@ -114,24 +113,22 @@ static int enable_clk(struct msm_gpu *gpu) static int disable_clk(struct msm_gpu *gpu) { - struct clk *rate_clk = NULL; int i; - /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. 
*/ - for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) { - if (gpu->grp_clks[i]) { + for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) + if (gpu->grp_clks[i]) clk_disable(gpu->grp_clks[i]); - rate_clk = gpu->grp_clks[i]; - } - } - - if (rate_clk && gpu->slow_rate) - clk_set_rate(rate_clk, gpu->slow_rate); - for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) + for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) if (gpu->grp_clks[i]) clk_unprepare(gpu->grp_clks[i]); + if (gpu->grp_clks[0] && gpu->slow_rate) + clk_set_rate(gpu->grp_clks[0], gpu->slow_rate); + + if (gpu->grp_clks[2]) + clk_set_rate(gpu->grp_clks[2], 0); + return 0; } @@ -528,7 +525,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; - uint32_t iova; + uint64_t iova; /* can't happen yet.. but when we add 2d support we'll have * to deal w/ cross-ring synchronization: @@ -563,8 +560,8 @@ static irqreturn_t irq_handler(int irq, void *data) } static const char *clk_names[] = { - "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk", - "alt_mem_iface_clk", + "core_clk", "iface_clk", "rbbmtimer_clk", "mem_clk", + "mem_iface_clk", "alt_mem_iface_clk", }; int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, @@ -656,12 +653,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, */ iommu = iommu_domain_alloc(&platform_bus_type); if (iommu) { + /* TODO 32b vs 64b address space.. */ + iommu->geometry.aperture_start = SZ_16M; + iommu->geometry.aperture_end = 0xffffffff; + dev_info(drm->dev, "%s: using IOMMU\n", name); - gpu->mmu = msm_iommu_new(&pdev->dev, iommu); - if (IS_ERR(gpu->mmu)) { - ret = PTR_ERR(gpu->mmu); + gpu->aspace = msm_gem_address_space_create(&pdev->dev, + iommu, "gpu"); + if (IS_ERR(gpu->aspace)) { + ret = PTR_ERR(gpu->aspace); dev_err(drm->dev, "failed to init iommu: %d\n", ret); - gpu->mmu = NULL; + gpu->aspace = NULL; iommu_domain_free(iommu); goto fail; } @@ -669,7 +671,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, } else { dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); } - gpu->id = msm_register_mmu(drm, gpu->mmu); + gpu->id = msm_register_address_space(drm, gpu->aspace); /* Create ringbuffer: */ @@ -705,8 +707,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) msm_ringbuffer_destroy(gpu->rb); } - if (gpu->mmu) - gpu->mmu->funcs->destroy(gpu->mmu); + if (gpu->aspace) + msm_gem_address_space_destroy(gpu->aspace); if (gpu->fctx) msm_fence_context_free(gpu->fctx); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index d61d98a6e047..c4c39d3272c7 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -50,7 +50,7 @@ struct msm_gpu_funcs { void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx); void (*flush)(struct msm_gpu *gpu); - void (*idle)(struct msm_gpu *gpu); + bool (*idle)(struct msm_gpu *gpu); irqreturn_t (*irq)(struct msm_gpu *irq); uint32_t (*last_fence)(struct msm_gpu *gpu); void (*recover)(struct msm_gpu *gpu); @@ -80,7 +80,7 @@ struct msm_gpu { /* ringbuffer: */ struct msm_ringbuffer *rb; - uint32_t rb_iova; + uint64_t rb_iova; /* list of GEM active objects: */ struct list_head active_list; @@ -98,7 +98,7 @@ struct msm_gpu { void __iomem *mmio; int irq; - struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; int id; /* Power Control: */ @@ -154,6 +154,45 @@ static inline u32 gpu_read(struct 
msm_gpu *gpu, u32 reg) return msm_readl(gpu->mmio + (reg << 2)); } +static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or) +{ + uint32_t val = gpu_read(gpu, reg); + + val &= ~mask; + gpu_write(gpu, reg, val | or); +} + +static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi) +{ + u64 val; + + /* + * Why not a readq here? Two reasons: 1) many of the LO registers are + * not quad word aligned and 2) the GPU hardware designers have a bit + * of a history of putting registers where they fit, especially in + * spins. The longer a GPU family goes the higher the chance that + * we'll get burned. We could do a series of validity checks if we + * wanted to, but really is a readq() that much better? Nah. + */ + + /* + * For some lo/hi registers (like perfcounters), the hi value is latched + * when the lo is read, so make sure to read the lo first to trigger + * that + */ + val = (u64) msm_readl(gpu->mmio + (lo << 2)); + val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32); + + return val; +} + +static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val) +{ + /* Why not a writeq here? Read the screed above */ + msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2)); + msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2)); +} + int msm_gpu_pm_suspend(struct msm_gpu *gpu); int msm_gpu_pm_resume(struct msm_gpu *gpu); diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 3a294d0da3a0..61aaaa1de6eb 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -45,13 +45,13 @@ static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names, iommu_detach_device(iommu->domain, mmu->dev); } -static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, +static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, unsigned len, int prot) { struct msm_iommu *iommu = to_msm_iommu(mmu); struct iommu_domain *domain = iommu->domain; struct scatterlist *sg; - unsigned int da = iova; + unsigned long da = iova; unsigned int i, j; int ret; @@ -62,7 +62,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, dma_addr_t pa = sg_phys(sg) - sg->offset; size_t bytes = sg->length + sg->offset; - VERB("map[%d]: %08x %08lx(%zx)", i, da, (unsigned long)pa, bytes); + VERB("map[%d]: %08lx %08lx(%zx)", i, da, (unsigned long)pa, bytes); ret = iommu_map(domain, da, pa, bytes, prot); if (ret) @@ -84,13 +84,13 @@ fail: return ret; } -static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova, +static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, unsigned len) { struct msm_iommu *iommu = to_msm_iommu(mmu); struct iommu_domain *domain = iommu->domain; struct scatterlist *sg; - unsigned int da = iova; + unsigned long da = iova; int i; for_each_sg(sgt->sgl, sg, sgt->nents, i) { @@ -101,7 +101,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova, if (unmapped < bytes) return unmapped; - VERB("unmap[%d]: %08x(%zx)", i, da, bytes); + VERB("unmap[%d]: %08lx(%zx)", i, da, bytes); BUG_ON(!PAGE_ALIGNED(bytes)); diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 40e41e5cdbc6..e470f4cf8f76 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -40,6 +40,8 @@ struct msm_kms_funcs { irqreturn_t (*irq)(struct msm_kms *kms); int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); + /* swap global atomic state: */ + void 
(*swap_state)(struct msm_kms *kms, struct drm_atomic_state *state); /* modeset, bracketing atomic_commit(): */ void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state); void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state); @@ -56,6 +58,11 @@ struct msm_kms_funcs { bool is_cmd_mode); /* cleanup: */ void (*destroy)(struct msm_kms *kms); +#ifdef CONFIG_DEBUG_FS + /* debugfs: */ + int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor); + void (*debugfs_cleanup)(struct msm_kms *kms, struct drm_minor *minor); +#endif }; struct msm_kms { @@ -65,6 +72,18 @@ struct msm_kms { int irq; }; +/** + * Subclass of drm_atomic_state, to allow kms backend to have driver + * private global state. The kms backend can do whatever it wants + * with the ->state ptr. On ->atomic_state_clear() the ->state ptr + * is kfree'd and set back to NULL. + */ +struct msm_kms_state { + struct drm_atomic_state base; + void *state; +}; +#define to_kms_state(x) container_of(x, struct msm_kms_state, base) + static inline void msm_kms_init(struct msm_kms *kms, const struct msm_kms_funcs *funcs) { diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index b8ca9a0e9170..f85c879e68d2 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -23,9 +23,9 @@ struct msm_mmu_funcs { int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt); void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt); - int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, + int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, unsigned len, int prot); - int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, + int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, unsigned len); void (*destroy)(struct msm_mmu *mmu); }; diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 8487f461f05f..6607456dc626 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -289,7 +289,7 @@ void msm_rd_debugfs_cleanup(struct drm_minor *minor) static void snapshot_buf(struct msm_rd_state *rd, struct msm_gem_submit *submit, int idx, - uint32_t iova, uint32_t size) + uint64_t iova, uint32_t size) { struct msm_gem_object *obj = submit->bos[idx].obj; const char *buf; @@ -306,7 +306,7 @@ static void snapshot_buf(struct msm_rd_state *rd, } rd_write_section(rd, RD_GPUADDR, - (uint32_t[2]){ iova, size }, 8); + (uint32_t[3]){ iova, size, iova >> 32 }, 12); rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size); msm_gem_put_vaddr_locked(&obj->base); diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 04270f5d110c..74fc9362ecf9 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -578,7 +578,7 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev, return 0; } -int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf) +static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf) { struct qxl_rect rect; int ret; diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index a61c0d460ec2..4b5eab8a47b3 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -36,7 +36,7 @@ static bool qxl_head_enabled(struct qxl_head *head) return head->width && head->height; } -void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count) +static void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned 
count) { if (qdev->client_monitors_config && count > qdev->client_monitors_config->count) { @@ -57,11 +57,18 @@ void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count) qdev->client_monitors_config->count = count; } +enum { + MONITORS_CONFIG_MODIFIED, + MONITORS_CONFIG_UNCHANGED, + MONITORS_CONFIG_BAD_CRC, +}; + static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev) { int i; int num_monitors; uint32_t crc; + int status = MONITORS_CONFIG_UNCHANGED; num_monitors = qdev->rom->client_monitors_config.count; crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config, @@ -70,7 +77,7 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev) qxl_io_log(qdev, "crc mismatch: have %X (%zd) != %X\n", crc, sizeof(qdev->rom->client_monitors_config), qdev->rom->client_monitors_config_crc); - return 1; + return MONITORS_CONFIG_BAD_CRC; } if (num_monitors > qdev->monitors_config->max_allowed) { DRM_DEBUG_KMS("client monitors list will be truncated: %d < %d\n", @@ -79,6 +86,10 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev) } else { num_monitors = qdev->rom->client_monitors_config.count; } + if (qdev->client_monitors_config + && (num_monitors != qdev->client_monitors_config->count)) { + status = MONITORS_CONFIG_MODIFIED; + } qxl_alloc_client_monitors_config(qdev, num_monitors); /* we copy max from the client but it isn't used */ qdev->client_monitors_config->max_allowed = @@ -88,17 +99,39 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev) &qdev->rom->client_monitors_config.heads[i]; struct qxl_head *client_head = &qdev->client_monitors_config->heads[i]; - client_head->x = c_rect->left; - client_head->y = c_rect->top; - client_head->width = c_rect->right - c_rect->left; - client_head->height = c_rect->bottom - c_rect->top; - client_head->surface_id = 0; - client_head->id = i; - client_head->flags = 0; + if (client_head->x != c_rect->left) { + client_head->x = c_rect->left; + status = MONITORS_CONFIG_MODIFIED; + } + if (client_head->y != c_rect->top) { + client_head->y = c_rect->top; + status = MONITORS_CONFIG_MODIFIED; + } + if (client_head->width != c_rect->right - c_rect->left) { + client_head->width = c_rect->right - c_rect->left; + status = MONITORS_CONFIG_MODIFIED; + } + if (client_head->height != c_rect->bottom - c_rect->top) { + client_head->height = c_rect->bottom - c_rect->top; + status = MONITORS_CONFIG_MODIFIED; + } + if (client_head->surface_id != 0) { + client_head->surface_id = 0; + status = MONITORS_CONFIG_MODIFIED; + } + if (client_head->id != i) { + client_head->id = i; + status = MONITORS_CONFIG_MODIFIED; + } + if (client_head->flags != 0) { + client_head->flags = 0; + status = MONITORS_CONFIG_MODIFIED; + } DRM_DEBUG_KMS("read %dx%d+%d+%d\n", client_head->width, client_head->height, client_head->x, client_head->y); } - return 0; + + return status; } static void qxl_update_offset_props(struct qxl_device *qdev) @@ -124,9 +157,18 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev) { struct drm_device *dev = qdev->ddev; - while (qxl_display_copy_rom_client_monitors_config(qdev)) { + int status; + + status = qxl_display_copy_rom_client_monitors_config(qdev); + while (status == MONITORS_CONFIG_BAD_CRC) { qxl_io_log(qdev, "failed crc check for client_monitors_config," " retrying\n"); + status = qxl_display_copy_rom_client_monitors_config(qdev); + } + if (status == MONITORS_CONFIG_UNCHANGED) { + qxl_io_log(qdev, "config unchanged\n"); 
+ DRM_DEBUG("ignoring unchanged client monitors config"); + return; } drm_modeset_lock_all(dev); @@ -157,6 +199,9 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector, mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false, false); mode->type |= DRM_MODE_TYPE_PREFERRED; + mode->hdisplay = head->width; + mode->vdisplay = head->height; + drm_mode_set_name(mode); *pwidth = head->width; *pheight = head->height; drm_mode_probed_add(connector, mode); @@ -607,7 +652,7 @@ static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc, return true; } -void +static void qxl_send_monitors_config(struct qxl_device *qdev) { int i; diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 84995ebc6ffc..785aad42e9bb 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -395,16 +395,11 @@ qxl_framebuffer_init(struct drm_device *dev, struct drm_gem_object *obj, const struct drm_framebuffer_funcs *funcs); void qxl_display_read_client_monitors_config(struct qxl_device *qdev); -void qxl_send_monitors_config(struct qxl_device *qdev); int qxl_create_monitors_object(struct qxl_device *qdev); int qxl_destroy_monitors_object(struct qxl_device *qdev); -/* used by qxl_debugfs only */ -void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev); -void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count); - /* qxl_gem.c */ -int qxl_gem_init(struct qxl_device *qdev); +void qxl_gem_init(struct qxl_device *qdev); void qxl_gem_fini(struct qxl_device *qdev); int qxl_gem_object_create(struct qxl_device *qdev, int size, int alignment, int initial_domain, @@ -574,6 +569,5 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo); struct qxl_drv_surface * qxl_surface_lookup(struct drm_device *dev, int surface_id); void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing); -int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); #endif diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 7e305d8a4146..fd7e5e94be5b 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -191,7 +191,7 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb, /* * we are using a shadow draw buffer, at qdev->surface0_shadow */ - qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", clips->x1, clips->x2, + qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]\n", clips->x1, clips->x2, clips->y1, clips->y2); image->dx = clips->x1; image->dy = clips->y1; diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c index d9746e904ef1..3f185c4da5b7 100644 --- a/drivers/gpu/drm/qxl/qxl_gem.c +++ b/drivers/gpu/drm/qxl/qxl_gem.c @@ -111,10 +111,9 @@ void qxl_gem_object_close(struct drm_gem_object *obj, { } -int qxl_gem_init(struct qxl_device *qdev) +void qxl_gem_init(struct qxl_device *qdev) { INIT_LIST_HEAD(&qdev->gem.objects); - return 0; } void qxl_gem_fini(struct qxl_device *qdev) diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index e642242728c0..af685f1d91f8 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -131,7 +131,7 @@ static int qxl_device_init(struct qxl_device *qdev, mutex_init(&qdev->update_area_mutex); mutex_init(&qdev->release_mutex); mutex_init(&qdev->surf_evict_mutex); - INIT_LIST_HEAD(&qdev->gem.objects); + qxl_gem_init(qdev); qdev->rom_base = pci_resource_start(pdev, 2); qdev->rom_size = pci_resource_len(pdev, 2); @@ -273,6 +273,7 @@ static void qxl_device_fini(struct 
qxl_device *qdev) qxl_ring_free(qdev->command_ring); qxl_ring_free(qdev->cursor_ring); qxl_ring_free(qdev->release_ring); + qxl_gem_fini(qdev); qxl_bo_fini(qdev); io_mapping_free(qdev->surface_mapping); io_mapping_free(qdev->vram_mapping); diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index e18839d52e3e..27affbde058c 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -931,7 +931,7 @@ static void radeon_connector_unregister(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); - if (radeon_connector->ddc_bus->has_aux) { + if (radeon_connector->ddc_bus && radeon_connector->ddc_bus->has_aux) { drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux); radeon_connector->ddc_bus->has_aux = false; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0be8d5cd7826..60a8920fa0b9 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -104,6 +104,14 @@ static const char radeon_family_name[][16] = { "LAST", }; +#if defined(CONFIG_VGA_SWITCHEROO) +bool radeon_has_atpx_dgpu_power_cntl(void); +bool radeon_is_atpx_hybrid(void); +#else +static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; } +static inline bool radeon_is_atpx_hybrid(void) { return false; } +#endif + #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) @@ -160,6 +168,11 @@ static void radeon_device_handle_px_quirks(struct radeon_device *rdev) if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX) rdev->flags &= ~RADEON_IS_PX; + + /* disable PX if the system doesn't support dGPU power control or hybrid gfx */ + if (!radeon_is_atpx_hybrid() && + !radeon_has_atpx_dgpu_power_cntl()) + rdev->flags &= ~RADEON_IS_PX; } /** diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 29f0207fa677..873f010d9616 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -98,17 +98,23 @@ success: static int udl_select_std_channel(struct udl_device *udl) { int ret; - u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7, - 0x1C, 0x88, 0x5E, 0x15, - 0x60, 0xFE, 0xC6, 0x97, - 0x16, 0x3D, 0x47, 0xF2}; + static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7, + 0x1C, 0x88, 0x5E, 0x15, + 0x60, 0xFE, 0xC6, 0x97, + 0x16, 0x3D, 0x47, 0xF2}; + void *sendbuf; + + sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL); + if (!sendbuf) + return -ENOMEM; ret = usb_control_msg(udl->udev, usb_sndctrlpipe(udl->udev, 0), NR_USB_REQUEST_CHANNEL, (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0, - set_def_chn, sizeof(set_def_chn), + sendbuf, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT); + kfree(sendbuf); return ret < 0 ?
ret : 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c index 49e5996cb9f2..3b97d50fd392 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c +++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c @@ -28,16 +28,6 @@ #include "virtgpu_drv.h" -int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master) -{ - struct pci_dev *pdev = dev->pdev; - - if (pdev) { - return drm_pci_set_busid(dev, master); - } - return 0; -} - static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev) { struct apertures_struct *ap; @@ -71,13 +61,22 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev) if (strcmp(vdev->dev.parent->bus->name, "pci") == 0) { struct pci_dev *pdev = to_pci_dev(vdev->dev.parent); + const char *pname = dev_name(&pdev->dev); bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA; + char unique[20]; - DRM_INFO("pci: %s detected\n", - vga ? "virtio-vga" : "virtio-gpu-pci"); + DRM_INFO("pci: %s detected at %s\n", + vga ? "virtio-vga" : "virtio-gpu-pci", + pname); dev->pdev = pdev; if (vga) virtio_pci_kick_out_firmware_fb(pdev); + + snprintf(unique, sizeof(unique), "pci:%s", pname); + ret = drm_dev_set_unique(dev, unique); + if (ret) + goto err_free; + } ret = drm_dev_register(dev, 0); diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 04d98db75c64..d82489815096 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -115,7 +115,6 @@ static const struct file_operations virtio_gpu_driver_fops = { static struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC, - .set_busid = drm_virtio_set_busid, .load = virtio_gpu_driver_load, .unload = virtio_gpu_driver_unload, .open = virtio_gpu_driver_open, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index ec1ebdcfe80b..08906c8ce3fa 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -49,7 +49,6 @@ #define DRIVER_PATCHLEVEL 1 /* virtgpu_drm_bus.c */ -int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master); int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev); struct virtio_gpu_object { diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 5a0f8a745b9d..974f9410474b 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -75,7 +75,7 @@ void virtio_gpu_cursor_ack(struct virtqueue *vq) int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev) { struct virtio_gpu_vbuffer *vbuf; - int i, size, count = 0; + int i, size, count = 16; void *ptr; INIT_LIST_HEAD(&vgdev->free_vbufs); diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 77657a8c0cd4..0f5b2dd24507 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -31,6 +31,10 @@ #define pr_fmt(fmt) "vgaarb: " fmt +#define vgaarb_dbg(dev, fmt, arg...) dev_dbg(dev, "vgaarb: " fmt, ##arg) +#define vgaarb_info(dev, fmt, arg...) dev_info(dev, "vgaarb: " fmt, ##arg) +#define vgaarb_err(dev, fmt, arg...) 
dev_err(dev, "vgaarb: " fmt, ##arg) + #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> @@ -188,6 +192,7 @@ static void vga_check_first_use(void) static struct vga_device *__vga_tryget(struct vga_device *vgadev, unsigned int rsrc) { + struct device *dev = &vgadev->pdev->dev; unsigned int wants, legacy_wants, match; struct vga_device *conflict; unsigned int pci_bits; @@ -203,8 +208,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev, (vgadev->decodes & VGA_RSRC_LEGACY_MEM)) rsrc |= VGA_RSRC_LEGACY_MEM; - pr_debug("%s: %d\n", __func__, rsrc); - pr_debug("%s: owns: %d\n", __func__, vgadev->owns); + vgaarb_dbg(dev, "%s: %d\n", __func__, rsrc); + vgaarb_dbg(dev, "%s: owns: %d\n", __func__, vgadev->owns); /* Check what resources we need to acquire */ wants = rsrc & ~vgadev->owns; @@ -336,9 +341,10 @@ lock_them: static void __vga_put(struct vga_device *vgadev, unsigned int rsrc) { + struct device *dev = &vgadev->pdev->dev; unsigned int old_locks = vgadev->locks; - pr_debug("%s\n", __func__); + vgaarb_dbg(dev, "%s\n", __func__); /* Update our counters, and account for equivalent legacy resources * if we decode them @@ -611,7 +617,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev) /* Allocate structure */ vgadev = kzalloc(sizeof(struct vga_device), GFP_KERNEL); if (vgadev == NULL) { - pr_err("failed to allocate pci device\n"); + vgaarb_err(&pdev->dev, "failed to allocate VGA arbiter data\n"); /* * What to do on allocation failure ? For now, let's just do * nothing, I'm not sure there is anything saner to be done. @@ -663,7 +669,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev) */ if (vga_default == NULL && ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) { - pr_info("setting as boot device: PCI:%s\n", pci_name(pdev)); + vgaarb_info(&pdev->dev, "setting as boot VGA device\n"); vga_set_default_device(pdev); } @@ -672,8 +678,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev) /* Add to the list */ list_add(&vgadev->list, &vga_list); vga_count++; - pr_info("device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n", - pci_name(pdev), + vgaarb_info(&pdev->dev, "VGA device added: decodes=%s,owns=%s,locks=%s\n", vga_iostate_to_str(vgadev->decodes), vga_iostate_to_str(vgadev->owns), vga_iostate_to_str(vgadev->locks)); @@ -725,6 +730,7 @@ bail: static inline void vga_update_device_decodes(struct vga_device *vgadev, int new_decodes) { + struct device *dev = &vgadev->pdev->dev; int old_decodes, decodes_removed, decodes_unlocked; old_decodes = vgadev->decodes; @@ -732,8 +738,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev, decodes_unlocked = vgadev->locks & decodes_removed; vgadev->decodes = new_decodes; - pr_info("device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n", - pci_name(vgadev->pdev), + vgaarb_info(dev, "changed VGA decodes: olddecodes=%s,decodes=%s:owns=%s\n", vga_iostate_to_str(old_decodes), vga_iostate_to_str(vgadev->decodes), vga_iostate_to_str(vgadev->owns)); @@ -754,7 +759,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev, if (!(old_decodes & VGA_RSRC_LEGACY_MASK) && new_decodes & VGA_RSRC_LEGACY_MASK) vga_decode_count++; - pr_debug("decoding count now is: %d\n", vga_decode_count); + vgaarb_dbg(dev, "decoding count now is: %d\n", vga_decode_count); } static void __vga_set_legacy_decoding(struct pci_dev *pdev, @@ -1184,24 +1189,25 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf, ret_val = -EPROTO; goto done; 
} - pr_debug("%s ==> %x:%x:%x.%x\n", curr_pos, - domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); - pdev = pci_get_domain_bus_and_slot(domain, bus, devfn); - pr_debug("pdev %p\n", pdev); if (!pdev) { - pr_err("invalid PCI address %x:%x:%x\n", - domain, bus, devfn); + pr_debug("invalid PCI address %04x:%02x:%02x.%x\n", + domain, bus, PCI_SLOT(devfn), + PCI_FUNC(devfn)); ret_val = -ENODEV; goto done; } + + pr_debug("%s ==> %04x:%02x:%02x.%x pdev %p\n", curr_pos, + domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), + pdev); } vgadev = vgadev_find(pdev); pr_debug("vgadev %p\n", vgadev); if (vgadev == NULL) { if (pdev) { - pr_err("this pci device is not a vga device\n"); + vgaarb_dbg(&pdev->dev, "not a VGA device\n"); pci_dev_put(pdev); } @@ -1221,7 +1227,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf, } } if (i == MAX_USER_CARDS) { - pr_err("maximum user cards (%d) number reached!\n", + vgaarb_dbg(&pdev->dev, "maximum user cards (%d) number reached, ignoring this one!\n", MAX_USER_CARDS); pci_dev_put(pdev); /* XXX: which value to return? */ @@ -1310,8 +1316,8 @@ static int vga_arb_release(struct inode *inode, struct file *file) uc = &priv->cards[i]; if (uc->pdev == NULL) continue; - pr_debug("uc->io_cnt == %d, uc->mem_cnt == %d\n", - uc->io_cnt, uc->mem_cnt); + vgaarb_dbg(&uc->pdev->dev, "uc->io_cnt == %d, uc->mem_cnt == %d\n", + uc->io_cnt, uc->mem_cnt); while (uc->io_cnt--) vga_put(uc->pdev, VGA_RSRC_LEGACY_IO); while (uc->mem_cnt--) @@ -1364,7 +1370,7 @@ static int pci_notify(struct notifier_block *nb, unsigned long action, struct pci_dev *pdev = to_pci_dev(dev); bool notify = false; - pr_debug("%s\n", __func__); + vgaarb_dbg(dev, "%s\n", __func__); /* For now we're only intereted in devices added and removed. I didn't * test this thing here, so someone needs to double check for the @@ -1416,9 +1422,8 @@ static int __init vga_arb_device_init(void) PCI_ANY_ID, pdev)) != NULL) vga_arbiter_add_pci_device(pdev); - pr_info("loaded\n"); - list_for_each_entry(vgadev, &vga_list, list) { + struct device *dev = &vgadev->pdev->dev; #if defined(CONFIG_X86) || defined(CONFIG_IA64) /* * Override vga_arbiter_add_pci_device()'s I/O based detection @@ -1451,21 +1456,19 @@ static int __init vga_arb_device_init(void) continue; if (!vga_default_device()) - pr_info("setting as boot device: PCI:%s\n", - pci_name(vgadev->pdev)); + vgaarb_info(dev, "setting as boot device\n"); else if (vgadev->pdev != vga_default_device()) - pr_info("overriding boot device: PCI:%s\n", - pci_name(vgadev->pdev)); + vgaarb_info(dev, "overriding boot device\n"); vga_set_default_device(vgadev->pdev); } #endif if (vgadev->bridge_has_one_vga) - pr_info("bridge control possible %s\n", - pci_name(vgadev->pdev)); + vgaarb_info(dev, "bridge control possible\n"); else - pr_info("no bridge control possible %s\n", - pci_name(vgadev->pdev)); + vgaarb_info(dev, "no bridge control possible\n"); } + + pr_info("loaded\n"); return rc; } subsys_initcall(vga_arb_device_init); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 6cfb5cacc253..575aa65436d1 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -179,6 +179,7 @@ #define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205 #define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208 #define USB_DEVICE_ID_ATEN_CS682 0x2213 +#define USB_DEVICE_ID_ATEN_CS692 0x8021 #define USB_VENDOR_ID_ATMEL 0x03eb #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c index 5614fee82347..3a84aaf1418b 100644 --- 
a/drivers/hid/hid-sensor-custom.c +++ b/drivers/hid/hid-sensor-custom.c @@ -292,11 +292,11 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr, bool input = false; int value = 0; - if (sscanf(attr->attr.name, "feature-%d-%x-%s", &index, &usage, + if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage, name) == 3) { feature = true; field_index = index + sensor_inst->input_field_count; - } else if (sscanf(attr->attr.name, "input-%d-%x-%s", &index, &usage, + } else if (sscanf(attr->attr.name, "input-%x-%x-%s", &index, &usage, name) == 3) { input = true; field_index = index; @@ -398,7 +398,7 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr, char name[HID_CUSTOM_NAME_LENGTH]; int value; - if (sscanf(attr->attr.name, "feature-%d-%x-%s", &index, &usage, + if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage, name) == 3) { field_index = index + sensor_inst->input_field_count; } else diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 658a607dc6d9..c5c3d6111729 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -251,6 +251,9 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev); int report_size; int ret = 0; + u8 *val_ptr; + int buffer_index = 0; + int i; mutex_lock(&data->mutex); report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT); @@ -271,7 +274,17 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, goto done_proc; } ret = min(report_size, buffer_size); - memcpy(buffer, report->field[field_index]->value, ret); + + val_ptr = (u8 *)report->field[field_index]->value; + for (i = 0; i < report->field[field_index]->report_count; ++i) { + if (buffer_index >= ret) + break; + + memcpy(&((u8 *)buffer)[buffer_index], val_ptr, + report->field[field_index]->report_size / 8); + val_ptr += sizeof(__s32); + buffer_index += (report->field[field_index]->report_size / 8); + } done_proc: mutex_unlock(&data->mutex); diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c index e2517c11e0ee..0c9ac4d5d850 100644 --- a/drivers/hid/intel-ish-hid/ipc/ipc.c +++ b/drivers/hid/intel-ish-hid/ipc/ipc.c @@ -638,6 +638,58 @@ eoi: } /** + * ish_disable_dma() - disable dma communication between host and ISHFW + * @dev: ishtp device pointer + * + * Clear the dma enable bit and wait for dma inactive. + * + * Return: 0 for success else error code. + */ +static int ish_disable_dma(struct ishtp_device *dev) +{ + unsigned int dma_delay; + + /* Clear the dma enable bit */ + ish_reg_write(dev, IPC_REG_ISH_RMP2, 0); + + /* wait for dma inactive */ + for (dma_delay = 0; dma_delay < MAX_DMA_DELAY && + _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA); + dma_delay += 5) + mdelay(5); + + if (dma_delay >= MAX_DMA_DELAY) { + dev_err(dev->devc, + "Wait for DMA inactive timeout\n"); + return -EBUSY; + } + + return 0; +} + +/** + * ish_wakeup() - wakeup ishfw from waiting-for-host state + * @dev: ishtp device pointer + * + * Set the dma enable bit and send a void message to FW, + * it will wake up FW from waiting-for-host state. + */ +static void ish_wakeup(struct ishtp_device *dev) +{ + /* Set dma enable bit */ + ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED); + + /* + * Send 0 IPC message so that ISH FW wakes up if it was already + * asleep.
+ */ + ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT); + + /* Flush writes to doorbell and REMAP2 */ + ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS); +} + +/** * _ish_hw_reset() - HW reset * @dev: ishtp device pointer * @@ -649,7 +701,6 @@ static int _ish_hw_reset(struct ishtp_device *dev) { struct pci_dev *pdev = dev->pdev; int rv; - unsigned int dma_delay; uint16_t csr; if (!pdev) @@ -664,15 +715,8 @@ static int _ish_hw_reset(struct ishtp_device *dev) return -EINVAL; } - /* Now trigger reset to FW */ - ish_reg_write(dev, IPC_REG_ISH_RMP2, 0); - - for (dma_delay = 0; dma_delay < MAX_DMA_DELAY && - _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA); - dma_delay += 5) - mdelay(5); - - if (dma_delay >= MAX_DMA_DELAY) { + /* Disable dma communication between FW and host */ + if (ish_disable_dma(dev)) { dev_err(&pdev->dev, "Can't reset - stuck with DMA in-progress\n"); return -EBUSY; @@ -690,16 +734,8 @@ static int _ish_hw_reset(struct ishtp_device *dev) csr |= PCI_D0; pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr); - ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED); - - /* - * Send 0 IPC message so that ISH FW wakes up if it was already - * asleep - */ - ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT); - - /* Flush writes to doorbell and REMAP2 */ - ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS); + /* Now we can enable ISH DMA operation and wakeup ISHFW */ + ish_wakeup(dev); return 0; } @@ -758,16 +794,9 @@ static int _ish_ipc_reset(struct ishtp_device *dev) int ish_hw_start(struct ishtp_device *dev) { ish_set_host_rdy(dev); - /* After that we can enable ISH DMA operation */ - ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED); - /* - * Send 0 IPC message so that ISH FW wakes up if it was already - * asleep - */ - ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT); - /* Flush write to doorbell */ - ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS); + /* After that we can enable ISH DMA operation and wakeup ISHFW */ + ish_wakeup(dev); set_host_ready(dev); @@ -876,6 +905,21 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev) */ void ish_device_disable(struct ishtp_device *dev) { + struct pci_dev *pdev = dev->pdev; + + if (!pdev) + return; + + /* Disable dma communication between FW and host */ + if (ish_disable_dma(dev)) { + dev_err(&pdev->dev, + "Can't reset - stuck with DMA in-progress\n"); + return; + } + + /* Put ISH to D3hot state for power saving */ + pci_set_power_state(pdev, PCI_D3hot); + dev->dev_state = ISHTP_DEV_DISABLED; ish_clr_host_rdy(dev); } diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 42f0beeb09fd..20d647d2dd2c 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -146,7 +146,7 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; /* request and enable interrupt */ - ret = request_irq(pdev->irq, ish_irq_handler, IRQF_NO_SUSPEND, + ret = request_irq(pdev->irq, ish_irq_handler, IRQF_SHARED, KBUILD_MODNAME, dev); if (ret) { dev_err(&pdev->dev, "ISH: request IRQ failure (%d)\n", @@ -202,6 +202,7 @@ static void ish_remove(struct pci_dev *pdev) kfree(ishtp_dev); } +#ifdef CONFIG_PM static struct device *ish_resume_device; /** @@ -293,7 +294,6 @@ static int ish_resume(struct device *device) return 0; } -#ifdef CONFIG_PM static const struct dev_pm_ops ish_pm_ops = { .suspend = ish_suspend, .resume = ish_resume, @@ -301,7 +301,7 @@ static const struct dev_pm_ops 
ish_pm_ops = { #define ISHTP_ISH_PM_OPS (&ish_pm_ops) #else #define ISHTP_ISH_PM_OPS NULL -#endif +#endif /* CONFIG_PM */ static struct pci_driver ish_driver = { .name = KBUILD_MODNAME, diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 354d49ea36dd..e6cfd323babc 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -63,6 +63,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET }, diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index a259e18d22d5..0276d2ef06ee 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -961,7 +961,7 @@ int vmbus_device_register(struct hv_device *child_device_obj) { int ret = 0; - dev_set_name(&child_device_obj->device, "vmbus-%pUl", + dev_set_name(&child_device_obj->device, "%pUl", child_device_obj->channel->offermsg.offer.if_instance.b); child_device_obj->device.bus = &hv_bus; diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index adae6848ffb2..a74c075a30ec 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -536,8 +536,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups), GFP_KERNEL); - if (!hwdev->groups) - return ERR_PTR(-ENOMEM); + if (!hwdev->groups) { + err = -ENOMEM; + goto free_hwmon; + } attrs = __hwmon_create_attrs(dev, drvdata, chip); if (IS_ERR(attrs)) { diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index da3fb069ec5c..ce69048c88e9 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -743,8 +743,8 @@ static int st_accel_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: - *val = 0; - *val2 = adata->current_fullscale->gain; + *val = adata->current_fullscale->gain / 1000000; + *val2 = adata->current_fullscale->gain % 1000000; return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_SAMP_FREQ: *val = adata->odr; @@ -763,9 +763,13 @@ static int st_accel_write_raw(struct iio_dev *indio_dev, int err; switch (mask) { - case IIO_CHAN_INFO_SCALE: - err = st_sensors_set_fullscale_by_gain(indio_dev, val2); + case IIO_CHAN_INFO_SCALE: { + int gain; + + gain = val * 1000000 + val2; + err = st_sensors_set_fullscale_by_gain(indio_dev, gain); break; + } case IIO_CHAN_INFO_SAMP_FREQ: if (val2) return -EINVAL; diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c index dc33c1dd5191..b5beea53d6f6 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c @@ -30,26 +30,26 @@ static struct { u32 usage_id; int unit; /* 0 for default others from HID sensor spec */ int scale_val0; /* scale, whole number */ - int scale_val1; /* scale, fraction in micros */ + int scale_val1; /* scale, fraction in nanos */ } unit_conversion[] = { - {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650}, + {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650000}, {HID_USAGE_SENSOR_ACCEL_3D, 
HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0}, {HID_USAGE_SENSOR_ACCEL_3D, - HID_USAGE_SENSOR_UNITS_G, 9, 806650}, + HID_USAGE_SENSOR_UNITS_G, 9, 806650000}, - {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453}, + {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453293}, {HID_USAGE_SENSOR_GYRO_3D, HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0}, {HID_USAGE_SENSOR_GYRO_3D, - HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453}, + HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453293}, - {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000}, + {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000000}, {HID_USAGE_SENSOR_COMPASS_3D, HID_USAGE_SENSOR_UNITS_GAUSS, 1, 0}, - {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453}, + {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453293}, {HID_USAGE_SENSOR_INCLINOMETER_3D, - HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453}, + HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293}, {HID_USAGE_SENSOR_INCLINOMETER_3D, HID_USAGE_SENSOR_UNITS_RADIANS, 1, 0}, @@ -57,7 +57,7 @@ static struct { {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0}, {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0}, - {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000}, + {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000000}, }; static int pow_10(unsigned power) @@ -266,15 +266,15 @@ EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value); /* * This function applies the unit exponent to the scale. * For example: - * 9.806650 ->exp:2-> val0[980]val1[665000] - * 9.000806 ->exp:2-> val0[900]val1[80600] - * 0.174535 ->exp:2-> val0[17]val1[453500] - * 1.001745 ->exp:0-> val0[1]val1[1745] - * 1.001745 ->exp:2-> val0[100]val1[174500] - * 1.001745 ->exp:4-> val0[10017]val1[450000] - * 9.806650 ->exp:-2-> val0[0]val1[98066] + * 9.806650000 ->exp:2-> val0[980]val1[665000000] + * 9.000806000 ->exp:2-> val0[900]val1[80600000] + * 0.174535293 ->exp:2-> val0[17]val1[453529300] + * 1.001745329 ->exp:0-> val0[1]val1[1745329] + * 1.001745329 ->exp:2-> val0[100]val1[174532900] + * 1.001745329 ->exp:4-> val0[10017]val1[453290000] + * 9.806650000 ->exp:-2-> val0[0]val1[98066500] */ -static void adjust_exponent_micro(int *val0, int *val1, int scale0, +static void adjust_exponent_nano(int *val0, int *val1, int scale0, int scale1, int exp) { int i; @@ -285,32 +285,32 @@ static void adjust_exponent_micro(int *val0, int *val1, int scale0, if (exp > 0) { *val0 = scale0 * pow_10(exp); res = 0; - if (exp > 6) { + if (exp > 9) { *val1 = 0; return; } for (i = 0; i < exp; ++i) { - x = scale1 / pow_10(5 - i); + x = scale1 / pow_10(8 - i); res += (pow_10(exp - 1 - i) * x); - scale1 = scale1 % pow_10(5 - i); + scale1 = scale1 % pow_10(8 - i); } *val0 += res; *val1 = scale1 * pow_10(exp); } else if (exp < 0) { exp = abs(exp); - if (exp > 6) { + if (exp > 9) { *val0 = *val1 = 0; return; } *val0 = scale0 / pow_10(exp); rem = scale0 % pow_10(exp); res = 0; - for (i = 0; i < (6 - exp); ++i) { - x = scale1 / pow_10(5 - i); - res += (pow_10(5 - exp - i) * x); - scale1 = scale1 % pow_10(5 - i); + for (i = 0; i < (9 - exp); ++i) { + x = scale1 / pow_10(8 - i); + res += (pow_10(8 - exp - i) * x); + scale1 = scale1 % pow_10(8 - i); } - *val1 = rem * pow_10(6 - exp) + res; + *val1 = rem * pow_10(9 - exp) + res; } else { *val0 = scale0; *val1 = scale1; @@ -332,14 +332,14 @@ int hid_sensor_format_scale(u32 usage_id, unit_conversion[i].unit == attr_info->units) { exp = hid_sensor_convert_exponent( attr_info->unit_expo); - adjust_exponent_micro(val0, val1, + adjust_exponent_nano(val0, val1, unit_conversion[i].scale_val0, unit_conversion[i].scale_val1, exp); break; } } - return
IIO_VAL_INT_PLUS_MICRO; + return IIO_VAL_INT_PLUS_NANO; } EXPORT_SYMBOL(hid_sensor_format_scale); diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 285a64a589d7..975a1f19f747 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -612,7 +612,7 @@ EXPORT_SYMBOL(st_sensors_sysfs_sampling_frequency_avail); ssize_t st_sensors_sysfs_scale_avail(struct device *dev, struct device_attribute *attr, char *buf) { - int i, len = 0; + int i, len = 0, q, r; struct iio_dev *indio_dev = dev_get_drvdata(dev); struct st_sensor_data *sdata = iio_priv(indio_dev); @@ -621,8 +621,10 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev, if (sdata->sensor_settings->fs.fs_avl[i].num == 0) break; - len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ", - sdata->sensor_settings->fs.fs_avl[i].gain); + q = sdata->sensor_settings->fs.fs_avl[i].gain / 1000000; + r = sdata->sensor_settings->fs.fs_avl[i].gain % 1000000; + + len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r); } mutex_unlock(&indio_dev->mlock); buf[len - 1] = '\n'; diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c index b98b9d94d184..a97e802ca523 100644 --- a/drivers/iio/orientation/hid-sensor-rotation.c +++ b/drivers/iio/orientation/hid-sensor-rotation.c @@ -335,6 +335,7 @@ static struct platform_driver hid_dev_rot_platform_driver = { .id_table = hid_dev_rot_ids, .driver = { .name = KBUILD_MODNAME, + .pm = &hid_sensor_pm_ops, }, .probe = hid_dev_rot_probe, .remove = hid_dev_rot_remove, diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c index 066161a4bccd..f962f31a5eb2 100644 --- a/drivers/iio/temperature/maxim_thermocouple.c +++ b/drivers/iio/temperature/maxim_thermocouple.c @@ -136,6 +136,8 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data, ret = spi_read(data->spi, (void *)&buf32, storage_bytes); *val = be32_to_cpu(buf32); break; + default: + ret = -EINVAL; } if (ret) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 36bf50ebb187..89a6b0546804 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1094,47 +1094,47 @@ static void cma_save_ib_info(struct sockaddr *src_addr, } } -static void cma_save_ip4_info(struct sockaddr *src_addr, - struct sockaddr *dst_addr, +static void cma_save_ip4_info(struct sockaddr_in *src_addr, + struct sockaddr_in *dst_addr, struct cma_hdr *hdr, __be16 local_port) { - struct sockaddr_in *ip4; - if (src_addr) { - ip4 = (struct sockaddr_in *)src_addr; - ip4->sin_family = AF_INET; - ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr; - ip4->sin_port = local_port; + *src_addr = (struct sockaddr_in) { + .sin_family = AF_INET, + .sin_addr.s_addr = hdr->dst_addr.ip4.addr, + .sin_port = local_port, + }; } if (dst_addr) { - ip4 = (struct sockaddr_in *)dst_addr; - ip4->sin_family = AF_INET; - ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr; - ip4->sin_port = hdr->port; + *dst_addr = (struct sockaddr_in) { + .sin_family = AF_INET, + .sin_addr.s_addr = hdr->src_addr.ip4.addr, + .sin_port = hdr->port, + }; } } -static void cma_save_ip6_info(struct sockaddr *src_addr, - struct sockaddr *dst_addr, +static void cma_save_ip6_info(struct sockaddr_in6 *src_addr, + struct sockaddr_in6 *dst_addr, struct cma_hdr *hdr, __be16 local_port) { - struct sockaddr_in6 *ip6; - if (src_addr) { - ip6 = (struct sockaddr_in6 
*)src_addr; - ip6->sin6_family = AF_INET6; - ip6->sin6_addr = hdr->dst_addr.ip6; - ip6->sin6_port = local_port; + *src_addr = (struct sockaddr_in6) { + .sin6_family = AF_INET6, + .sin6_addr = hdr->dst_addr.ip6, + .sin6_port = local_port, + }; } if (dst_addr) { - ip6 = (struct sockaddr_in6 *)dst_addr; - ip6->sin6_family = AF_INET6; - ip6->sin6_addr = hdr->src_addr.ip6; - ip6->sin6_port = hdr->port; + *dst_addr = (struct sockaddr_in6) { + .sin6_family = AF_INET6, + .sin6_addr = hdr->src_addr.ip6, + .sin6_port = hdr->port, + }; } } @@ -1159,10 +1159,12 @@ static int cma_save_ip_info(struct sockaddr *src_addr, switch (cma_get_ip_ver(hdr)) { case 4: - cma_save_ip4_info(src_addr, dst_addr, hdr, port); + cma_save_ip4_info((struct sockaddr_in *)src_addr, + (struct sockaddr_in *)dst_addr, hdr, port); break; case 6: - cma_save_ip6_info(src_addr, dst_addr, hdr, port); + cma_save_ip6_info((struct sockaddr_in6 *)src_addr, + (struct sockaddr_in6 *)dst_addr, hdr, port); break; default: return -EAFNOSUPPORT; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 15c01c3cd540..e6f9b2d745ca 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2636,17 +2636,26 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) /* And we're up. Go go go! */ of_iommu_set_ops(dev->of_node, &arm_smmu_ops); #ifdef CONFIG_PCI - pci_request_acs(); - ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops); - if (ret) - return ret; + if (pci_bus_type.iommu_ops != &arm_smmu_ops) { + pci_request_acs(); + ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops); + if (ret) + return ret; + } #endif #ifdef CONFIG_ARM_AMBA - ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops); - if (ret) - return ret; + if (amba_bustype.iommu_ops != &arm_smmu_ops) { + ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops); + if (ret) + return ret; + } #endif - return bus_set_iommu(&platform_bus_type, &arm_smmu_ops); + if (platform_bus_type.iommu_ops != &arm_smmu_ops) { + ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops); + if (ret) + return ret; + } + return 0; } static int arm_smmu_device_remove(struct platform_device *pdev) diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index c841eb7a1a74..8f7281444551 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -324,8 +324,10 @@ struct arm_smmu_master_cfg { #define INVALID_SMENDX -1 #define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv) #define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu) +#define fwspec_smendx(fw, i) \ + (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i]) #define for_each_cfg_sme(fw, i, idx) \ - for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i) + for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i) struct arm_smmu_device { struct device *dev; @@ -1228,6 +1230,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) return -ENXIO; } + /* + * FIXME: The arch/arm DMA API code tries to attach devices to its own + * domains between of_xlate() and add_device() - we have no way to cope + * with that, so until ARM gets converted to rely on groups and default + * domains, just say no (but more politely than by dereferencing NULL). + * This should be at least a WARN_ON once that's sorted. 
+ */ + if (!fwspec->iommu_priv) + return -ENODEV; + smmu = fwspec_smmu(fwspec); /* Ensure that the domain is finalised */ ret = arm_smmu_init_domain_context(domain, smmu); @@ -1390,7 +1402,7 @@ static int arm_smmu_add_device(struct device *dev) fwspec = dev->iommu_fwspec; if (ret) goto out_free; - } else if (fwspec) { + } else if (fwspec && fwspec->ops == &arm_smmu_ops) { smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode)); } else { return -ENODEV; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index a4407eabf0e6..3965e73db51c 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1711,6 +1711,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu) if (!iommu->domains || !iommu->domain_ids) return; +again: spin_lock_irqsave(&device_domain_lock, flags); list_for_each_entry_safe(info, tmp, &device_domain_list, global) { struct dmar_domain *domain; @@ -1723,10 +1724,19 @@ static void disable_dmar_iommu(struct intel_iommu *iommu) domain = info->domain; - dmar_remove_one_dev_info(domain, info->dev); + __dmar_remove_one_dev_info(info); - if (!domain_type_is_vm_or_si(domain)) + if (!domain_type_is_vm_or_si(domain)) { + /* + * The domain_exit() function can't be called under + * device_domain_lock, as it takes this lock itself. + * So release the lock here and re-run the loop + * afterwards. + */ + spin_unlock_irqrestore(&device_domain_lock, flags); domain_exit(domain); + goto again; + } } spin_unlock_irqrestore(&device_domain_lock, flags); diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig index 012225587c25..b71b747ee0ba 100644 --- a/drivers/media/dvb-frontends/Kconfig +++ b/drivers/media/dvb-frontends/Kconfig @@ -513,6 +513,11 @@ config DVB_AS102_FE depends on DVB_CORE default DVB_AS102 +config DVB_GP8PSK_FE + tristate + depends on DVB_CORE + default DVB_USB_GP8PSK + comment "DVB-C (cable) frontends" depends on DVB_CORE diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile index e90165ad361b..93921a4eaa27 100644 --- a/drivers/media/dvb-frontends/Makefile +++ b/drivers/media/dvb-frontends/Makefile @@ -121,6 +121,7 @@ obj-$(CONFIG_DVB_RTL2832_SDR) += rtl2832_sdr.o obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o obj-$(CONFIG_DVB_AF9033) += af9033.o obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o +obj-$(CONFIG_DVB_GP8PSK_FE) += gp8psk-fe.o obj-$(CONFIG_DVB_TC90522) += tc90522.o obj-$(CONFIG_DVB_HORUS3A) += horus3a.o obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o diff --git a/drivers/media/usb/dvb-usb/gp8psk-fe.c b/drivers/media/dvb-frontends/gp8psk-fe.c index db6eb79cde07..be19afeed7a9 100644 --- a/drivers/media/usb/dvb-usb/gp8psk-fe.c +++ b/drivers/media/dvb-frontends/gp8psk-fe.c @@ -14,11 +14,27 @@ * * see Documentation/dvb/README.dvb-usb for more information */ -#include "gp8psk.h" + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "gp8psk-fe.h" +#include "dvb_frontend.h" + +static int debug; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); + +#define dprintk(fmt, arg...) 
do { \ + if (debug) \ + printk(KERN_DEBUG pr_fmt("%s: " fmt), \ + __func__, ##arg); \ +} while (0) struct gp8psk_fe_state { struct dvb_frontend fe; - struct dvb_usb_device *d; + void *priv; + const struct gp8psk_fe_ops *ops; + bool is_rev1; u8 lock; u16 snr; unsigned long next_status_check; @@ -29,22 +45,24 @@ static int gp8psk_tuned_to_DCII(struct dvb_frontend *fe) { struct gp8psk_fe_state *st = fe->demodulator_priv; u8 status; - gp8psk_usb_in_op(st->d, GET_8PSK_CONFIG, 0, 0, &status, 1); + + st->ops->in(st->priv, GET_8PSK_CONFIG, 0, 0, &status, 1); return status & bmDCtuned; } static int gp8psk_set_tuner_mode(struct dvb_frontend *fe, int mode) { - struct gp8psk_fe_state *state = fe->demodulator_priv; - return gp8psk_usb_out_op(state->d, SET_8PSK_CONFIG, mode, 0, NULL, 0); + struct gp8psk_fe_state *st = fe->demodulator_priv; + + return st->ops->out(st->priv, SET_8PSK_CONFIG, mode, 0, NULL, 0); } static int gp8psk_fe_update_status(struct gp8psk_fe_state *st) { u8 buf[6]; if (time_after(jiffies,st->next_status_check)) { - gp8psk_usb_in_op(st->d, GET_SIGNAL_LOCK, 0,0,&st->lock,1); - gp8psk_usb_in_op(st->d, GET_SIGNAL_STRENGTH, 0,0,buf,6); + st->ops->in(st->priv, GET_SIGNAL_LOCK, 0, 0, &st->lock, 1); + st->ops->in(st->priv, GET_SIGNAL_STRENGTH, 0, 0, buf, 6); st->snr = (buf[1]) << 8 | buf[0]; st->next_status_check = jiffies + (st->status_check_interval*HZ)/1000; } @@ -116,13 +134,12 @@ static int gp8psk_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_front static int gp8psk_fe_set_frontend(struct dvb_frontend *fe) { - struct gp8psk_fe_state *state = fe->demodulator_priv; + struct gp8psk_fe_state *st = fe->demodulator_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; u8 cmd[10]; u32 freq = c->frequency * 1000; - int gp_product_id = le16_to_cpu(state->d->udev->descriptor.idProduct); - deb_fe("%s()\n", __func__); + dprintk("%s()\n", __func__); cmd[4] = freq & 0xff; cmd[5] = (freq >> 8) & 0xff; @@ -136,21 +153,21 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe) switch (c->delivery_system) { case SYS_DVBS: if (c->modulation != QPSK) { - deb_fe("%s: unsupported modulation selected (%d)\n", + dprintk("%s: unsupported modulation selected (%d)\n", __func__, c->modulation); return -EOPNOTSUPP; } c->fec_inner = FEC_AUTO; break; case SYS_DVBS2: /* kept for backwards compatibility */ - deb_fe("%s: DVB-S2 delivery system selected\n", __func__); + dprintk("%s: DVB-S2 delivery system selected\n", __func__); break; case SYS_TURBO: - deb_fe("%s: Turbo-FEC delivery system selected\n", __func__); + dprintk("%s: Turbo-FEC delivery system selected\n", __func__); break; default: - deb_fe("%s: unsupported delivery system selected (%d)\n", + dprintk("%s: unsupported delivery system selected (%d)\n", __func__, c->delivery_system); return -EOPNOTSUPP; } @@ -161,9 +178,9 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe) cmd[3] = (c->symbol_rate >> 24) & 0xff; switch (c->modulation) { case QPSK: - if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM) + if (st->is_rev1) if (gp8psk_tuned_to_DCII(fe)) - gp8psk_bcm4500_reload(state->d); + st->ops->reload(st->priv); switch (c->fec_inner) { case FEC_1_2: cmd[9] = 0; break; @@ -207,18 +224,18 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe) cmd[9] = 0; break; default: /* Unknown modulation */ - deb_fe("%s: unsupported modulation selected (%d)\n", + dprintk("%s: unsupported modulation selected (%d)\n", __func__, c->modulation); return -EOPNOTSUPP; } - if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM) + if 
(st->is_rev1) gp8psk_set_tuner_mode(fe, 0); - gp8psk_usb_out_op(state->d, TUNE_8PSK, 0, 0, cmd, 10); + st->ops->out(st->priv, TUNE_8PSK, 0, 0, cmd, 10); - state->lock = 0; - state->next_status_check = jiffies; - state->status_check_interval = 200; + st->lock = 0; + st->next_status_check = jiffies; + st->status_check_interval = 200; return 0; } @@ -228,9 +245,9 @@ static int gp8psk_fe_send_diseqc_msg (struct dvb_frontend* fe, { struct gp8psk_fe_state *st = fe->demodulator_priv; - deb_fe("%s\n",__func__); + dprintk("%s\n", __func__); - if (gp8psk_usb_out_op(st->d,SEND_DISEQC_COMMAND, m->msg[0], 0, + if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, m->msg[0], 0, m->msg, m->msg_len)) { return -EINVAL; } @@ -243,12 +260,12 @@ static int gp8psk_fe_send_diseqc_burst(struct dvb_frontend *fe, struct gp8psk_fe_state *st = fe->demodulator_priv; u8 cmd; - deb_fe("%s\n",__func__); + dprintk("%s\n", __func__); /* These commands are certainly wrong */ cmd = (burst == SEC_MINI_A) ? 0x00 : 0x01; - if (gp8psk_usb_out_op(st->d,SEND_DISEQC_COMMAND, cmd, 0, + if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, cmd, 0, &cmd, 0)) { return -EINVAL; } @@ -258,10 +275,10 @@ static int gp8psk_fe_send_diseqc_burst(struct dvb_frontend *fe, static int gp8psk_fe_set_tone(struct dvb_frontend *fe, enum fe_sec_tone_mode tone) { - struct gp8psk_fe_state* state = fe->demodulator_priv; + struct gp8psk_fe_state *st = fe->demodulator_priv; - if (gp8psk_usb_out_op(state->d,SET_22KHZ_TONE, - (tone == SEC_TONE_ON), 0, NULL, 0)) { + if (st->ops->out(st->priv, SET_22KHZ_TONE, + (tone == SEC_TONE_ON), 0, NULL, 0)) { return -EINVAL; } return 0; @@ -270,9 +287,9 @@ static int gp8psk_fe_set_tone(struct dvb_frontend *fe, static int gp8psk_fe_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage) { - struct gp8psk_fe_state* state = fe->demodulator_priv; + struct gp8psk_fe_state *st = fe->demodulator_priv; - if (gp8psk_usb_out_op(state->d,SET_LNB_VOLTAGE, + if (st->ops->out(st->priv, SET_LNB_VOLTAGE, voltage == SEC_VOLTAGE_18, 0, NULL, 0)) { return -EINVAL; } @@ -281,52 +298,60 @@ static int gp8psk_fe_set_voltage(struct dvb_frontend *fe, static int gp8psk_fe_enable_high_lnb_voltage(struct dvb_frontend* fe, long onoff) { - struct gp8psk_fe_state* state = fe->demodulator_priv; - return gp8psk_usb_out_op(state->d, USE_EXTRA_VOLT, onoff, 0,NULL,0); + struct gp8psk_fe_state *st = fe->demodulator_priv; + + return st->ops->out(st->priv, USE_EXTRA_VOLT, onoff, 0, NULL, 0); } static int gp8psk_fe_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long sw_cmd) { - struct gp8psk_fe_state* state = fe->demodulator_priv; + struct gp8psk_fe_state *st = fe->demodulator_priv; u8 cmd = sw_cmd & 0x7f; - if (gp8psk_usb_out_op(state->d,SET_DN_SWITCH, cmd, 0, - NULL, 0)) { + if (st->ops->out(st->priv, SET_DN_SWITCH, cmd, 0, NULL, 0)) return -EINVAL; - } - if (gp8psk_usb_out_op(state->d,SET_LNB_VOLTAGE, !!(sw_cmd & 0x80), - 0, NULL, 0)) { + + if (st->ops->out(st->priv, SET_LNB_VOLTAGE, !!(sw_cmd & 0x80), + 0, NULL, 0)) return -EINVAL; - } return 0; } static void gp8psk_fe_release(struct dvb_frontend* fe) { - struct gp8psk_fe_state *state = fe->demodulator_priv; - kfree(state); + struct gp8psk_fe_state *st = fe->demodulator_priv; + + kfree(st); } static struct dvb_frontend_ops gp8psk_fe_ops; -struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d) +struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops, + void *priv, bool is_rev1) { - struct gp8psk_fe_state *s = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL); - if (s == 
NULL) - goto error; - - s->d = d; - memcpy(&s->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops)); - s->fe.demodulator_priv = s; - - goto success; -error: - return NULL; -success: - return &s->fe; -} + struct gp8psk_fe_state *st; + if (!ops || !ops->in || !ops->out || !ops->reload) { + pr_err("Error! gp8psk-fe ops not defined.\n"); + return NULL; + } + + st = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL); + if (!st) + return NULL; + + memcpy(&st->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops)); + st->fe.demodulator_priv = st; + st->ops = ops; + st->priv = priv; + st->is_rev1 = is_rev1; + + pr_info("Frontend %sattached\n", is_rev1 ? "revision 1 " : ""); + + return &st->fe; +} +EXPORT_SYMBOL_GPL(gp8psk_fe_attach); static struct dvb_frontend_ops gp8psk_fe_ops = { .delsys = { SYS_DVBS }, diff --git a/drivers/media/dvb-frontends/gp8psk-fe.h b/drivers/media/dvb-frontends/gp8psk-fe.h new file mode 100644 index 000000000000..6c7944b1ecd6 --- /dev/null +++ b/drivers/media/dvb-frontends/gp8psk-fe.h @@ -0,0 +1,82 @@ +/* + * gp8psk_fe driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef GP8PSK_FE_H +#define GP8PSK_FE_H + +#include <linux/types.h> + +/* gp8psk commands */ + +#define GET_8PSK_CONFIG 0x80 /* in */ +#define SET_8PSK_CONFIG 0x81 +#define I2C_WRITE 0x83 +#define I2C_READ 0x84 +#define ARM_TRANSFER 0x85 +#define TUNE_8PSK 0x86 +#define GET_SIGNAL_STRENGTH 0x87 /* in */ +#define LOAD_BCM4500 0x88 +#define BOOT_8PSK 0x89 /* in */ +#define START_INTERSIL 0x8A /* in */ +#define SET_LNB_VOLTAGE 0x8B +#define SET_22KHZ_TONE 0x8C +#define SEND_DISEQC_COMMAND 0x8D +#define SET_DVB_MODE 0x8E +#define SET_DN_SWITCH 0x8F +#define GET_SIGNAL_LOCK 0x90 /* in */ +#define GET_FW_VERS 0x92 +#define GET_SERIAL_NUMBER 0x93 /* in */ +#define USE_EXTRA_VOLT 0x94 +#define GET_FPGA_VERS 0x95 +#define CW3K_INIT 0x9d + +/* PSK_configuration bits */ +#define bm8pskStarted 0x01 +#define bm8pskFW_Loaded 0x02 +#define bmIntersilOn 0x04 +#define bmDVBmode 0x08 +#define bm22kHz 0x10 +#define bmSEL18V 0x20 +#define bmDCtuned 0x40 +#define bmArmed 0x80 + +/* Satellite modulation modes */ +#define ADV_MOD_DVB_QPSK 0 /* DVB-S QPSK */ +#define ADV_MOD_TURBO_QPSK 1 /* Turbo QPSK */ +#define ADV_MOD_TURBO_8PSK 2 /* Turbo 8PSK (also used for Trellis 8PSK) */ +#define ADV_MOD_TURBO_16QAM 3 /* Turbo 16QAM (also used for Trellis 8PSK) */ + +#define ADV_MOD_DCII_C_QPSK 4 /* Digicipher II Combo */ +#define ADV_MOD_DCII_I_QPSK 5 /* Digicipher II I-stream */ +#define ADV_MOD_DCII_Q_QPSK 6 /* Digicipher II Q-stream */ +#define ADV_MOD_DCII_C_OQPSK 7 /* Digicipher II offset QPSK */ +#define ADV_MOD_DSS_QPSK 8 /* DSS (DIRECTV) QPSK */ +#define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */ + +/* firmware revision id's */ +#define GP8PSK_FW_REV1 0x020604 +#define GP8PSK_FW_REV2 0x020704 +#define GP8PSK_FW_VERS(_fw_vers) \ + ((_fw_vers)[2]<<0x10 | (_fw_vers)[1]<<0x08 | (_fw_vers)[0]) + +struct gp8psk_fe_ops { + int (*in)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen); + int (*out)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen); + int (*reload)(void 
*priv); +}; + +struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops, + void *priv, bool is_rev1); + +#endif diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c index f95a6bc839d5..cede3975d04b 100644 --- a/drivers/media/i2c/ir-kbd-i2c.c +++ b/drivers/media/i2c/ir-kbd-i2c.c @@ -118,7 +118,7 @@ static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol, *protocol = RC_TYPE_RC6_MCE; dev &= 0x7f; dprintk(1, "ir hauppauge (rc6-mce): t%d vendor=%d dev=%d code=%d\n", - toggle, vendor, dev, code); + *ptoggle, vendor, dev, code); } else { *ptoggle = 0; *protocol = RC_TYPE_RC6_6A_32; diff --git a/drivers/media/usb/dvb-usb/Makefile b/drivers/media/usb/dvb-usb/Makefile index 2a7b5a963acf..3b3f32b426d1 100644 --- a/drivers/media/usb/dvb-usb/Makefile +++ b/drivers/media/usb/dvb-usb/Makefile @@ -8,7 +8,7 @@ obj-$(CONFIG_DVB_USB_VP7045) += dvb-usb-vp7045.o dvb-usb-vp702x-objs := vp702x.o vp702x-fe.o obj-$(CONFIG_DVB_USB_VP702X) += dvb-usb-vp702x.o -dvb-usb-gp8psk-objs := gp8psk.o gp8psk-fe.o +dvb-usb-gp8psk-objs := gp8psk.o obj-$(CONFIG_DVB_USB_GP8PSK) += dvb-usb-gp8psk.o dvb-usb-dtt200u-objs := dtt200u.o dtt200u-fe.o diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c index b257780fb380..7853261906b1 100644 --- a/drivers/media/usb/dvb-usb/af9005.c +++ b/drivers/media/usb/dvb-usb/af9005.c @@ -53,7 +53,6 @@ struct af9005_device_state { u8 sequence; int led_state; unsigned char data[256]; - struct mutex data_mutex; }; static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg, @@ -72,7 +71,7 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg, return -EINVAL; } - mutex_lock(&st->data_mutex); + mutex_lock(&d->data_mutex); st->data[0] = 14; /* rest of buffer length low */ st->data[1] = 0; /* rest of buffer length high */ @@ -140,7 +139,7 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg, values[i] = st->data[8 + i]; ret: - mutex_unlock(&st->data_mutex); + mutex_unlock(&d->data_mutex); return ret; } @@ -481,7 +480,7 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf, } packet_len = wlen + 5; - mutex_lock(&st->data_mutex); + mutex_lock(&d->data_mutex); st->data[0] = (u8) (packet_len & 0xff); st->data[1] = (u8) ((packet_len & 0xff00) >> 8); @@ -512,7 +511,7 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf, rbuf[i] = st->data[i + 7]; } - mutex_unlock(&st->data_mutex); + mutex_unlock(&d->data_mutex); return ret; } @@ -523,7 +522,7 @@ int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values, u8 seq; int ret, i; - mutex_lock(&st->data_mutex); + mutex_lock(&d->data_mutex); memset(st->data, 0, sizeof(st->data)); @@ -559,7 +558,7 @@ int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values, for (i = 0; i < len; i++) values[i] = st->data[6 + i]; } - mutex_unlock(&st->data_mutex); + mutex_unlock(&d->data_mutex); return ret; } @@ -847,7 +846,7 @@ static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state) return 0; } - mutex_lock(&st->data_mutex); + mutex_lock(&d->data_mutex); /* deb_info("rc_query\n"); */ st->data[0] = 3; /* rest of packet length low */ @@ -890,7 +889,7 @@ static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state) } ret: - mutex_unlock(&st->data_mutex); + mutex_unlock(&d->data_mutex); return ret; } @@ -1004,20 +1003,8 @@ static struct dvb_usb_device_properties af9005_properties; static int af9005_usb_probe(struct usb_interface *intf, const 
struct usb_device_id *id) { - struct dvb_usb_device *d; - struct af9005_device_state *st; - int ret; - - ret = dvb_usb_device_init(intf, &af9005_properties, - THIS_MODULE, &d, adapter_nr); - - if (ret < 0) - return ret; - - st = d->priv; - mutex_init(&st->data_mutex); - - return 0; + return dvb_usb_device_init(intf, &af9005_properties, + THIS_MODULE, NULL, adapter_nr); } enum af9005_usb_table_entry { diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c index 8ac825413d5a..290275bc7fde 100644 --- a/drivers/media/usb/dvb-usb/cinergyT2-core.c +++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c @@ -42,7 +42,6 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); struct cinergyt2_state { u8 rc_counter; unsigned char data[64]; - struct mutex data_mutex; }; /* We are missing a release hook with usb_device data */ @@ -56,12 +55,12 @@ static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable) struct cinergyt2_state *st = d->priv; int ret; - mutex_lock(&st->data_mutex); + mutex_lock(&d->data_mutex); st->data[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER; st->data[1] = enable ? 1 : 0; ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 64, 0); - mutex_unlock(&st->data_mutex); + mutex_unlock(&d->data_mutex); return ret; } @@ -71,12 +70,12 @@ static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable) struct cinergyt2_state *st = d->priv; int ret; - mutex_lock(&st->data_mutex); + mutex_lock(&d->data_mutex); st->data[0] = CINERGYT2_EP1_SLEEP_MODE; st->data[1] = enable ? 0 : 1; ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 3, 0); - mutex_unlock(&st->data_mutex); + mutex_unlock(&d->data_mutex); return ret; } @@ -89,7 +88,7 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap) adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev); - mutex_lock(&st->data_mutex); + mutex_lock(&d->data_mutex); st->data[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION; ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0); @@ -97,7 +96,7 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap) deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep " "state info\n"); } - mutex_unlock(&st->data_mutex); + mutex_unlock(&d->data_mutex); /* Copy this pointer as we are gonna need it in the release phase */ cinergyt2_usb_device = adap->dev; @@ -166,7 +165,7 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state) *state = REMOTE_NO_KEY_PRESSED; - mutex_lock(&st->data_mutex); + mutex_lock(&d->data_mutex); st->data[0] = CINERGYT2_EP1_GET_RC_EVENTS; ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0); @@ -202,29 +201,17 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state) } ret: - mutex_unlock(&st->data_mutex); + mutex_unlock(&d->data_mutex); return ret; } static int cinergyt2_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { - struct dvb_usb_device *d; - struct cinergyt2_state *st; - int ret; - - ret = dvb_usb_device_init(intf, &cinergyt2_properties, - THIS_MODULE, &d, adapter_nr); - if (ret < 0) - return ret; - - st = d->priv; - mutex_init(&st->data_mutex); - - return 0; + return dvb_usb_device_init(intf, &cinergyt2_properties, + THIS_MODULE, NULL, adapter_nr); } - static struct usb_device_id cinergyt2_usb_table[] = { { USB_DEVICE(USB_VID_TERRATEC, 0x0038) }, { 0 } diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index 39772812269d..243403081fa5 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c 
@@ -68,7 +68,7 @@ static int cxusb_ctrl_msg(struct dvb_usb_device *d, wo = (rbuf == NULL || rlen == 0); /* write-only */ - mutex_lock(&st->data_mutex); + mutex_lock(&d->data_mutex); st->data[0] = cmd; memcpy(&st->data[1], wbuf, wlen); if (wo) @@ -77,7 +77,7 @@ static int cxusb_ctrl_msg(struct dvb_usb_device *d, ret = dvb_usb_generic_rw(d, st->data, 1 + wlen, rbuf, rlen, 0); - mutex_unlock(&st->data_mutex); + mutex_unlock(&d->data_mutex); return ret; } @@ -1461,43 +1461,36 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties; static int cxusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { - struct dvb_usb_device *d; - struct cxusb_state *st; - if (0 == dvb_usb_device_init(intf, &cxusb_medion_properties, - THIS_MODULE, &d, adapter_nr) || + THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgh064f_properties, - THIS_MODULE, &d, adapter_nr) || + THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dee1601_properties, - THIS_MODULE, &d, adapter_nr) || + THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgz201_properties, - THIS_MODULE, &d, adapter_nr) || + THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dtt7579_properties, - THIS_MODULE, &d, adapter_nr) || + THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dualdig4_properties, - THIS_MODULE, &d, adapter_nr) || + THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &cxusb_bluebird_nano2_properties, - THIS_MODULE, &d, adapter_nr) || + THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &cxusb_bluebird_nano2_needsfirmware_properties, - THIS_MODULE, &d, adapter_nr) || + THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &cxusb_aver_a868r_properties, - THIS_MODULE, &d, adapter_nr) || + THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dualdig4_rev2_properties, - THIS_MODULE, &d, adapter_nr) || + THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &cxusb_d680_dmb_properties, - THIS_MODULE, &d, adapter_nr) || + THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &cxusb_mygica_d689_properties, - THIS_MODULE, &d, adapter_nr) || + THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &cxusb_mygica_t230_properties, - THIS_MODULE, &d, adapter_nr) || - 0) { - st = d->priv; - mutex_init(&st->data_mutex); - + THIS_MODULE, NULL, adapter_nr) || + 0) return 0; - } return -EINVAL; } diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h index 9f3ee0e47d5c..18acda19527a 100644 --- a/drivers/media/usb/dvb-usb/cxusb.h +++ b/drivers/media/usb/dvb-usb/cxusb.h @@ -37,7 +37,6 @@ struct cxusb_state { struct i2c_client *i2c_client_tuner; unsigned char data[MAX_XFER_SIZE]; - struct mutex data_mutex; }; #endif diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c index 92d5408684ac..47ce9d5de4c6 100644 --- a/drivers/media/usb/dvb-usb/dib0700_core.c +++ b/drivers/media/usb/dvb-usb/dib0700_core.c @@ -704,7 +704,7 @@ static void dib0700_rc_urb_completion(struct urb *purb) struct dvb_usb_device *d = purb->context; struct dib0700_rc_response *poll_reply; enum rc_type protocol; - u32 uninitialized_var(keycode); + u32 keycode; u8 toggle; deb_info("%s()\n", __func__); @@ -745,7 +745,8 @@ static void dib0700_rc_urb_completion(struct urb *purb) poll_reply->nec.data == 0x00 && poll_reply->nec.not_data == 0xff) { 
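All of these dvb-usb conversions (cinergyT2, cxusb, dtt200u, af9005) are the same mechanical change: each driver used to keep a private data_mutex next to its preallocated URB buffer, and now locks the single d->data_mutex that the core initializes in dvb_usb_init() (see the dvb-usb-init.c hunk below). The shape every command helper ends up with is roughly the following; this is a pthread sketch with an invented opcode, not any one driver.

#include <pthread.h>

/* Simplified stand-ins for struct dvb_usb_device and a driver's state. */
struct usb_dev {
	pthread_mutex_t data_mutex;	/* initialized once by the core */
	void *priv;
};

struct drv_state {
	unsigned char data[64];		/* preallocated command buffer */
};

static int generic_rw(struct usb_dev *d, unsigned char *buf, int len)
{
	(void)d; (void)buf; (void)len;
	return 0;			/* would do the USB control transfer */
}

/* Every helper serializes on the device-wide mutex, so two concurrent
 * callers can never interleave their writes into st->data. */
static int send_power_cmd(struct usb_dev *d, int onoff)
{
	struct drv_state *st = d->priv;
	int ret;

	pthread_mutex_lock(&d->data_mutex);
	st->data[0] = 0x01;		/* hypothetical SET_POWER opcode */
	st->data[1] = onoff ? 1 : 0;
	ret = generic_rw(d, st->data, 2);
	pthread_mutex_unlock(&d->data_mutex);
	return ret;
}

int main(void)
{
	struct drv_state st = { { 0 } };
	struct usb_dev d = { PTHREAD_MUTEX_INITIALIZER, &st };

	return send_power_cmd(&d, 1);
}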
poll_reply->data_state = 2; - break; + rc_repeat(d->rc_dev); + goto resubmit; } if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) { diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c index f88572c7ae7c..fcbff7fb0c4e 100644 --- a/drivers/media/usb/dvb-usb/dtt200u.c +++ b/drivers/media/usb/dvb-usb/dtt200u.c @@ -22,7 +22,6 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); struct dtt200u_state { unsigned char data[80]; - struct mutex data_mutex; }; static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff) @@ -30,23 +29,24 @@ static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff) struct dtt200u_state *st = d->priv; int ret = 0; - mutex_lock(&st->data_mutex); + mutex_lock(&d->data_mutex); st->data[0] = SET_INIT; if (onoff) ret = dvb_usb_generic_write(d, st->data, 2); - mutex_unlock(&st->data_mutex); + mutex_unlock(&d->data_mutex); return ret; } static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { - struct dtt200u_state *st = adap->dev->priv; + struct dvb_usb_device *d = adap->dev; + struct dtt200u_state *st = d->priv; int ret; - mutex_lock(&st->data_mutex); + mutex_lock(&d->data_mutex); st->data[0] = SET_STREAMING; st->data[1] = onoff; @@ -61,26 +61,27 @@ static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) ret = dvb_usb_generic_write(adap->dev, st->data, 1); ret: - mutex_unlock(&st->data_mutex); + mutex_unlock(&d->data_mutex); return ret; } static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) { - struct dtt200u_state *st = adap->dev->priv; + struct dvb_usb_device *d = adap->dev; + struct dtt200u_state *st = d->priv; int ret; pid = onoff ? pid : 0; - mutex_lock(&st->data_mutex); + mutex_lock(&d->data_mutex); st->data[0] = SET_PID_FILTER; st->data[1] = index; st->data[2] = pid & 0xff; st->data[3] = (pid >> 8) & 0x1f; ret = dvb_usb_generic_write(adap->dev, st->data, 4); - mutex_unlock(&st->data_mutex); + mutex_unlock(&d->data_mutex); return ret; } @@ -91,7 +92,7 @@ static int dtt200u_rc_query(struct dvb_usb_device *d) u32 scancode; int ret; - mutex_lock(&st->data_mutex); + mutex_lock(&d->data_mutex); st->data[0] = GET_RC_CODE; ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0); @@ -126,7 +127,7 @@ static int dtt200u_rc_query(struct dvb_usb_device *d) deb_info("st->data: %*ph\n", 5, st->data); ret: - mutex_unlock(&st->data_mutex); + mutex_unlock(&d->data_mutex); return ret; } @@ -145,24 +146,17 @@ static struct dvb_usb_device_properties wt220u_miglia_properties; static int dtt200u_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { - struct dvb_usb_device *d; - struct dtt200u_state *st; - if (0 == dvb_usb_device_init(intf, &dtt200u_properties, - THIS_MODULE, &d, adapter_nr) || + THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &wt220u_properties, - THIS_MODULE, &d, adapter_nr) || + THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &wt220u_fc_properties, - THIS_MODULE, &d, adapter_nr) || + THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &wt220u_zl0353_properties, - THIS_MODULE, &d, adapter_nr) || + THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &wt220u_miglia_properties, - THIS_MODULE, &d, adapter_nr)) { - st = d->priv; - mutex_init(&st->data_mutex); - + THIS_MODULE, NULL, adapter_nr)) return 0; - } return -ENODEV; } diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c index 3896ba9a4179..84308569e7dc 100644 --- 
a/drivers/media/usb/dvb-usb/dvb-usb-init.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c @@ -142,6 +142,7 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums) { int ret = 0; + mutex_init(&d->data_mutex); mutex_init(&d->usb_mutex); mutex_init(&d->i2c_mutex); diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h index 639c4678c65b..107255b08b2b 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb.h +++ b/drivers/media/usb/dvb-usb/dvb-usb.h @@ -404,8 +404,12 @@ struct dvb_usb_adapter { * Powered is in/decremented for each call to modify the state. * @udev: pointer to the device's struct usb_device. * - * @usb_mutex: semaphore of USB control messages (reading needs two messages) - * @i2c_mutex: semaphore for i2c-transfers + * @data_mutex: mutex to protect the data structure used to store URB data + * @usb_mutex: mutex of USB control messages (reading needs two messages). + * Please notice that this mutex is used internally by the generic + * URB control functions. So, drivers using dvb_usb_generic_rw() and + * derived functions should not lock it internally. + * @i2c_mutex: mutex for i2c-transfers * * @i2c_adap: device's i2c_adapter if it uses I2CoverUSB * * @@ -433,6 +437,7 @@ struct dvb_usb_device { int powered; /* locking */ + struct mutex data_mutex; struct mutex usb_mutex; /* i2c */ diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c index adfd76491451..993bb7a72985 100644 --- a/drivers/media/usb/dvb-usb/gp8psk.c +++ b/drivers/media/usb/dvb-usb/gp8psk.c @@ -15,6 +15,7 @@ * see Documentation/dvb/README.dvb-usb for more information */ #include "gp8psk.h" +#include "gp8psk-fe.h" /* debug */ static char bcm4500_firmware[] = "dvb-usb-gp8psk-02.fw"; @@ -28,34 +29,8 @@ struct gp8psk_state { unsigned char data[80]; }; -static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers) -{ - return (gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6)); -} - -static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers) -{ - return (gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1)); -} - -static void gp8psk_info(struct dvb_usb_device *d) -{ - u8 fpga_vers, fw_vers[6]; - - if (!gp8psk_get_fw_version(d, fw_vers)) - info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i", - fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers), - 2000 + fw_vers[5], fw_vers[4], fw_vers[3]); - else - info("failed to get FW version"); - - if (!gp8psk_get_fpga_version(d, &fpga_vers)) - info("FPGA Version = %i", fpga_vers); - else - info("failed to get FPGA version"); -} - -int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) +static int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, + u16 index, u8 *b, int blen) { struct gp8psk_state *st = d->priv; int ret = 0,try = 0; @@ -67,7 +42,6 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 return ret; while (ret >= 0 && ret != blen && try < 3) { - memcpy(st->data, b, blen); ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev,0), req, @@ -81,8 +55,10 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 if (ret < 0 || ret != blen) { warn("usb in %d operation failed.", req); ret = -EIO; - } else { ret = 0; + memcpy(b, st->data, blen); + } deb_xfer("in: req.
%x, val: %x, ind: %x, buffer: ",req,value,index); debug_dump(b,blen,deb_xfer); @@ -92,7 +68,7 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 return ret; } -int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, +static int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { struct gp8psk_state *st = d->priv; @@ -123,6 +99,34 @@ int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, return ret; } + +static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers) +{ + return gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6); +} + +static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers) +{ + return gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1); +} + +static void gp8psk_info(struct dvb_usb_device *d) +{ + u8 fpga_vers, fw_vers[6]; + + if (!gp8psk_get_fw_version(d, fw_vers)) + info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i", + fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers), + 2000 + fw_vers[5], fw_vers[4], fw_vers[3]); + else + info("failed to get FW version"); + + if (!gp8psk_get_fpga_version(d, &fpga_vers)) + info("FPGA Version = %i", fpga_vers); + else + info("failed to get FPGA version"); +} + static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d) { int ret; @@ -225,10 +229,13 @@ static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff) return 0; } -int gp8psk_bcm4500_reload(struct dvb_usb_device *d) +static int gp8psk_bcm4500_reload(struct dvb_usb_device *d) { u8 buf; int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct); + + deb_xfer("reloading firmware\n"); + /* Turn off 8psk power */ if (gp8psk_usb_in_op(d, BOOT_8PSK, 0, 0, &buf, 1)) return -EINVAL; @@ -247,9 +254,47 @@ static int gp8psk_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) return gp8psk_usb_out_op(adap->dev, ARM_TRANSFER, onoff, 0 , NULL, 0); } +/* Callbacks for gp8psk-fe.c */ + +static int gp8psk_fe_in(void *priv, u8 req, u16 value, + u16 index, u8 *b, int blen) +{ + struct dvb_usb_device *d = priv; + + return gp8psk_usb_in_op(d, req, value, index, b, blen); +} + +static int gp8psk_fe_out(void *priv, u8 req, u16 value, + u16 index, u8 *b, int blen) +{ + struct dvb_usb_device *d = priv; + + return gp8psk_usb_out_op(d, req, value, index, b, blen); +} + +static int gp8psk_fe_reload(void *priv) +{ + struct dvb_usb_device *d = priv; + + return gp8psk_bcm4500_reload(d); +} + +const struct gp8psk_fe_ops gp8psk_fe_ops = { + .in = gp8psk_fe_in, + .out = gp8psk_fe_out, + .reload = gp8psk_fe_reload, +}; + static int gp8psk_frontend_attach(struct dvb_usb_adapter *adap) { - adap->fe_adap[0].fe = gp8psk_fe_attach(adap->dev); + struct dvb_usb_device *d = adap->dev; + int id = le16_to_cpu(d->udev->descriptor.idProduct); + int is_rev1; + + is_rev1 = (id == USB_PID_GENPIX_8PSK_REV_1_WARM) ? true : false; + + adap->fe_adap[0].fe = dvb_attach(gp8psk_fe_attach, + &gp8psk_fe_ops, d, is_rev1); return 0; } diff --git a/drivers/media/usb/dvb-usb/gp8psk.h b/drivers/media/usb/dvb-usb/gp8psk.h index ed32b9da4843..d8975b866dee 100644 --- a/drivers/media/usb/dvb-usb/gp8psk.h +++ b/drivers/media/usb/dvb-usb/gp8psk.h @@ -24,58 +24,6 @@ extern int dvb_usb_gp8psk_debug; #define deb_info(args...) dprintk(dvb_usb_gp8psk_debug,0x01,args) #define deb_xfer(args...) dprintk(dvb_usb_gp8psk_debug,0x02,args) #define deb_rc(args...) dprintk(dvb_usb_gp8psk_debug,0x04,args) -#define deb_fe(args...) 
dprintk(dvb_usb_gp8psk_debug,0x08,args) - -/* Twinhan Vendor requests */ -#define TH_COMMAND_IN 0xC0 -#define TH_COMMAND_OUT 0xC1 - -/* gp8psk commands */ - -#define GET_8PSK_CONFIG 0x80 /* in */ -#define SET_8PSK_CONFIG 0x81 -#define I2C_WRITE 0x83 -#define I2C_READ 0x84 -#define ARM_TRANSFER 0x85 -#define TUNE_8PSK 0x86 -#define GET_SIGNAL_STRENGTH 0x87 /* in */ -#define LOAD_BCM4500 0x88 -#define BOOT_8PSK 0x89 /* in */ -#define START_INTERSIL 0x8A /* in */ -#define SET_LNB_VOLTAGE 0x8B -#define SET_22KHZ_TONE 0x8C -#define SEND_DISEQC_COMMAND 0x8D -#define SET_DVB_MODE 0x8E -#define SET_DN_SWITCH 0x8F -#define GET_SIGNAL_LOCK 0x90 /* in */ -#define GET_FW_VERS 0x92 -#define GET_SERIAL_NUMBER 0x93 /* in */ -#define USE_EXTRA_VOLT 0x94 -#define GET_FPGA_VERS 0x95 -#define CW3K_INIT 0x9d - -/* PSK_configuration bits */ -#define bm8pskStarted 0x01 -#define bm8pskFW_Loaded 0x02 -#define bmIntersilOn 0x04 -#define bmDVBmode 0x08 -#define bm22kHz 0x10 -#define bmSEL18V 0x20 -#define bmDCtuned 0x40 -#define bmArmed 0x80 - -/* Satellite modulation modes */ -#define ADV_MOD_DVB_QPSK 0 /* DVB-S QPSK */ -#define ADV_MOD_TURBO_QPSK 1 /* Turbo QPSK */ -#define ADV_MOD_TURBO_8PSK 2 /* Turbo 8PSK (also used for Trellis 8PSK) */ -#define ADV_MOD_TURBO_16QAM 3 /* Turbo 16QAM (also used for Trellis 8PSK) */ - -#define ADV_MOD_DCII_C_QPSK 4 /* Digicipher II Combo */ -#define ADV_MOD_DCII_I_QPSK 5 /* Digicipher II I-stream */ -#define ADV_MOD_DCII_Q_QPSK 6 /* Digicipher II Q-stream */ -#define ADV_MOD_DCII_C_OQPSK 7 /* Digicipher II offset QPSK */ -#define ADV_MOD_DSS_QPSK 8 /* DSS (DIRECTV) QPSK */ -#define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */ #define GET_USB_SPEED 0x07 @@ -86,15 +34,4 @@ extern int dvb_usb_gp8psk_debug; #define PRODUCT_STRING_READ 0x0D #define FW_BCD_VERSION_READ 0x14 -/* firmware revision id's */ -#define GP8PSK_FW_REV1 0x020604 -#define GP8PSK_FW_REV2 0x020704 -#define GP8PSK_FW_VERS(_fw_vers) ((_fw_vers)[2]<<0x10 | (_fw_vers)[1]<<0x08 | (_fw_vers)[0]) - -extern struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d); -extern int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen); -extern int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, - u16 index, u8 *b, int blen); -extern int gp8psk_bcm4500_reload(struct dvb_usb_device *d); - #endif diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index e9e6ea3ab73c..75b9d4ac8b1e 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -178,7 +178,7 @@ static int mei_nfc_if_version(struct mei_cl *cl, ret = 0; bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); - if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { + if (bytes_recv < if_version_length) { dev_err(bus->dev, "Could not read IF version\n"); ret = -EIO; goto err; diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 5a8dc5a76e0d..3678220964fe 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -2347,7 +2347,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test, struct mmc_test_req *rq = mmc_test_req_alloc(); struct mmc_host *host = test->card->host; struct mmc_test_area *t = &test->area; - struct mmc_async_req areq; + struct mmc_test_async_req test_areq = { .test = test }; struct mmc_request *mrq; unsigned long timeout; bool expired = false; @@ -2363,8 +2363,8 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test, mrq->sbc = &rq->sbc; mrq->cap_cmd_during_tfr = true; - areq.mrq 
= mrq; - areq.err_check = mmc_test_check_result_async; + test_areq.areq.mrq = mrq; + test_areq.areq.err_check = mmc_test_check_result_async; mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks, 512, write); @@ -2378,7 +2378,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test, /* Start ongoing data request */ if (use_areq) { - mmc_start_req(host, &areq, &ret); + mmc_start_req(host, &test_areq.areq, &ret); if (ret) goto out_free; } else { diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 39fc5b2b96c5..df19777068a6 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -26,6 +26,8 @@ #include "mmc_ops.h" #include "sd_ops.h" +#define DEFAULT_CMD6_TIMEOUT_MS 500 + static const unsigned int tran_exp[] = { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 @@ -571,6 +573,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) card->erased_byte = 0x0; /* eMMC v4.5 or later */ + card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS; if (card->ext_csd.rev >= 6) { card->ext_csd.feature_support |= MMC_DISCARD_FEATURE; diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 4fcbc4012ed0..50a674be6655 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -2940,7 +2940,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) return ERR_PTR(-ENOMEM); /* find reset controller when exist */ - pdata->rstc = devm_reset_control_get_optional(dev, NULL); + pdata->rstc = devm_reset_control_get_optional(dev, "reset"); if (IS_ERR(pdata->rstc)) { if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER) return ERR_PTR(-EPROBE_DEFER); diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index d839147e591d..44ecebd1ea8c 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -661,13 +661,13 @@ static int mxs_mmc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mmc); + spin_lock_init(&host->lock); + ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0, dev_name(&pdev->dev), host); if (ret) goto out_free_dma; - spin_lock_init(&host->lock); - ret = mmc_add_host(mmc); if (ret) goto out_free_dma; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 71654b90227f..42ef3ebb1d8c 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2086,6 +2086,10 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) if (!host->tuning_done) { pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n"); + + sdhci_do_reset(host, SDHCI_RESET_CMD); + sdhci_do_reset(host, SDHCI_RESET_DATA); + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); ctrl &= ~SDHCI_CTRL_TUNED_CLK; ctrl &= ~SDHCI_CTRL_EXEC_TUNING; @@ -2286,10 +2290,8 @@ static bool sdhci_request_done(struct sdhci_host *host) for (i = 0; i < SDHCI_MAX_MRQS; i++) { mrq = host->mrqs_done[i]; - if (mrq) { - host->mrqs_done[i] = NULL; + if (mrq) break; - } } if (!mrq) { @@ -2320,6 +2322,17 @@ static bool sdhci_request_done(struct sdhci_host *host) * upon error conditions. */ if (sdhci_needs_reset(host, mrq)) { + /* + * Do not finish until command and data lines are available for + * reset. Note there can only be one other mrq, so it cannot + * also be in mrqs_done, otherwise host->cmd and host->data_cmd + * would both be null. 
+ */ + if (host->cmd || host->data_cmd) { + spin_unlock_irqrestore(&host->lock, flags); + return true; + } + /* Some controllers need this kick or reset won't work here */ if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) /* This is to force an update */ @@ -2327,10 +2340,8 @@ static bool sdhci_request_done(struct sdhci_host *host) /* Spec says we should do both at the same time, but Ricoh controllers do not like that. */ - if (!host->cmd) - sdhci_do_reset(host, SDHCI_RESET_CMD); - if (!host->data_cmd) - sdhci_do_reset(host, SDHCI_RESET_DATA); + sdhci_do_reset(host, SDHCI_RESET_CMD); + sdhci_do_reset(host, SDHCI_RESET_DATA); host->pending_reset = false; } @@ -2338,6 +2349,8 @@ static bool sdhci_request_done(struct sdhci_host *host) if (!sdhci_has_requests(host)) sdhci_led_deactivate(host); + host->mrqs_done[i] = NULL; + mmiowb(); spin_unlock_irqrestore(&host->lock, flags); @@ -2512,9 +2525,6 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) if (!host->data) { struct mmc_command *data_cmd = host->data_cmd; - if (data_cmd) - host->data_cmd = NULL; - /* * The "data complete" interrupt is also used to * indicate that a busy state has ended. See comment @@ -2522,11 +2532,13 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) */ if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { if (intmask & SDHCI_INT_DATA_TIMEOUT) { + host->data_cmd = NULL; data_cmd->error = -ETIMEDOUT; sdhci_finish_mrq(host, data_cmd->mrq); return; } if (intmask & SDHCI_INT_DATA_END) { + host->data_cmd = NULL; /* * Some cards handle busy-end interrupt * before the command completed, so make @@ -2912,6 +2924,10 @@ int sdhci_runtime_resume_host(struct sdhci_host *host) spin_unlock_irqrestore(&host->lock, flags); } + if ((mmc->caps2 & MMC_CAP2_HS400_ES) && + mmc->ops->hs400_enhanced_strobe) + mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios); + spin_lock_irqsave(&host->lock, flags); host->runtime_suspended = false; diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c index 83deda4bb4d6..6f9563a96488 100644 --- a/drivers/nfc/mei_phy.c +++ b/drivers/nfc/mei_phy.c @@ -133,7 +133,7 @@ static int mei_nfc_if_version(struct nfc_mei_phy *phy) return -ENOMEM; bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length); - if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { + if (bytes_recv < 0 || bytes_recv < if_version_length) { pr_err("Could not read IF version\n"); r = -EIO; goto err; diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index f5e3011e31fc..5daf2f4be0cd 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -612,7 +612,7 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node, ret = nvm_register(dev); - ns->lba_shift = ilog2(dev->sec_size) - 9; + ns->lba_shift = ilog2(dev->sec_size); if (sysfs_create_group(&dev->dev.kobj, attrs)) pr_warn("%s: failed to create sysfs group for identification\n", diff --git a/drivers/of/base.c b/drivers/of/base.c index d687e6de24a0..a0bccb54a9bd 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -2077,8 +2077,6 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) name = of_get_property(of_aliases, "stdout", NULL); if (name) of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); - if (of_stdout) - console_set_by_of(); } if (!of_aliases) diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c index e0b22dab9b7a..e04f69beb42d 100644 --- a/drivers/pci/host/pcie-rockchip.c +++ b/drivers/pci/host/pcie-rockchip.c @@ 
-190,6 +190,9 @@ struct rockchip_pcie { struct reset_control *mgmt_rst; struct reset_control *mgmt_sticky_rst; struct reset_control *pipe_rst; + struct reset_control *pm_rst; + struct reset_control *aclk_rst; + struct reset_control *pclk_rst; struct clk *aclk_pcie; struct clk *aclk_perf_pcie; struct clk *hclk_pcie; @@ -408,6 +411,44 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) gpiod_set_value(rockchip->ep_gpio, 0); + err = reset_control_assert(rockchip->aclk_rst); + if (err) { + dev_err(dev, "assert aclk_rst err %d\n", err); + return err; + } + + err = reset_control_assert(rockchip->pclk_rst); + if (err) { + dev_err(dev, "assert pclk_rst err %d\n", err); + return err; + } + + err = reset_control_assert(rockchip->pm_rst); + if (err) { + dev_err(dev, "assert pm_rst err %d\n", err); + return err; + } + + udelay(10); + + err = reset_control_deassert(rockchip->pm_rst); + if (err) { + dev_err(dev, "deassert pm_rst err %d\n", err); + return err; + } + + err = reset_control_deassert(rockchip->aclk_rst); + if (err) { + dev_err(dev, "deassert aclk_rst err %d\n", err); + return err; + } + + err = reset_control_deassert(rockchip->pclk_rst); + if (err) { + dev_err(dev, "deassert pclk_rst err %d\n", err); + return err; + } + err = phy_init(rockchip->phy); if (err < 0) { dev_err(dev, "fail to init phy, err %d\n", err); @@ -781,6 +822,27 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) return PTR_ERR(rockchip->pipe_rst); } + rockchip->pm_rst = devm_reset_control_get(dev, "pm"); + if (IS_ERR(rockchip->pm_rst)) { + if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER) + dev_err(dev, "missing pm reset property in node\n"); + return PTR_ERR(rockchip->pm_rst); + } + + rockchip->pclk_rst = devm_reset_control_get(dev, "pclk"); + if (IS_ERR(rockchip->pclk_rst)) { + if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER) + dev_err(dev, "missing pclk reset property in node\n"); + return PTR_ERR(rockchip->pclk_rst); + } + + rockchip->aclk_rst = devm_reset_control_get(dev, "aclk"); + if (IS_ERR(rockchip->aclk_rst)) { + if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER) + dev_err(dev, "missing aclk reset property in node\n"); + return PTR_ERR(rockchip->aclk_rst); + } + rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH); if (IS_ERR(rockchip->ep_gpio)) { dev_err(dev, "missing ep-gpios property in node\n"); diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 66c4d8f42233..9526e341988b 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -121,6 +121,14 @@ int pci_claim_resource(struct pci_dev *dev, int resource) return -EINVAL; } + /* + * If we have a shadow copy in RAM, the PCI device doesn't respond + * to the shadow range, so we don't need to claim it, and upstream + * bridges don't need to route the range to the device.
+ */ + if (res->flags & IORESOURCE_ROM_SHADOW) + return 0; + root = pci_find_parent_resource(dev, res); if (!root) { dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n", diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index 153f3122283d..b6b316de055c 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c @@ -107,7 +107,7 @@ int soc_pcmcia_regulator_set(struct soc_pcmcia_socket *skt, ret = regulator_enable(r->reg); } else { - regulator_disable(r->reg); + ret = regulator_disable(r->reg); } if (ret == 0) r->on = on; diff --git a/drivers/phy/phy-da8xx-usb.c b/drivers/phy/phy-da8xx-usb.c index 32ae78c8ca17..c85fb0b59729 100644 --- a/drivers/phy/phy-da8xx-usb.c +++ b/drivers/phy/phy-da8xx-usb.c @@ -198,7 +198,8 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev) } else { int ret; - ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0"); + ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy", + "ohci-da8xx"); if (ret) dev_warn(dev, "Failed to create usb11 phy lookup\n"); ret = phy_create_lookup(d_phy->usb20_phy, "usb-phy", @@ -216,7 +217,7 @@ static int da8xx_usb_phy_remove(struct platform_device *pdev) if (!pdev->dev.of_node) { phy_remove_lookup(d_phy->usb20_phy, "usb-phy", "musb-da8xx"); - phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0"); + phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci-da8xx"); } return 0; diff --git a/drivers/phy/phy-rockchip-pcie.c b/drivers/phy/phy-rockchip-pcie.c index a2b4c6b58aea..6904633cad68 100644 --- a/drivers/phy/phy-rockchip-pcie.c +++ b/drivers/phy/phy-rockchip-pcie.c @@ -249,21 +249,10 @@ err_refclk: static int rockchip_pcie_phy_exit(struct phy *phy) { struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy); - int err = 0; clk_disable_unprepare(rk_phy->clk_pciephy_ref); - err = reset_control_deassert(rk_phy->phy_rst); - if (err) { - dev_err(&phy->dev, "deassert phy_rst err %d\n", err); - goto err_reset; - } - - return err; - -err_reset: - clk_prepare_enable(rk_phy->clk_pciephy_ref); - return err; + return 0; } static const struct phy_ops ops = { diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c index b9342a2af7b3..fec34f5213c4 100644 --- a/drivers/phy/phy-sun4i-usb.c +++ b/drivers/phy/phy-sun4i-usb.c @@ -264,7 +264,7 @@ static int sun4i_usb_phy_init(struct phy *_phy) return ret; } - if (data->cfg->enable_pmu_unk1) { + if (phy->pmu && data->cfg->enable_pmu_unk1) { val = readl(phy->pmu + REG_PMU_UNK1); writel(val & ~2, phy->pmu + REG_PMU_UNK1); } diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c index c8c72e8259d3..87b46390b695 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c @@ -26,7 +26,7 @@ #define ASPEED_G5_NR_PINS 228 -#define COND1 SIG_DESC_BIT(SCU90, 6, 0) +#define COND1 { SCU90, BIT(6), 0, 0 } #define COND2 { SCU94, GENMASK(1, 0), 0, 0 } #define B14 0 diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c index 7f7700716398..5d1e505c3c63 100644 --- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c @@ -844,6 +844,6 @@ static struct platform_driver iproc_gpio_driver = { static int __init iproc_gpio_init(void) { - return platform_driver_probe(&iproc_gpio_driver, iproc_gpio_probe); + return platform_driver_register(&iproc_gpio_driver); } arch_initcall_sync(iproc_gpio_init); diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c 
b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c index 35783db1c10b..c8deb8be1da7 100644 --- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c @@ -741,6 +741,6 @@ static struct platform_driver nsp_gpio_driver = { static int __init nsp_gpio_init(void) { - return platform_driver_probe(&nsp_gpio_driver, nsp_gpio_probe); + return platform_driver_register(&nsp_gpio_driver); } arch_initcall_sync(nsp_gpio_init); diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index 47613201269a..79c4e14a5a75 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -687,6 +687,7 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev, if (!info->functions) return -ENOMEM; + info->group_index = 0; if (flat_funcs) { info->ngroups = of_get_child_count(np); } else { diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 30389f4ccab4..c43b1e9a06af 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1652,12 +1652,15 @@ static int chv_pinctrl_probe(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP -static int chv_pinctrl_suspend(struct device *dev) +static int chv_pinctrl_suspend_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); + unsigned long flags; int i; + raw_spin_lock_irqsave(&chv_lock, flags); + pctrl->saved_intmask = readl(pctrl->regs + CHV_INTMASK); for (i = 0; i < pctrl->community->npins; i++) { @@ -1678,15 +1681,20 @@ static int chv_pinctrl_suspend(struct device *dev) ctx->padctrl1 = readl(reg); } + raw_spin_unlock_irqrestore(&chv_lock, flags); + return 0; } -static int chv_pinctrl_resume(struct device *dev) +static int chv_pinctrl_resume_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); + unsigned long flags; int i; + raw_spin_lock_irqsave(&chv_lock, flags); + /* * Mask all interrupts before restoring per-pin configuration * registers because we don't know in which state BIOS left them @@ -1731,12 +1739,15 @@ static int chv_pinctrl_resume(struct device *dev) chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK); + raw_spin_unlock_irqrestore(&chv_lock, flags); + return 0; } #endif static const struct dev_pm_ops chv_pinctrl_pm_ops = { - SET_LATE_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend, chv_pinctrl_resume) + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend_noirq, + chv_pinctrl_resume_noirq) }; static const struct acpi_device_id chv_pinctrl_acpi_match[] = { diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index 99da4cf91031..b7bb37167969 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c @@ -1512,7 +1512,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info, if (info->irqmux_base || gpio_irq > 0) { err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip, 0, handle_simple_irq, - IRQ_TYPE_LEVEL_LOW); + IRQ_TYPE_NONE); if (err) { gpiochip_remove(&bank->gpio_chip); dev_info(dev, "could not add irqchip\n"); diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index 200667f08c37..efc43711ff5c 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -1092,9 +1092,11 @@ int stm32_pctl_probe(struct 
platform_device *pdev) return -EINVAL; } - ret = stm32_pctrl_dt_setup_irq(pdev, pctl); - if (ret) - return ret; + if (of_find_property(np, "interrupt-parent", NULL)) { + ret = stm32_pctrl_dt_setup_irq(pdev, pctl); + if (ret) + return ret; + } for_each_child_of_node(np, child) if (of_property_read_bool(child, "gpio-controller")) diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index a2323941e677..a7614fc542b5 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -934,6 +934,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { }, }, { + .ident = "Lenovo Yoga 900", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_BOARD_NAME, "VIUU4"), + }, + }, + { .ident = "Lenovo YOGA 910-13IKB", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index ed5874217ee7..12dbb5063376 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -264,7 +264,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; if (acpi_match_device_ids(dev, ids) == 0) - if (acpi_create_platform_device(dev)) + if (acpi_create_platform_device(dev, NULL)) dev_info(&dev->dev, "intel-hid: created platform device\n"); diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index 146d02f8c9bc..78080763df51 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -164,7 +164,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; if (acpi_match_device_ids(dev, ids) == 0) - if (acpi_create_platform_device(dev)) + if (acpi_create_platform_device(dev, NULL)) dev_info(&dev->dev, "intel-vbtn: created platform device\n"); diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c index feac4576b837..2df07ee8f3c3 100644 --- a/drivers/platform/x86/toshiba-wmi.c +++ b/drivers/platform/x86/toshiba-wmi.c @@ -24,14 +24,15 @@ #include <linux/acpi.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> +#include <linux/dmi.h> MODULE_AUTHOR("Azael Avalos"); MODULE_DESCRIPTION("Toshiba WMI Hotkey Driver"); MODULE_LICENSE("GPL"); -#define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100" +#define WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100" -MODULE_ALIAS("wmi:"TOSHIBA_WMI_EVENT_GUID); +MODULE_ALIAS("wmi:"WMI_EVENT_GUID); static struct input_dev *toshiba_wmi_input_dev; @@ -63,6 +64,16 @@ static void toshiba_wmi_notify(u32 value, void *context) kfree(response.pointer); } +static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = { + { + .ident = "Toshiba laptop", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + }, + }, + {} +}; + static int __init toshiba_wmi_input_setup(void) { acpi_status status; @@ -81,7 +92,7 @@ static int __init toshiba_wmi_input_setup(void) if (err) goto err_free_dev; - status = wmi_install_notify_handler(TOSHIBA_WMI_EVENT_GUID, + status = wmi_install_notify_handler(WMI_EVENT_GUID, toshiba_wmi_notify, NULL); if (ACPI_FAILURE(status)) { err = -EIO; @@ -95,7 +106,7 @@ static int __init toshiba_wmi_input_setup(void) return 0; err_remove_notifier: - wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID); + wmi_remove_notify_handler(WMI_EVENT_GUID); err_free_keymap: sparse_keymap_free(toshiba_wmi_input_dev); err_free_dev: @@ -105,7 +116,7 @@ static int __init toshiba_wmi_input_setup(void) static void 
toshiba_wmi_input_destroy(void) { - wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID); + wmi_remove_notify_handler(WMI_EVENT_GUID); sparse_keymap_free(toshiba_wmi_input_dev); input_unregister_device(toshiba_wmi_input_dev); } @@ -114,7 +125,8 @@ static int __init toshiba_wmi_init(void) { int ret; - if (!wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) + if (!wmi_has_guid(WMI_EVENT_GUID) || + !dmi_check_system(toshiba_wmi_dmi_table)) return -ENODEV; ret = toshiba_wmi_input_setup(); @@ -130,7 +142,7 @@ static int __init toshiba_wmi_init(void) static void __exit toshiba_wmi_exit(void) { - if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) + if (wmi_has_guid(WMI_EVENT_GUID)) toshiba_wmi_input_destroy(); } diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index d1421139e6ea..2ffe029ff2b6 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -2081,9 +2081,10 @@ void cxgbi_cleanup_task(struct iscsi_task *task) /* never reached the xmit task callout */ if (tdata->skb) __kfree_skb(tdata->skb); - memset(tdata, 0, sizeof(*tdata)); task_release_itt(task, task->hdr_itt); + memset(tdata, 0, sizeof(*tdata)); + iscsi_tcp_cleanup_task(task); } EXPORT_SYMBOL_GPL(cxgbi_cleanup_task); diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 241829e59668..7bb20684e9fa 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -793,6 +793,7 @@ static void alua_rtpg_work(struct work_struct *work) WARN_ON(pg->flags & ALUA_PG_RUN_RTPG); WARN_ON(pg->flags & ALUA_PG_RUN_STPG); spin_unlock_irqrestore(&pg->lock, flags); + kref_put(&pg->kref, release_port_group); return; } if (pg->flags & ALUA_SYNC_STPG) @@ -890,6 +891,7 @@ static void alua_rtpg_queue(struct alua_port_group *pg, /* Do not queue if the worker is already running */ if (!(pg->flags & ALUA_PG_RUNNING)) { kref_get(&pg->kref); + sdev = NULL; start_queue = 1; } } @@ -901,7 +903,8 @@ static void alua_rtpg_queue(struct alua_port_group *pg, if (start_queue && !queue_delayed_work(alua_wq, &pg->rtpg_work, msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) { - scsi_device_put(sdev); + if (sdev) + scsi_device_put(sdev); kref_put(&pg->kref, release_port_group); } } diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index ca86c885dfaa..3aaea713bf37 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2233,7 +2233,7 @@ struct megasas_instance_template { }; #define MEGASAS_IS_LOGICAL(scp) \ - (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1 + ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 
0 : 1) #define MEGASAS_DEV_INDEX(scp) \ (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 209a969a979d..8aa769a2d919 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -1273,9 +1273,9 @@ scsih_target_alloc(struct scsi_target *starget) sas_target_priv_data->handle = raid_device->handle; sas_target_priv_data->sas_address = raid_device->wwid; sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; - sas_target_priv_data->raid_device = raid_device; if (ioc->is_warpdrive) - raid_device->starget = starget; + sas_target_priv_data->raid_device = raid_device; + raid_device->starget = starget; } spin_unlock_irqrestore(&ioc->raid_device_lock, flags); return 0; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index ace65db1d2a2..567fa080e261 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -707,6 +707,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) srb_t *sp; int rval; + if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) { + cmd->result = DID_NO_CONNECT << 16; + goto qc24_fail_command; + } + if (ha->flags.eeh_busy) { if (ha->flags.pci_channel_io_perm_failure) { ql_dbg(ql_dbg_aer, vha, 0x9010, @@ -1451,6 +1456,15 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { + /* Get a reference to the sp and drop the lock. + * The reference ensures this sp->done() call + * - and not the call in qla2xxx_eh_abort() - + * ends the SCSI command (with result 'res'). + */ + sp_get(sp); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + qla2xxx_eh_abort(GET_CMD_SP(sp)); + spin_lock_irqsave(&ha->hardware_lock, flags); req->outstanding_cmds[cnt] = NULL; sp->done(vha, sp, res); } @@ -2341,6 +2355,8 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) { scsi_qla_host_t *vha = shost_priv(shost); + if (test_bit(UNLOADING, &vha->dpc_flags)) + return 1; if (!vha->host) return 1; if (time > vha->hw->loop_reset_delay * HZ) diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 4a0d3cdc607c..15ca09cd16f3 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -793,6 +793,7 @@ static int pvscsi_abort(struct scsi_cmnd *cmd) unsigned long flags; int result = SUCCESS; DECLARE_COMPLETION_ONSTACK(abort_cmp); + int done; scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n", adapter->host->host_no, cmd); @@ -824,10 +825,10 @@ static int pvscsi_abort(struct scsi_cmnd *cmd) pvscsi_abort_cmd(adapter, ctx); spin_unlock_irqrestore(&adapter->hw_lock, flags); /* Wait for 2 secs for the completion. */ - wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000)); + done = wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000)); spin_lock_irqsave(&adapter->hw_lock, flags); - if (!completion_done(&abort_cmp)) { + if (!done) { /* * Failed to abort the command, unmark the fact that it * was requested to be aborted. 
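The vmw_pvscsi hunk above is a small race fix worth spelling out: completion_done() re-checks the completion only after the lock is retaken, so a completion that fires in the window between the timeout and the re-check makes a timed-out abort look successful. Capturing the return value of wait_for_completion_timeout() records the outcome atomically at wait time. A sketch of the pattern with hypothetical names (pvscsi_wait_abort() is illustrative and not part of the driver):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>

/* Illustrative only: mirrors the structure of pvscsi_abort() above. */
static int pvscsi_wait_abort(struct completion *abort_cmp, spinlock_t *hw_lock)
{
        unsigned long flags;
        unsigned long done;

        /* 0 on timeout, remaining jiffies if the completion fired in time */
        done = wait_for_completion_timeout(abort_cmp, msecs_to_jiffies(2000));

        spin_lock_irqsave(hw_lock, flags);
        if (!done) {
                /*
                 * Timed out: roll back the abort request under the lock.
                 * Testing completion_done() here instead would race with a
                 * completion arriving just after the timeout expired.
                 */
        }
        spin_unlock_irqrestore(hw_lock, flags);

        return done ? 0 : -ETIMEDOUT;
}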
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h index c097d2ccbde3..d41292ef85f2 100644 --- a/drivers/scsi/vmw_pvscsi.h +++ b/drivers/scsi/vmw_pvscsi.h @@ -26,7 +26,7 @@ #include <linux/types.h> -#define PVSCSI_DRIVER_VERSION_STRING "1.0.6.0-k" +#define PVSCSI_DRIVER_VERSION_STRING "1.0.7.0-k" #define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128 diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c index 7043eb0543f6..5ab49a798164 100644 --- a/drivers/staging/comedi/drivers/ni_tio.c +++ b/drivers/staging/comedi/drivers/ni_tio.c @@ -207,7 +207,8 @@ static int ni_tio_clock_period_ps(const struct ni_gpct *counter, * clock period is specified by user with prescaling * already taken into account. */ - return counter->clock_period_ps; + *period_ps = counter->clock_period_ps; + return 0; } switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) { diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c index 34307ac3f255..d33d6fe078ad 100644 --- a/drivers/staging/greybus/arche-platform.c +++ b/drivers/staging/greybus/arche-platform.c @@ -186,6 +186,7 @@ int arche_platform_change_state(enum arche_platform_state state, exit: spin_unlock_irqrestore(&arche_pdata->wake_lock, flags); mutex_unlock(&arche_pdata->platform_state_mutex); + put_device(&pdev->dev); of_node_put(np); return ret; } diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 5eecf1cb1028..3892a7470410 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -655,6 +655,7 @@ static void ad5933_work(struct work_struct *work) __be16 buf[2]; int val[2]; unsigned char status; + int ret; mutex_lock(&indio_dev->mlock); if (st->state == AD5933_CTRL_INIT_START_FREQ) { @@ -662,19 +663,22 @@ static void ad5933_work(struct work_struct *work) ad5933_cmd(st, AD5933_CTRL_START_SWEEP); st->state = AD5933_CTRL_START_SWEEP; schedule_delayed_work(&st->work, st->poll_time_jiffies); - mutex_unlock(&indio_dev->mlock); - return; + goto out; } - ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status); + ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status); + if (ret) + goto out; if (status & AD5933_STAT_DATA_VALID) { int scan_count = bitmap_weight(indio_dev->active_scan_mask, indio_dev->masklength); - ad5933_i2c_read(st->client, + ret = ad5933_i2c_read(st->client, test_bit(1, indio_dev->active_scan_mask) ? 
AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA, scan_count * 2, (u8 *)buf); + if (ret) + goto out; if (scan_count == 2) { val[0] = be16_to_cpu(buf[0]); @@ -686,8 +690,7 @@ static void ad5933_work(struct work_struct *work) } else { /* no data available - try again later */ schedule_delayed_work(&st->work, st->poll_time_jiffies); - mutex_unlock(&indio_dev->mlock); - return; + goto out; } if (status & AD5933_STAT_SWEEP_DONE) { @@ -700,7 +703,7 @@ static void ad5933_work(struct work_struct *work) ad5933_cmd(st, AD5933_CTRL_INC_FREQ); schedule_delayed_work(&st->work, st->poll_time_jiffies); } - +out: mutex_unlock(&indio_dev->mlock); } diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c index a324322ee0ad..499952c8ef39 100644 --- a/drivers/staging/nvec/nvec_ps2.c +++ b/drivers/staging/nvec/nvec_ps2.c @@ -106,13 +106,12 @@ static int nvec_mouse_probe(struct platform_device *pdev) { struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent); struct serio *ser_dev; - char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 }; - ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL); + ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL); if (!ser_dev) return -ENOMEM; - ser_dev->id.type = SERIO_PS_PSTHRU; + ser_dev->id.type = SERIO_8042; ser_dev->write = ps2_sendcommand; ser_dev->start = ps2_startstreaming; ser_dev->stop = ps2_stopstreaming; @@ -127,9 +126,6 @@ static int nvec_mouse_probe(struct platform_device *pdev) serio_register_port(ser_dev); - /* mouse reset */ - nvec_write_async(nvec, mouse_reset, sizeof(mouse_reset)); - return 0; } diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h index 955247979aaa..4ed6d8d7712a 100644 --- a/drivers/staging/sm750fb/ddk750_reg.h +++ b/drivers/staging/sm750fb/ddk750_reg.h @@ -601,13 +601,13 @@ #define PANEL_PLANE_TL 0x08001C #define PANEL_PLANE_TL_TOP_SHIFT 16 -#define PANEL_PLANE_TL_TOP_MASK (0xeff << 16) -#define PANEL_PLANE_TL_LEFT_MASK 0xeff +#define PANEL_PLANE_TL_TOP_MASK (0x7ff << 16) +#define PANEL_PLANE_TL_LEFT_MASK 0x7ff #define PANEL_PLANE_BR 0x080020 #define PANEL_PLANE_BR_BOTTOM_SHIFT 16 -#define PANEL_PLANE_BR_BOTTOM_MASK (0xeff << 16) -#define PANEL_PLANE_BR_RIGHT_MASK 0xeff +#define PANEL_PLANE_BR_BOTTOM_MASK (0x7ff << 16) +#define PANEL_PLANE_BR_RIGHT_MASK 0x7ff #define PANEL_HORIZONTAL_TOTAL 0x080024 #define PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT 16 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 78f0f85bebdc..fada988512a1 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -932,8 +932,6 @@ static int wait_serial_change(struct acm *acm, unsigned long arg) DECLARE_WAITQUEUE(wait, current); struct async_icount old, new; - if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD)) - return -EINVAL; do { spin_lock_irq(&acm->read_lock); old = acm->oldcount; @@ -1161,6 +1159,8 @@ static int acm_probe(struct usb_interface *intf, if (quirks == IGNORE_DEVICE) return -ENODEV; + memset(&h, 0x00, sizeof(struct usb_cdc_parsed_header)); + num_rx_buf = (quirks == SINGLE_RX_URB) ? 
1 : ACM_NR; /* handle quirks deadly to normal probing*/ diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 7287a763cd0c..fea446900cad 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -769,15 +769,14 @@ static int dwc3_core_init(struct dwc3 *dwc) return 0; err4: - phy_power_off(dwc->usb2_generic_phy); + phy_power_off(dwc->usb3_generic_phy); err3: - phy_power_off(dwc->usb3_generic_phy); + phy_power_off(dwc->usb2_generic_phy); err2: usb_phy_set_suspend(dwc->usb2_phy, 1); usb_phy_set_suspend(dwc->usb3_phy, 1); - dwc3_core_exit(dwc); err1: usb_phy_shutdown(dwc->usb2_phy); diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c index 89a2f712fdfe..aaaf256f71dd 100644 --- a/drivers/usb/dwc3/dwc3-st.c +++ b/drivers/usb/dwc3/dwc3-st.c @@ -31,6 +31,7 @@ #include <linux/slab.h> #include <linux/regmap.h> #include <linux/reset.h> +#include <linux/pinctrl/consumer.h> #include <linux/usb/of.h> #include "core.h" diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index fe1811650dbc..5d1bd13a56c1 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -588,14 +588,6 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, req->length = length; - /* throttle high/super speed IRQ rate back slightly */ - if (gadget_is_dualspeed(dev->gadget)) - req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH || - dev->gadget->speed == USB_SPEED_SUPER)) && - !list_empty(&dev->tx_reqs)) - ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0) - : 0; - retval = usb_ep_queue(in, req, GFP_ATOMIC); switch (retval) { default: diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index d793f548dfe2..a9a1e4c40480 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -995,6 +995,14 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev) } val = readl(base + ext_cap_offset); + /* Auto handoff never worked for these devices. Force it and continue */ + if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) || + (pdev->vendor == PCI_VENDOR_ID_RENESAS + && pdev->device == 0x0014)) { + val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED; + writel(val, base + ext_cap_offset); + } + /* If the BIOS owns the HC, signal that the OS wants it, and wait */ if (val & XHCI_HC_BIOS_OWNED) { writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index 210b7e43a6fd..2440f88e07a3 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c @@ -479,7 +479,8 @@ static int da8xx_probe(struct platform_device *pdev) glue->phy = devm_phy_get(&pdev->dev, "usb-phy"); if (IS_ERR(glue->phy)) { - dev_err(&pdev->dev, "failed to get phy\n"); + if (PTR_ERR(glue->phy) != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get phy\n"); return PTR_ERR(glue->phy); } diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 27dadc0d9114..e01116e4c067 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2114,11 +2114,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb->io.ep_offset = musb_flat_ep_offset; musb->io.ep_select = musb_flat_ep_select; } - /* And override them with platform specific ops if specified. 
*/ - if (musb->ops->ep_offset) - musb->io.ep_offset = musb->ops->ep_offset; - if (musb->ops->ep_select) - musb->io.ep_select = musb->ops->ep_select; /* At least tusb6010 has its own offsets */ if (musb->ops->ep_offset) diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c index d059ad4d0dbd..97ee1b46db69 100644 --- a/drivers/uwb/lc-rc.c +++ b/drivers/uwb/lc-rc.c @@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index) struct uwb_rc *rc = NULL; dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match); - if (dev) + if (dev) { rc = dev_get_drvdata(dev); + put_device(dev); + } + return rc; } @@ -467,7 +470,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc) if (dev) { rc = dev_get_drvdata(dev); __uwb_rc_get(rc); + put_device(dev); } + return rc; } EXPORT_SYMBOL_GPL(__uwb_rc_try_get); @@ -520,8 +525,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev) dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev, find_rc_grandpa); - if (dev) + if (dev) { rc = dev_get_drvdata(dev); + put_device(dev); + } + return rc; } EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa); @@ -553,8 +561,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr) struct uwb_rc *rc = NULL; dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev); - if (dev) + if (dev) { rc = dev_get_drvdata(dev); + put_device(dev); + } return rc; } diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c index c1304b8d4985..678e93741ae1 100644 --- a/drivers/uwb/pal.c +++ b/drivers/uwb/pal.c @@ -97,6 +97,8 @@ static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc) dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc); + put_device(dev); + return (dev != NULL); }
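All of the lc-rc.c and pal.c hunks above fix the same leak: class_find_device() takes a reference on the device it returns, and every successful lookup must balance it with put_device() once the driver data has been extracted. A minimal sketch of the corrected lookup pattern, keeping the uwb names the diff uses; the helper itself is illustrative and not part of the patch:

#include <linux/device.h>

struct uwb_rc;  /* opaque here; defined in the uwb core */

static struct uwb_rc *uwb_rc_lookup(struct class *cls, const void *key,
                                    int (*match)(struct device *, const void *))
{
        struct uwb_rc *rc = NULL;
        struct device *dev;

        dev = class_find_device(cls, NULL, key, match);
        if (dev) {
                rc = dev_get_drvdata(dev);
                /* balance the reference class_find_device() took */
                put_device(dev);
        }

        return rc;
}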