Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611uxc.c | 57
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper_internal.h | 10
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 1
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 23
-rw-r--r--  drivers/gpu/drm/drm_file.c | 68
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 32
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 11
-rw-r--r--  drivers/gpu/drm/drm_kms_helper_common.c | 25
-rw-r--r--  drivers/gpu/drm/drm_vblank.c | 9
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 48
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_device.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 43
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 67
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 47
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 93
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 52
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 11
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx.xml.h | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 195
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 113
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 49
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 139
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 54
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 23
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 22
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 90
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 87
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c | 26
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h | 14
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c | 73
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c | 9
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_aux.c | 7
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_catalog.c | 24
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_ctrl.c | 15
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_ctrl.h | 2
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 24
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_panel.c | 3
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c | 21
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/base507c.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/base827c.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head917d.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/push.h | 216
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 3
-rw-r--r--  drivers/gpu/drm/panel/panel-elida-kd35t133.c | 3
-rw-r--r--  drivers/gpu/drm/pl111/pl111_drv.c | 4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 4
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 11
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 25
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.h | 6
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c | 10
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h | 1
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c | 26
-rw-r--r--  drivers/gpu/drm/tilcdc/Makefile | 2
-rw-r--r--  drivers/gpu/drm/tiny/gm12u320.c | 44
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 17
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c | 16
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c | 17
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 1
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 10
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hvs.c | 8
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 29
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_disp.c | 15
99 files changed, 1599 insertions(+), 710 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 19fc4ce62a94..85b79a7fee63 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -15,6 +15,9 @@ menuconfig DRM
select I2C_ALGOBIT
select DMA_SHARED_BUFFER
select SYNC_FILE
+# gallium uses SYS_kcmp for os_same_file_description() to de-duplicate
+# device and dmabuf fd. Let's make sure that is available for our userspace.
+ select KCMP
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
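
[Editor's note on the KCMP select above: userspace Mesa calls SYS_kcmp, and os_same_file_description() boils down to a kcmp(2) comparison of two file descriptors. A minimal userspace sketch, assuming a kernel built with CONFIG_KCMP; the helper name is illustrative, not Mesa's actual code:]

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/kcmp.h>

    /* Returns nonzero when fd1 and fd2 share one open file description. */
    static int same_file_description(int fd1, int fd2)
    {
            pid_t pid = getpid();

            /* kcmp() returns 0 for "equal", 1/2/3 for orderings, -1 on error */
            return syscall(SYS_kcmp, pid, pid, KCMP_FILE, fd1, fd2) == 0;
    }

    int main(void)
    {
            int dup_fd = dup(STDIN_FILENO);

            printf("stdin vs dup(stdin): %d\n",
                   same_file_description(STDIN_FILENO, dup_fd));
            return 0;
    }
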
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index efdf639f6593..cfb1a9a04477 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -291,7 +291,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
continue;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
- if (!ACPI_FAILURE(status)) {
+ if (ACPI_SUCCESS(status)) {
found = true;
break;
}
@@ -304,7 +304,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
continue;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
- if (!ACPI_FAILURE(status)) {
+ if (ACPI_SUCCESS(status)) {
found = true;
break;
}
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 0c98d27f84ac..fee27952ec6d 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -14,6 +14,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/wait.h>
+#include <linux/workqueue.h>
#include <sound/hdmi-codec.h>
@@ -36,6 +37,7 @@ struct lt9611uxc {
struct mutex ocm_lock;
struct wait_queue_head wq;
+ struct work_struct work;
struct device_node *dsi0_node;
struct device_node *dsi1_node;
@@ -52,6 +54,8 @@ struct lt9611uxc {
bool hpd_supported;
bool edid_read;
+ /* can be accessed from different threads, so protect this with ocm_lock */
+ bool hdmi_connected;
uint8_t fw_version;
};
@@ -143,21 +147,41 @@ static irqreturn_t lt9611uxc_irq_thread_handler(int irq, void *dev_id)
if (irq_status)
regmap_write(lt9611uxc->regmap, 0xb022, 0);
- lt9611uxc_unlock(lt9611uxc);
-
- if (irq_status & BIT(0))
+ if (irq_status & BIT(0)) {
lt9611uxc->edid_read = !!(hpd_status & BIT(0));
+ wake_up_all(&lt9611uxc->wq);
+ }
if (irq_status & BIT(1)) {
- if (lt9611uxc->connector.dev)
- drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
- else
- drm_bridge_hpd_notify(&lt9611uxc->bridge, !!(hpd_status & BIT(1)));
+ lt9611uxc->hdmi_connected = hpd_status & BIT(1);
+ schedule_work(&lt9611uxc->work);
}
+ lt9611uxc_unlock(lt9611uxc);
+
return IRQ_HANDLED;
}
+static void lt9611uxc_hpd_work(struct work_struct *work)
+{
+ struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work);
+ bool connected;
+
+ if (lt9611uxc->connector.dev)
+ drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
+ else {
+
+ mutex_lock(&lt9611uxc->ocm_lock);
+ connected = lt9611uxc->hdmi_connected;
+ mutex_unlock(&lt9611uxc->ocm_lock);
+
+ drm_bridge_hpd_notify(&lt9611uxc->bridge,
+ connected ?
+ connector_status_connected :
+ connector_status_disconnected);
+ }
+}
+
static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc)
{
gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1);
@@ -445,18 +469,21 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
unsigned int reg_val = 0;
int ret;
- int connected = 1;
+ bool connected = true;
+
+ lt9611uxc_lock(lt9611uxc);
if (lt9611uxc->hpd_supported) {
- lt9611uxc_lock(lt9611uxc);
ret = regmap_read(lt9611uxc->regmap, 0xb023, &reg_val);
- lt9611uxc_unlock(lt9611uxc);
if (ret)
dev_err(lt9611uxc->dev, "failed to read hpd status: %d\n", ret);
else
connected = reg_val & BIT(1);
}
+ lt9611uxc->hdmi_connected = connected;
+
+ lt9611uxc_unlock(lt9611uxc);
return connected ? connector_status_connected :
connector_status_disconnected;
@@ -465,7 +492,7 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid
static int lt9611uxc_wait_for_edid(struct lt9611uxc *lt9611uxc)
{
return wait_event_interruptible_timeout(lt9611uxc->wq, lt9611uxc->edid_read,
- msecs_to_jiffies(100));
+ msecs_to_jiffies(500));
}
static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
@@ -503,7 +530,10 @@ static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge,
ret = lt9611uxc_wait_for_edid(lt9611uxc);
if (ret < 0) {
dev_err(lt9611uxc->dev, "wait for EDID failed: %d\n", ret);
- return ERR_PTR(ret);
+ return NULL;
+ } else if (ret == 0) {
+ dev_err(lt9611uxc->dev, "wait for EDID timeout\n");
+ return NULL;
}
return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc);
@@ -926,6 +956,8 @@ retry:
lt9611uxc->fw_version = ret;
init_waitqueue_head(&lt9611uxc->wq);
+ INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work);
+
ret = devm_request_threaded_irq(dev, client->irq, NULL,
lt9611uxc_irq_thread_handler,
IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
@@ -962,6 +994,7 @@ static int lt9611uxc_remove(struct i2c_client *client)
struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
disable_irq(client->irq);
+ flush_scheduled_work();
lt9611uxc_audio_exit(lt9611uxc);
drm_bridge_remove(&lt9611uxc->bridge);
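
[Editor's note on the EDID change above: wait_event_interruptible_timeout() has a three-way return — negative when interrupted by a signal, zero on timeout with the condition still false, positive (remaining jiffies) on success — so both non-positive cases must be treated as "no EDID". A minimal kernel-style sketch of that convention; the wrapper function is hypothetical:]

    static struct edid *example_get_edid(struct lt9611uxc *ctx,
                                         struct drm_connector *connector)
    {
            long ret = wait_event_interruptible_timeout(ctx->wq, ctx->edid_read,
                                                        msecs_to_jiffies(500));

            if (ret <= 0)   /* < 0: signal, == 0: timeout -- either way, no EDID */
                    return NULL;

            return drm_do_get_edid(connector, lt9611uxc_get_edid_block, ctx);
    }
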
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index 25ce42e79995..61e09f8a8d0f 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -32,16 +32,6 @@
#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
-/* drm_fb_helper.c */
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-int drm_fb_helper_modinit(void);
-#else
-static inline int drm_fb_helper_modinit(void)
-{
- return 0;
-}
-#endif
-
/* drm_dp_aux_dev.c */
#ifdef CONFIG_DRM_DP_AUX_CHARDEV
int drm_dp_aux_dev_init(void);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index da10a43fa0ac..309afe61afdd 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -4225,6 +4225,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
switch (port->pdt) {
case DP_PEER_DEVICE_NONE:
+ break;
case DP_PEER_DEVICE_MST_BRANCHING:
if (!port->mcs)
ret = connector_status_connected;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a44c3a438059..f6baa2046124 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -2048,7 +2048,7 @@ static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
if (shadow)
vfree(shadow);
- else
+ else if (fb_helper->buffer)
drm_client_buffer_vunmap(fb_helper->buffer);
drm_client_framebuffer_delete(fb_helper->buffer);
@@ -2514,24 +2514,3 @@ void drm_fbdev_generic_setup(struct drm_device *dev,
drm_client_register(&fb_helper->client);
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);
-
-/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
- * but the module doesn't depend on any fb console symbols. At least
- * attempt to load fbcon to avoid leaving the system without a usable console.
- */
-int __init drm_fb_helper_modinit(void)
-{
-#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
- const char name[] = "fbcon";
- struct module *fbcon;
-
- mutex_lock(&module_mutex);
- fbcon = find_module(name);
- mutex_unlock(&module_mutex);
-
- if (!fbcon)
- request_module_nowait(name);
-#endif
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_modinit);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 6b116bfd747c..7efbccffc2ea 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -775,20 +775,19 @@ void drm_event_cancel_free(struct drm_device *dev,
EXPORT_SYMBOL(drm_event_cancel_free);
/**
- * drm_send_event_locked - send DRM event to file descriptor
+ * drm_send_event_helper - send DRM event to file descriptor
* @dev: DRM device
* @e: DRM event to deliver
+ * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
+ * time domain
*
- * This function sends the event @e, initialized with drm_event_reserve_init(),
- * to its associated userspace DRM file. Callers must already hold
- * &drm_device.event_lock, see drm_send_event() for the unlocked version.
- *
- * Note that the core will take care of unlinking and disarming events when the
- * corresponding DRM file is closed. Drivers need not worry about whether the
- * DRM file for this event still exists and can call this function upon
- * completion of the asynchronous work unconditionally.
+ * This helper function sends the event @e, initialized with
+ * drm_event_reserve_init(), to its associated userspace DRM file.
+ * The timestamp variant of dma_fence_signal is used when the caller
+ * sends a valid timestamp.
*/
-void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
+void drm_send_event_helper(struct drm_device *dev,
+ struct drm_pending_event *e, ktime_t timestamp)
{
assert_spin_locked(&dev->event_lock);
@@ -799,7 +798,10 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
}
if (e->fence) {
- dma_fence_signal(e->fence);
+ if (timestamp)
+ dma_fence_signal_timestamp(e->fence, timestamp);
+ else
+ dma_fence_signal(e->fence);
dma_fence_put(e->fence);
}
@@ -814,6 +816,48 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
wake_up_interruptible_poll(&e->file_priv->event_wait,
EPOLLIN | EPOLLRDNORM);
}
+
+/**
+ * drm_send_event_timestamp_locked - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
+ * time domain
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. Callers must already hold
+ * &drm_device.event_lock.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about whether the
+ * DRM file for this event still exists and can call this function upon
+ * completion of the asynchronous work unconditionally.
+ */
+void drm_send_event_timestamp_locked(struct drm_device *dev,
+ struct drm_pending_event *e, ktime_t timestamp)
+{
+ drm_send_event_helper(dev, e, timestamp);
+}
+EXPORT_SYMBOL(drm_send_event_timestamp_locked);
+
+/**
+ * drm_send_event_locked - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. Callers must already hold
+ * &drm_device.event_lock, see drm_send_event() for the unlocked version.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about whether the
+ * DRM file for this event still exists and can call this function upon
+ * completion of the asynchronous work unconditionally.
+ */
+void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
+{
+ drm_send_event_helper(dev, e, 0);
+}
EXPORT_SYMBOL(drm_send_event_locked);
/**
@@ -836,7 +880,7 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
unsigned long irqflags;
spin_lock_irqsave(&dev->event_lock, irqflags);
- drm_send_event_locked(dev, e);
+ drm_send_event_helper(dev, e, 0);
spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_send_event);
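
[Editor's note: to see how a driver would use the new export, here is a minimal sketch of a flip-done path handing the same CLOCK_MONOTONIC instant to both the userspace event and its fence via drm_send_event_timestamp_locked(); the handler name is hypothetical:]

    static void example_flip_done(struct drm_device *dev,
                                  struct drm_pending_vblank_event *e)
    {
            ktime_t now = ktime_get();      /* one instant for event + fence */
            unsigned long flags;

            spin_lock_irqsave(&dev->event_lock, flags);
            drm_send_event_timestamp_locked(dev, &e->base, now);
            spin_unlock_irqrestore(&dev->event_lock, flags);
    }
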
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 9825c378dfa6..6d625cee7a6a 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -357,13 +357,14 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
if (--shmem->vmap_use_count > 0)
return;
- if (obj->import_attach)
+ if (obj->import_attach) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
- else
+ } else {
vunmap(shmem->vaddr);
+ drm_gem_shmem_put_pages(shmem);
+ }
shmem->vaddr = NULL;
- drm_gem_shmem_put_pages(shmem);
}
/*
@@ -525,14 +526,28 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
loff_t num_pages = obj->size >> PAGE_SHIFT;
+ vm_fault_t ret;
struct page *page;
+ pgoff_t page_offset;
- if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
- return VM_FAULT_SIGBUS;
+ /* We don't use vmf->pgoff since that has the fake offset */
+ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- page = shmem->pages[vmf->pgoff];
+ mutex_lock(&shmem->pages_lock);
- return vmf_insert_page(vma, vmf->address, page);
+ if (page_offset >= num_pages ||
+ WARN_ON_ONCE(!shmem->pages) ||
+ shmem->madv < 0) {
+ ret = VM_FAULT_SIGBUS;
+ } else {
+ page = shmem->pages[page_offset];
+
+ ret = vmf_insert_page(vma, vmf->address, page);
+ }
+
+ mutex_unlock(&shmem->pages_lock);
+
+ return ret;
}
static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
@@ -581,9 +596,6 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct drm_gem_shmem_object *shmem;
int ret;
- /* Remove the fake offset */
- vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
-
if (obj->import_attach) {
/* Drop the reference drm_gem_mmap_obj() acquired.*/
drm_gem_object_put(obj);
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index f86448ab1fe0..dc734d4828a1 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -99,6 +99,8 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
return -EFAULT;
+ memset(&v, 0, sizeof(v));
+
v = (struct drm_version) {
.name_len = v32.name_len,
.name = compat_ptr(v32.name),
@@ -137,6 +139,9 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
return -EFAULT;
+
+ memset(&uq, 0, sizeof(uq));
+
uq = (struct drm_unique){
.unique_len = uq32.unique_len,
.unique = compat_ptr(uq32.unique),
@@ -265,6 +270,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
if (copy_from_user(&c32, argp, sizeof(c32)))
return -EFAULT;
+ memset(&client, 0, sizeof(client));
+
client.idx = c32.idx;
err = drm_ioctl_kernel(file, drm_getclient, &client, 0);
@@ -852,6 +859,8 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
+ memset(&req, 0, sizeof(req));
+
req.request.type = req32.request.type;
req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal;
@@ -889,6 +898,8 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
struct drm_mode_fb_cmd2 req64;
int err;
+ memset(&req64, 0, sizeof(req64));
+
if (copy_from_user(&req64, argp,
offsetof(drm_mode_fb_cmd232_t, modifier)))
return -EFAULT;
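
[Editor's note: the memset() additions above all close the same hole — the native struct is built on the kernel stack and later copied back to userspace, so any padding or fields the handler never writes would leak stale stack bytes. A kernel-style sketch of the pattern with hypothetical structs (requires <linux/compat.h>):]

    struct example_v32 { compat_uptr_t data; u32 len; };
    struct example_v64 { void __user *data; u32 len; /* + implicit padding */ };

    static int compat_example_ioctl(struct example_v32 __user *uarg)
    {
            struct example_v32 v32;
            struct example_v64 v;

            if (copy_from_user(&v32, uarg, sizeof(v32)))
                    return -EFAULT;

            memset(&v, 0, sizeof(v));       /* zero holes before any copy-out */
            v.data = compat_ptr(v32.data);
            v.len = v32.len;

            /* ... call the native handler with &v, then copy results back ... */
            return 0;
    }
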
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 221a8528c993..f933da1656eb 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -64,19 +64,18 @@ MODULE_PARM_DESC(edid_firmware,
static int __init drm_kms_helper_init(void)
{
- int ret;
-
- /* Call init functions from specific kms helpers here */
- ret = drm_fb_helper_modinit();
- if (ret < 0)
- goto out;
-
- ret = drm_dp_aux_dev_init();
- if (ret < 0)
- goto out;
-
-out:
- return ret;
+ /*
+ * The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
+ * but the module doesn't depend on any fb console symbols. At least
+ * attempt to load fbcon to avoid leaving the system without a usable
+ * console.
+ */
+ if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) &&
+ IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE) &&
+ !IS_ENABLED(CONFIG_EXPERT))
+ request_module_nowait("fbcon");
+
+ return drm_dp_aux_dev_init();
}
static void __exit drm_kms_helper_exit(void)
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 30912d8f82a5..893165eeddf3 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -1006,7 +1006,14 @@ static void send_vblank_event(struct drm_device *dev,
break;
}
trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, seq);
- drm_send_event_locked(dev, &e->base);
+ /*
+ * Use the same timestamp for any associated fence signal to avoid
+ * mismatch in timestamps for vsync & fence events triggered by the
+ * same HW event. Frameworks like SurfaceFlinger in Android expect the
+ * retire-fence timestamp to match the HW vsync exactly, as they use it
+ * for their software vsync modeling.
+ */
+ drm_send_event_timestamp_locked(dev, &e->base, now);
}
/**
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 951d5f708e92..6a251e3aa779 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -89,7 +89,6 @@ comment "Sub-drivers"
config DRM_EXYNOS_G2D
bool "G2D"
depends on VIDEO_SAMSUNG_S5P_G2D=n || COMPILE_TEST
- select FRAME_VECTOR
help
Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 967a5cdc120e..1e0c5a7f206e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -205,7 +205,8 @@ struct g2d_cmdlist_userptr {
dma_addr_t dma_addr;
unsigned long userptr;
unsigned long size;
- struct frame_vector *vec;
+ struct page **pages;
+ unsigned int npages;
struct sg_table *sgt;
atomic_t refcount;
bool in_pool;
@@ -378,7 +379,6 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
bool force)
{
struct g2d_cmdlist_userptr *g2d_userptr = obj;
- struct page **pages;
if (!obj)
return;
@@ -398,15 +398,9 @@ out:
dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt,
DMA_BIDIRECTIONAL, 0);
- pages = frame_vector_pages(g2d_userptr->vec);
- if (!IS_ERR(pages)) {
- int i;
-
- for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++)
- set_page_dirty_lock(pages[i]);
- }
- put_vaddr_frames(g2d_userptr->vec);
- frame_vector_destroy(g2d_userptr->vec);
+ unpin_user_pages_dirty_lock(g2d_userptr->pages, g2d_userptr->npages,
+ true);
+ kvfree(g2d_userptr->pages);
if (!g2d_userptr->out_of_list)
list_del_init(&g2d_userptr->list);
@@ -474,35 +468,35 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
offset = userptr & ~PAGE_MASK;
end = PAGE_ALIGN(userptr + size);
npages = (end - start) >> PAGE_SHIFT;
- g2d_userptr->vec = frame_vector_create(npages);
- if (!g2d_userptr->vec) {
+ g2d_userptr->pages = kvmalloc_array(npages, sizeof(*g2d_userptr->pages),
+ GFP_KERNEL);
+ if (!g2d_userptr->pages) {
ret = -ENOMEM;
goto err_free;
}
- ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
- g2d_userptr->vec);
+ ret = pin_user_pages_fast(start, npages,
+ FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
+ g2d_userptr->pages);
if (ret != npages) {
DRM_DEV_ERROR(g2d->dev,
"failed to get user pages from userptr.\n");
if (ret < 0)
- goto err_destroy_framevec;
- ret = -EFAULT;
- goto err_put_framevec;
- }
- if (frame_vector_to_pages(g2d_userptr->vec) < 0) {
+ goto err_destroy_pages;
+ npages = ret;
ret = -EFAULT;
- goto err_put_framevec;
+ goto err_unpin_pages;
}
+ g2d_userptr->npages = npages;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
ret = -ENOMEM;
- goto err_put_framevec;
+ goto err_unpin_pages;
}
ret = sg_alloc_table_from_pages(sgt,
- frame_vector_pages(g2d_userptr->vec),
+ g2d_userptr->pages,
npages, offset, size, GFP_KERNEL);
if (ret < 0) {
DRM_DEV_ERROR(g2d->dev, "failed to get sgt from pages.\n");
@@ -538,11 +532,11 @@ err_sg_free_table:
err_free_sgt:
kfree(sgt);
-err_put_framevec:
- put_vaddr_frames(g2d_userptr->vec);
+err_unpin_pages:
+ unpin_user_pages(g2d_userptr->pages, npages);
-err_destroy_framevec:
- frame_vector_destroy(g2d_userptr->vec);
+err_destroy_pages:
+ kvfree(g2d_userptr->pages);
err_free:
kfree(g2d_userptr);
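
[Editor's note: the frame_vector conversion above follows the standard pin_user_pages lifecycle. A minimal kernel-style sketch, with a hypothetical helper name:]

    /* Pin a user range for long-term DMA; the caller later releases it with
     * unpin_user_pages_dirty_lock(pages, npages, true) + kvfree(pages). */
    static struct page **example_pin_userptr(unsigned long start, int npages)
    {
            struct page **pages;
            int pinned;

            pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return ERR_PTR(-ENOMEM);

            pinned = pin_user_pages_fast(start, npages,
                                         FOLL_WRITE | FOLL_LONGTERM, pages);
            if (pinned != npages) {
                    if (pinned > 0)
                            unpin_user_pages(pages, pinned); /* partial pin */
                    kvfree(pages);
                    return ERR_PTR(pinned < 0 ? pinned : -EFAULT);
            }

            return pages;
    }
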
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 08cd5f73c868..aff0534831ef 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -10,9 +10,6 @@
#include <linux/dmi.h>
#include <linux/module.h>
-#include <asm/intel-mid.h>
-#include <asm/intel_scu_ipc.h>
-
#include <drm/drm.h>
#include "intel_bios.h"
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 020a71b91577..694495070c65 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -386,6 +386,8 @@ struct psb_ops;
#define PSB_NUM_PIPE 3
+struct intel_scu_ipc_dev;
+
struct drm_psb_private {
struct drm_device *dev;
struct pci_dev *aux_pdev; /* Currently only used by mrst */
@@ -525,6 +527,7 @@ struct drm_psb_private {
* Used for modifying backlight from
* xrandr -- consider removing and using HAL instead
*/
+ struct intel_scu_ipc_dev *scu;
struct backlight_device *backlight_device;
struct drm_property *backlight_property;
bool backlight_enabled;
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 32bd1fdffffe..2385a7505f5d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -21,7 +21,6 @@ subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
-subdir-ccflags-y += $(call cc-disable-warning, uninitialized)
subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
@@ -298,7 +297,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
no-header-test := \
display/intel_vbt_defs.h
-extra-$(CONFIG_DRM_I915_WERROR) += \
+always-$(CONFIG_DRM_I915_WERROR) += \
$(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
$(shell cd $(srctree)/$(src) && find * -name '*.h')))
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 57b0a3ebe908..8e77ca7ddf11 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -109,7 +109,6 @@ void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
crtc_state->cpu_transcoder = INVALID_TRANSCODER;
crtc_state->master_transcoder = INVALID_TRANSCODER;
crtc_state->hsw_workaround_pipe = INVALID_PIPE;
- crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
crtc_state->scaler_state.scaler_id = -1;
crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index fbd857f2bdb2..1bb40ec5fe5d 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -2827,6 +2827,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
int n_entries, ln;
u32 val;
+ if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+ return;
+
ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
@@ -2962,6 +2965,9 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
u32 val, dpcnt_mask, dpcnt_val;
int n_entries, ln;
+ if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+ return;
+
ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
@@ -3615,6 +3621,23 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
}
+static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_phy_is_combo(i915, phy)) {
+ bool lane_reversal =
+ dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+ intel_combo_phy_power_up_lanes(i915, phy, false,
+ crtc_state->lane_count,
+ lane_reversal);
+ }
+}
+
static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -3706,14 +3729,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
* 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
* the used lanes of the DDI.
*/
- if (intel_phy_is_combo(dev_priv, phy)) {
- bool lane_reversal =
- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
- intel_combo_phy_power_up_lanes(dev_priv, phy, false,
- crtc_state->lane_count,
- lane_reversal);
- }
+ intel_ddi_power_up_lanes(encoder, crtc_state);
/*
* 7.g Configure and enable DDI_BUF_CTL
@@ -3804,14 +3820,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
else
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
- if (intel_phy_is_combo(dev_priv, phy)) {
- bool lane_reversal =
- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
- intel_combo_phy_power_up_lanes(dev_priv, phy, false,
- crtc_state->lane_count,
- lane_reversal);
- }
+ intel_ddi_power_up_lanes(encoder, crtc_state);
intel_ddi_init_dp_buf_reg(encoder, crtc_state);
if (!is_mst)
@@ -4308,6 +4317,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, reg, val);
}
+ intel_ddi_power_up_lanes(encoder, crtc_state);
+
/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
* enabling the port.
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 7ea1e5b487b6..8d7aaa68c6f6 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -10211,7 +10211,6 @@ static void snprintf_output_types(char *buf, size_t len,
}
static const char * const output_format_str[] = {
- [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
@@ -10220,7 +10219,7 @@ static const char * const output_format_str[] = {
static const char *output_formats(enum intel_output_format format)
{
if (format >= ARRAY_SIZE(output_format_str))
- format = INTEL_OUTPUT_FORMAT_INVALID;
+ return "invalid";
return output_format_str[format];
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 39397748b4b0..184ecbbcec99 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -830,7 +830,6 @@ struct intel_crtc_wm_state {
};
enum intel_output_format {
- INTEL_OUTPUT_FORMAT_INVALID,
INTEL_OUTPUT_FORMAT_RGB,
INTEL_OUTPUT_FORMAT_YCBCR420,
INTEL_OUTPUT_FORMAT_YCBCR444,
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 8edb9b2cdbeb..f455040fa989 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -183,6 +183,7 @@ struct intel_overlay {
struct intel_crtc *crtc;
struct i915_vma *vma;
struct i915_vma *old_vma;
+ struct intel_frontbuffer *frontbuffer;
bool active;
bool pfit_active;
u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
@@ -283,21 +284,19 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
struct i915_vma *vma)
{
enum pipe pipe = overlay->crtc->pipe;
- struct intel_frontbuffer *from = NULL, *to = NULL;
+ struct intel_frontbuffer *frontbuffer = NULL;
drm_WARN_ON(&overlay->i915->drm, overlay->old_vma);
- if (overlay->vma)
- from = intel_frontbuffer_get(overlay->vma->obj);
if (vma)
- to = intel_frontbuffer_get(vma->obj);
+ frontbuffer = intel_frontbuffer_get(vma->obj);
- intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe));
+ intel_frontbuffer_track(overlay->frontbuffer, frontbuffer,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
- if (to)
- intel_frontbuffer_put(to);
- if (from)
- intel_frontbuffer_put(from);
+ if (overlay->frontbuffer)
+ intel_frontbuffer_put(overlay->frontbuffer);
+ overlay->frontbuffer = frontbuffer;
intel_frontbuffer_flip_prepare(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(pipe));
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 27dc2dad6809..2cefc13535a0 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -23,36 +23,6 @@ static const char *tc_port_mode_name(enum tc_port_mode mode)
return names[mode];
}
-static void
-tc_port_load_fia_params(struct drm_i915_private *i915,
- struct intel_digital_port *dig_port)
-{
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(i915, port);
- u32 modular_fia;
-
- if (INTEL_INFO(i915)->display.has_modular_fia) {
- modular_fia = intel_uncore_read(&i915->uncore,
- PORT_TX_DFLEXDPSP(FIA1));
- drm_WARN_ON(&i915->drm, modular_fia == 0xffffffff);
- modular_fia &= MODULAR_FIA_MASK;
- } else {
- modular_fia = 0;
- }
-
- /*
- * Each Modular FIA instance houses 2 TC ports. In SOC that has more
- * than two TC ports, there are multiple instances of Modular FIA.
- */
- if (modular_fia) {
- dig_port->tc_phy_fia = tc_port / 2;
- dig_port->tc_phy_fia_idx = tc_port % 2;
- } else {
- dig_port->tc_phy_fia = FIA1;
- dig_port->tc_phy_fia_idx = tc_port;
- }
-}
-
static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port)
{
@@ -646,6 +616,43 @@ void intel_tc_port_put_link(struct intel_digital_port *dig_port)
mutex_unlock(&dig_port->tc_lock);
}
+static bool
+tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
+{
+ intel_wakeref_t wakeref;
+ u32 val;
+
+ if (!INTEL_INFO(i915)->display.has_modular_fia)
+ return false;
+
+ wakeref = tc_cold_block(dig_port);
+ val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
+ tc_cold_unblock(dig_port, wakeref);
+
+ drm_WARN_ON(&i915->drm, val == 0xffffffff);
+
+ return val & MODULAR_FIA_MASK;
+}
+
+static void
+tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
+{
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(i915, port);
+
+ /*
+ * Each Modular FIA instance houses 2 TC ports. In SoCs that have more
+ * than two TC ports, there are multiple instances of Modular FIA.
+ */
+ if (tc_has_modular_fia(i915, dig_port)) {
+ dig_port->tc_phy_fia = tc_port / 2;
+ dig_port->tc_phy_fia_idx = tc_port % 2;
+ } else {
+ dig_port->tc_phy_fia = FIA1;
+ dig_port->tc_phy_fia_idx = tc_port;
+ }
+}
+
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index eece0844fbe9..700588bc9d57 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -535,16 +535,39 @@ static int init_ggtt(struct i915_ggtt *ggtt)
mutex_init(&ggtt->error_mutex);
if (ggtt->mappable_end) {
- /* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
- &ggtt->error_capture,
- PAGE_SIZE, 0,
- I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- if (ret)
- return ret;
+ /*
+ * Reserve a mappable slot for our lockless error capture.
+ *
+ * We strongly prefer taking address 0x0 in order to protect
+ * other critical buffers against accidental overwrites,
+ * as writing to address 0 is a very common mistake.
+ *
+ * Since 0 may already be in use by the system (e.g. the BIOS
+ * framebuffer), we let the reservation fail quietly and hope
+ * 0 remains reserved always.
+ *
+ * If we fail to reserve 0, and then fail to find any space
+ * for an error-capture, remain silent. We can afford not
+ * to reserve an error_capture node as we have fallback
+ * paths, and we trust that 0 will remain reserved. However,
+ * the only likely reason for failure to insert is a driver
+ * bug, which we expect to cause other failures...
+ */
+ ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
+ ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
+ if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
+ drm_mm_insert_node_in_range(&ggtt->vm.mm,
+ &ggtt->error_capture,
+ ggtt->error_capture.size, 0,
+ ggtt->error_capture.color,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
}
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_dbg(&ggtt->vm.i915->drm,
+ "Reserved GGTT:[%llx, %llx] for use by error capture\n",
+ ggtt->error_capture.start,
+ ggtt->error_capture.start + ggtt->error_capture.size);
/*
* The upper portion of the GuC address space has a sizeable hole
@@ -557,9 +580,9 @@ static int init_ggtt(struct i915_ggtt *ggtt)
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
- drm_dbg_kms(&ggtt->vm.i915->drm,
- "clearing unused GTT space: [%lx, %lx]\n",
- hole_start, hole_end);
+ drm_dbg(&ggtt->vm.i915->drm,
+ "clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
ggtt->vm.clear_range(&ggtt->vm, hole_start,
hole_end - hole_start);
}
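
[Editor's note: the hunk above relies on a detail worth spelling out — a zeroed drm_mm_node has .start == 0, so drm_mm_reserve_node() attempts to claim exactly [0, size); only if that exact range is occupied does the code fall back to searching the mappable window. A condensed sketch of that reserve-then-insert pattern:]

    static void example_reserve_low(struct drm_mm *mm, struct drm_mm_node *node,
                                    u64 mappable_end)
    {
            node->size = SZ_4K;     /* node->start stays 0 from kzalloc */
            if (drm_mm_reserve_node(mm, node))
                    /* exact range [0, 4K) taken: search low addresses instead */
                    drm_mm_insert_node_in_range(mm, node, node->size, 0, 0,
                                                0, mappable_end,
                                                DRM_MM_INSERT_LOW);
    }
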
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index d54ea0e4681d..fef1e857cefc 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -41,6 +41,7 @@
#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"
#include "gt/intel_gt_requests.h"
+#include "gt/shmem_utils.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"
@@ -3094,71 +3095,28 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
*/
void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
{
+ const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->gt->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
- struct i915_request *rq;
- struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_request *requests[I915_NUM_ENGINES] = {};
- bool is_ctx_pinned[I915_NUM_ENGINES] = {};
- int ret = 0;
if (gvt->is_reg_whitelist_updated)
return;
- for_each_engine(engine, &dev_priv->gt, id) {
- ret = intel_context_pin(s->shadow[id]);
- if (ret) {
- gvt_vgpu_err("fail to pin shadow ctx\n");
- goto out;
- }
- is_ctx_pinned[id] = true;
-
- rq = i915_request_create(s->shadow[id]);
- if (IS_ERR(rq)) {
- gvt_vgpu_err("fail to alloc default request\n");
- ret = -EIO;
- goto out;
- }
- requests[id] = i915_request_get(rq);
- i915_request_add(rq);
- }
-
- if (intel_gt_wait_for_idle(&dev_priv->gt,
- I915_GEM_IDLE_TIMEOUT) == -ETIME) {
- ret = -EIO;
- goto out;
- }
-
/* scan init ctx to update cmd accessible list */
- for_each_engine(engine, &dev_priv->gt, id) {
- int size = engine->context_size - PAGE_SIZE;
- void *vaddr;
+ for_each_engine(engine, gvt->gt, id) {
struct parser_exec_state s;
- struct drm_i915_gem_object *obj;
- struct i915_request *rq;
-
- rq = requests[id];
- GEM_BUG_ON(!i915_request_completed(rq));
- GEM_BUG_ON(!intel_context_is_pinned(rq->context));
- obj = rq->context->state->obj;
-
- if (!obj) {
- ret = -EIO;
- goto out;
- }
+ void *vaddr;
+ int ret;
- i915_gem_object_set_cache_coherency(obj,
- I915_CACHE_LLC);
+ if (!engine->default_state)
+ continue;
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ vaddr = shmem_pin_map(engine->default_state);
if (IS_ERR(vaddr)) {
- gvt_err("failed to pin init ctx obj, ring=%d, err=%lx\n",
- id, PTR_ERR(vaddr));
- ret = PTR_ERR(vaddr);
- goto out;
+ gvt_err("failed to map %s->default state, err:%zd\n",
+ engine->name, PTR_ERR(vaddr));
+ return;
}
s.buf_type = RING_BUFFER_CTX;
@@ -3166,9 +3124,9 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
s.vgpu = vgpu;
s.engine = engine;
s.ring_start = 0;
- s.ring_size = size;
+ s.ring_size = engine->context_size - start;
s.ring_head = 0;
- s.ring_tail = size;
+ s.ring_tail = s.ring_size;
s.rb_va = vaddr + start;
s.workload = NULL;
s.is_ctx_wa = false;
@@ -3176,29 +3134,18 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
/* skipping the first RING_CTX_SIZE(0x50) dwords */
ret = ip_gma_set(&s, RING_CTX_SIZE);
- if (ret) {
- i915_gem_object_unpin_map(obj);
- goto out;
+ if (ret == 0) {
+ ret = command_scan(&s, 0, s.ring_size, 0, s.ring_size);
+ if (ret)
+ gvt_err("Scan init ctx error\n");
}
- ret = command_scan(&s, 0, size, 0, size);
+ shmem_unpin_map(engine->default_state, vaddr);
if (ret)
- gvt_err("Scan init ctx error\n");
-
- i915_gem_object_unpin_map(obj);
+ return;
}
-out:
- if (!ret)
- gvt->is_reg_whitelist_updated = true;
-
- for (id = 0; id < I915_NUM_ENGINES ; id++) {
- if (requests[id])
- i915_request_put(requests[id]);
-
- if (is_ctx_pinned[id])
- intel_context_unpin(s->shadow[id]);
- }
+ gvt->is_reg_whitelist_updated = true;
}
int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload)
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 158873f269b1..c8dcda6d4f0d 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -522,12 +522,11 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu,
static void clean_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_engine_cs *engine;
struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
- for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
+ for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
kfree(s->ring_scan_buffer[engine->id]);
s->ring_scan_buffer[engine->id] = NULL;
s->ring_scan_buffer_size[engine->id] = 0;
@@ -537,11 +536,10 @@ static void clean_execlist(struct intel_vgpu *vgpu,
static void reset_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
- for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
+ for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine);
}
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 60f1a386dd06..b4348256ae95 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1703,7 +1703,7 @@ static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
return -EINVAL;
}
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
if (kvmgt_gfn_is_write_protected(info, gfn))
goto out;
@@ -1712,7 +1712,7 @@ static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
kvmgt_protect_table_add(info, gfn);
out:
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
return 0;
}
@@ -1737,7 +1737,7 @@ static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
return -EINVAL;
}
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
if (!kvmgt_gfn_is_write_protected(info, gfn))
goto out;
@@ -1746,7 +1746,7 @@ static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
kvmgt_protect_table_del(info, gfn);
out:
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
return 0;
}
@@ -1772,7 +1772,7 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
struct kvmgt_guest_info *info = container_of(node,
struct kvmgt_guest_info, track_node);
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
for (i = 0; i < slot->npages; i++) {
gfn = slot->base_gfn + i;
if (kvmgt_gfn_is_write_protected(info, gfn)) {
@@ -1781,7 +1781,7 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
kvmgt_protect_table_del(info, gfn);
}
}
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
}
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 43f31c2eab14..fc735692f21f 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -412,7 +412,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
if (!wa_ctx->indirect_ctx.obj)
return;
+ i915_gem_object_lock(wa_ctx->indirect_ctx.obj, NULL);
i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
+ i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
i915_gem_object_put(wa_ctx->indirect_ctx.obj);
wa_ctx->indirect_ctx.obj = NULL;
@@ -520,6 +522,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
struct intel_gvt *gvt = workload->vgpu->gvt;
const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
struct intel_vgpu_shadow_bb *bb;
+ struct i915_gem_ww_ctx ww;
int ret;
list_for_each_entry(bb, &workload->shadow_bb, list) {
@@ -544,10 +547,19 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
* directly
*/
if (!bb->ppgtt) {
- bb->vma = i915_gem_object_ggtt_pin(bb->obj,
- NULL, 0, 0, 0);
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ i915_gem_object_lock(bb->obj, &ww);
+
+ bb->vma = i915_gem_object_ggtt_pin_ww(bb->obj, &ww,
+ NULL, 0, 0, 0);
if (IS_ERR(bb->vma)) {
ret = PTR_ERR(bb->vma);
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
goto err;
}
@@ -561,13 +573,15 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
0);
if (ret)
goto err;
- }
- /* No one is going to touch shadow bb from now on. */
- i915_gem_object_flush_map(bb->obj);
+ /* No one is going to touch shadow bb from now on. */
+ i915_gem_object_flush_map(bb->obj);
+ i915_gem_object_unlock(bb->obj);
+ }
}
return 0;
err:
+ i915_gem_ww_ctx_fini(&ww);
release_shadow_batch_buffer(workload);
return ret;
}
@@ -594,14 +608,29 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
unsigned char *per_ctx_va =
(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
wa_ctx->indirect_ctx.size;
+ struct i915_gem_ww_ctx ww;
+ int ret;
if (wa_ctx->indirect_ctx.size == 0)
return 0;
- vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
- 0, CACHELINE_BYTES, 0);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ i915_gem_object_lock(wa_ctx->indirect_ctx.obj, &ww);
+
+ vma = i915_gem_object_ggtt_pin_ww(wa_ctx->indirect_ctx.obj, &ww, NULL,
+ 0, CACHELINE_BYTES, 0);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ return ret;
+ }
+
+ i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
/* FIXME: we are not tracking our pinned VMA leaving it
* up to the core to fix up the stray pin_count upon
@@ -635,12 +664,14 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
if (bb->obj) {
+ i915_gem_object_lock(bb->obj, NULL);
if (bb->va && !IS_ERR(bb->va))
i915_gem_object_unpin_map(bb->obj);
if (bb->vma && !IS_ERR(bb->vma))
i915_vma_unpin(bb->vma);
+ i915_gem_object_unlock(bb->obj);
i915_gem_object_put(bb->obj);
}
list_del(&bb->list);
@@ -1015,13 +1046,12 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
intel_engine_mask_t tmp;
/* free the unsubmited workloads in the queues. */
- for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
+ for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
list_for_each_entry_safe(pos, n,
&s->workload_q_head[engine->id], list) {
list_del_init(&pos->list);
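
[Editor's note: the retry loops added above are instances of i915's ww-mutex dance — take locks under a ww context, and on -EDEADLK back off (dropping the contended lock) and restart the whole sequence. A minimal sketch; do_pin() is a hypothetical stand-in for the real pinning step:]

    static int example_lock_and_pin(struct drm_i915_gem_object *obj)
    {
            struct i915_gem_ww_ctx ww;
            int err;

            i915_gem_ww_ctx_init(&ww, false);
    retry:
            err = i915_gem_object_lock(obj, &ww);
            if (!err)
                    err = do_pin(obj);      /* hypothetical pinning step */
            if (err == -EDEADLK) {
                    err = i915_gem_ww_ctx_backoff(&ww);
                    if (!err)
                            goto retry;
            }
            i915_gem_ww_ctx_fini(&ww);      /* drops any locks still held */
            return err;
    }
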
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index ab4382841c6b..3bc616cc1ad2 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -628,24 +628,26 @@ static int flush_lazy_signals(struct i915_active *ref)
int __i915_active_wait(struct i915_active *ref, int state)
{
- int err;
-
might_sleep();
- if (!i915_active_acquire_if_busy(ref))
- return 0;
-
/* Any fence added after the wait begins will not be auto-signaled */
- err = flush_lazy_signals(ref);
- i915_active_release(ref);
- if (err)
- return err;
+ if (i915_active_acquire_if_busy(ref)) {
+ int err;
- if (!i915_active_is_idle(ref) &&
- ___wait_var_event(ref, i915_active_is_idle(ref),
- state, 0, 0, schedule()))
- return -EINTR;
+ err = flush_lazy_signals(ref);
+ i915_active_release(ref);
+ if (err)
+ return err;
+ if (___wait_var_event(ref, i915_active_is_idle(ref),
+ state, 0, 0, schedule()))
+ return -EINTR;
+ }
+
+ /*
+ * After the wait is complete, the caller may free the active.
+ * We have to flush any concurrent retirement before returning.
+ */
flush_work(&ref->work);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 56cc9191b8d0..26d69d06aa6d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1334,7 +1334,7 @@ intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
const unsigned int pi = __platform_mask_index(info, p);
- return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
+ return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
}
static __always_inline bool
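
[Editor's note: the one-liner above fixes a classic count-versus-mask slip — INTEL_SUBPLATFORM_BITS is the number of subplatform bits, not a bitmask, so ANDing with it kept the wrong bits. Schematically, with illustrative names:]

    #define SUBPLATFORM_BITS 3                              /* a count... */
    #define SUBPLATFORM_MASK ((1 << SUBPLATFORM_BITS) - 1)  /* ...vs mask 0b111 */

    /* mask & SUBPLATFORM_BITS == mask & 0b011  -- wrong, drops bit 2 */
    /* mask & SUBPLATFORM_MASK == mask & 0b111  -- intended behaviour */
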
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 70e07e9b78c2..c1adea8765a9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1881,7 +1881,7 @@ static int igt_cs_tlb(void *arg)
vma = i915_vma_instance(out, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto out_put_batch;
+ goto out_put_out;
}
err = i915_vma_pin(vma, 0, 0,
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 42c5d3246cfc..453d8b4c5763 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -482,6 +482,16 @@ static int meson_probe_remote(struct platform_device *pdev,
return count;
}
+static void meson_drv_shutdown(struct platform_device *pdev)
+{
+ struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
+ struct drm_device *drm = priv->drm;
+
+ DRM_DEBUG_DRIVER("\n");
+ drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
+}
+
static int meson_drv_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
@@ -553,6 +563,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = {
static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
+ .shutdown = meson_drv_shutdown,
.driver = {
.name = "meson-drm",
.of_match_table = dt_match,
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index 346cc6ff3a36..7b9fcfe95c04 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -2367,6 +2367,8 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80
+#define REG_A5XX_UCHE_MODE_CNTL 0x00000e81
+
#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82
#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index a5af223eaf50..7e553d3efeb2 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -222,7 +222,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
a5xx_preempt_trigger(gpu);
}
-static const struct {
+static const struct adreno_five_hwcg_regs {
u32 offset;
u32 value;
} a5xx_hwcg[] = {
@@ -318,16 +318,124 @@ static const struct {
{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
+}, a50x_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00FFFFF4},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+}, a512_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
};
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- unsigned int i;
+ const struct adreno_five_hwcg_regs *regs;
+ unsigned int i, sz;
+
+ if (adreno_is_a508(adreno_gpu)) {
+ regs = a50x_hwcg;
+ sz = ARRAY_SIZE(a50x_hwcg);
+ } else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) {
+ regs = a512_hwcg;
+ sz = ARRAY_SIZE(a512_hwcg);
+ } else {
+ regs = a5xx_hwcg;
+ sz = ARRAY_SIZE(a5xx_hwcg);
+ }
- for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
- gpu_write(gpu, a5xx_hwcg[i].offset,
- state ? a5xx_hwcg[i].value : 0);
+ for (i = 0; i < sz; i++)
+ gpu_write(gpu, regs[i].offset,
+ state ? regs[i].value : 0);
if (adreno_is_a540(adreno_gpu)) {
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0);
@@ -538,11 +646,13 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ u32 regbit;
int ret;
gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
- if (adreno_is_a540(adreno_gpu))
+ if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
+ adreno_is_a540(adreno_gpu))
gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
/* Make all blocks contribute to the GPU BUSY perf counter */
@@ -604,29 +714,48 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
0x00100000 + adreno_gpu->gmem - 1);
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
- if (adreno_is_a510(adreno_gpu)) {
+ if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu)) {
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
- gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
+ if (adreno_is_a508(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
+ else
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
- gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
- (0x200 << 11 | 0x200 << 22));
} else {
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
if (adreno_is_a530(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
- if (adreno_is_a540(adreno_gpu))
+ else
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+ }
+
+ if (adreno_is_a508(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+ (0x100 << 11 | 0x100 << 22));
+ else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) ||
+ adreno_is_a512(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+ (0x200 << 11 | 0x200 << 22));
+ else
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
(0x400 << 11 | 0x300 << 22));
- }
if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
- gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
+ /*
+ * Disable the RB sampler datapath DP2 clock gating optimization
+ * for 1-SP GPUs, as it is enabled by default.
+ */
+ if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
+ adreno_is_a512(adreno_gpu))
+ gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9));
+
+ /* Disable UCHE global filter as SP can invalidate/flush independently */
+ gpu_write(gpu, REG_A5XX_UCHE_MODE_CNTL, BIT(29));
/* Enable USE_RETENTION_FLOPS */
gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
@@ -653,10 +782,20 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
/* Set the highest bank bit */
- gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
- gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);
if (adreno_is_a540(adreno_gpu))
- gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, 2);
+ regbit = 2;
+ else
+ regbit = 1;
+
+ gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7);
+ gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1);
+
+ if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
+ adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit);
+
+ /* Disable all flat shading optimization (ALLFLATOPTDIS) */
+ gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10));
/* Protect registers from the CP */
gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
@@ -688,12 +827,14 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
/* VPC */
gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
- gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 16));
/* UCHE */
gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
- if (adreno_is_a530(adreno_gpu) || adreno_is_a510(adreno_gpu))
+ if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
+ adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
+ adreno_is_a530(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
ADRENO_PROTECT_RW(0x10000, 0x8000));
@@ -735,7 +876,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
- if (!adreno_is_a510(adreno_gpu))
+ if (!(adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
+ adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu)))
a5xx_gpmu_ucode_init(gpu);
ret = a5xx_ucode_init(gpu);
@@ -1168,7 +1310,8 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
- if (adreno_is_a510(adreno_gpu)) {
+ /* Adreno 508, 509, 510 and 512 need manual RBBM suspend/resume control */
+ if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) {
/* Halt the sp_input_clk at HM level */
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
a5xx_set_hwcg(gpu, true);
@@ -1210,8 +1353,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
u32 mask = 0xf;
int i, ret;
- /* A510 has 3 XIN ports in VBIF */
- if (adreno_is_a510(adreno_gpu))
+ /* A508, A510 have 3 XIN ports in VBIF */
+ if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu))
mask = 0x7;
/* Clear the VBIF pipe before shutting down */
@@ -1223,10 +1366,12 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
/*
* Reset the VBIF before power collapse to avoid issue with FIFO
- * entries
+ * entries on Adreno A510 and A530 (the others will tend to lock up)
*/
- gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
- gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+ if (adreno_is_a510(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+ }
ret = msm_gpu_pm_suspend(gpu);
if (ret)
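
The a5xx_gpu.c changes above replace a single hardcoded HWCG table with per-revision tables chosen at runtime. Below is a minimal, self-contained C sketch of that table-dispatch pattern; the offsets, table contents and the write stub are illustrative stand-ins for the driver's adreno_five_hwcg_regs tables and gpu_write(), not driver code.

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Mirrors struct adreno_five_hwcg_regs: one register/value pair per entry. */
    struct hwcg_reg { uint32_t offset, value; };

    /* Hypothetical per-SKU tables; the real ones live in a5xx_gpu.c. */
    static const struct hwcg_reg a50x_tbl[] = { { 0x0100, 0x02222222 } };
    static const struct hwcg_reg a5xx_tbl[] = { { 0x0100, 0x22222222 },
                                                { 0x0104, 0x00002222 } };

    static void gpu_write_stub(uint32_t offset, uint32_t value)
    {
        printf("write 0x%04x = 0x%08x\n", offset, value);
    }

    /* state == 0 writes zeros, mirroring the "disable clock gating" path. */
    static void set_hwcg(int revn, int state)
    {
        const struct hwcg_reg *regs;
        size_t i, sz;

        if (revn == 508) {
            regs = a50x_tbl;
            sz = ARRAY_SIZE(a50x_tbl);
        } else {
            regs = a5xx_tbl;
            sz = ARRAY_SIZE(a5xx_tbl);
        }

        for (i = 0; i < sz; i++)
            gpu_write_stub(regs[i].offset, state ? regs[i].value : 0);
    }

    int main(void)
    {
        set_hwcg(508, 1);
        set_hwcg(530, 0);
        return 0;
    }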
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index f176a6f3eff6..5ccc9da455a1 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -298,7 +298,7 @@ int a5xx_power_init(struct msm_gpu *gpu)
int ret;
/* Not all A5xx chips have a GPMU */
- if (adreno_is_a510(adreno_gpu))
+ if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
return 0;
/* Set up the limits management */
@@ -330,7 +330,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
unsigned int *data, *ptr, *cmds;
unsigned int cmds_size;
- if (adreno_is_a510(adreno_gpu))
+ if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
return;
if (a5xx_gpu->gpmu_bo)
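
Both a5xx_power.c hunks invert the old "is this an A510" test into "is this anything but A530/A540", i.e. "this SKU has no GPMU". A hypothetical helper could name that intent once. The sketch below is standalone C with integer revision numbers standing in for the adreno_is_*() predicates; has_gpmu() is an assumption for illustration, not an existing driver function.

    #include <stdio.h>

    /* Stand-ins for the adreno_is_a5xx() predicates in adreno_gpu.h. */
    static int is_a530(int revn) { return revn == 530; }
    static int is_a540(int revn) { return revn == 540; }

    /* Only A530 and A540 carry a GPMU in the a5xx family. */
    static int has_gpmu(int revn)
    {
        return is_a530(revn) || is_a540(revn);
    }

    int main(void)
    {
        static const int revs[] = { 508, 509, 510, 512, 530, 540 };

        for (unsigned i = 0; i < sizeof(revs) / sizeof(revs[0]); i++)
            printf("A%d: %s\n", revs[i], has_gpmu(revs[i]) ? "GPMU" : "no GPMU");
        return 0;
    }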
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index e6703ae98760..71c917f909af 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -134,7 +134,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
if (!gmu->legacy) {
a6xx_hfi_set_freq(gmu, perf_index);
- dev_pm_opp_set_bw(&gpu->pdev->dev, opp);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
pm_runtime_put(gmu->dev);
return;
}
@@ -158,7 +158,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
if (ret)
dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
- dev_pm_opp_set_bw(&gpu->pdev->dev, opp);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
pm_runtime_put(gmu->dev);
}
@@ -245,37 +245,66 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
return ret;
}
+struct a6xx_gmu_oob_bits {
+ int set, ack, set_new, ack_new;
+ const char *name;
+};
+
+/* These are the interrupt / ack bits for each OOB request that are set
+ * in a6xx_gmu_set_oob() and a6xx_gmu_clear_oob()
+ */
+static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
+ [GMU_OOB_GPU_SET] = {
+ .name = "GPU_SET",
+ .set = 16,
+ .ack = 24,
+ .set_new = 30,
+ .ack_new = 31,
+ },
+
+ [GMU_OOB_PERFCOUNTER_SET] = {
+ .name = "PERFCOUNTER",
+ .set = 17,
+ .ack = 25,
+ .set_new = 28,
+ .ack_new = 30,
+ },
+
+ [GMU_OOB_BOOT_SLUMBER] = {
+ .name = "BOOT_SLUMBER",
+ .set = 22,
+ .ack = 30,
+ },
+
+ [GMU_OOB_DCVS_SET] = {
+ .name = "GPU_DCVS",
+ .set = 23,
+ .ack = 31,
+ },
+};
+
/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
int ret;
u32 val;
int request, ack;
- const char *name;
- switch (state) {
- case GMU_OOB_GPU_SET:
- if (gmu->legacy) {
- request = GMU_OOB_GPU_SET_REQUEST;
- ack = GMU_OOB_GPU_SET_ACK;
- } else {
- request = GMU_OOB_GPU_SET_REQUEST_NEW;
- ack = GMU_OOB_GPU_SET_ACK_NEW;
- }
- name = "GPU_SET";
- break;
- case GMU_OOB_BOOT_SLUMBER:
- request = GMU_OOB_BOOT_SLUMBER_REQUEST;
- ack = GMU_OOB_BOOT_SLUMBER_ACK;
- name = "BOOT_SLUMBER";
- break;
- case GMU_OOB_DCVS_SET:
- request = GMU_OOB_DCVS_REQUEST;
- ack = GMU_OOB_DCVS_ACK;
- name = "GPU_DCVS";
- break;
- default:
+ if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
return -EINVAL;
+
+ if (gmu->legacy) {
+ request = a6xx_gmu_oob_bits[state].set;
+ ack = a6xx_gmu_oob_bits[state].ack;
+ } else {
+ request = a6xx_gmu_oob_bits[state].set_new;
+ ack = a6xx_gmu_oob_bits[state].ack_new;
+ if (!request || !ack) {
+ DRM_DEV_ERROR(gmu->dev,
+ "Invalid non-legacy GMU request %s\n",
+ a6xx_gmu_oob_bits[state].name);
+ return -EINVAL;
+ }
}
/* Trigger the requested OOB operation */
@@ -288,7 +317,7 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
if (ret)
DRM_DEV_ERROR(gmu->dev,
"Timeout waiting for GMU OOB set %s: 0x%x\n",
- name,
+ a6xx_gmu_oob_bits[state].name,
gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
/* Clear the acknowledge interrupt */
@@ -300,27 +329,17 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
- if (!gmu->legacy) {
- WARN_ON(state != GMU_OOB_GPU_SET);
- gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
- 1 << GMU_OOB_GPU_SET_CLEAR_NEW);
+ int bit;
+
+ if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
return;
- }
- switch (state) {
- case GMU_OOB_GPU_SET:
- gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
- 1 << GMU_OOB_GPU_SET_CLEAR);
- break;
- case GMU_OOB_BOOT_SLUMBER:
- gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
- 1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
- break;
- case GMU_OOB_DCVS_SET:
- gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
- 1 << GMU_OOB_DCVS_CLEAR);
- break;
- }
+ if (gmu->legacy)
+ bit = a6xx_gmu_oob_bits[state].ack;
+ else
+ bit = a6xx_gmu_oob_bits[state].ack_new;
+
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
}
/* Enable CPU control of SPTP power collapse */
@@ -866,7 +885,7 @@ static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
if (IS_ERR_OR_NULL(gpu_opp))
return;
- dev_pm_opp_set_bw(&gpu->pdev->dev, gpu_opp);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
dev_pm_opp_put(gpu_opp);
}
@@ -1072,7 +1091,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
a6xx_gmu_shutdown(gmu);
/* Remove the bus vote */
- dev_pm_opp_set_bw(&gpu->pdev->dev, NULL);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
/*
* Make sure the GX domain is off before turning off the GMU (CX)
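
The OOB rework above turns three near-identical switch statements into one table indexed by the a6xx_gmu_oob_state enum, with separate bit numbers for legacy and current GMU firmware. The standalone sketch below mimics the lookup and the set/ack handshake; the fake registers and the immediate ack are assumptions made so the example runs without hardware, and the bit values are copied from the table above.

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Mirrors struct a6xx_gmu_oob_bits; order follows the enum above. */
    struct oob_bits { int set, ack, set_new, ack_new; const char *name; };

    static const struct oob_bits oob[] = {
        { 22, 30,  0,  0, "BOOT_SLUMBER" },
        { 16, 24, 30, 31, "GPU_SET" },
        { 23, 31,  0,  0, "GPU_DCVS" },
        { 17, 25, 28, 30, "PERFCOUNTER" },
    };

    /* Fake HOST2GMU / GMU2HOST registers standing in for gmu_write()/gmu_read(). */
    static uint32_t host2gmu, gmu2host;

    static int set_oob(size_t state, int legacy)
    {
        int request, ack;

        if (state >= ARRAY_SIZE(oob))
            return -1;

        request = legacy ? oob[state].set : oob[state].set_new;
        ack = legacy ? oob[state].ack : oob[state].ack_new;
        if (!request || !ack)
            return -1;      /* request type not wired up on new firmware */

        host2gmu |= 1u << request;  /* trigger */
        gmu2host |= 1u << ack;      /* pretend the GMU acked immediately */
        return (gmu2host & (1u << ack)) ? 0 : -1;
    }

    int main(void)
    {
        printf("GPU_SET (new):      %d\n", set_oob(1, 0));
        printf("BOOT_SLUMBER (new): %d\n", set_oob(0, 0)); /* rejected: no *_new bits */
        return 0;
    }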
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index c6d2bced8e5d..71dfa60070cc 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -153,44 +153,27 @@ static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
*/
enum a6xx_gmu_oob_state {
+ /*
+ * Let the GMU know that a boot or slumber operation has started. The value in
+ * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
+ * doing
+ */
GMU_OOB_BOOT_SLUMBER = 0,
+ /*
+ * Let the GMU know to not turn off any GPU registers while the CPU is in a
+ * critical section
+ */
GMU_OOB_GPU_SET,
+ /*
+ * Set a new power level for the GPU when the CPU is doing frequency scaling
+ */
GMU_OOB_DCVS_SET,
+ /*
+ * Used to keep the GPU on for CPU-side reads of performance counters.
+ */
+ GMU_OOB_PERFCOUNTER_SET,
};
-/* These are the interrupt / ack bits for each OOB request that are set
- * in a6xx_gmu_set_oob and a6xx_clear_oob
- */
-
-/*
- * Let the GMU know that a boot or slumber operation has started. The value in
- * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
- * doing
- */
-#define GMU_OOB_BOOT_SLUMBER_REQUEST 22
-#define GMU_OOB_BOOT_SLUMBER_ACK 30
-#define GMU_OOB_BOOT_SLUMBER_CLEAR 30
-
-/*
- * Set a new power level for the GPU when the CPU is doing frequency scaling
- */
-#define GMU_OOB_DCVS_REQUEST 23
-#define GMU_OOB_DCVS_ACK 31
-#define GMU_OOB_DCVS_CLEAR 31
-
-/*
- * Let the GMU know to not turn off any GPU registers while the CPU is in a
- * critical section
- */
-#define GMU_OOB_GPU_SET_REQUEST 16
-#define GMU_OOB_GPU_SET_ACK 24
-#define GMU_OOB_GPU_SET_CLEAR 24
-
-#define GMU_OOB_GPU_SET_REQUEST_NEW 30
-#define GMU_OOB_GPU_SET_ACK_NEW 31
-#define GMU_OOB_GPU_SET_CLEAR_NEW 31
-
-
void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 130661898546..ba8e9d3cf0fe 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -10,6 +10,7 @@
#include <linux/bitfield.h>
#include <linux/devfreq.h>
+#include <linux/nvmem-consumer.h>
#include <linux/soc/qcom/llcc-qcom.h>
#define GPU_PAS_ID 13
@@ -1117,7 +1118,7 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
- if (IS_ERR(a6xx_gpu->llc_slice) && IS_ERR(a6xx_gpu->htw_llc_slice))
+ if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
}
@@ -1169,14 +1170,18 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ static DEFINE_MUTEX(perfcounter_oob);
+
+ mutex_lock(&perfcounter_oob);
/* Force the GPU power on so we can read this register */
- a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
*value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
REG_A6XX_RBBM_PERFCTR_CP_0_HI);
- a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+ mutex_unlock(&perfcounter_oob);
return 0;
}
@@ -1208,6 +1213,10 @@ static void a6xx_destroy(struct msm_gpu *gpu)
a6xx_gmu_remove(a6xx_gpu);
adreno_gpu_cleanup(adreno_gpu);
+
+ if (a6xx_gpu->opp_table)
+ dev_pm_opp_put_supported_hw(a6xx_gpu->opp_table);
+
kfree(a6xx_gpu);
}
@@ -1240,6 +1249,50 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
}
static struct msm_gem_address_space *
+a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct iommu_domain *iommu;
+ struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
+ u64 start, size;
+
+ iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu)
+ return NULL;
+
+ /*
+ * This allows the GPU to set the bus attributes required to use system
+ * cache on behalf of the iommu page table walker.
+ */
+ if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
+ adreno_set_llc_attributes(iommu);
+
+ mmu = msm_iommu_new(&pdev->dev, iommu);
+ if (IS_ERR(mmu)) {
+ iommu_domain_free(iommu);
+ return ERR_CAST(mmu);
+ }
+
+ /*
+ * Use the aperture start or SZ_16M, whichever is greater. This will
+ * ensure that we align with the allocated pagetable range while still
+ * allowing room in the lower 32 bits for GMEM and whatnot
+ */
+ start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
+ size = iommu->geometry.aperture_end - start + 1;
+
+ aspace = msm_gem_address_space_create(mmu, "gpu",
+ start & GENMASK_ULL(48, 0), size);
+
+ if (IS_ERR(aspace) && !IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
+
+static struct msm_gem_address_space *
a6xx_create_private_address_space(struct msm_gpu *gpu)
{
struct msm_mmu *mmu;
@@ -1264,6 +1317,78 @@ static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
}
+static u32 a618_get_speed_bin(u32 fuse)
+{
+ if (fuse == 0)
+ return 0;
+ else if (fuse == 169)
+ return 1;
+ else if (fuse == 174)
+ return 2;
+
+ return UINT_MAX;
+}
+
+static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse)
+{
+ u32 val = UINT_MAX;
+
+ if (revn == 618)
+ val = a618_get_speed_bin(fuse);
+
+ if (val == UINT_MAX) {
+ DRM_DEV_ERROR(dev,
+ "missing support for speed-bin: %u. Some OPPs may not be supported by hardware",
+ fuse);
+ return UINT_MAX;
+ }
+
+ return (1 << val);
+}
+
+static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
+ u32 revn)
+{
+ struct opp_table *opp_table;
+ struct nvmem_cell *cell;
+ u32 supp_hw = UINT_MAX;
+ void *buf;
+
+ cell = nvmem_cell_get(dev, "speed_bin");
+ /*
+ * -ENOENT means that the platform doesn't support speedbin, which is
+ * fine
+ */
+ if (PTR_ERR(cell) == -ENOENT)
+ return 0;
+ else if (IS_ERR(cell)) {
+ DRM_DEV_ERROR(dev,
+ "failed to read speed-bin. Some OPPs may not be supported by hardware");
+ goto done;
+ }
+
+ buf = nvmem_cell_read(cell, NULL);
+ if (IS_ERR(buf)) {
+ nvmem_cell_put(cell);
+ DRM_DEV_ERROR(dev,
+ "failed to read speed-bin. Some OPPs may not be supported by hardware");
+ goto done;
+ }
+
+ supp_hw = fuse_to_supp_hw(dev, revn, *((u32 *) buf));
+
+ kfree(buf);
+ nvmem_cell_put(cell);
+
+done:
+ opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1);
+ if (IS_ERR(opp_table))
+ return PTR_ERR(opp_table);
+
+ a6xx_gpu->opp_table = opp_table;
+ return 0;
+}
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@@ -1285,7 +1410,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
#endif
- .create_address_space = adreno_iommu_create_address_space,
+ .create_address_space = a6xx_create_address_space,
.create_private_address_space = a6xx_create_private_address_space,
.get_rptr = a6xx_get_rptr,
},
@@ -1325,6 +1450,12 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
a6xx_llc_slices_init(pdev, a6xx_gpu);
+ ret = a6xx_set_supported_hw(&pdev->dev, a6xx_gpu, info->revn);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
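
fuse_to_supp_hw() converts a fuse value into a one-bit "supported hardware" mask that dev_pm_opp_set_supported_hw() later matches against the opp-supported-hw entries of the device tree OPP table, so frequency points a given speed bin cannot reach are filtered out. A worked example of the A618 mapping, with made-up fuse inputs:

    #include <stdio.h>
    #include <stdint.h>

    #define BIN_INVALID 0xffffffffu

    /* Same mapping as a618_get_speed_bin() in the hunk above. */
    static uint32_t a618_speed_bin(uint32_t fuse)
    {
        if (fuse == 0)
            return 0;
        else if (fuse == 169)
            return 1;
        else if (fuse == 174)
            return 2;
        return BIN_INVALID;
    }

    int main(void)
    {
        uint32_t fuses[] = { 0, 169, 174, 42 };

        for (int i = 0; i < 4; i++) {
            uint32_t bin = a618_speed_bin(fuses[i]);
            uint32_t mask = (bin == BIN_INVALID) ? BIN_INVALID : (1u << bin);
            printf("fuse %3u -> supported-hw mask 0x%08x\n", fuses[i], mask);
        }
        return 0;
    }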
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index e793d329e77b..ce0610c5256f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -33,6 +33,8 @@ struct a6xx_gpu {
void *llc_slice;
void *htw_llc_slice;
bool have_mmu500;
+
+ struct opp_table *opp_table;
};
#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 12e75ba360f9..600d445fabe8 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -134,6 +134,41 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
+ .rev = ADRENO_REV(5, 0, 8, ANY_ID),
+ .revn = 508,
+ .name = "A508",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_128K + SZ_8K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a508_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(5, 0, 9, ANY_ID),
+ .revn = 509,
+ .name = "A509",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_256K + SZ_16K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ /* Adreno 509 uses the same ZAP as 512 */
+ .zapfw = "a512_zap.mdt",
+ }, {
.rev = ADRENO_REV(5, 1, 0, ANY_ID),
.revn = 510,
.name = "A510",
@@ -149,6 +184,23 @@ static const struct adreno_info gpulist[] = {
.inactive_period = 250,
.init = a5xx_gpu_init,
}, {
+ .rev = ADRENO_REV(5, 1, 2, ANY_ID),
+ .revn = 512,
+ .name = "A512",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_256K + SZ_16K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a512_zap.mdt",
+ }, {
.rev = ADRENO_REV(5, 3, 0, 2),
.revn = 530,
.name = "A530",
@@ -168,7 +220,7 @@ static const struct adreno_info gpulist[] = {
.init = a5xx_gpu_init,
.zapfw = "a530_zap.mdt",
}, {
- .rev = ADRENO_REV(5, 4, 0, 2),
+ .rev = ADRENO_REV(5, 4, 0, ANY_ID),
.revn = 540,
.name = "A540",
.fw = {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f09175698827..0f184c3dd9d9 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -186,11 +186,18 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
+void adreno_set_llc_attributes(struct iommu_domain *iommu)
+{
+ struct io_pgtable_domain_attr pgtbl_cfg;
+
+ pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
+ iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+}
+
struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct iommu_domain *iommu;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
@@ -200,20 +207,6 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
if (!iommu)
return NULL;
-
- if (adreno_is_a6xx(adreno_gpu)) {
- struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- struct io_pgtable_domain_attr pgtbl_cfg;
- /*
- * This allows GPU to set the bus attributes required to use system
- * cache on behalf of the iommu page table walker.
- */
- if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
- pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
- }
- }
-
mmu = msm_iommu_new(&pdev->dev, iommu);
if (IS_ERR(mmu)) {
iommu_domain_free(iommu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index b3d9a333591b..ccac275aa7a2 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -197,11 +197,26 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
return gpu->revn == 430;
}
+static inline int adreno_is_a508(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 508;
+}
+
+static inline int adreno_is_a509(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 509;
+}
+
static inline int adreno_is_a510(struct adreno_gpu *gpu)
{
return gpu->revn == 510;
}
+static inline int adreno_is_a512(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 512;
+}
+
static inline int adreno_is_a530(struct adreno_gpu *gpu)
{
return gpu->revn == 530;
@@ -212,11 +227,6 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
return gpu->revn == 540;
}
-static inline bool adreno_is_a6xx(struct adreno_gpu *gpu)
-{
- return ((gpu->revn < 700 && gpu->revn > 599));
-}
-
static inline int adreno_is_a618(struct adreno_gpu *gpu)
{
return gpu->revn == 618;
@@ -278,6 +288,8 @@ struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev);
+void adreno_set_llc_attributes(struct iommu_domain *iommu);
+
/*
* For a5xx and a6xx targets load the zap shader that is used to pull the GPU
* out of secure mode
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 5a056c1191df..b2be39b9144e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -4,8 +4,10 @@
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/delay.h>
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
+#include "dpu_hw_pingpong.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
@@ -35,6 +37,8 @@
#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
+#define DPU_ENC_MAX_POLL_TIMEOUT_US 2000
+
static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
@@ -368,15 +372,12 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
tc_cfg.vsync_count = vsync_hz /
(mode->vtotal * drm_mode_vrefresh(mode));
- /* enable external TE after kickoff to avoid premature autorefresh */
- tc_cfg.hw_vsync_mode = 0;
-
/*
- * By setting sync_cfg_height to near max register value, we essentially
- * disable dpu hw generated TE signal, since hw TE will arrive first.
- * Only caveat is if due to error, we hit wrap-around.
+ * Set the sync_cfg_height to twice vtotal so that if we lose a
+ * TE event coming from the display TE pin, we won't stall immediately
*/
- tc_cfg.sync_cfg_height = 0xFFF0;
+ tc_cfg.hw_vsync_mode = 1;
+ tc_cfg.sync_cfg_height = mode->vtotal * 2;
tc_cfg.vsync_init_val = mode->vdisplay;
tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
@@ -580,6 +581,69 @@ static void dpu_encoder_phys_cmd_prepare_for_kickoff(
atomic_read(&phys_enc->pending_kickoff_cnt));
}
+static bool dpu_encoder_phys_cmd_is_ongoing_pptx(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_pp_vsync_info info;
+
+ if (!phys_enc)
+ return false;
+
+ phys_enc->hw_pp->ops.get_vsync_info(phys_enc->hw_pp, &info);
+ if (info.wr_ptr_line_count > 0 &&
+ info.wr_ptr_line_count < phys_enc->cached_mode.vdisplay)
+ return true;
+
+ return false;
+}
+
+static void dpu_encoder_phys_cmd_prepare_commit(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ int trial = 0;
+
+ if (!phys_enc)
+ return;
+ if (!phys_enc->hw_pp)
+ return;
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return;
+
+ /* If autorefresh is already disabled, we have nothing to do */
+ if (!phys_enc->hw_pp->ops.get_autorefresh(phys_enc->hw_pp, NULL))
+ return;
+
+ /*
+ * If autorefresh is enabled, disable it and make sure it is safe to
+ * proceed with the current frame commit/push. The sequence followed is:
+ * 1. Disable TE
+ * 2. Disable the autorefresh config
+ * 3. Poll until the frame transfer is no longer ongoing
+ * 4. Re-enable TE
+ */
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, false);
+ phys_enc->hw_pp->ops.setup_autorefresh(phys_enc->hw_pp, 0, false);
+
+ do {
+ udelay(DPU_ENC_MAX_POLL_TIMEOUT_US);
+ if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US)
+ > (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
+ DPU_ERROR_CMDENC(cmd_enc,
+ "disable autorefresh failed\n");
+ break;
+ }
+
+ trial++;
+ } while (dpu_encoder_phys_cmd_is_ongoing_pptx(phys_enc));
+
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, true);
+
+ DPU_DEBUG_CMDENC(cmd_enc, "disabled autorefresh\n");
+}
+
static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
struct dpu_encoder_phys *phys_enc)
{
@@ -621,20 +685,15 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete(
static int dpu_encoder_phys_cmd_wait_for_commit_done(
struct dpu_encoder_phys *phys_enc)
{
- int rc = 0;
struct dpu_encoder_phys_cmd *cmd_enc;
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
/* only required for master controller */
- if (dpu_encoder_phys_cmd_is_master(phys_enc))
- rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
-
- /* required for both controllers */
- if (!rc && cmd_enc->serialize_wait4pp)
- dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc);
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return 0;
- return rc;
+ return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
}
static int dpu_encoder_phys_cmd_wait_for_vblank(
@@ -681,6 +740,7 @@ static void dpu_encoder_phys_cmd_trigger_start(
static void dpu_encoder_phys_cmd_init_ops(
struct dpu_encoder_phys_ops *ops)
{
+ ops->prepare_commit = dpu_encoder_phys_cmd_prepare_commit;
ops->is_master = dpu_encoder_phys_cmd_is_master;
ops->mode_set = dpu_encoder_phys_cmd_mode_set;
ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
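
The autorefresh-disable path added above polls in fixed 2 ms steps and gives up once the accumulated wait exceeds the kickoff timeout. The standalone sketch below reproduces that bounded-polling arithmetic; the timeout value and the in-flight predicate are illustrative (KICKOFF_TIMEOUT_MS is defined elsewhere in the DPU headers).

    #include <stdio.h>

    #define POLL_INTERVAL_US   2000  /* DPU_ENC_MAX_POLL_TIMEOUT_US above */
    #define KICKOFF_TIMEOUT_MS 84    /* illustrative; real value lives in dpu_encoder_phys.h */
    #define USEC_PER_MSEC      1000

    /* Stand-in for dpu_encoder_phys_cmd_is_ongoing_pptx(): pretend the
     * write pointer stays inside the active region for the first polls. */
    static int frame_still_in_flight(int trial)
    {
        return trial < 3;
    }

    int main(void)
    {
        int trial = 0;

        do {
            /* udelay(POLL_INTERVAL_US) in the driver */
            if ((trial * POLL_INTERVAL_US) >
                (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
                printf("disable autorefresh failed\n");
                break;
            }
            trial++;
        } while (frame_still_in_flight(trial));

        printf("stopped after %d polls (~%d us)\n",
               trial, trial * POLL_INTERVAL_US);
        return 0;
    }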
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 90393fe9e59c..189f3533525c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -12,14 +12,17 @@
#define VIG_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
- BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
+ BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
#define VIG_SDM845_MASK \
- (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3))
#define VIG_SC7180_MASK \
- (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED4))
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
+
+#define VIG_SM8250_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3LITE))
#define DMA_SDM845_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
@@ -185,7 +188,7 @@ static const struct dpu_caps sm8150_dpu_caps = {
static const struct dpu_caps sm8250_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED3, /* TODO: qseed3 lite */
+ .qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
.ubwc_version = DPU_HW_UBWC_VER_40,
.has_src_split = true,
@@ -444,6 +447,34 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
};
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
+ _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
+ _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
+ _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
+ _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
+
+static const struct dpu_sspp_cfg sm8250_sspp[] = {
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
+ sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK,
+ sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
+ SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK,
+ sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
+ SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK,
+ sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+};
+
/*************************************************************
* MIXER sub blocks config
*************************************************************/
@@ -532,23 +563,28 @@ static const struct dpu_dspp_sub_blks sm8150_dspp_sblk = {
.len = 0x90, .version = 0x40000},
};
-#define DSPP_BLK(_name, _id, _base, _sblk) \
+#define DSPP_BLK(_name, _id, _base, _mask, _sblk) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x1800, \
- .features = DSPP_SC7180_MASK, \
+ .features = _mask, \
.sblk = _sblk \
}
static const struct dpu_dspp_cfg sc7180_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sc7180_dspp_sblk),
+ DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+ &sc7180_dspp_sblk),
};
static const struct dpu_dspp_cfg sm8150_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sm8150_dspp_sblk),
- DSPP_BLK("dspp_1", DSPP_1, 0x56000, &sm8150_dspp_sblk),
- DSPP_BLK("dspp_2", DSPP_2, 0x58000, &sm8150_dspp_sblk),
- DSPP_BLK("dspp_3", DSPP_3, 0x5a000, &sm8150_dspp_sblk),
+ DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
+ DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
+ DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
+ DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
};
/*************************************************************
@@ -624,33 +660,33 @@ static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
/*************************************************************
* INTF sub blocks config
*************************************************************/
-#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _features) \
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _progfetch, _features) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x280, \
.features = _features, \
.type = _type, \
.controller_id = _ctrl_id, \
- .prog_fetch_lines_worst_case = 24 \
+ .prog_fetch_lines_worst_case = _progfetch \
}
static const struct dpu_intf_cfg sdm845_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SDM845_MASK),
- INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SDM845_MASK),
- INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SDM845_MASK),
- INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SDM845_MASK),
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SDM845_MASK),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SDM845_MASK),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SDM845_MASK),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SDM845_MASK),
};
static const struct dpu_intf_cfg sc7180_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK),
- INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK),
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK),
};
static const struct dpu_intf_cfg sm8150_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK),
- INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK),
- INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SC7180_MASK),
- INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SC7180_MASK),
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SC7180_MASK),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SC7180_MASK),
};
/*************************************************************
@@ -969,9 +1005,8 @@ static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.mdp = sm8250_mdp,
.ctl_count = ARRAY_SIZE(sm8150_ctl),
.ctl = sm8150_ctl,
- /* TODO: sspp qseed version differs from 845 */
- .sspp_count = ARRAY_SIZE(sdm845_sspp),
- .sspp = sdm845_sspp,
+ .sspp_count = ARRAY_SIZE(sm8250_sspp),
+ .sspp = sm8250_sspp,
.mixer_count = ARRAY_SIZE(sm8150_lm),
.mixer = sm8150_lm,
.dspp_count = ARRAY_SIZE(sm8150_dspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index eaef99db2d2f..ea4647d21a20 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -95,6 +95,7 @@ enum {
* @DPU_SSPP_SRC Src and fetch part of the pipes,
* @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
 * @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3LITE, QSEED3 Lite algorithm support
* @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support
* @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
 * @DPU_SSPP_CSC, Support of Color space conversion
@@ -114,6 +115,7 @@ enum {
DPU_SSPP_SRC = 0x1,
DPU_SSPP_SCALER_QSEED2,
DPU_SSPP_SCALER_QSEED3,
+ DPU_SSPP_SCALER_QSEED3LITE,
DPU_SSPP_SCALER_QSEED4,
DPU_SSPP_SCALER_RGB,
DPU_SSPP_CSC,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index bea4ab5c58c5..245a7a62b5c6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -23,6 +23,7 @@
#define PP_WR_PTR_IRQ 0x024
#define PP_OUT_LINE_COUNT 0x028
#define PP_LINE_COUNT 0x02C
+#define PP_AUTOREFRESH_CONFIG 0x030
#define PP_FBC_MODE 0x034
#define PP_FBC_BUDGET_CTL 0x038
@@ -120,6 +121,29 @@ static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
return 0;
}
+static void dpu_hw_pp_setup_autorefresh_config(struct dpu_hw_pingpong *pp,
+ u32 frame_count, bool enable)
+{
+ DPU_REG_WRITE(&pp->hw, PP_AUTOREFRESH_CONFIG,
+ enable ? (BIT(31) | frame_count) : 0);
+}
+
+/*
+ * dpu_hw_pp_get_autorefresh_config - Get autorefresh config from HW
+ * @pp: DPU pingpong structure
+ * @frame_count: Used to return the current frame count from hw
+ *
+ * Returns: True if autorefresh enabled, false if disabled.
+ */
+static bool dpu_hw_pp_get_autorefresh_config(struct dpu_hw_pingpong *pp,
+ u32 *frame_count)
+{
+ u32 val = DPU_REG_READ(&pp->hw, PP_AUTOREFRESH_CONFIG);
+
+ if (frame_count != NULL)
+ *frame_count = val & 0xffff;
+ return !!(val & BIT(31));
+}
+
static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
u32 timeout_us)
{
@@ -228,6 +252,8 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
c->ops.enable_tearcheck = dpu_hw_pp_enable_te;
c->ops.connect_external_te = dpu_hw_pp_connect_external_te;
c->ops.get_vsync_info = dpu_hw_pp_get_vsync_info;
+ c->ops.setup_autorefresh = dpu_hw_pp_setup_autorefresh_config;
+ c->ops.get_autorefresh = dpu_hw_pp_get_autorefresh_config;
c->ops.poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
c->ops.get_line_count = dpu_hw_pp_get_line_count;
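
PP_AUTOREFRESH_CONFIG as used above packs an enable flag into bit 31 and the frame count into the low bits (the read helper masks 16 bits). A small encode/decode sketch, assuming that layout:

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))

    static uint32_t autorefresh_encode(uint32_t frame_count, int enable)
    {
        return enable ? (BIT(31) | frame_count) : 0;
    }

    static int autorefresh_decode(uint32_t val, uint32_t *frame_count)
    {
        if (frame_count)
            *frame_count = val & 0xffff;
        return !!(val & BIT(31));
    }

    int main(void)
    {
        uint32_t fc = 0;
        uint32_t reg = autorefresh_encode(2, 1);
        int enabled = autorefresh_decode(reg, &fc);

        printf("reg = 0x%08x, enabled = %d, frame_count = %u\n",
               reg, enabled, fc);
        return 0;
    }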
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 6902b9b95c8e..845b9ce80e31 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -63,6 +63,8 @@ struct dpu_hw_dither_cfg {
* @setup_tearcheck : program tear check values
* @enable_tearcheck : enables tear check
 * @get_vsync_info : retrieves timing info of the panel
+ * @setup_autorefresh : configure and enable the autorefresh config
+ * @get_autorefresh : retrieve autorefresh config from hardware
* @setup_dither : function to program the dither hw block
* @get_line_count: obtain current vertical line counter
*/
@@ -95,6 +97,18 @@ struct dpu_hw_pingpong_ops {
struct dpu_hw_pp_vsync_info *info);
/**
+ * configure and enable the autorefresh config
+ */
+ void (*setup_autorefresh)(struct dpu_hw_pingpong *pp,
+ u32 frame_count, bool enable);
+
+ /**
+ * retrieve autorefresh config from hardware
+ */
+ bool (*get_autorefresh)(struct dpu_hw_pingpong *pp,
+ u32 *frame_count);
+
+ /**
* poll until write pointer transmission starts
* @Return: 0 on success, -ETIMEDOUT on timeout
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 2c2ca5335aa8..34d81aa16041 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -673,6 +673,7 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
+ test_bit(DPU_SSPP_SCALER_QSEED3LITE, &features) ||
test_bit(DPU_SSPP_SCALER_QSEED4, &features)) {
c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 85b018a9b03c..fdfd4b46e2c6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -28,6 +28,7 @@ struct dpu_hw_pipe;
#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
(1UL << DPU_SSPP_SCALER_QSEED2) | \
(1UL << DPU_SSPP_SCALER_QSEED3) | \
+ (1UL << DPU_SSPP_SCALER_QSEED3LITE) | \
(1UL << DPU_SSPP_SCALER_QSEED4))
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index 84e9875994a8..f94584c982cd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -59,6 +59,19 @@ static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
#define QSEED3_SEP_LUT_SIZE \
(QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
+/* DPU_SCALER_QSEED3LITE */
+#define QSEED3LITE_COEF_LUT_Y_SEP_BIT 4
+#define QSEED3LITE_COEF_LUT_UV_SEP_BIT 5
+#define QSEED3LITE_COEF_LUT_CTRL 0x4C
+#define QSEED3LITE_COEF_LUT_SWAP_BIT 0
+#define QSEED3LITE_DIR_FILTER_WEIGHT 0x60
+#define QSEED3LITE_FILTERS 2
+#define QSEED3LITE_SEPARABLE_LUTS 10
+#define QSEED3LITE_LUT_SIZE 33
+#define QSEED3LITE_SEP_LUT_SIZE \
+ (QSEED3LITE_LUT_SIZE * QSEED3LITE_SEPARABLE_LUTS * sizeof(u32))
+
void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
u32 reg_off,
u32 val,
@@ -156,6 +169,57 @@ static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
}
+static void _dpu_hw_setup_scaler3lite_lut(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
+{
+ int j, filter;
+ int config_lut = 0x0;
+ unsigned long lut_flags;
+ u32 lut_addr, lut_offset;
+ u32 *lut[QSEED3LITE_FILTERS] = {NULL, NULL};
+ static const uint32_t off_tbl[QSEED3LITE_FILTERS] = { 0x000, 0x200 };
+
+ DPU_REG_WRITE(c, QSEED3LITE_DIR_FILTER_WEIGHT + offset, scaler3_cfg->dir_weight);
+
+ if (!scaler3_cfg->sep_lut)
+ return;
+
+ lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+ if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) {
+ lut[0] = scaler3_cfg->sep_lut +
+ scaler3_cfg->y_rgb_sep_lut_idx * QSEED3LITE_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) {
+ lut[1] = scaler3_cfg->sep_lut +
+ scaler3_cfg->uv_sep_lut_idx * QSEED3LITE_LUT_SIZE;
+ config_lut = 1;
+ }
+
+ if (config_lut) {
+ for (filter = 0; filter < QSEED3LITE_FILTERS; filter++) {
+ if (!lut[filter])
+ continue;
+ lut_offset = 0;
+ lut_addr = QSEED3_COEF_LUT + offset + off_tbl[filter];
+ for (j = 0; j < QSEED3LITE_LUT_SIZE; j++) {
+ DPU_REG_WRITE(c,
+ lut_addr,
+ (lut[filter])[lut_offset++]);
+ lut_addr += 4;
+ }
+ }
+ }
+
+ if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+ DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
+}
+
static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset)
{
@@ -242,9 +306,12 @@ void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
op_mode |= BIT(8);
}
- if (scaler3_cfg->lut_flag)
- _dpu_hw_setup_scaler3_lut(c, scaler3_cfg,
- scaler_offset);
+ if (scaler3_cfg->lut_flag) {
+ if (scaler_version < 0x2004)
+ _dpu_hw_setup_scaler3_lut(c, scaler3_cfg, scaler_offset);
+ else
+ _dpu_hw_setup_scaler3lite_lut(c, scaler3_cfg, scaler_offset);
+ }
if (scaler_version == 0x1002) {
phase_init =
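
dpu_hw_setup_scaler3() now routes LUT programming by scaler version: anything below 0x2004 takes the QSEED3 path, 0x2004 and newer the QSEED3LITE path. A trivial sketch of that dispatch, with the boundary taken from the hunk above and made-up version samples:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors the version check added to dpu_hw_setup_scaler3(). */
    static const char *lut_writer_for(uint32_t scaler_version)
    {
        return scaler_version < 0x2004 ? "_dpu_hw_setup_scaler3_lut"
                                       : "_dpu_hw_setup_scaler3lite_lut";
    }

    int main(void)
    {
        uint32_t versions[] = { 0x1002, 0x2004, 0x3000 };

        for (int i = 0; i < 3; i++)
            printf("version 0x%04x -> %s\n", versions[i],
                   lut_writer_for(versions[i]));
        return 0;
    }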
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 234eb7d65753..ff3cffde84cd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -97,6 +97,7 @@ struct dpu_hw_scaler3_de_cfg {
* @ cir_lut: pointer to circular filter LUT
* @ sep_lut: pointer to separable filter LUT
* @ de: detail enhancer configuration
+ * @ dir_weight: Directional weight
*/
struct dpu_hw_scaler3_cfg {
u32 enable;
@@ -137,6 +138,8 @@ struct dpu_hw_scaler3_cfg {
* Detail enhancer settings
*/
struct dpu_hw_scaler3_de_cfg de;
+
+ u32 dir_weight;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
index cf867f3f7c36..b757054e1c23 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -30,7 +30,7 @@
#define VBIF_XIN_HALT_CTRL0 0x0200
#define VBIF_XIN_HALT_CTRL1 0x0204
#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
-#define VBIF_XINL_QOS_LVL_REMAP_000 0x0590
+#define VBIF_XINL_QOS_LVL_REMAP_000(v) ((v) < DPU_HW_VER_400 ? 0x570 : 0x0590)
static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
u32 *pnd_errors, u32 *src_errors)
@@ -156,18 +156,19 @@ static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
u32 xin_id, u32 level, u32 remap_level)
{
struct dpu_hw_blk_reg_map *c;
- u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+ u32 reg_lvl, reg_val, reg_val_lvl, mask, reg_high, reg_shift;
if (!vbif)
return;
c = &vbif->hw;
+ reg_lvl = VBIF_XINL_QOS_LVL_REMAP_000(c->hwversion);
reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
reg_shift = (xin_id & 0x7) * 4;
reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
- reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
+ reg_val_lvl = DPU_REG_READ(c, reg_lvl + reg_high);
mask = 0x7 << reg_shift;
@@ -178,7 +179,7 @@ static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
reg_val_lvl |= (remap_level << reg_shift) & mask;
DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
- DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
+ DPU_REG_WRITE(c, reg_lvl + reg_high, reg_val_lvl);
}
static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
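
The QoS remap change computes both a per-version register base and a per-xin field position: eight 4-bit fields per 32-bit register, with xin 8..15 and each QoS level selecting a different register in the bank. A standalone sketch of the same addressing math; the DPU_HW_VER_400 value is reconstructed from the DPU versioning scheme (major in the top nibble) and should be treated as illustrative.

    #include <stdio.h>
    #include <stdint.h>

    #define DPU_HW_VER_400 0x40000000u  /* illustrative; real macro is elsewhere */

    static void remap_addr(uint32_t hwversion, uint32_t xin_id, uint32_t level)
    {
        uint32_t base = hwversion < DPU_HW_VER_400 ? 0x570 : 0x590;
        uint32_t reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
        uint32_t reg_shift = (xin_id & 0x7) * 4;

        printf("xin %2u level %u -> reg 0x%03x, field shift %2u\n",
               xin_id, level, base + reg_high, reg_shift);
    }

    int main(void)
    {
        remap_addr(0x50000000, 3, 0);   /* post-4.0 hardware */
        remap_addr(0x30000000, 10, 1);  /* pre-4.0 hardware */
        return 0;
    }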
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 374b0e8471e6..5a8e3e1fc48c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -749,7 +749,7 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
case DRM_MODE_ENCODER_TMDS:
info.num_of_h_tiles = 1;
break;
- };
+ }
rc = dpu_encoder_setup(encoder->dev, encoder, &info);
if (rc)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index bc0231a50132..f898a8f67b7f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1465,6 +1465,7 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
pdpu->debugfs_root, &pdpu->debugfs_src);
if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) ||
cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) {
dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index df10c1ac7591..94ce62a26daf 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -177,7 +177,7 @@ static const struct mdp5_cfg_hw msm8x74v2_config = {
[3] = INTF_HDMI,
},
},
- .max_clk = 200000000,
+ .max_clk = 320000000,
};
static const struct mdp5_cfg_hw apq8084_config = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 0c8f9f88301f..f5d71b274079 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -1180,7 +1180,7 @@ static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
pp_done);
- complete(&mdp5_crtc->pp_completion);
+ complete_all(&mdp5_crtc->pp_completion);
}
static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 19b35ae3e927..1c6e1d2b947c 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -336,7 +336,6 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
ssize_t ret;
int const aux_cmd_native_max = 16;
int const aux_cmd_i2c_max = 128;
- int const retry_count = 5;
struct dp_aux_private *aux = container_of(dp_aux,
struct dp_aux_private, dp_aux);
@@ -378,12 +377,6 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
ret = dp_aux_cmd_fifo_tx(aux, msg);
if (ret < 0) {
- if (aux->native) {
- aux->retry_cnt++;
- if (!(aux->retry_cnt % retry_count))
- dp_catalog_aux_update_cfg(aux->catalog);
- dp_catalog_aux_reset(aux->catalog);
- }
usleep_range(400, 500); /* at least 400us to next try */
goto unlock_exit;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 44f0c57798d0..b1a9b1b98f5f 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -190,6 +190,18 @@ int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog)
return 0;
}
+/**
+ * dp_catalog_aux_reset() - reset AUX controller
+ *
+ * @dp_catalog: DP catalog structure
+ *
+ * return: void
+ *
+ * This function resets the AUX controller
+ *
+ * NOTE: resetting the AUX controller will also clear any pending HPD related interrupts
+ *
+ */
void dp_catalog_aux_reset(struct dp_catalog *dp_catalog)
{
u32 aux_ctrl;
@@ -483,6 +495,18 @@ int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog,
return 0;
}
+/**
+ * dp_catalog_ctrl_reset() - reset DP controller
+ *
+ * @dp_catalog: DP catalog structure
+ *
+ * return: void
+ *
+ * This function resets the DP controller
+ *
+ * NOTE: resetting the DP controller will also clear any pending HPD related interrupts
+ *
+ */
void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog)
{
u32 sw_reset;
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 36b39c381b3f..1390f3547fde 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -631,7 +631,7 @@ static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in,
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
if (!tu)
- return
+ return;
dp_panel_update_tu_timings(in, tu);
@@ -1158,7 +1158,7 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
default:
ret = -EINVAL;
break;
- };
+ }
if (!ret)
DRM_DEBUG_DP("new rate=0x%x\n", ctrl->link->link_params.rate);
@@ -1296,7 +1296,6 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
* transitioned to PUSH_IDLE. In order to start transmitting
* a link training pattern, we have to first do soft reset.
*/
- dp_catalog_ctrl_reset(ctrl->catalog);
ret = dp_ctrl_link_train(ctrl, cr, training_step);
@@ -1365,7 +1364,7 @@ static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
return ret;
}
-int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
+int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset)
{
struct dp_ctrl_private *ctrl;
struct dp_io *dp_io;
@@ -1382,6 +1381,9 @@ int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
ctrl->dp_ctrl.orientation = flip;
+ if (reset)
+ dp_catalog_ctrl_reset(ctrl->catalog);
+
dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_init(phy);
dp_catalog_ctrl_enable_irq(ctrl->catalog, true);
@@ -1496,7 +1498,6 @@ static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
int training_step = DP_TRAINING_NONE;
dp_ctrl_push_idle(&ctrl->dp_ctrl);
- dp_catalog_ctrl_reset(ctrl->catalog);
ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
@@ -1785,14 +1786,14 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
* Set up transfer unit values and set controller state to send
* video.
*/
+ reinit_completion(&ctrl->video_comp);
+
dp_ctrl_configure_source_params(ctrl);
dp_catalog_ctrl_config_msa(ctrl->catalog,
ctrl->link->link_params.rate,
ctrl->dp_ctrl.pixel_rate, dp_ctrl_use_fixed_nvid(ctrl));
- reinit_completion(&ctrl->video_comp);
-
dp_ctrl_setup_tr_unit(ctrl);
dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
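
Moving reinit_completion() ahead of dp_ctrl_configure_source_params() closes a window where the "video ready" event could fire during configuration, complete a stale completion, and then be erased by the late reinit. A plain-C sketch of the ordering, with a flag standing in for struct completion:

    #include <stdio.h>

    static int video_comp;              /* stand-in for ctrl->video_comp */

    static void configure_source(void)  /* may complete immediately */
    {
        video_comp = 1;                 /* complete(&ctrl->video_comp) */
    }

    int main(void)
    {
        video_comp = 0;                 /* reinit_completion() first ... */
        configure_source();             /* ... then arm the hardware */
        printf("video ready seen: %d\n", video_comp);
        return 0;
    }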
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index f60ba93c8678..a836bd358447 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -19,7 +19,7 @@ struct dp_ctrl {
u32 pixel_rate;
};
-int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip);
+int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 3bc7ed21de28..5a39da6e1eaf 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -350,7 +350,7 @@ end:
return rc;
}
-static void dp_display_host_init(struct dp_display_private *dp)
+static void dp_display_host_init(struct dp_display_private *dp, int reset)
{
bool flip = false;
@@ -365,7 +365,7 @@ static void dp_display_host_init(struct dp_display_private *dp)
dp_display_set_encoder_mode(dp);
dp_power_init(dp->power, flip);
- dp_ctrl_host_init(dp->ctrl, flip);
+ dp_ctrl_host_init(dp->ctrl, flip, reset);
dp_aux_init(dp->aux);
dp->core_initialized = true;
}
@@ -403,7 +403,7 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
goto end;
}
- dp_display_host_init(dp);
+ dp_display_host_init(dp, false);
/*
* set sink to normal operation mode -- D0
@@ -651,8 +651,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
/* signal the disconnect event early to ensure proper teardown */
- dp_display_handle_plugged_change(g_dp_display, false);
reinit_completion(&dp->audio_comp);
+ dp_display_handle_plugged_change(g_dp_display, false);
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
DP_DP_IRQ_HPD_INT_MASK, true);
@@ -700,6 +700,13 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
return 0;
}
+ if (state == ST_CONNECT_PENDING || state == ST_DISCONNECT_PENDING) {
+ /* wait until ST_CONNECTED */
+ dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
if (ret == -ECONNRESET) { /* cable unplugged */
dp->core_initialized = false;
@@ -890,6 +897,9 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
/* wait only if audio was enabled */
if (dp_display->audio_enabled) {
+ /* signal the disconnect event */
+ reinit_completion(&dp->audio_comp);
+ dp_display_handle_plugged_change(dp_display, false);
if (!wait_for_completion_timeout(&dp->audio_comp,
HZ * 5))
DRM_ERROR("audio comp timeout\n");
@@ -1002,7 +1012,7 @@ int dp_display_get_test_bpp(struct msm_dp *dp)
static void dp_display_config_hpd(struct dp_display_private *dp)
{
- dp_display_host_init(dp);
+ dp_display_host_init(dp, true);
dp_catalog_ctrl_hpd_config(dp->catalog);
/* Enable interrupt first time
@@ -1256,7 +1266,7 @@ static int dp_pm_resume(struct device *dev)
dp->hpd_state = ST_DISCONNECTED;
/* turn on dp ctrl/phy */
- dp_display_host_init(dp);
+ dp_display_host_init(dp, true);
dp_catalog_ctrl_hpd_config(dp->catalog);
@@ -1439,7 +1449,7 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
state = dp_display->hpd_state;
if (state == ST_DISPLAY_OFF)
- dp_display_host_init(dp_display);
+ dp_display_host_init(dp_display, true);
dp_display_enable(dp_display, 0);
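The new ST_CONNECT_PENDING/ST_DISCONNECT_PENDING branch re-queues the IRQ-HPD event with a short delay instead of acting while the state machine is mid-transition. The defer-until-stable pattern, roughly (all names hypothetical):

#include <linux/mutex.h>

enum hpd_state { ST_DISCONNECTED, ST_CONNECT_PENDING, ST_CONNECTED,
		 ST_DISCONNECT_PENDING };

struct hpd_ctx {
	struct mutex event_mutex;
	enum hpd_state state;
};

void requeue_irq_hpd(struct hpd_ctx *c, unsigned int delay);	/* hypothetical */
void process_irq_hpd(struct hpd_ctx *c);			/* hypothetical */

int handle_irq_hpd(struct hpd_ctx *c)
{
	mutex_lock(&c->event_mutex);

	if (c->state == ST_CONNECT_PENDING ||
	    c->state == ST_DISCONNECT_PENDING) {
		/* mid-transition: come back once the state settles */
		requeue_irq_hpd(c, 1);
		mutex_unlock(&c->event_mutex);
		return 0;
	}

	process_irq_hpd(c);
	mutex_unlock(&c->event_mutex);
	return 0;
}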
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index d1780bcac8cc..9cc816663668 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -409,7 +409,6 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel)
int dp_panel_init_panel_info(struct dp_panel *dp_panel)
{
- int rc = 0;
struct drm_display_mode *drm_mode;
drm_mode = &dp_panel->dp_mode.drm_mode;
@@ -436,7 +435,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel)
min_t(u32, dp_panel->dp_mode.bpp, 30));
DRM_DEBUG_DP("updated bpp = %d\n", dp_panel->dp_mode.bpp);
- return rc;
+ return 0;
}
struct dp_panel *dp_panel_get(struct dp_panel_in *in)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index 1afb7c579dbb..eca86bf448f7 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -139,7 +139,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.disable = dsi_20nm_phy_disable,
.init = msm_dsi_phy_init_common,
},
- .io_start = { 0xfd998300, 0xfd9a0300 },
+ .io_start = { 0xfd998500, 0xfd9a0500 },
.num_dsi_phy = 2,
};
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index e4e9bf04b736..de3b802ccd3d 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -172,9 +172,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll)
multiplier = 1 << config->frac_bits;
dec_multiple = div_u64(pll_freq * multiplier, divider);
- div_u64_rem(dec_multiple, multiplier, &frac);
-
- dec = div_u64(dec_multiple, multiplier);
+ dec = div_u64_rem(dec_multiple, multiplier, &frac);
if (pll_freq <= 1900000000UL)
regs->pll_prop_gain_rate = 8;
@@ -306,7 +304,8 @@ static void dsi_pll_commit(struct dsi_pll_10nm *pll)
reg->frac_div_start_mid);
pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
reg->frac_div_start_high);
- pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1,
+ reg->pll_lockdet_rate);
pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
@@ -345,6 +344,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
{
+ struct device *dev = &pll->pdev->dev;
int rc;
u32 status = 0;
u32 const delay_us = 100;
@@ -357,8 +357,8 @@ static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
delay_us,
timeout_us);
if (rc)
- pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
- pll->id, status);
+ DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
+ pll->id, status);
return rc;
}
@@ -405,6 +405,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ struct device *dev = &pll_10nm->pdev->dev;
int rc;
dsi_pll_enable_pll_bias(pll_10nm);
@@ -413,7 +414,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
if (rc) {
- pr_err("vco_set_rate failed, rc=%d\n", rc);
+ DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
return rc;
}
@@ -430,7 +431,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
/* Check for PLL lock */
rc = dsi_pll_10nm_lock_status(pll_10nm);
if (rc) {
- pr_err("PLL(%d) lock failed\n", pll_10nm->id);
+ DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->id);
goto error;
}
@@ -483,6 +484,7 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ struct dsi_pll_config *config = &pll_10nm->pll_configuration;
void __iomem *base = pll_10nm->mmio;
u64 ref_clk = pll_10nm->vco_ref_clk_rate;
u64 vco_rate = 0x0;
@@ -503,9 +505,8 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
/*
* TODO:
* 1. Assumes prescaler is disabled
- * 2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
*/
- multiplier = 1 << 18;
+ multiplier = 1 << config->frac_bits;
pll_freq = dec * (ref_clk * 2);
tmp64 = (ref_clk * 2 * frac);
pll_freq += div_u64(tmp64, multiplier);
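div_u64_rem() returns the quotient and stores the remainder through its third argument, so the decimal and fractional parts of the divider now come out of one division instead of the former div_u64()/div_u64_rem() pair. A worked sketch with example values:

#include <linux/math64.h>

static u64 split_dec_frac(u64 dec_multiple, u32 *frac)
{
	/* With multiplier = 1 << 18 = 262144 and dec_multiple = 31876710:
	 *   quotient  31876710 / 262144 = 121    (returned)
	 *   remainder 31876710 % 262144 = 157286 (stored in *frac)
	 */
	return div_u64_rem(dec_multiple, 1 << 18, frac);
}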
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 108c405e03dd..94525ac76d4e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -788,9 +788,10 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
struct drm_file *file, struct drm_gem_object *obj,
uint64_t *iova)
{
+ struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
- if (!ctx->aspace)
+ if (!priv->gpu)
return -EINVAL;
/*
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index a588b0388d27..f091c1e164fa 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -987,8 +987,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
/* Don't drop the pages for imported dmabuf, as they are not
* ours; just free the array we allocated:
*/
- if (msm_obj->pages)
- kvfree(msm_obj->pages);
+ kvfree(msm_obj->pages);
put_iova_vmas(obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index d04c349d8112..5480852bdeda 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -198,6 +198,8 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
submit->cmd[i].idx = submit_cmd.submit_idx;
submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;
+ userptr = u64_to_user_ptr(submit_cmd.relocs);
+
sz = array_size(submit_cmd.nr_relocs,
sizeof(struct drm_msm_gem_submit_reloc));
/* check for overflow: */
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index d8151a89e163..4735251a394d 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -157,6 +157,7 @@ struct msm_kms {
* from the crtc's pending_timer close to end of the frame:
*/
struct mutex commit_lock[MAX_CRTCS];
+ struct lock_class_key commit_lock_keys[MAX_CRTCS];
unsigned pending_crtc_mask;
struct msm_pending_timer pending_timers[MAX_CRTCS];
};
@@ -166,8 +167,11 @@ static inline int msm_kms_init(struct msm_kms *kms,
{
unsigned i, ret;
- for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
- mutex_init(&kms->commit_lock[i]);
+ for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) {
+ lockdep_register_key(&kms->commit_lock_keys[i]);
+ __mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]",
+ &kms->commit_lock_keys[i]);
+ }
kms->funcs = funcs;
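Registering a distinct lock_class_key per mutex stops lockdep from folding all per-CRTC commit locks into a single class, so holding several of them at once is no longer flagged as recursive locking. A condensed sketch of the same pattern (the array size is an arbitrary example):

#include <linux/lockdep.h>
#include <linux/mutex.h>

#define NUM_LOCKS 4

static struct mutex locks[NUM_LOCKS];
static struct lock_class_key keys[NUM_LOCKS];

static void init_locks(void)
{
	int i;

	for (i = 0; i < NUM_LOCKS; i++) {
		/* a private key makes each mutex its own lockdep class */
		lockdep_register_key(&keys[i]);
		__mutex_init(&locks[i], "locks[i]", &keys[i]);
	}
}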
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index 302d4e6fc52f..788db043a342 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -88,7 +88,11 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
NVVAL(NV507C, SET_CONVERSION, OFS, 0x64));
} else {
PUSH_MTHD(push, NV507C, SET_PROCESSING,
- NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
+ NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV507C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV507C, SET_CONVERSION, OFS, 0));
}
PUSH_MTHD(push, NV507C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
index 18d34096f125..093d4ba6910e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -49,7 +49,11 @@ base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
NVVAL(NV827C, SET_CONVERSION, OFS, 0x64));
} else {
PUSH_MTHD(push, NV827C, SET_PROCESSING,
- NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
+ NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV827C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV827C, SET_CONVERSION, OFS, 0));
}
PUSH_MTHD(push, NV827C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index c9e1575a06a2..196612addfd6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2693,6 +2693,14 @@ nv50_display_create(struct drm_device *dev)
else
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
+ if (disp->disp->object.oclass >= GK104_DISP) {
+ dev->mode_config.cursor_width = 256;
+ dev->mode_config.cursor_height = 256;
+ } else {
+ dev->mode_config.cursor_width = 64;
+ dev->mode_config.cursor_height = 64;
+ }
+
/* create crtc objects to represent the hw heads */
if (disp->disp->object.oclass >= GV100_DISP)
crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
index a5d827403660..ea9f8667305e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -22,6 +22,7 @@
#include "head.h"
#include "core.h"
+#include "nvif/push.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl917d.h>
@@ -73,6 +74,31 @@ head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
return 0;
}
+static int
+head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+ NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND),
+
+ HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
+ return 0;
+}
+
int
head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
@@ -101,7 +127,7 @@ head917d = {
.core_clr = head907d_core_clr,
.curs_layout = head917d_curs_layout,
.curs_format = head507d_curs_format,
- .curs_set = head907d_curs_set,
+ .curs_set = head917d_curs_set,
.curs_clr = head907d_curs_clr,
.base = head917d_base,
.ovly = head907d_ovly,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index ce451242f79e..271de3a63f21 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -702,6 +702,11 @@ nv50_wndw_init(struct nv50_wndw *wndw)
nvif_notify_get(&wndw->notify);
}
+static const u64 nv50_cursor_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
enum drm_plane_type type, const char *name, int index,
@@ -713,6 +718,7 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
struct nvif_mmu *mmu = &drm->client.mmu;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_wndw *wndw;
+ const u64 *format_modifiers;
int nformat;
int ret;
@@ -728,10 +734,13 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
for (nformat = 0; format[nformat]; nformat++);
- ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
- format, nformat,
- nouveau_display(dev)->format_modifiers,
- type, "%s-%d", name, index);
+ if (type == DRM_PLANE_TYPE_CURSOR)
+ format_modifiers = nv50_cursor_format_modifiers;
+ else
+ format_modifiers = nouveau_display(dev)->format_modifiers;
+
+ ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, format, nformat,
+ format_modifiers, type, "%s-%d", name, index);
if (ret) {
kfree(*pwndw);
*pwndw = NULL;
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h
index 2a2612d6e1e0..fb223723a38a 100644
--- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h
@@ -66,6 +66,10 @@
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV917D_HEAD_SET_OFFSET_CURSOR(a) (0x00000484 + (a)*0x00000300)
+#define NV917D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0
+#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR(a) (0x0000048C + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0
#define NV917D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300)
#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
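For reference, heads in this display class are spaced 0x300 bytes apart in the method space, so the two new cursor methods decode as:

/* Per-head stride is 0x300 bytes:
 *   HEAD_SET_OFFSET_CURSOR(i)      = 0x484 + i * 0x300
 *   HEAD_SET_CONTEXT_DMA_CURSOR(i) = 0x48c + i * 0x300
 * e.g. head 1: 0x484 + 0x300 = 0x784 and 0x48c + 0x300 = 0x78c
 */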
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push.h b/drivers/gpu/drm/nouveau/include/nvif/push.h
index 168d7694ede5..6d3a8a3d2087 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/push.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/push.h
@@ -123,131 +123,131 @@ PUSH_KICK(struct nvif_push *push)
} while(0)
#endif
-#define PUSH_1(X,f,ds,n,c,o,p,s,mA,dA) do { \
- PUSH_##o##_HDR((p), s, mA, (c)+(n)); \
- PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \
+#define PUSH_1(X,f,ds,n,o,p,s,mA,dA) do { \
+ PUSH_##o##_HDR((p), s, mA, (ds)+(n)); \
+ PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \
} while(0)
-#define PUSH_2(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \
- PUSH_1(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_2(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \
+ PUSH_1(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_3(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \
- PUSH_2(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_3(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \
+ PUSH_2(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_4(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \
- PUSH_3(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_4(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \
+ PUSH_3(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_5(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \
- PUSH_4(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_5(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \
+ PUSH_4(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_6(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \
- PUSH_5(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_6(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \
+ PUSH_5(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_7(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \
- PUSH_6(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_7(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \
+ PUSH_6(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_8(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \
- PUSH_7(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_8(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \
+ PUSH_7(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_9(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \
- PUSH_8(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_9(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \
+ PUSH_8(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_10(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \
- PUSH_9(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_10(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \
+ PUSH_9(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_1D(X,o,p,s,mA,dA) \
- PUSH_1(X, DATA_, 1, 1, 0, o, (p), s, X##mA, (dA))
-#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \
- PUSH_2(X, DATA_, 1, 1, 0, o, (p), s, X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \
- PUSH_3(X, DATA_, 1, 1, 0, o, (p), s, X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \
- PUSH_4(X, DATA_, 1, 1, 0, o, (p), s, X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \
- PUSH_5(X, DATA_, 1, 1, 0, o, (p), s, X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+#define PUSH_1D(X,o,p,s,mA,dA) \
+ PUSH_1(X, DATA_, 1, 0, o, (p), s, X##mA, (dA))
+#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \
+ PUSH_2(X, DATA_, 1, 0, o, (p), s, X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \
+ PUSH_3(X, DATA_, 1, 0, o, (p), s, X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \
+ PUSH_4(X, DATA_, 1, 0, o, (p), s, X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \
+ PUSH_5(X, DATA_, 1, 0, o, (p), s, X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_6D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF) \
- PUSH_6(X, DATA_, 1, 1, 0, o, (p), s, X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_6(X, DATA_, 1, 0, o, (p), s, X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_7D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG) \
- PUSH_7(X, DATA_, 1, 1, 0, o, (p), s, X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_7(X, DATA_, 1, 0, o, (p), s, X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_8D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH) \
- PUSH_8(X, DATA_, 1, 1, 0, o, (p), s, X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_8(X, DATA_, 1, 0, o, (p), s, X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_9D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI) \
- PUSH_9(X, DATA_, 1, 1, 0, o, (p), s, X##mI, (dI), \
- X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_9(X, DATA_, 1, 0, o, (p), s, X##mI, (dI), \
+ X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_10D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI,mJ,dJ) \
- PUSH_10(X, DATA_, 1, 1, 0, o, (p), s, X##mJ, (dJ), \
- X##mI, (dI), \
- X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_10(X, DATA_, 1, 0, o, (p), s, X##mJ, (dJ), \
+ X##mI, (dI), \
+ X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
-#define PUSH_1P(X,o,p,s,mA,dp,ds) \
- PUSH_1(X, DATAp, ds, ds, 0, o, (p), s, X##mA, (dp))
-#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \
- PUSH_2(X, DATAp, ds, ds, 0, o, (p), s, X##mB, (dp), \
- X##mA, (dA))
-#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \
- PUSH_3(X, DATAp, ds, ds, 0, o, (p), s, X##mC, (dp), \
- X##mB, (dB), \
- X##mA, (dA))
+#define PUSH_1P(X,o,p,s,mA,dp,ds) \
+ PUSH_1(X, DATAp, ds, 0, o, (p), s, X##mA, (dp))
+#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \
+ PUSH_2(X, DATAp, ds, 0, o, (p), s, X##mB, (dp), \
+ X##mA, (dA))
+#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \
+ PUSH_3(X, DATAp, ds, 0, o, (p), s, X##mC, (dp), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,IMPL,...) IMPL
#define PUSH(A...) PUSH_(A, PUSH_10P, PUSH_10D, \
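The rework drops the separate `c` accumulator: each PUSH_n level now folds the words gathered so far into the count it passes down as `(ds) + (n)`, and the innermost PUSH_1 writes the header with the final total. A deliberately simplified model using plain functions (the real macros token-paste method names and recurse variadically):

static void emit_hdr(unsigned int mthd, unsigned int count);	/* hypothetical */
static void emit_data(unsigned int data);			/* hypothetical */

static void emit1(unsigned int ds, unsigned int n,
		  unsigned int mthd, unsigned int data)
{
	emit_hdr(mthd, ds + n);		/* header carries the final count */
	emit_data(data);
}

static void emit2(unsigned int ds, unsigned int n,
		  unsigned int mA, unsigned int dA,
		  unsigned int mB, unsigned int dB)
{
	emit1(1, ds + n, mA, dA);	/* inner payload 1, carry ds + n */
	emit_data(dB);
}

PUSH_2D passes ds = 1 and n = 0, so the header count comes out as 2, one word per method datum, matching what the old (c)+(n) arithmetic produced with the extra parameter removed.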
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 33dc886d1d6d..2375711877cf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -547,7 +547,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
- int i;
+ int i, j;
if (!ttm_dma)
return;
@@ -556,10 +556,21 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
if (nvbo->force_coherent)
return;
- for (i = 0; i < ttm_dma->num_pages; i++)
+ for (i = 0; i < ttm_dma->num_pages; ++i) {
+ struct page *p = ttm_dma->pages[i];
+ size_t num_pages = 1;
+
+ for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+ if (++p != ttm_dma->pages[j])
+ break;
+
+ ++num_pages;
+ }
dma_sync_single_for_device(drm->dev->dev,
ttm_dma->dma_address[i],
- PAGE_SIZE, DMA_TO_DEVICE);
+ num_pages * PAGE_SIZE, DMA_TO_DEVICE);
+ i += num_pages - 1; /* the loop's ++i completes the advance */
+ }
}
void
@@ -567,7 +578,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
- int i;
+ int i, j;
if (!ttm_dma)
return;
@@ -576,9 +587,21 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
if (nvbo->force_coherent)
return;
- for (i = 0; i < ttm_dma->num_pages; i++)
+ for (i = 0; i < ttm_dma->num_pages; ++i) {
+ struct page *p = ttm_dma->pages[i];
+ size_t num_pages = 1;
+
+ for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+ if (++p != ttm_dma->pages[j])
+ break;
+
+ ++num_pages;
+ }
+
dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
- PAGE_SIZE, DMA_FROM_DEVICE);
+ num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
+ i += num_pages - 1; /* the loop's ++i completes the advance */
+ }
}
void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
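Coalescing runs of consecutive pages into one dma_sync call amortizes the per-call overhead. A generic sketch of the run-length loop; the while-form below advances the index by the run length exactly once, the same accounting the for-loop version above has to restore with its `num_pages - 1` adjustment:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static void sync_pages_for_device(struct device *dev, struct page **pages,
				  dma_addr_t *dma, size_t n)
{
	size_t i = 0;

	while (i < n) {
		size_t run = 1;

		/* extend the run while the struct pages are consecutive */
		while (i + run < n && pages[i] + run == pages[i + run])
			run++;

		dma_sync_single_for_device(dev, dma[i],
					   run * PAGE_SIZE, DMA_TO_DEVICE);
		i += run;	/* step over the whole run, exactly once */
	}
}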
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 4f69e4c3dafd..1c3f890377d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -315,6 +315,10 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
struct drm_nouveau_svm_init *args = data;
int ret;
+ /* We need to fail if svm is disabled */
+ if (!cli->drm->svm)
+ return -ENOSYS;
+
/* Allocate tracking for SVM-enabled VMM. */
if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 69da601f1754..e771bd519ee2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -261,6 +261,9 @@ gk104_fifo_pbdma = {
struct nvkm_engine *
gk104_fifo_id_engine(struct nvkm_fifo *base, int engi)
{
+ if (engi == GK104_FIFO_ENGN_SW)
+ return nvkm_device_engine(base->engine.subdev.device, NVKM_ENGINE_SW, 0);
+
return gk104_fifo(base)->engine[engi].engine;
}
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index bc36aa3c1123..fe5ac3ef9018 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -265,7 +265,8 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 1;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index e4dcaef6c143..fa0a737e9dea 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -320,7 +320,7 @@ dev_put:
return ret;
}
-static int pl111_amba_remove(struct amba_device *amba_dev)
+static void pl111_amba_remove(struct amba_device *amba_dev)
{
struct device *dev = &amba_dev->dev;
struct drm_device *drm = amba_get_drvdata(amba_dev);
@@ -331,8 +331,6 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
drm_panel_bridge_remove(priv->bridge);
drm_dev_put(drm);
of_reserved_mem_device_release(dev);
-
- return 0;
}
/*
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 012bce0cdb65..10738e04c09b 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -328,6 +328,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
head.id = i;
head.flags = 0;
+ head.surface_id = 0;
oldcount = qdev->monitors_config->count;
if (crtc->state->active) {
struct drm_display_mode *mode = &crtc->mode;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index fb5f6a5e81d7..1864467f1063 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -141,7 +141,7 @@ static void qxl_drm_release(struct drm_device *dev)
/*
* TODO: qxl_device_fini() call should be in qxl_pci_remove(),
- * reodering qxl_modeset_fini() + qxl_device_fini() calls is
+ * reordering qxl_modeset_fini() + qxl_device_fini() calls is
* non-trivial though.
*/
qxl_modeset_fini(qdev);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 0fcfc952d5e9..b372455e2729 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -321,7 +321,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
int type, struct qxl_release **release,
struct qxl_bo **rbo)
{
- struct qxl_bo *bo;
+ struct qxl_bo *bo, *free_bo = NULL;
int idr_ret;
int ret = 0;
union qxl_release_info *info;
@@ -347,7 +347,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
mutex_lock(&qdev->release_mutex);
if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
- qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
+ free_bo = qdev->current_release_bo[cur_idx];
qdev->current_release_bo_offset[cur_idx] = 0;
qdev->current_release_bo[cur_idx] = NULL;
}
@@ -355,6 +355,10 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
if (ret) {
mutex_unlock(&qdev->release_mutex);
+ if (free_bo) {
+ qxl_bo_unpin(free_bo);
+ qxl_bo_unref(&free_bo);
+ }
qxl_release_free(qdev, *release);
return ret;
}
@@ -370,6 +374,10 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
*rbo = bo;
mutex_unlock(&qdev->release_mutex);
+ if (free_bo) {
+ qxl_bo_unpin(free_bo);
+ qxl_bo_unref(&free_bo);
+ }
ret = qxl_release_list_add(*release, bo);
qxl_bo_unref(&bo);
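Stashing the stale BO in free_bo and dropping it only after release_mutex is unlocked keeps the potentially heavy final-unref path out of the critical section. The shape of the pattern, reduced to essentials (types are hypothetical):

#include <linux/mutex.h>

struct obj;				/* hypothetical refcounted object */
void obj_put(struct obj *o);		/* hypothetical final unref */

struct cache {
	struct mutex lock;
	struct obj *cur;
};

static void cache_replace(struct cache *c)
{
	struct obj *stale;

	mutex_lock(&c->lock);
	stale = c->cur;		/* detach under the lock, do not free yet */
	c->cur = NULL;
	mutex_unlock(&c->lock);

	if (stale)
		obj_put(stale);	/* heavy teardown runs with the lock dropped */
}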
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 500796dc5d74..33121655d50b 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -205,7 +205,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
continue;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
- if (!ACPI_FAILURE(status)) {
+ if (ACPI_SUCCESS(status)) {
found = true;
break;
}
@@ -218,7 +218,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
continue;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
- if (!ACPI_FAILURE(status)) {
+ if (ACPI_SUCCESS(status)) {
found = true;
break;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 4a2099cb582e..857d97cdc67c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -17,9 +17,20 @@
#define NUM_YUV2YUV_COEFFICIENTS 12
+/* AFBC supports a number of configurable modes. Relevant to us are the block
+ * size (16x16 or 32x8), the storage modifiers (SPARSE, SPLIT), and the YUV-like
+ * colourspace transform (YTR). 16x16 SPARSE mode is always used. SPLIT mode
+ * could be enabled via the hreg_block_split register, but is not currently
+ * handled. The colourspace transform is implicitly always assumed by the
+ * decoder, so consumers must use this transform as well.
+ *
+ * Failure to match modifiers will cause errors displaying AFBC buffers
+ * produced by conformant AFBC producers, including Mesa.
+ */
#define ROCKCHIP_AFBC_MOD \
DRM_FORMAT_MOD_ARM_AFBC( \
AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \
+ | AFBC_FORMAT_MOD_YTR \
)
enum vop_data_format {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 6b9af4c08cd6..9f06dec0fc61 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -672,6 +672,30 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
SUN4I_TCON1_BASIC5_V_SYNC(vsync) |
SUN4I_TCON1_BASIC5_H_SYNC(hsync));
+ /* Set up the polarity of multiple signals */
+ if (tcon->quirks->polarity_in_ch0) {
+ val = 0;
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+
+ regmap_write(tcon->regs, SUN4I_TCON0_IO_POL_REG, val);
+ } else {
+ /* according to the vendor driver, this bit must always be set */
+ val = SUN4I_TCON1_IO_POL_UNKNOWN;
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ val |= SUN4I_TCON1_IO_POL_HSYNC_POSITIVE;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ val |= SUN4I_TCON1_IO_POL_VSYNC_POSITIVE;
+
+ regmap_write(tcon->regs, SUN4I_TCON1_IO_POL_REG, val);
+ }
+
/* Map output pins to channel 1 */
regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
SUN4I_TCON_GCTL_IOMAP_MASK,
@@ -1500,6 +1524,7 @@ static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
.has_channel_1 = true,
+ .polarity_in_ch0 = true,
.set_mux = sun8i_r40_tcon_tv_set_mux,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index c5ac1b02482c..e624f6977eb8 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -154,6 +154,11 @@
#define SUN4I_TCON1_BASIC5_V_SYNC(height) (((height) - 1) & 0x3ff)
#define SUN4I_TCON1_IO_POL_REG 0xf0
+/* there is no documentation about this bit */
+#define SUN4I_TCON1_IO_POL_UNKNOWN BIT(26)
+#define SUN4I_TCON1_IO_POL_HSYNC_POSITIVE BIT(25)
+#define SUN4I_TCON1_IO_POL_VSYNC_POSITIVE BIT(24)
+
#define SUN4I_TCON1_IO_TRI_REG 0xf4
#define SUN4I_TCON_ECC_FIFO_REG 0xf8
@@ -236,6 +241,7 @@ struct sun4i_tcon_quirks {
bool needs_de_be_mux; /* sun6i needs mux to select backend */
bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */
bool supports_lvds; /* Does the TCON support an LVDS output? */
+ bool polarity_in_ch0; /* some tcon1 channels have polarity bits in tcon0 pol register */
u8 dclk_min_div; /* minimum divider for TCON0 DCLK */
/* callback to handle tcon muxing options */
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 92add2cef2e7..bbdfd5e26ec8 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -21,8 +21,7 @@ static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder,
{
struct sun8i_dw_hdmi *hdmi = encoder_to_sun8i_dw_hdmi(encoder);
- if (hdmi->quirks->set_rate)
- clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
+ clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
}
static const struct drm_encoder_helper_funcs
@@ -48,11 +47,9 @@ sun8i_dw_hdmi_mode_valid_h6(struct dw_hdmi *hdmi, void *data,
{
/*
* The controller supports a maximum of 594 MHz, which corresponds to
- * 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
- * 340 MHz scrambling has to be enabled. Because scrambling is
- * not yet implemented, just limit to 340 MHz for now.
+ * 4K@60Hz 4:4:4 or RGB.
*/
- if (mode->clock > 340000)
+ if (mode->clock > 594000)
return MODE_CLOCK_HIGH;
return MODE_OK;
@@ -295,7 +292,6 @@ static int sun8i_dw_hdmi_remove(struct platform_device *pdev)
static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
.mode_valid = sun8i_dw_hdmi_mode_valid_a83t,
- .set_rate = true,
};
static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index d983746fa194..d4b55af0592f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -179,7 +179,6 @@ struct sun8i_dw_hdmi_quirks {
enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data,
const struct drm_display_info *info,
const struct drm_display_mode *mode);
- unsigned int set_rate : 1;
unsigned int use_drm_infoframe : 1;
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 35c2133724e2..9994edf67509 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -104,29 +104,21 @@ static const struct dw_hdmi_mpll_config sun50i_h6_mpll_cfg[] = {
static const struct dw_hdmi_curr_ctrl sun50i_h6_cur_ctr[] = {
/* pixelclk bpp8 bpp10 bpp12 */
- { 25175000, { 0x0000, 0x0000, 0x0000 }, },
{ 27000000, { 0x0012, 0x0000, 0x0000 }, },
- { 59400000, { 0x0008, 0x0008, 0x0008 }, },
- { 72000000, { 0x0008, 0x0008, 0x001b }, },
- { 74250000, { 0x0013, 0x0013, 0x0013 }, },
- { 90000000, { 0x0008, 0x001a, 0x001b }, },
- { 118800000, { 0x001b, 0x001a, 0x001b }, },
- { 144000000, { 0x001b, 0x001a, 0x0034 }, },
- { 180000000, { 0x001b, 0x0033, 0x0034 }, },
- { 216000000, { 0x0036, 0x0033, 0x0034 }, },
- { 237600000, { 0x0036, 0x0033, 0x001b }, },
- { 288000000, { 0x0036, 0x001b, 0x001b }, },
- { 297000000, { 0x0019, 0x001b, 0x0019 }, },
- { 330000000, { 0x0036, 0x001b, 0x001b }, },
- { 594000000, { 0x003f, 0x001b, 0x001b }, },
+ { 74250000, { 0x0013, 0x001a, 0x001b }, },
+ { 148500000, { 0x0019, 0x0033, 0x0034 }, },
+ { 297000000, { 0x0019, 0x001b, 0x001b }, },
+ { 594000000, { 0x0010, 0x001b, 0x001b }, },
{ ~0UL, { 0x0000, 0x0000, 0x0000 }, }
};
static const struct dw_hdmi_phy_config sun50i_h6_phy_config[] = {
/*pixelclk symbol term vlev*/
- { 74250000, 0x8009, 0x0004, 0x0232},
- { 148500000, 0x8029, 0x0004, 0x0273},
- { 594000000, 0x8039, 0x0004, 0x014a},
+ { 27000000, 0x8009, 0x0007, 0x02b0 },
+ { 74250000, 0x8009, 0x0006, 0x022d },
+ { 148500000, 0x8029, 0x0006, 0x0270 },
+ { 297000000, 0x8039, 0x0005, 0x01ab },
+ { 594000000, 0x8029, 0x0000, 0x008a },
{ ~0UL, 0x0000, 0x0000, 0x0000}
};
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index 662bf3a348c9..f5190477de72 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
+ifeq (, $(findstring -W,$(KCFLAGS)))
ccflags-y += -Werror
endif
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 33f65f4626e5..23866a54e3f9 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -83,6 +83,7 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
struct gm12u320_device {
struct drm_device dev;
+ struct device *dmadev;
struct drm_simple_display_pipe pipe;
struct drm_connector conn;
unsigned char *cmd_buf;
@@ -601,6 +602,22 @@ static const uint64_t gm12u320_pipe_modifiers[] = {
DRM_FORMAT_MOD_INVALID
};
+/*
+ * FIXME: Dma-buf sharing requires DMA support by the importing device.
+ * This function is a workaround to make USB devices work as well.
+ * See todo.rst for how to fix the issue in the dma-buf framework.
+ */
+static struct drm_gem_object *gm12u320_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct gm12u320_device *gm12u320 = to_gm12u320(dev);
+
+ if (!gm12u320->dmadev)
+ return ERR_PTR(-ENODEV);
+
+ return drm_gem_prime_import_dev(dev, dma_buf, gm12u320->dmadev);
+}
+
DEFINE_DRM_GEM_FOPS(gm12u320_fops);
static const struct drm_driver gm12u320_drm_driver = {
@@ -614,6 +631,7 @@ static const struct drm_driver gm12u320_drm_driver = {
.fops = &gm12u320_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
+ .gem_prime_import = gm12u320_gem_prime_import,
};
static const struct drm_mode_config_funcs gm12u320_mode_config_funcs = {
@@ -640,15 +658,18 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
struct gm12u320_device, dev);
if (IS_ERR(gm12u320))
return PTR_ERR(gm12u320);
+ dev = &gm12u320->dev;
+
+ gm12u320->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
+ if (!gm12u320->dmadev)
+ drm_warn(dev, "buffer sharing not supported"); /* not an error */
INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
mutex_init(&gm12u320->fb_update.lock);
- dev = &gm12u320->dev;
-
ret = drmm_mode_config_init(dev);
if (ret)
- return ret;
+ goto err_put_device;
dev->mode_config.min_width = GM12U320_USER_WIDTH;
dev->mode_config.max_width = GM12U320_USER_WIDTH;
@@ -658,15 +679,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
ret = gm12u320_usb_alloc(gm12u320);
if (ret)
- return ret;
+ goto err_put_device;
ret = gm12u320_set_ecomode(gm12u320);
if (ret)
- return ret;
+ goto err_put_device;
ret = gm12u320_conn_init(gm12u320);
if (ret)
- return ret;
+ goto err_put_device;
ret = drm_simple_display_pipe_init(&gm12u320->dev,
&gm12u320->pipe,
@@ -676,24 +697,31 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
gm12u320_pipe_modifiers,
&gm12u320->conn);
if (ret)
- return ret;
+ goto err_put_device;
drm_mode_config_reset(dev);
usb_set_intfdata(interface, dev);
ret = drm_dev_register(dev, 0);
if (ret)
- return ret;
+ goto err_put_device;
drm_fbdev_generic_setup(dev, 0);
return 0;
+
+err_put_device:
+ put_device(gm12u320->dmadev);
+ return ret;
}
static void gm12u320_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
+ struct gm12u320_device *gm12u320 = to_gm12u320(dev);
+ put_device(gm12u320->dmadev);
+ gm12u320->dmadev = NULL;
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index b65f4b12f986..101a68dc615b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -136,7 +136,8 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_resource_manager *man;
- dma_resv_assert_held(bo->base.resv);
+ if (!bo->deleted)
+ dma_resv_assert_held(bo->base.resv);
if (bo->pin_count) {
ttm_bo_del_from_lru(bo);
@@ -508,8 +509,11 @@ static void ttm_bo_release(struct kref *kref)
* Make pinned bos immediately available to
* shrinkers, now that they are queued for
* destruction.
+ *
+ * FIXME: QXL is triggering this. Can be removed when the
+ * driver is fixed.
*/
- if (WARN_ON(bo->pin_count)) {
+ if (WARN_ON_ONCE(bo->pin_count)) {
bo->pin_count = 0;
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
}
@@ -959,8 +963,10 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
return ret;
/* move to the bounce domain */
ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
- if (ret)
+ if (ret) {
+ ttm_resource_free(bo, &hop_mem);
return ret;
+ }
return 0;
}
@@ -991,18 +997,19 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* stop and the driver will be called to make
* the second hop.
*/
-bounce:
ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
if (ret)
return ret;
+bounce:
ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
if (ret == -EMULTIHOP) {
ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
if (ret)
- return ret;
+ goto out;
/* try and move to final place now. */
goto bounce;
}
+out:
if (ret)
ttm_resource_free(bo, &mem);
return ret;
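This hunk plugs two leaks in the multihop path: the temporary hop_mem is freed when the bounce move itself fails, and re-entering at the move (rather than re-running placement) together with the shared out: label guarantees the final resource is released on every failure. The control flow, reduced (helpers are hypothetical):

#include <linux/errno.h>

struct bo;				/* hypothetical buffer object */
struct place { int mem_type; };		/* hypothetical resource */

int alloc_space(struct bo *bo, struct place *mem);		/* hypothetical */
int do_move(struct bo *bo, struct place *mem);			/* hypothetical */
int bounce_through_temp(struct bo *bo, struct place *mem);	/* hypothetical */
void free_space(struct bo *bo, struct place *mem);		/* hypothetical */

static int move_buffer(struct bo *bo)
{
	struct place mem;
	int ret;

	ret = alloc_space(bo, &mem);	/* pick the final placement once */
	if (ret)
		return ret;
bounce:
	ret = do_move(bo, &mem);
	if (ret == -EMULTIHOP) {
		ret = bounce_through_temp(bo, &mem);
		if (ret)
			goto out;
		goto bounce;		/* retry the final move */
	}
out:
	if (ret)
		free_space(bo, &mem);	/* never leak the reserved space */
	return ret;
}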
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 11e0313db0ea..4eb6efb8b8c0 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
@@ -84,7 +85,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
* put_page() on a TTM allocated page is illegal.
*/
if (order)
- gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY |
+ gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
__GFP_KSWAPD_RECLAIM;
if (!pool->use_dma_alloc) {
@@ -218,6 +219,15 @@ static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
+ unsigned int i, num_pages = 1 << pt->order;
+
+ for (i = 0; i < num_pages; ++i) {
+ if (PageHighMem(p))
+ clear_highpage(p + i);
+ else
+ clear_page(page_address(p + i));
+ }
+
spin_lock(&pt->lock);
list_add(&p->lru, &pt->pages);
spin_unlock(&pt->lock);
@@ -258,13 +268,13 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
- struct page *p, *tmp;
+ struct page *p;
mutex_lock(&shrinker_lock);
list_del(&pt->shrinker_list);
mutex_unlock(&shrinker_lock);
- list_for_each_entry_safe(p, tmp, &pt->pages, lru)
+ while ((p = ttm_pool_type_take(pt)))
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}
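Draining through ttm_pool_type_take() pops each page under pt->lock, which an unlocked list_for_each_entry_safe() walk cannot do safely once the shrinker may be removing pages from the same list concurrently. The generic shape (the pool type is hypothetical):

#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/spinlock.h>

struct page_pool {			/* hypothetical */
	spinlock_t lock;
	struct list_head pages;
};

void give_back_to_system(struct page *p);	/* hypothetical */

static struct page *pool_take(struct page_pool *pool)
{
	struct page *p;

	spin_lock(&pool->lock);
	p = list_first_entry_or_null(&pool->pages, struct page, lru);
	if (p)
		list_del(&p->lru);
	spin_unlock(&pool->lock);

	return p;
}

static void pool_fini(struct page_pool *pool)
{
	struct page *p;

	/* popping under the lock stays correct even if a shrinker races */
	while ((p = pool_take(pool)))
		give_back_to_system(p);
}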
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 9269092697d8..5703277c6f52 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -32,6 +32,22 @@ static int udl_usb_resume(struct usb_interface *interface)
return drm_mode_config_helper_resume(dev);
}
+/*
+ * FIXME: Dma-buf sharing requires DMA support by the importing device.
+ * This function is a workaround to make USB devices work as well.
+ * See todo.rst for how to fix the issue in the dma-buf framework.
+ */
+static struct drm_gem_object *udl_driver_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct udl_device *udl = to_udl(dev);
+
+ if (!udl->dmadev)
+ return ERR_PTR(-ENODEV);
+
+ return drm_gem_prime_import_dev(dev, dma_buf, udl->dmadev);
+}
+
DEFINE_DRM_GEM_FOPS(udl_driver_fops);
static const struct drm_driver driver = {
@@ -40,6 +56,7 @@ static const struct drm_driver driver = {
/* GEM hooks */
.fops = &udl_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
+ .gem_prime_import = udl_driver_gem_prime_import,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 875e73551ae9..cc16a13316e4 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -50,6 +50,7 @@ struct urb_list {
struct udl_device {
struct drm_device drm;
struct device *dev;
+ struct device *dmadev;
struct drm_simple_display_pipe display_pipe;
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 0e2a376cb075..853f147036f6 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -315,6 +315,10 @@ int udl_init(struct udl_device *udl)
DRM_DEBUG("\n");
+ udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
+ if (!udl->dmadev)
+ drm_warn(dev, "buffer sharing not supported"); /* not an error */
+
mutex_init(&udl->gem_lock);
if (!udl_parse_vendor_descriptor(udl)) {
@@ -343,12 +347,18 @@ int udl_init(struct udl_device *udl)
err:
if (udl->urbs.count)
udl_free_urb_list(dev);
+ put_device(udl->dmadev);
DRM_ERROR("%d\n", ret);
return ret;
}
int udl_drop_usb(struct drm_device *dev)
{
+ struct udl_device *udl = to_udl(dev);
+
udl_free_urb_list(dev);
+ put_device(udl->dmadev);
+ udl->dmadev = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 2b3a597fa65f..c239045e05d6 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -622,11 +622,11 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
* for now we just allocate globally.
*/
if (!hvs->hvs5)
- /* 96kB */
- drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024);
+ /* 48k words of 2x12-bit pixels */
+ drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
else
- /* 70k words */
- drm_mm_init(&hvs->lbm_mm, 0, 70 * 2 * 1024);
+ /* 60k words of 4x12-bit pixels */
+ drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);
/* Upload filter kernels. We only have the one for now, so we
* keep it around for the lifetime of the driver.
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 6bd8260aa9f2..7322169c0682 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -220,7 +220,7 @@ static void vc4_plane_reset(struct drm_plane *plane)
__drm_atomic_helper_plane_reset(plane, &vc4_state->base);
}
-static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
+static void vc4_dlist_counter_increment(struct vc4_plane_state *vc4_state)
{
if (vc4_state->dlist_count == vc4_state->dlist_size) {
u32 new_size = max(4u, vc4_state->dlist_count * 2);
@@ -235,7 +235,15 @@ static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
vc4_state->dlist_size = new_size;
}
- vc4_state->dlist[vc4_state->dlist_count++] = val;
+ vc4_state->dlist_count++;
+}
+
+static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
+{
+ unsigned int idx = vc4_state->dlist_count;
+
+ vc4_dlist_counter_increment(vc4_state);
+ vc4_state->dlist[idx] = val;
}
/* Returns the scl0/scl1 field based on whether the dimensions need to
@@ -437,6 +445,7 @@ static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
static u32 vc4_lbm_size(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
u32 pix_per_line;
u32 lbm;
@@ -472,7 +481,11 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
lbm = pix_per_line * 16;
}
- lbm = roundup(lbm, 32);
+ /* Align it to 64 or 128 (hvs5) bytes */
+ lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64);
+
+ /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
+ lbm /= vc4->hvs->hvs5 ? 4 : 2;
return lbm;
}
@@ -912,9 +925,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
if (!vc4_state->is_unity) {
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(vc4_state->crtc_w,
- SCALER_POS1_SCL_WIDTH) |
+ SCALER5_POS1_SCL_WIDTH) |
VC4_SET_FIELD(vc4_state->crtc_h,
- SCALER_POS1_SCL_HEIGHT));
+ SCALER5_POS1_SCL_HEIGHT));
}
/* Position Word 2: Source Image Size */
@@ -973,8 +986,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
* be set when calling vc4_plane_allocate_lbm().
*/
if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
- vc4_state->y_scaling[1] != VC4_SCALING_NONE)
- vc4_state->lbm_offset = vc4_state->dlist_count++;
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+ vc4_state->lbm_offset = vc4_state->dlist_count;
+ vc4_dlist_counter_increment(vc4_state);
+ }
if (num_planes > 1) {
/* Emit Cb/Cr as channel 0 and Y as channel
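vc4_dlist_counter_increment() grows the backing array before the caller records the index, so a reserved slot such as lbm_offset can never point one past storage that the next write would have had to reallocate. The reserve-then-fill idiom, sketched with simplified error handling:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

struct dlist {
	u32 *entries;
	u32 count, size;
};

static bool dlist_reserve(struct dlist *d)
{
	if (d->count == d->size) {
		u32 new_size = max(4u, d->count * 2);
		u32 *tmp = krealloc(d->entries, new_size * sizeof(*tmp),
				    GFP_KERNEL);

		if (!tmp)
			return false;
		d->entries = tmp;
		d->size = new_size;
	}
	d->count++;	/* the slot at the old count is now reserved */
	return true;
}

static bool dlist_write(struct dlist *d, u32 val)
{
	u32 idx = d->count;

	if (!dlist_reserve(d))
		return false;
	d->entries[idx] = val;	/* reserved first, so always in bounds */
	return true;
}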
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index c685d94409b0..148add0ca1d6 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -1396,19 +1396,11 @@ static void zynqmp_disp_enable(struct zynqmp_disp *disp)
*/
static void zynqmp_disp_disable(struct zynqmp_disp *disp)
{
- struct drm_crtc *crtc = &disp->crtc;
-
zynqmp_disp_audio_disable(&disp->audio);
zynqmp_disp_avbuf_disable_audio(&disp->avbuf);
zynqmp_disp_avbuf_disable_channels(&disp->avbuf);
zynqmp_disp_avbuf_disable(&disp->avbuf);
-
- /* Mark the flip is done as crtc is disabled anyway */
- if (crtc->state->event) {
- complete_all(crtc->state->event->base.completion);
- crtc->state->event = NULL;
- }
}
static inline struct zynqmp_disp *crtc_to_disp(struct drm_crtc *crtc)
@@ -1499,6 +1491,13 @@ zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(&disp->crtc);
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
clk_disable_unprepare(disp->pclk);
pm_runtime_put_sync(disp->dev);
}