author		Daniel Vetter <daniel.vetter@ffwll.ch>	2017-07-26 13:23:10 +0200
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2017-07-26 13:43:33 +0200
commit		af055598542670c8533a58582813b1419949cae0 (patch)
tree		901fa1bf635d5c1e91d08f9f4c2e4943516dbb71 /drivers/gpu
parent		9f15a4ab19ab33658dbc9fd37be5210e8c1ac622 (diff)
parent		2d62c799f8ffac4f7ffba6a4e7f148827dfc24c7 (diff)
Merge airlied/drm-next into drm-misc-next
I need this to be able to apply the deferred fbdev setup patches: the relevant prep work landed through the drm-intel tree. Also squash in a conflict fixup from Laurent Pinchart.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h | 8
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 6
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 17
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 8
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 19
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 4
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 4
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 100
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 81
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 16
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 9
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 218
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h | 3
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 12
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 12
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/soc15.c | 3
-rw-r--r--	drivers/gpu/drm/amd/amdkfd/kfd_process.c | 7
-rw-r--r--	drivers/gpu/drm/amd/include/amd_shared.h | 1
-rw-r--r--	drivers/gpu/drm/amd/include/vi_structs.h | 268
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 23
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h | 9
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c | 42
-rw-r--r--	drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h | 4
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | 2
-rw-r--r--	drivers/gpu/drm/armada/armada_fb.c | 2
-rw-r--r--	drivers/gpu/drm/armada/armada_gem.c | 5
-rw-r--r--	drivers/gpu/drm/armada/armada_gem.h | 1
-rw-r--r--	drivers/gpu/drm/bridge/adv7511/adv7511_audio.c | 22
-rw-r--r--	drivers/gpu/drm/bridge/panel.c | 2
-rw-r--r--	drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c | 21
-rw-r--r--	drivers/gpu/drm/drm_bufs.c | 116
-rw-r--r--	drivers/gpu/drm/drm_dp_aux_dev.c | 109
-rw-r--r--	drivers/gpu/drm/drm_framebuffer.c | 1
-rw-r--r--	drivers/gpu/drm/drm_internal.h | 4
-rw-r--r--	drivers/gpu/drm/drm_ioc32.c | 753
-rw-r--r--	drivers/gpu/drm/drm_ioctl.c | 48
-rw-r--r--	drivers/gpu/drm/drm_legacy.h | 7
-rw-r--r--	drivers/gpu/drm/drm_syncobj.c | 8
-rw-r--r--	drivers/gpu/drm/drm_vblank.c | 2
-rw-r--r--	drivers/gpu/drm/etnaviv/common.xml.h | 150
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_drv.c | 1
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_drv.h | 1
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gem.c | 24
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gem.h | 3
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 7
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 2
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 30
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 2
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_fb.c | 4
-rw-r--r--	drivers/gpu/drm/i915/Kconfig | 1
-rw-r--r--	drivers/gpu/drm/i915/gvt/cmd_parser.c | 10
-rw-r--r--	drivers/gpu/drm/i915/gvt/display.c | 22
-rw-r--r--	drivers/gpu/drm/i915/gvt/gtt.c | 2
-rw-r--r--	drivers/gpu/drm/i915/gvt/handlers.c | 37
-rw-r--r--	drivers/gpu/drm/i915/gvt/kvmgt.c | 14
-rw-r--r--	drivers/gpu/drm/i915/gvt/scheduler.c | 30
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.c | 111
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c | 139
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h | 184
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c | 168
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_context.c | 186
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_context.h | 34
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c | 62
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c | 66
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.h | 3
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_request.c | 7
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_request.h | 4
-rw-r--r--	drivers/gpu/drm/i915/i915_gpu_error.c | 3
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c | 159
-rw-r--r--	drivers/gpu/drm/i915/i915_params.c | 18
-rw-r--r--	drivers/gpu/drm/i915/i915_params.h | 7
-rw-r--r--	drivers/gpu/drm/i915/i915_pci.c | 6
-rw-r--r--	drivers/gpu/drm/i915/i915_perf.c | 18
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h | 15
-rw-r--r--	drivers/gpu/drm/i915/i915_sw_fence.c | 35
-rw-r--r--	drivers/gpu/drm/i915/i915_sw_fence.h | 2
-rw-r--r--	drivers/gpu/drm/i915/i915_sysfs.c | 10
-rw-r--r--	drivers/gpu/drm/i915/i915_vma.c | 14
-rw-r--r--	drivers/gpu/drm/i915/intel_acpi.c | 14
-rw-r--r--	drivers/gpu/drm/i915/intel_atomic_plane.c | 15
-rw-r--r--	drivers/gpu/drm/i915/intel_bios.c | 9
-rw-r--r--	drivers/gpu/drm/i915/intel_cdclk.c | 20
-rw-r--r--	drivers/gpu/drm/i915/intel_color.c | 2
-rw-r--r--	drivers/gpu/drm/i915/intel_ddi.c | 123
-rw-r--r--	drivers/gpu/drm/i915/intel_device_info.c | 2
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c | 33
-rw-r--r--	drivers/gpu/drm/i915/intel_dp.c | 108
-rw-r--r--	drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 185
-rw-r--r--	drivers/gpu/drm/i915/intel_drv.h | 5
-rw-r--r--	drivers/gpu/drm/i915/intel_engine_cs.c | 52
-rw-r--r--	drivers/gpu/drm/i915/intel_fbdev.c | 11
-rw-r--r--	drivers/gpu/drm/i915/intel_lrc.c | 2
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c | 148
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c | 5
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h | 1
-rw-r--r--	drivers/gpu/drm/i915/intel_runtime_pm.c | 139
-rw-r--r--	drivers/gpu/drm/i915/intel_sdvo.c | 2
-rw-r--r--	drivers/gpu/drm/i915/intel_sprite.c | 2
-rw-r--r--	drivers/gpu/drm/i915/intel_uncore.c | 15
-rw-r--r--	drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c | 6
-rw-r--r--	drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 10
-rw-r--r--	drivers/gpu/drm/i915/selftests/i915_vma.c | 8
-rw-r--r--	drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 159
-rw-r--r--	drivers/gpu/drm/i915/selftests/mock_context.c | 11
-rw-r--r--	drivers/gpu/drm/i915/selftests/mock_context.h | 2
-rw-r--r--	drivers/gpu/drm/i915/selftests/mock_gem_device.c | 5
-rw-r--r--	drivers/gpu/drm/i915/selftests/mock_gtt.c | 3
-rw-r--r--	drivers/gpu/drm/mediatek/Makefile | 3
-rw-r--r--	drivers/gpu/drm/mediatek/mtk_disp_color.c | 176
-rw-r--r--	drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 7
-rw-r--r--	drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 2
-rw-r--r--	drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 80
-rw-r--r--	drivers/gpu/drm/mediatek/mtk_drm_drv.c | 33
-rw-r--r--	drivers/gpu/drm/mediatek/mtk_drm_drv.h | 1
-rw-r--r--	drivers/gpu/drm/mediatek/mtk_drm_plane.c | 2
-rw-r--r--	drivers/gpu/drm/mediatek/mtk_dsi.c | 2
-rw-r--r--	drivers/gpu/drm/mediatek/mtk_hdmi.c | 27
-rw-r--r--	drivers/gpu/drm/mga/mga_drv.h | 2
-rw-r--r--	drivers/gpu/drm/mga/mga_ioc32.c | 149
-rw-r--r--	drivers/gpu/drm/mga/mga_state.c | 2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_acpi.c | 20
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c | 9
-rw-r--r--	drivers/gpu/drm/radeon/Makefile | 1
-rw-r--r--	drivers/gpu/drm/radeon/atombios_encoders.c | 13
-rw-r--r--	drivers/gpu/drm/radeon/radeon.h | 2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_device.c | 11
-rw-r--r--	drivers/gpu/drm/radeon/radeon_drv.c | 18
-rw-r--r--	drivers/gpu/drm/radeon/radeon_fence.c | 2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ioc32.c | 424
-rw-r--r--	drivers/gpu/drm/rockchip/cdn-dp-core.c | 2
-rw-r--r--	drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 2
-rw-r--r--	drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 5
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c | 4
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 1
-rw-r--r--	drivers/gpu/vga/vgaarb.c | 2
138 files changed, 3366 insertions(+), 2319 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 12d61edb3597..ff7bf1a9f967 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1028,12 +1028,15 @@ struct amdgpu_gfx_config {
};
struct amdgpu_cu_info {
- uint32_t number; /* total active CU number */
- uint32_t ao_cu_mask;
uint32_t max_waves_per_simd;
uint32_t wave_front_size;
uint32_t max_scratch_slots_per_cu;
uint32_t lds_size;
+
+ /* total active CU number */
+ uint32_t number;
+ uint32_t ao_cu_mask;
+ uint32_t ao_cu_bitmap[4][4];
uint32_t bitmap[4][4];
};
@@ -1924,7 +1927,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_need_post(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
-int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index c6dba1eaefbd..c0a806280257 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -838,6 +838,12 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
return -EINVAL;
mode_info = info->mode_info;
+ if (mode_info) {
+ /* if the displays are off, vblank time is max */
+ mode_info->vblank_time_us = 0xffffffff;
+ /* always set the reference clock */
+ mode_info->ref_clock = adev->clock.spll.reference_freq;
+ }
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index aeee6840e82b..5599c01b265d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -64,7 +64,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
return 0;
}
-int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
+static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
@@ -497,7 +497,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
&e->user_invalidated) && e->user_pages) {
/* We acquired a page array, but somebody
- * invalidated it. Free it an try again
+ * invalidated it. Free it and try again
*/
release_pages(e->user_pages,
e->robj->tbo.ttm->num_pages,
@@ -1069,10 +1069,8 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
int i;
- for (i = 0; i < p->num_post_dep_syncobjs; ++i) {
- drm_syncobj_replace_fence(p->filp, p->post_dep_syncobjs[i],
- p->fence);
- }
+ for (i = 0; i < p->num_post_dep_syncobjs; ++i)
+ drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
}
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b2c960b2ea82..4a8fc15467cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1162,16 +1162,12 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
return;
if (state == VGA_SWITCHEROO_ON) {
- unsigned d3_delay = dev->pdev->d3_delay;
-
pr_info("amdgpu: switched on\n");
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_resume(dev, true, true);
- dev->pdev->d3_delay = d3_delay;
-
dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
} else {
@@ -3804,7 +3800,7 @@ int amdgpu_debugfs_init(struct drm_minor *minor)
return 0;
}
#else
-static int amdgpu_debugfs_test_ib_init(struct amdgpu_device *adev)
+static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0148dd32e561..469992470953 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -67,9 +67,10 @@
* - 3.15.0 - Export more gpu info for gfx9
* - 3.16.0 - Add reserved vmid support
* - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
+ * - 3.18.0 - Export gpu always on cu bitmap
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 17
+#define KMS_DRIVER_MINOR 18
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -247,14 +248,28 @@ MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 =
module_param_named(lbpw, amdgpu_lbpw, int, 0444);
#ifdef CONFIG_DRM_AMDGPU_SI
+
+#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
int amdgpu_si_support = 0;
MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
+#else
+int amdgpu_si_support = 1;
+MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)");
+#endif
+
module_param_named(si_support, amdgpu_si_support, int, 0444);
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
+
+#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
int amdgpu_cik_support = 0;
MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
+#else
+int amdgpu_cik_support = 1;
+MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)");
+#endif
+
module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif
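
The paired defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE) test above covers both built-in (=y) and modular (=m) radeon. As a sketch (not what the patch itself uses), the kernel's IS_ENABLED() macro expresses the same default selection more compactly:

    #if IS_ENABLED(CONFIG_DRM_RADEON)   /* true for =y and =m alike */
    int amdgpu_si_support = 0;          /* radeon is present, leave SI to it */
    #else
    int amdgpu_si_support = 1;          /* no radeon, amdgpu claims SI by default */
    #endif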
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 12497a40ef92..b0b23101d1c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -594,6 +594,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.cu_active_number = adev->gfx.cu_info.number;
dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
dev_info.ce_ram_size = adev->gfx.ce_ram_size;
+ memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
+ sizeof(adev->gfx.cu_info.ao_cu_bitmap));
memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
sizeof(adev->gfx.cu_info.bitmap));
dev_info.vram_type = adev->mc.vram_type;
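
A note on the two memcpy() calls above: taking sizeof on the kernel-side source array ties the copy length to the [4][4] bitmap dimensions instead of a hand-written byte count. A minimal sketch of the idiom, assuming the destination is at least as large as the source:

    uint32_t src[4][4], dst[4][4];
    /* sizeof(src) is the whole array: 4 * 4 * sizeof(uint32_t) bytes */
    memcpy(dst, src, sizeof(src));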
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 72c03c744594..b7e1c026c0c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -188,6 +188,9 @@ static int amdgpu_pp_hw_fini(void *handle)
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->pp_enabled && adev->pm.dpm_enabled)
+ amdgpu_pm_sysfs_fini(adev);
+
if (adev->powerplay.ip_funcs->hw_fini)
ret = adev->powerplay.ip_funcs->hw_fini(
adev->powerplay.pp_handle);
@@ -206,10 +209,9 @@ static void amdgpu_pp_late_fini(void *handle)
adev->powerplay.ip_funcs->late_fini(
adev->powerplay.pp_handle);
- if (adev->pp_enabled && adev->pm.dpm_enabled)
- amdgpu_pm_sysfs_fini(adev);
- amd_powerplay_destroy(adev->powerplay.pp_handle);
+ if (adev->pp_enabled)
+ amd_powerplay_destroy(adev->powerplay.pp_handle);
}
static int amdgpu_pp_suspend(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index c224c5caba5b..4083be61b328 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -152,8 +152,8 @@ static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd,
uint64_t tmr_mc, uint32_t size)
{
cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
- cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = (uint32_t)tmr_mc;
- cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = (uint32_t)(tmr_mc >> 32);
+ cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
+ cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_size = size;
}
@@ -333,14 +333,11 @@ static int psp_load_fw(struct amdgpu_device *adev)
{
int ret;
struct psp_context *psp = &adev->psp;
- struct psp_gfx_cmd_resp *cmd;
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
+ psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!psp->cmd)
return -ENOMEM;
- psp->cmd = cmd;
-
ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
AMDGPU_GEM_DOMAIN_GTT,
&psp->fw_pri_bo,
@@ -379,8 +376,6 @@ static int psp_load_fw(struct amdgpu_device *adev)
if (ret)
goto failed_mem;
- kfree(cmd);
-
return 0;
failed_mem:
@@ -390,7 +385,8 @@ failed_mem1:
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
failed:
- kfree(cmd);
+ kfree(psp->cmd);
+ psp->cmd = NULL;
return ret;
}
@@ -450,6 +446,9 @@ static int psp_hw_fini(void *handle)
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
+ kfree(psp->cmd);
+ psp->cmd = NULL;
+
return 0;
}
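
For reference, lower_32_bits()/upper_32_bits(), which replace the open-coded casts here and in the PSP v3.1/v10 changes below, are defined in <linux/kernel.h> roughly as:

    #define lower_32_bits(n) ((u32)(n))
    /* the double 16-bit shift avoids undefined behaviour when n is itself
     * a 32-bit type (a plain >> 32 on a 32-bit value is UB in C) */
    #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))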
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 7b0b3cf16334..5173ca1fd159 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3535,7 +3535,9 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
mask <<= 1;
}
active_cu_number += counter;
- ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ if (i < 2 && j < 2)
+ ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index ec754288f146..37b45e4403d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -5427,7 +5427,9 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
mask <<= 1;
}
active_cu_number += counter;
- ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ if (i < 2 && j < 2)
+ ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
}
gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 142924212b43..aa5a50f5eac8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -40,7 +40,6 @@
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
-
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
@@ -2100,7 +2099,7 @@ static int gfx_v8_0_sw_init(void *handle)
return r;
/* create MQD for all compute queues as well as KIQ for SRIOV case */
- r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct vi_mqd));
+ r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation));
if (r)
return r;
@@ -4637,56 +4636,6 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
return r;
}
-static int gfx_v8_0_kiq_kcq_disable(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
- uint32_t scratch, tmp = 0;
- int r, i;
-
- r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("Failed to get scratch reg (%d).\n", r);
- return r;
- }
- WREG32(scratch, 0xCAFEDEAD);
-
- r = amdgpu_ring_alloc(kiq_ring, 6 + 3);
- if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
- }
- /* unmap queues */
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
- amdgpu_ring_write(kiq_ring,
- PACKET3_UNMAP_QUEUES_ACTION(1)| /* RESET_QUEUES */
- PACKET3_UNMAP_QUEUES_QUEUE_SEL(2)); /* select all queues */
- amdgpu_ring_write(kiq_ring, 0);
- amdgpu_ring_write(kiq_ring, 0);
- amdgpu_ring_write(kiq_ring, 0);
- amdgpu_ring_write(kiq_ring, 0);
- /* write to scratch for completion */
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
- amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
- amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
- amdgpu_ring_commit(kiq_ring);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i >= adev->usec_timeout) {
- DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
- r = -EINVAL;
- }
- amdgpu_gfx_scratch_free(adev, scratch);
-
- return r;
-}
-
static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
{
int i, r = 0;
@@ -4715,9 +4664,6 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
uint32_t tmp;
- /* init the mqd struct */
- memset(mqd, 0, sizeof(struct vi_mqd));
-
mqd->header = 0xC0310800;
mqd->compute_pipelinestat_enable = 0x00000001;
mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
@@ -4725,7 +4671,12 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
mqd->compute_misc_reserved = 0x00000003;
-
+ if (!(adev->flags & AMD_IS_APU)) {
+ mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct vi_mqd_allocation, dyamic_cu_mask));
+ mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct vi_mqd_allocation, dyamic_cu_mask));
+ }
eop_base_addr = ring->eop_gpu_addr >> 8;
mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
@@ -4890,7 +4841,6 @@ int gfx_v8_0_mqd_commit(struct amdgpu_device *adev,
static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
{
- int r = 0;
struct amdgpu_device *adev = ring->adev;
struct vi_mqd *mqd = ring->mqd_ptr;
int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
@@ -4900,44 +4850,32 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.in_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
amdgpu_ring_clear_ring(ring);
mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
- r = gfx_v8_0_deactivate_hqd(adev, 1);
- if (r) {
- dev_err(adev->dev, "failed to deactivate ring %s\n", ring->name);
- goto out_unlock;
- }
gfx_v8_0_mqd_commit(adev, mqd);
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
} else {
+ memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
+ ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF;
+ ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v8_0_mqd_init(ring);
- r = gfx_v8_0_deactivate_hqd(adev, 1);
- if (r) {
- dev_err(adev->dev, "failed to deactivate ring %s\n", ring->name);
- goto out_unlock;
- }
gfx_v8_0_mqd_commit(adev, mqd);
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
}
- return r;
-
-out_unlock:
- vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- return r;
+ return 0;
}
static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
@@ -4947,6 +4885,9 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
int mqd_idx = ring - &adev->gfx.compute_ring[0];
if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
+ memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
+ ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF;
+ ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v8_0_mqd_init(ring);
@@ -4954,11 +4895,11 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
} else if (adev->gfx.in_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
amdgpu_ring_clear_ring(ring);
@@ -5138,7 +5079,6 @@ static int gfx_v8_0_hw_fini(void *handle)
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
}
- gfx_v8_0_kiq_kcq_disable(adev);
gfx_v8_0_cp_enable(adev, false);
gfx_v8_0_rlc_stop(adev);
@@ -7080,7 +7020,9 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
mask <<= 1;
}
active_cu_number += counter;
- ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ if (i < 2 && j < 2)
+ ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
}
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
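
The switch from sizeof(*mqd) to sizeof(struct vi_mqd_allocation) matters because the backup and restore copies must now cover the dynamic CU/RB mask words that live past the end of struct vi_mqd. A sketch of the address arithmetic used in gfx_v8_0_mqd_init() above, assuming ring->mqd_gpu_addr points at the start of the allocation:

    uint64_t mask_addr = ring->mqd_gpu_addr +
            offsetof(struct vi_mqd_allocation, dyamic_cu_mask);
    /* split the 64-bit GPU address into the two 32-bit MQD fields */
    mqd->dynamic_cu_mask_addr_lo = lower_32_bits(mask_addr);
    mqd->dynamic_cu_mask_addr_hi = upper_32_bits(mask_addr);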
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index ba228f613027..3a0b69b09ed6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1964,8 +1964,8 @@ static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
}
-void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
- bool enable)
+static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t data, default_data;
@@ -1978,7 +1978,7 @@ void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
-void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
+static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t data, default_data;
@@ -2502,56 +2502,6 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
return r;
}
-static int gfx_v9_0_kiq_kcq_disable(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
- uint32_t scratch, tmp = 0;
- int r, i;
-
- r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("Failed to get scratch reg (%d).\n", r);
- return r;
- }
- WREG32(scratch, 0xCAFEDEAD);
-
- r = amdgpu_ring_alloc(kiq_ring, 6 + 3);
- if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
- }
- /* unmap queues */
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
- amdgpu_ring_write(kiq_ring,
- PACKET3_UNMAP_QUEUES_ACTION(1)| /* RESET_QUEUES */
- PACKET3_UNMAP_QUEUES_QUEUE_SEL(2)); /* select all queues */
- amdgpu_ring_write(kiq_ring, 0);
- amdgpu_ring_write(kiq_ring, 0);
- amdgpu_ring_write(kiq_ring, 0);
- amdgpu_ring_write(kiq_ring, 0);
- /* write to scratch for completion */
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
- amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
- amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
- amdgpu_ring_commit(kiq_ring);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i >= adev->usec_timeout) {
- DRM_ERROR("KCQ disable failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
- r = -EINVAL;
- }
- amdgpu_gfx_scratch_free(adev, scratch);
-
- return r;
-}
-
static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -2996,7 +2946,6 @@ static int gfx_v9_0_hw_fini(void *handle)
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
}
- gfx_v9_0_kiq_kcq_disable(adev);
gfx_v9_0_cp_enable(adev, false);
gfx_v9_0_rlc_stop(adev);
@@ -4416,6 +4365,20 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
}
}
+static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+ u32 bitmap)
+{
+ u32 data;
+
+ if (!bitmap)
+ return;
+
+ data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+
+ WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
+
static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
@@ -4436,10 +4399,13 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
{
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+ unsigned disable_masks[4 * 2];
if (!adev || !cu_info)
return -EINVAL;
+ amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
@@ -4447,6 +4413,9 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
ao_bitmap = 0;
counter = 0;
gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
+ if (i < 4 && j < 2)
+ gfx_v9_0_set_user_cu_inactive_bitmap(
+ adev, disable_masks[i * 2 + j]);
bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
cu_info->bitmap[i][j] = bitmap;
@@ -4459,7 +4428,9 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
mask <<= 1;
}
active_cu_number += counter;
- ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ if (i < 2 && j < 2)
+ ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
}
gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
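
The new i < 2 && j < 2 guard (repeated in the gfx v6/v7/v8 hunks above) exists because ao_cu_mask is a single 32-bit word packed at 8 bits per (SE, SH) slot, so the shift i * 16 + j * 8 only stays inside the word for the first two shader engines and shader arrays:

    /* valid shifts: (0,0)->0, (0,1)->8, (1,0)->16, (1,1)->24; larger i or j
     * would shift past bit 31, so those slots are recorded only in the
     * per-slot ao_cu_bitmap[i][j] array */
    if (i < 2 && j < 2)
            ao_cu_mask |= ao_bitmap << (i * 16 + j * 8);
    cu_info->ao_cu_bitmap[i][j] = ao_bitmap;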
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index ce68d609b619..d0214d942bfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -794,14 +794,6 @@ static int gmc_v6_0_early_init(void *handle)
gmc_v6_0_set_gart_funcs(adev);
gmc_v6_0_set_irq_funcs(adev);
- if (adev->flags & AMD_IS_APU) {
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- } else {
- u32 tmp = RREG32(mmMC_SEQ_MISC0);
- tmp &= MC_SEQ_MISC0__MT__MASK;
- adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
- }
-
return 0;
}
@@ -821,6 +813,14 @@ static int gmc_v6_0_sw_init(void *handle)
int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ } else {
+ u32 tmp = RREG32(mmMC_SEQ_MISC0);
+ tmp &= MC_SEQ_MISC0__MT__MASK;
+ adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
+ }
+
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 68172aace3ee..175ba5f9691c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -695,6 +695,15 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
else
nbio_v6_1_hdp_flush(adev);
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ mmhub_v1_0_initialize_power_gating(adev);
+ mmhub_v1_0_update_power_gating(adev, true);
+ break;
+ default:
+ break;
+ }
+
r = gfxhub_v1_0_gart_enable(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index f50b5a77f45a..9804318f3488 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -244,6 +244,224 @@ static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
}
}
+struct pctl_data {
+ uint32_t index;
+ uint32_t data;
+};
+
+const struct pctl_data pctl0_data[] = {
+ {0x0, 0x7a640},
+ {0x9, 0x2a64a},
+ {0xd, 0x2a680},
+ {0x11, 0x6a684},
+ {0x19, 0xea68e},
+ {0x29, 0xa69e},
+ {0x2b, 0x34a6c0},
+ {0x61, 0x83a707},
+ {0xe6, 0x8a7a4},
+ {0xf0, 0x1a7b8},
+ {0xf3, 0xfa7cc},
+ {0x104, 0x17a7dd},
+ {0x11d, 0xa7dc},
+ {0x11f, 0x12a7f5},
+ {0x133, 0xa808},
+ {0x135, 0x12a810},
+ {0x149, 0x7a82c}
+};
+#define PCTL0_DATA_LEN (sizeof(pctl0_data)/sizeof(pctl0_data[0]))
+
+#define PCTL0_RENG_EXEC_END_PTR 0x151
+#define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640
+#define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833
+
+const struct pctl_data pctl1_data[] = {
+ {0x0, 0x39a000},
+ {0x3b, 0x44a040},
+ {0x81, 0x2a08d},
+ {0x85, 0x6ba094},
+ {0xf2, 0x18a100},
+ {0x10c, 0x4a132},
+ {0x112, 0xca141},
+ {0x120, 0x2fa158},
+ {0x151, 0x17a1d0},
+ {0x16a, 0x1a1e9},
+ {0x16d, 0x13a1ec},
+ {0x182, 0x7a201},
+ {0x18b, 0x3a20a},
+ {0x190, 0x7a580},
+ {0x199, 0xa590},
+ {0x19b, 0x4a594},
+ {0x1a1, 0x1a59c},
+ {0x1a4, 0x7a82c},
+ {0x1ad, 0xfa7cc},
+ {0x1be, 0x17a7dd},
+ {0x1d7, 0x12a810}
+};
+#define PCTL1_DATA_LEN (sizeof(pctl1_data)/sizeof(pctl1_data[0]))
+
+#define PCTL1_RENG_EXEC_END_PTR 0x1ea
+#define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000
+#define PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa20d
+#define PCTL1_STCTRL_REG_SAVE_RANGE1_BASE 0xa580
+#define PCTL1_STCTRL_REG_SAVE_RANGE1_LIMIT 0xa59d
+#define PCTL1_STCTRL_REG_SAVE_RANGE2_BASE 0xa82c
+#define PCTL1_STCTRL_REG_SAVE_RANGE2_LIMIT 0xa833
+
+static void mmhub_v1_0_power_gating_write_save_ranges(struct amdgpu_device *adev)
+{
+ uint32_t tmp = 0;
+
+ /* PCTL0_STCTRL_REGISTER_SAVE_RANGE0 */
+ tmp = REG_SET_FIELD(tmp, PCTL0_STCTRL_REGISTER_SAVE_RANGE0,
+ STCTRL_REGISTER_SAVE_BASE,
+ PCTL0_STCTRL_REG_SAVE_RANGE0_BASE);
+ tmp = REG_SET_FIELD(tmp, PCTL0_STCTRL_REGISTER_SAVE_RANGE0,
+ STCTRL_REGISTER_SAVE_LIMIT,
+ PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT);
+ WREG32_SOC15(MMHUB, 0, mmPCTL0_STCTRL_REGISTER_SAVE_RANGE0, tmp);
+
+ /* PCTL1_STCTRL_REGISTER_SAVE_RANGE0 */
+ tmp = 0;
+ tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE0,
+ STCTRL_REGISTER_SAVE_BASE,
+ PCTL1_STCTRL_REG_SAVE_RANGE0_BASE);
+ tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE0,
+ STCTRL_REGISTER_SAVE_LIMIT,
+ PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT);
+ WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE0, tmp);
+
+ /* PCTL1_STCTRL_REGISTER_SAVE_RANGE1 */
+ tmp = 0;
+ tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE1,
+ STCTRL_REGISTER_SAVE_BASE,
+ PCTL1_STCTRL_REG_SAVE_RANGE1_BASE);
+ tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE1,
+ STCTRL_REGISTER_SAVE_LIMIT,
+ PCTL1_STCTRL_REG_SAVE_RANGE1_LIMIT);
+ WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE1, tmp);
+
+ /* PCTL1_STCTRL_REGISTER_SAVE_RANGE2 */
+ tmp = 0;
+ tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE2,
+ STCTRL_REGISTER_SAVE_BASE,
+ PCTL1_STCTRL_REG_SAVE_RANGE2_BASE);
+ tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE2,
+ STCTRL_REGISTER_SAVE_LIMIT,
+ PCTL1_STCTRL_REG_SAVE_RANGE2_LIMIT);
+ WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE2, tmp);
+}
+
+void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev)
+{
+ uint32_t pctl0_misc = 0;
+ uint32_t pctl0_reng_execute = 0;
+ uint32_t pctl1_misc = 0;
+ uint32_t pctl1_reng_execute = 0;
+ int i = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ pctl0_misc = RREG32_SOC15(MMHUB, 0, mmPCTL0_MISC);
+ pctl0_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE);
+ pctl1_misc = RREG32_SOC15(MMHUB, 0, mmPCTL1_MISC);
+ pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE);
+
+ /* Light sleep must be disabled before writing to pctl0 registers */
+ pctl0_misc &= ~PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
+ WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc);
+
+ /* Write data used to access ram of register engine */
+ for (i = 0; i < PCTL0_DATA_LEN; i++) {
+ WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_RAM_INDEX,
+ pctl0_data[i].index);
+ WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_RAM_DATA,
+ pctl0_data[i].data);
+ }
+
+ /* Set the reng execute end ptr for pctl0 */
+ pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
+ PCTL0_RENG_EXECUTE,
+ RENG_EXECUTE_END_PTR,
+ PCTL0_RENG_EXEC_END_PTR);
+ WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);
+
+ /* Light sleep must be disabled before writing to pctl1 registers */
+ pctl1_misc &= ~PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
+ WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc);
+
+ /* Write data used to access ram of register engine */
+ for (i = 0; i < PCTL1_DATA_LEN; i++) {
+ WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_RAM_INDEX,
+ pctl1_data[i].index);
+ WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_RAM_DATA,
+ pctl1_data[i].data);
+ }
+
+ /* Set the reng execute end ptr for pctl1 */
+ pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
+ PCTL1_RENG_EXECUTE,
+ RENG_EXECUTE_END_PTR,
+ PCTL1_RENG_EXEC_END_PTR);
+ WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
+
+ mmhub_v1_0_power_gating_write_save_ranges(adev);
+
+ /* Re-enable light sleep */
+ pctl0_misc |= PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
+ WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc);
+ pctl1_misc |= PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
+ WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc);
+}
+
+void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t pctl0_reng_execute = 0;
+ uint32_t pctl1_reng_execute = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ pctl0_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE);
+ pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE);
+
+ if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
+ pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
+ PCTL0_RENG_EXECUTE,
+ RENG_EXECUTE_ON_PWR_UP, 1);
+ pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
+ PCTL0_RENG_EXECUTE,
+ RENG_EXECUTE_ON_REG_UPDATE, 1);
+ WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);
+
+ pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
+ PCTL1_RENG_EXECUTE,
+ RENG_EXECUTE_ON_PWR_UP, 1);
+ pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
+ PCTL1_RENG_EXECUTE,
+ RENG_EXECUTE_ON_REG_UPDATE, 1);
+ WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
+
+ } else {
+ pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
+ PCTL0_RENG_EXECUTE,
+ RENG_EXECUTE_ON_PWR_UP, 0);
+ pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
+ PCTL0_RENG_EXECUTE,
+ RENG_EXECUTE_ON_REG_UPDATE, 0);
+ WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);
+
+ pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
+ PCTL1_RENG_EXECUTE,
+ RENG_EXECUTE_ON_PWR_UP, 0);
+ pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
+ PCTL1_RENG_EXECUTE,
+ RENG_EXECUTE_ON_REG_UPDATE, 0);
+ WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
+ }
+}
+
int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev)) {
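
The REG_SET_FIELD() calls throughout the new power-gating code are masked read-modify-write updates. Schematically, simplifying the SOC15 register macros (FIELD_MASK and FIELD_SHIFT stand in for the generated per-field constants):

    uint32_t tmp = RREG32(reg);
    tmp &= ~FIELD_MASK;                         /* clear the field */
    tmp |= (value << FIELD_SHIFT) & FIELD_MASK; /* insert the new value */
    WREG32(reg, tmp);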
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index bbfacbcdc4a2..57bb940c0ecd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -32,6 +32,9 @@ void mmhub_v1_0_init(struct amdgpu_device *adev);
int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state);
void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev);
+void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
+ bool enable);
extern const struct amd_ip_funcs mmhub_v1_0_ip_funcs;
extern const struct amdgpu_ip_block_version mmhub_v1_0_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 20c1e539ff35..2258323a3c26 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -96,8 +96,8 @@ int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cm
header = (struct common_firmware_header *)ucode->fw;
cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
- cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = (uint32_t)fw_mem_mc_addr;
- cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = (uint32_t)((uint64_t)fw_mem_mc_addr >> 32);
+ cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
+ cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
cmd->cmd.cmd_load_ip_fw.fw_size = le32_to_cpu(header->ucode_size_bytes);
ret = psp_v10_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
@@ -172,10 +172,10 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
write_frame = ring->ring_mem + (psp_write_ptr_reg / (sizeof(struct psp_gfx_rb_frame) / 4));
/* Update KM RB frame */
- write_frame->cmd_buf_addr_hi = (unsigned int)(cmd_buf_mc_addr >> 32);
- write_frame->cmd_buf_addr_lo = (unsigned int)(cmd_buf_mc_addr);
- write_frame->fence_addr_hi = (unsigned int)(fence_mc_addr >> 32);
- write_frame->fence_addr_lo = (unsigned int)(fence_mc_addr);
+ write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
+ write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
+ write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
+ write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
/* Update the write Pointer in DWORDs */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 6e5c6edabb84..c98d77d0c8f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -254,8 +254,8 @@ int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd
memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
- cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = (uint32_t)fw_mem_mc_addr;
- cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = (uint32_t)((uint64_t)fw_mem_mc_addr >> 32);
+ cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
+ cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
ret = psp_v3_1_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
@@ -375,10 +375,10 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
/* Update KM RB frame */
- write_frame->cmd_buf_addr_hi = (unsigned int)(cmd_buf_mc_addr >> 32);
- write_frame->cmd_buf_addr_lo = (unsigned int)(cmd_buf_mc_addr);
- write_frame->fence_addr_hi = (unsigned int)(fence_mc_addr >> 32);
- write_frame->fence_addr_lo = (unsigned int)(fence_mc_addr);
+ write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
+ write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
+ write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
+ write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
/* Update the write Pointer in DWORDs */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 5fdb05a0c88a..a7341d88a320 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -625,7 +625,8 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS;
- adev->pg_flags = AMD_PG_SUPPORT_SDMA;
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA |
+ AMD_PG_SUPPORT_MMHUB;
adev->external_rev_id = 0x1;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 84d1ffd1eef9..035bbc98a63d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -318,12 +318,13 @@ static struct kfd_process *create_process(const struct task_struct *thread)
/* init process apertures*/
process->is_32bit_user_mode = in_compat_syscall();
- if (kfd_init_apertures(process) != 0)
- goto err_init_apretures;
+ err = kfd_init_apertures(process);
+ if (err != 0)
+ goto err_init_apertures;
return process;
-err_init_apretures:
+err_init_apertures:
pqm_uninit(&process->pqm);
err_process_pqm_init:
hash_del_rcu(&process->kfd_processes);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index beb2a81ab7da..70e8c20acb2f 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -184,6 +184,7 @@ enum amd_fan_ctrl_mode {
#define AMD_PG_SUPPORT_SAMU (1 << 10)
#define AMD_PG_SUPPORT_GFX_QUICK_MG (1 << 11)
#define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12)
+#define AMD_PG_SUPPORT_MMHUB (1 << 13)
enum amd_pm_state_type {
/* not used for dpm */
diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h
index b68f8efcdeae..ca93b5160ba6 100644
--- a/drivers/gpu/drm/amd/include/vi_structs.h
+++ b/drivers/gpu/drm/amd/include/vi_structs.h
@@ -195,6 +195,274 @@ struct vi_mqd {
uint32_t compute_wave_restore_addr_lo;
uint32_t compute_wave_restore_addr_hi;
uint32_t compute_wave_restore_control;
+ uint32_t reserved9;
+ uint32_t reserved10;
+ uint32_t reserved11;
+ uint32_t reserved12;
+ uint32_t reserved13;
+ uint32_t reserved14;
+ uint32_t reserved15;
+ uint32_t reserved16;
+ uint32_t reserved17;
+ uint32_t reserved18;
+ uint32_t reserved19;
+ uint32_t reserved20;
+ uint32_t reserved21;
+ uint32_t reserved22;
+ uint32_t reserved23;
+ uint32_t reserved24;
+ uint32_t reserved25;
+ uint32_t reserved26;
+ uint32_t reserved27;
+ uint32_t reserved28;
+ uint32_t reserved29;
+ uint32_t reserved30;
+ uint32_t reserved31;
+ uint32_t reserved32;
+ uint32_t reserved33;
+ uint32_t reserved34;
+ uint32_t compute_user_data_0;
+ uint32_t compute_user_data_1;
+ uint32_t compute_user_data_2;
+ uint32_t compute_user_data_3;
+ uint32_t compute_user_data_4;
+ uint32_t compute_user_data_5;
+ uint32_t compute_user_data_6;
+ uint32_t compute_user_data_7;
+ uint32_t compute_user_data_8;
+ uint32_t compute_user_data_9;
+ uint32_t compute_user_data_10;
+ uint32_t compute_user_data_11;
+ uint32_t compute_user_data_12;
+ uint32_t compute_user_data_13;
+ uint32_t compute_user_data_14;
+ uint32_t compute_user_data_15;
+ uint32_t cp_compute_csinvoc_count_lo;
+ uint32_t cp_compute_csinvoc_count_hi;
+ uint32_t reserved35;
+ uint32_t reserved36;
+ uint32_t reserved37;
+ uint32_t cp_mqd_query_time_lo;
+ uint32_t cp_mqd_query_time_hi;
+ uint32_t cp_mqd_connect_start_time_lo;
+ uint32_t cp_mqd_connect_start_time_hi;
+ uint32_t cp_mqd_connect_end_time_lo;
+ uint32_t cp_mqd_connect_end_time_hi;
+ uint32_t cp_mqd_connect_end_wf_count;
+ uint32_t cp_mqd_connect_end_pq_rptr;
+ uint32_t cp_mqd_connect_end_pq_wptr;
+ uint32_t cp_mqd_connect_end_ib_rptr;
+ uint32_t reserved38;
+ uint32_t reserved39;
+ uint32_t cp_mqd_save_start_time_lo;
+ uint32_t cp_mqd_save_start_time_hi;
+ uint32_t cp_mqd_save_end_time_lo;
+ uint32_t cp_mqd_save_end_time_hi;
+ uint32_t cp_mqd_restore_start_time_lo;
+ uint32_t cp_mqd_restore_start_time_hi;
+ uint32_t cp_mqd_restore_end_time_lo;
+ uint32_t cp_mqd_restore_end_time_hi;
+ uint32_t disable_queue;
+ uint32_t reserved41;
+ uint32_t gds_cs_ctxsw_cnt0;
+ uint32_t gds_cs_ctxsw_cnt1;
+ uint32_t gds_cs_ctxsw_cnt2;
+ uint32_t gds_cs_ctxsw_cnt3;
+ uint32_t reserved42;
+ uint32_t reserved43;
+ uint32_t cp_pq_exe_status_lo;
+ uint32_t cp_pq_exe_status_hi;
+ uint32_t cp_packet_id_lo;
+ uint32_t cp_packet_id_hi;
+ uint32_t cp_packet_exe_status_lo;
+ uint32_t cp_packet_exe_status_hi;
+ uint32_t gds_save_base_addr_lo;
+ uint32_t gds_save_base_addr_hi;
+ uint32_t gds_save_mask_lo;
+ uint32_t gds_save_mask_hi;
+ uint32_t ctx_save_base_addr_lo;
+ uint32_t ctx_save_base_addr_hi;
+ uint32_t dynamic_cu_mask_addr_lo;
+ uint32_t dynamic_cu_mask_addr_hi;
+ uint32_t cp_mqd_base_addr_lo;
+ uint32_t cp_mqd_base_addr_hi;
+ uint32_t cp_hqd_active;
+ uint32_t cp_hqd_vmid;
+ uint32_t cp_hqd_persistent_state;
+ uint32_t cp_hqd_pipe_priority;
+ uint32_t cp_hqd_queue_priority;
+ uint32_t cp_hqd_quantum;
+ uint32_t cp_hqd_pq_base_lo;
+ uint32_t cp_hqd_pq_base_hi;
+ uint32_t cp_hqd_pq_rptr;
+ uint32_t cp_hqd_pq_rptr_report_addr_lo;
+ uint32_t cp_hqd_pq_rptr_report_addr_hi;
+ uint32_t cp_hqd_pq_wptr_poll_addr_lo;
+ uint32_t cp_hqd_pq_wptr_poll_addr_hi;
+ uint32_t cp_hqd_pq_doorbell_control;
+ uint32_t cp_hqd_pq_wptr;
+ uint32_t cp_hqd_pq_control;
+ uint32_t cp_hqd_ib_base_addr_lo;
+ uint32_t cp_hqd_ib_base_addr_hi;
+ uint32_t cp_hqd_ib_rptr;
+ uint32_t cp_hqd_ib_control;
+ uint32_t cp_hqd_iq_timer;
+ uint32_t cp_hqd_iq_rptr;
+ uint32_t cp_hqd_dequeue_request;
+ uint32_t cp_hqd_dma_offload;
+ uint32_t cp_hqd_sema_cmd;
+ uint32_t cp_hqd_msg_type;
+ uint32_t cp_hqd_atomic0_preop_lo;
+ uint32_t cp_hqd_atomic0_preop_hi;
+ uint32_t cp_hqd_atomic1_preop_lo;
+ uint32_t cp_hqd_atomic1_preop_hi;
+ uint32_t cp_hqd_hq_status0;
+ uint32_t cp_hqd_hq_control0;
+ uint32_t cp_mqd_control;
+ uint32_t cp_hqd_hq_status1;
+ uint32_t cp_hqd_hq_control1;
+ uint32_t cp_hqd_eop_base_addr_lo;
+ uint32_t cp_hqd_eop_base_addr_hi;
+ uint32_t cp_hqd_eop_control;
+ uint32_t cp_hqd_eop_rptr;
+ uint32_t cp_hqd_eop_wptr;
+ uint32_t cp_hqd_eop_done_events;
+ uint32_t cp_hqd_ctx_save_base_addr_lo;
+ uint32_t cp_hqd_ctx_save_base_addr_hi;
+ uint32_t cp_hqd_ctx_save_control;
+ uint32_t cp_hqd_cntl_stack_offset;
+ uint32_t cp_hqd_cntl_stack_size;
+ uint32_t cp_hqd_wg_state_offset;
+ uint32_t cp_hqd_ctx_save_size;
+ uint32_t cp_hqd_gds_resource_state;
+ uint32_t cp_hqd_error;
+ uint32_t cp_hqd_eop_wptr_mem;
+ uint32_t cp_hqd_eop_dones;
+ uint32_t reserved46;
+ uint32_t reserved47;
+ uint32_t reserved48;
+ uint32_t reserved49;
+ uint32_t reserved50;
+ uint32_t reserved51;
+ uint32_t reserved52;
+ uint32_t reserved53;
+ uint32_t reserved54;
+ uint32_t reserved55;
+ uint32_t iqtimer_pkt_header;
+ uint32_t iqtimer_pkt_dw0;
+ uint32_t iqtimer_pkt_dw1;
+ uint32_t iqtimer_pkt_dw2;
+ uint32_t iqtimer_pkt_dw3;
+ uint32_t iqtimer_pkt_dw4;
+ uint32_t iqtimer_pkt_dw5;
+ uint32_t iqtimer_pkt_dw6;
+ uint32_t iqtimer_pkt_dw7;
+ uint32_t iqtimer_pkt_dw8;
+ uint32_t iqtimer_pkt_dw9;
+ uint32_t iqtimer_pkt_dw10;
+ uint32_t iqtimer_pkt_dw11;
+ uint32_t iqtimer_pkt_dw12;
+ uint32_t iqtimer_pkt_dw13;
+ uint32_t iqtimer_pkt_dw14;
+ uint32_t iqtimer_pkt_dw15;
+ uint32_t iqtimer_pkt_dw16;
+ uint32_t iqtimer_pkt_dw17;
+ uint32_t iqtimer_pkt_dw18;
+ uint32_t iqtimer_pkt_dw19;
+ uint32_t iqtimer_pkt_dw20;
+ uint32_t iqtimer_pkt_dw21;
+ uint32_t iqtimer_pkt_dw22;
+ uint32_t iqtimer_pkt_dw23;
+ uint32_t iqtimer_pkt_dw24;
+ uint32_t iqtimer_pkt_dw25;
+ uint32_t iqtimer_pkt_dw26;
+ uint32_t iqtimer_pkt_dw27;
+ uint32_t iqtimer_pkt_dw28;
+ uint32_t iqtimer_pkt_dw29;
+ uint32_t iqtimer_pkt_dw30;
+ uint32_t iqtimer_pkt_dw31;
+ uint32_t reserved56;
+ uint32_t reserved57;
+ uint32_t reserved58;
+ uint32_t set_resources_header;
+ uint32_t set_resources_dw1;
+ uint32_t set_resources_dw2;
+ uint32_t set_resources_dw3;
+ uint32_t set_resources_dw4;
+ uint32_t set_resources_dw5;
+ uint32_t set_resources_dw6;
+ uint32_t set_resources_dw7;
+ uint32_t reserved59;
+ uint32_t reserved60;
+ uint32_t reserved61;
+ uint32_t reserved62;
+ uint32_t reserved63;
+ uint32_t reserved64;
+ uint32_t reserved65;
+ uint32_t reserved66;
+ uint32_t reserved67;
+ uint32_t reserved68;
+ uint32_t reserved69;
+ uint32_t reserved70;
+ uint32_t reserved71;
+ uint32_t reserved72;
+ uint32_t reserved73;
+ uint32_t reserved74;
+ uint32_t reserved75;
+ uint32_t reserved76;
+ uint32_t reserved77;
+ uint32_t reserved78;
+ uint32_t reserved_t[256];
+};
+
+struct vi_mqd_allocation {
+ struct vi_mqd mqd;
+ uint32_t wptr_poll_mem;
+ uint32_t rptr_report_mem;
+ uint32_t dyamic_cu_mask;
+ uint32_t dyamic_rb_mask;
+};
+
+struct cz_mqd {
+ uint32_t header;
+ uint32_t compute_dispatch_initiator;
+ uint32_t compute_dim_x;
+ uint32_t compute_dim_y;
+ uint32_t compute_dim_z;
+ uint32_t compute_start_x;
+ uint32_t compute_start_y;
+ uint32_t compute_start_z;
+ uint32_t compute_num_thread_x;
+ uint32_t compute_num_thread_y;
+ uint32_t compute_num_thread_z;
+ uint32_t compute_pipelinestat_enable;
+ uint32_t compute_perfcount_enable;
+ uint32_t compute_pgm_lo;
+ uint32_t compute_pgm_hi;
+ uint32_t compute_tba_lo;
+ uint32_t compute_tba_hi;
+ uint32_t compute_tma_lo;
+ uint32_t compute_tma_hi;
+ uint32_t compute_pgm_rsrc1;
+ uint32_t compute_pgm_rsrc2;
+ uint32_t compute_vmid;
+ uint32_t compute_resource_limits;
+ uint32_t compute_static_thread_mgmt_se0;
+ uint32_t compute_static_thread_mgmt_se1;
+ uint32_t compute_tmpring_size;
+ uint32_t compute_static_thread_mgmt_se2;
+ uint32_t compute_static_thread_mgmt_se3;
+ uint32_t compute_restart_x;
+ uint32_t compute_restart_y;
+ uint32_t compute_restart_z;
+ uint32_t compute_thread_trace_enable;
+ uint32_t compute_misc_reserved;
+ uint32_t compute_dispatch_id;
+ uint32_t compute_threadgroup_id;
+ uint32_t compute_relaunch;
+ uint32_t compute_wave_restore_addr_lo;
+ uint32_t compute_wave_restore_addr_hi;
+ uint32_t compute_wave_restore_control;
uint32_t reserved_39;
uint32_t reserved_40;
uint32_t reserved_41;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index f988ed204d9a..d6f097f44b6c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -2865,6 +2865,7 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
void *state, struct pp_power_state *power_state,
void *pp_table, uint32_t classification_flag)
{
+ ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
struct vega10_power_state *vega10_power_state =
cast_phw_vega10_power_state(&(power_state->hardware));
struct vega10_performance_level *performance_level;
@@ -2941,11 +2942,16 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
performance_level = &(vega10_power_state->performance_levels
[vega10_power_state->performance_level_count++]);
-
performance_level->soc_clock = socclk_dep_table->entries
- [state_entry->ucSocClockIndexHigh].ulClk;
- performance_level->gfx_clock = gfxclk_dep_table->entries
+ [state_entry->ucSocClockIndexHigh].ulClk;
+ if (gfxclk_dep_table->ucRevId == 0) {
+ performance_level->gfx_clock = gfxclk_dep_table->entries
[state_entry->ucGfxClockIndexHigh].ulClk;
+ } else if (gfxclk_dep_table->ucRevId == 1) {
+ patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
+ performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
+ }
+
performance_level->mem_clock = mclk_dep_table->entries
[state_entry->ucMemClockIndexHigh].ulMemClk;
return 0;
@@ -3349,7 +3355,6 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
dpm_table->
gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
value = sclk;
-
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_OD6PlusinACSupport) ||
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -3472,7 +3477,6 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
return result);
}
}
-
return result;
}
@@ -3828,13 +3832,18 @@ static int vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
struct pp_gpu_power *query)
{
+ uint32_t value;
+
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_GetCurrPkgPwr),
"Failed to get current package power!",
return -EINVAL);
- return vega10_read_arg_from_smc(hwmgr->smumgr,
- &query->average_gpu_power);
+ vega10_read_arg_from_smc(hwmgr->smumgr, &value);
+ /* power value is an integer */
+ query->average_gpu_power = value << 8;
+
+ return 0;
}
static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
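
The value << 8 conversion above assumes query->average_gpu_power is a 24.8 fixed-point watt count while PPSMC_MSG_GetCurrPkgPwr returns whole watts; a consumer would scale it back down, e.g.:

    /* 24.8 fixed point: integer watts live in the upper 24 bits */
    uint32_t watts = query->average_gpu_power >> 8;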
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
index 52beea3bf6b7..b3e63003a789 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
@@ -144,6 +144,15 @@ typedef struct _ATOM_Vega10_GFXCLK_Dependency_Record {
USHORT usAVFSOffset; /* AVFS Voltage offset */
} ATOM_Vega10_GFXCLK_Dependency_Record;
+typedef struct _ATOM_Vega10_GFXCLK_Dependency_Record_V2 {
+ ULONG ulClk;
+ UCHAR ucVddInd;
+ USHORT usCKSVOffsetandDisable;
+ USHORT usAVFSOffset;
+ UCHAR ucACGEnable;
+ UCHAR ucReserved[3];
+} ATOM_Vega10_GFXCLK_Dependency_Record_V2;
+
typedef struct _ATOM_Vega10_MCLK_Dependency_Record {
ULONG ulMemClk; /* Clock Frequency */
UCHAR ucVddInd; /* SOC_VDD index */
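
Because the V2 record is larger than the V1 record, entries[] cannot be indexed with the V1 stride when ucRevId == 1; callers re-cast the array so pointer arithmetic uses the right record size, as the processpptables change below does. A minimal sketch, with use_clock() as a hypothetical consumer:

    const ATOM_Vega10_GFXCLK_Dependency_Record_V2 *rec =
            (const ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)clk_dep_table->entries;
    for (i = 0; i < clk_dep_table->ucNumEntries; i++, rec++)
            use_clock(le32_to_cpu(rec->ulClk)); /* stepped at the V2 stride */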
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index 2b892e47d8dc..1623644ea49a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -585,6 +585,7 @@ static int get_gfxclk_voltage_dependency_table(
uint32_t table_size, i;
struct phm_ppt_v1_clock_voltage_dependency_table
*clk_table;
+ ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2;
PP_ASSERT_WITH_CODE((clk_dep_table->ucNumEntries != 0),
"Invalid PowerPlay Table!", return -1);
@@ -601,18 +602,41 @@ static int get_gfxclk_voltage_dependency_table(
clk_table->count = clk_dep_table->ucNumEntries;
- for (i = 0; i < clk_table->count; i++) {
- clk_table->entries[i].vddInd =
+ if (clk_dep_table->ucRevId == 0) {
+ for (i = 0; i < clk_table->count; i++) {
+ clk_table->entries[i].vddInd =
clk_dep_table->entries[i].ucVddInd;
- clk_table->entries[i].clk =
+ clk_table->entries[i].clk =
le32_to_cpu(clk_dep_table->entries[i].ulClk);
- clk_table->entries[i].cks_enable =
- (((clk_dep_table->entries[i].usCKSVOffsetandDisable & 0x8000)
+ clk_table->entries[i].cks_enable =
+ (((le16_to_cpu(clk_dep_table->entries[i].usCKSVOffsetandDisable) & 0x8000)
>> 15) == 0) ? 1 : 0;
- clk_table->entries[i].cks_voffset =
- (clk_dep_table->entries[i].usCKSVOffsetandDisable & 0x7F);
- clk_table->entries[i].sclk_offset =
- clk_dep_table->entries[i].usAVFSOffset;
+ clk_table->entries[i].cks_voffset =
+ le16_to_cpu(clk_dep_table->entries[i].usCKSVOffsetandDisable) & 0x7F;
+ clk_table->entries[i].sclk_offset =
+ le16_to_cpu(clk_dep_table->entries[i].usAVFSOffset);
+ }
+ } else if (clk_dep_table->ucRevId == 1) {
+ patom_record_v2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)clk_dep_table->entries;
+ for (i = 0; i < clk_table->count; i++) {
+ clk_table->entries[i].vddInd =
+ patom_record_v2->ucVddInd;
+ clk_table->entries[i].clk =
+ le32_to_cpu(patom_record_v2->ulClk);
+ clk_table->entries[i].cks_enable =
+ (((le16_to_cpu(patom_record_v2->usCKSVOffsetandDisable) & 0x8000)
+ >> 15) == 0) ? 1 : 0;
+ clk_table->entries[i].cks_voffset =
+ le16_to_cpu(patom_record_v2->usCKSVOffsetandDisable) & 0x7F;
+ clk_table->entries[i].sclk_offset =
+ le16_to_cpu(patom_record_v2->usAVFSOffset);
+ patom_record_v2++;
+ }
+ } else {
+ kfree(clk_table);
+ PP_ASSERT_WITH_CODE(false,
+ "Unsupported GFXClockDependencyTable Revision!",
+ return -EINVAL);
}
*pp_vega10_clk_dep_table = clk_table;
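The rev-1 branch above casts entries to the larger V2 record and walks it by pointer increment, because indexing through the V1-typed array would stride by the wrong record size. A stripped-down sketch of that hazard, with hypothetical record types:

	#include <stdint.h>

	/* Sketch (hypothetical types): why the V2 table is walked via a retyped pointer. */
	struct rec_v1 { uint32_t clk; uint8_t vdd; };
	struct rec_v2 { uint32_t clk; uint8_t vdd; uint8_t acg; uint8_t rsvd[3]; };

	struct table { uint8_t rev; uint8_t num; struct rec_v1 entries[1]; };

	static uint32_t entry_clk(const struct table *t, int i)
	{
		if (t->rev == 0)
			return t->entries[i].clk;	/* V1: index directly */
		/* V2 records are larger, so entries[i] would land mid-record: */
		return ((const struct rec_v2 *)t->entries)[i].clk;
	}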
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
index e07cab311c7a..b4af9e85dfa5 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
@@ -124,8 +124,8 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_NumOfDisplays 0x56
#define PPSMC_MSG_ReadSerialNumTop32 0x58
#define PPSMC_MSG_ReadSerialNumBottom32 0x59
-#define PPSMC_MSG_GetCurrPkgPwr 0x5C
-#define PPSMC_Message_Count 0x5D
+#define PPSMC_MSG_GetCurrPkgPwr 0x61
+#define PPSMC_Message_Count 0x62
typedef int PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index 39c7091866e8..652aaa43e95c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -72,7 +72,7 @@ static int cz_send_msg_to_smc_async(struct pp_smumgr *smumgr,
result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
if (result != 0) {
- pr_err("cz_send_msg_to_smc_async failed\n");
+ pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg);
return result;
}
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index 2a7eb6817c36..92e6b08ea64a 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -133,7 +133,7 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
}
/* Framebuffer objects must have a valid device address for scanout */
- if (obj->dev_addr == DMA_ERROR_CODE) {
+ if (!obj->mapped) {
ret = -EINVAL;
goto err_unref;
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index d6c2a5d190eb..a76ca21d063b 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -175,6 +175,7 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
obj->phys_addr = obj->linear->start;
obj->dev_addr = obj->linear->start;
+ obj->mapped = true;
}
DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
@@ -205,7 +206,6 @@ armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
return NULL;
drm_gem_private_object_init(dev, &obj->obj, size);
- obj->dev_addr = DMA_ERROR_CODE;
DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
@@ -229,8 +229,6 @@ static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
return NULL;
}
- obj->dev_addr = DMA_ERROR_CODE;
-
mapping = obj->obj.filp->f_mapping;
mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
@@ -610,5 +608,6 @@ int armada_gem_map_import(struct armada_gem_object *dobj)
return -EINVAL;
}
dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
+ dobj->mapped = true;
return 0;
}
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
index b88d2b9853c7..6e524e0676bb 100644
--- a/drivers/gpu/drm/armada/armada_gem.h
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -16,6 +16,7 @@ struct armada_gem_object {
void *addr;
phys_addr_t phys_addr;
resource_size_t dev_addr;
+ bool mapped;
struct drm_mm_node *linear; /* for linear backed */
struct page *page; /* for page backed */
struct sg_table *sgt; /* for imported */
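These armada hunks replace the DMA_ERROR_CODE sentinel with an explicit flag: every site that assigns dev_addr now also sets mapped, and the scanout validity check tests the flag. A sketch of the idiom (names hypothetical):

	/* Sketch (hypothetical names): explicit flag instead of a sentinel address. */
	struct gem_obj {
		resource_size_t dev_addr;
		bool mapped;	/* set at every site that assigns dev_addr */
	};

	static bool gem_can_scanout(const struct gem_obj *obj)
	{
		return obj->mapped;	/* was: obj->dev_addr != DMA_ERROR_CODE */
	}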
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index cf92ebfe6ab7..67469c26bae8 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -11,6 +11,7 @@
#include <sound/hdmi-codec.h>
#include <sound/pcm.h>
#include <sound/soc.h>
+#include <linux/of_graph.h>
#include "adv7511.h"
@@ -182,10 +183,31 @@ static void audio_shutdown(struct device *dev, void *data)
{
}
+static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
+ struct device_node *endpoint)
+{
+ struct of_endpoint of_ep;
+ int ret;
+
+ ret = of_graph_parse_endpoint(endpoint, &of_ep);
+ if (ret < 0)
+ return ret;
+
+	/*
+	 * HDMI audio is expected on the endpoint at reg = <2>,
+	 * which maps to sound DAI 0.
+	 */
+ if (of_ep.port == 2)
+ return 0;
+
+ return -EINVAL;
+}
+
static const struct hdmi_codec_ops adv7511_codec_ops = {
.hw_params = adv7511_hdmi_hw_params,
.audio_shutdown = audio_shutdown,
.audio_startup = audio_startup,
+ .get_dai_id = adv7511_hdmi_i2s_get_dai_id,
};
static struct hdmi_codec_pdata codec_data = {
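The .get_dai_id hook lets the ASoC graph code translate an OF-graph endpoint into a sound DAI index; on this bridge the audio endpoint sits at reg = <2> and maps to DAI 0. A sketch of how a bridge exposing several audio inputs might extend the same hook (the port numbers and mapping here are hypothetical):

	/* Sketch (hypothetical ports): dispatching multiple DAIs on the OF port. */
	static int example_get_dai_id(struct snd_soc_component *component,
				      struct device_node *endpoint)
	{
		struct of_endpoint of_ep;
		int ret = of_graph_parse_endpoint(endpoint, &of_ep);

		if (ret < 0)
			return ret;

		switch (of_ep.port) {
		case 2: return 0;	/* I2S    -> DAI 0 */
		case 3: return 1;	/* S/PDIF -> DAI 1 (hypothetical) */
		default: return -EINVAL;
		}
	}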
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 65ab28cc2946..685c1a480201 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -160,7 +160,7 @@ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
struct panel_bridge *panel_bridge;
if (!panel)
- return ERR_PTR(EINVAL);
+ return ERR_PTR(-EINVAL);
panel_bridge = devm_kzalloc(panel->dev, sizeof(*panel_bridge),
GFP_KERNEL);
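The one-line fix above corrects a sign bug: ERR_PTR() must be given a negative errno, since IS_ERR() only recognizes pointers in the error range. For illustration:

	/* Sketch: why the sign matters. */
	void *p_bad  = ERR_PTR(EINVAL);		/* 22: a bogus but "valid-looking" pointer */
	void *p_good = ERR_PTR(-EINVAL);	/* IS_ERR() true, PTR_ERR() == -EINVAL */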
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index aaf287d2e91d..b2cf59f54c88 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -82,9 +82,30 @@ static void dw_hdmi_i2s_audio_shutdown(struct device *dev, void *data)
hdmi_write(audio, HDMI_AUD_CONF0_SW_RESET, HDMI_AUD_CONF0);
}
+static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
+ struct device_node *endpoint)
+{
+ struct of_endpoint of_ep;
+ int ret;
+
+ ret = of_graph_parse_endpoint(endpoint, &of_ep);
+ if (ret < 0)
+ return ret;
+
+	/*
+	 * HDMI audio is expected on the endpoint at reg = <2>,
+	 * which maps to sound DAI 0.
+	 */
+ if (of_ep.port == 2)
+ return 0;
+
+ return -EINVAL;
+}
+
static struct hdmi_codec_ops dw_hdmi_i2s_ops = {
.hw_params = dw_hdmi_i2s_hw_params,
.audio_shutdown = dw_hdmi_i2s_audio_shutdown,
+ .get_dai_id = dw_hdmi_i2s_get_dai_id,
};
static int snd_dw_hdmi_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index adb1dd7fde5f..1ee84dd802d4 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1258,11 +1258,11 @@ int drm_legacy_addbufs(struct drm_device *dev, void *data,
 * lock, preventing allocation of more buffers after this call. Information
* about each requested buffer is then copied into user space.
*/
-int drm_legacy_infobufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int __drm_legacy_infobufs(struct drm_device *dev,
+ void *data, int *p,
+ int (*f)(void *, int, struct drm_buf_entry *))
{
struct drm_device_dma *dma = dev->dma;
- struct drm_buf_info *request = data;
int i;
int count;
@@ -1290,26 +1290,12 @@ int drm_legacy_infobufs(struct drm_device *dev, void *data,
DRM_DEBUG("count = %d\n", count);
- if (request->count >= count) {
+ if (*p >= count) {
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
- if (dma->bufs[i].buf_count) {
- struct drm_buf_desc __user *to =
- &request->list[count];
- struct drm_buf_entry *from = &dma->bufs[i];
- if (copy_to_user(&to->count,
- &from->buf_count,
- sizeof(from->buf_count)) ||
- copy_to_user(&to->size,
- &from->buf_size,
- sizeof(from->buf_size)) ||
- copy_to_user(&to->low_mark,
- &from->low_mark,
- sizeof(from->low_mark)) ||
- copy_to_user(&to->high_mark,
- &from->high_mark,
- sizeof(from->high_mark)))
+ struct drm_buf_entry *from = &dma->bufs[i];
+ if (from->buf_count) {
+ if (f(data, count, from) < 0)
return -EFAULT;
-
DRM_DEBUG("%d %d %d %d %d\n",
i,
dma->bufs[i].buf_count,
@@ -1320,11 +1306,29 @@ int drm_legacy_infobufs(struct drm_device *dev, void *data,
}
}
}
- request->count = count;
+ *p = count;
return 0;
}
+static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
+{
+ struct drm_buf_info *request = data;
+ struct drm_buf_desc __user *to = &request->list[count];
+ struct drm_buf_desc v = {.count = from->buf_count,
+ .size = from->buf_size,
+ .low_mark = from->low_mark,
+ .high_mark = from->high_mark};
+ return copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags));
+}
+
+int drm_legacy_infobufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_buf_info *request = data;
+ return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
+}
+
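The restructuring above splits the legacy infobufs path into a core walker plus a per-ABI copy-out callback, so this 64-bit handler and the 32-bit compat handler in drm_ioc32.c share the loop and differ only in the user-visible struct layout (mapbufs gets the same treatment below). A stripped-down sketch of the pattern, with hypothetical types:

	/* Sketch (hypothetical types): one walker, per-ABI copy-out callbacks. */
	struct buf_entry { int buf_count, buf_size; };

	static int walk_bufs(const struct buf_entry *bufs, int n, void *data,
			     int (*copy)(void *data, int idx, const struct buf_entry *e))
	{
		int i;

		for (i = 0; i < n; i++)
			if (copy(data, i, &bufs[i]) < 0)
				return -EFAULT;	/* callback failed to reach user space */
		return 0;
	}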
/**
* Specifies a low and high water mark for buffer allocation
*
@@ -1439,15 +1443,15 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
* drm_mmap_dma().
*/
-int drm_legacy_mapbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
+ void __user **v,
+ int (*f)(void *, int, unsigned long,
+ struct drm_buf *),
+ struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int retcode = 0;
- const int zero = 0;
unsigned long virtual;
- unsigned long address;
- struct drm_buf_map *request = data;
int i;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
@@ -1467,7 +1471,7 @@ int drm_legacy_mapbufs(struct drm_device *dev, void *data,
dev->buf_use++; /* Can't allocate more after this call */
spin_unlock(&dev->buf_lock);
- if (request->count >= dma->buf_count) {
+ if (*p >= dma->buf_count) {
if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
|| (drm_core_check_feature(dev, DRIVER_SG)
&& (dma->flags & _DRM_DMA_USE_SG))) {
@@ -1492,41 +1496,51 @@ int drm_legacy_mapbufs(struct drm_device *dev, void *data,
retcode = (signed long)virtual;
goto done;
}
- request->virtual = (void __user *)virtual;
+ *v = (void __user *)virtual;
for (i = 0; i < dma->buf_count; i++) {
- if (copy_to_user(&request->list[i].idx,
- &dma->buflist[i]->idx,
- sizeof(request->list[0].idx))) {
- retcode = -EFAULT;
- goto done;
- }
- if (copy_to_user(&request->list[i].total,
- &dma->buflist[i]->total,
- sizeof(request->list[0].total))) {
- retcode = -EFAULT;
- goto done;
- }
- if (copy_to_user(&request->list[i].used,
- &zero, sizeof(zero))) {
- retcode = -EFAULT;
- goto done;
- }
- address = virtual + dma->buflist[i]->offset; /* *** */
- if (copy_to_user(&request->list[i].address,
- &address, sizeof(address))) {
+ if (f(data, i, virtual, dma->buflist[i]) < 0) {
retcode = -EFAULT;
goto done;
}
}
}
done:
- request->count = dma->buf_count;
- DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
+ *p = dma->buf_count;
+ DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);
return retcode;
}
+static int map_one_buf(void *data, int idx, unsigned long virtual,
+ struct drm_buf *buf)
+{
+ struct drm_buf_map *request = data;
+	unsigned long address = virtual + buf->offset;	/* user-visible address of this buffer */
+
+ if (copy_to_user(&request->list[idx].idx, &buf->idx,
+ sizeof(request->list[0].idx)))
+ return -EFAULT;
+ if (copy_to_user(&request->list[idx].total, &buf->total,
+ sizeof(request->list[0].total)))
+ return -EFAULT;
+ if (clear_user(&request->list[idx].used, sizeof(int)))
+ return -EFAULT;
+ if (copy_to_user(&request->list[idx].address, &address,
+ sizeof(address)))
+ return -EFAULT;
+ return 0;
+}
+
+int drm_legacy_mapbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_buf_map *request = data;
+ return __drm_legacy_mapbufs(dev, data, &request->count,
+ &request->virtual, map_one_buf,
+ file_priv);
+}
+
int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index ec1ed94b2390..d34e5096887a 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
+#include <linux/uio.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drmP.h>
@@ -140,101 +141,83 @@ static loff_t auxdev_llseek(struct file *file, loff_t offset, int whence)
return fixed_size_llseek(file, offset, whence, AUX_MAX_OFFSET);
}
-static ssize_t auxdev_read(struct file *file, char __user *buf, size_t count,
- loff_t *offset)
+static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- size_t bytes_pending, num_bytes_processed = 0;
- struct drm_dp_aux_dev *aux_dev = file->private_data;
+ struct drm_dp_aux_dev *aux_dev = iocb->ki_filp->private_data;
+ loff_t pos = iocb->ki_pos;
ssize_t res = 0;
if (!atomic_inc_not_zero(&aux_dev->usecount))
return -ENODEV;
- bytes_pending = min((loff_t)count, AUX_MAX_OFFSET - (*offset));
-
- if (!access_ok(VERIFY_WRITE, buf, bytes_pending)) {
- res = -EFAULT;
- goto out;
- }
+ iov_iter_truncate(to, AUX_MAX_OFFSET - pos);
- while (bytes_pending > 0) {
- uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
- ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));
+ while (iov_iter_count(to)) {
+ uint8_t buf[DP_AUX_MAX_PAYLOAD_BYTES];
+ ssize_t todo = min(iov_iter_count(to), sizeof(buf));
if (signal_pending(current)) {
- res = num_bytes_processed ?
- num_bytes_processed : -ERESTARTSYS;
- goto out;
+ res = -ERESTARTSYS;
+ break;
}
- res = drm_dp_dpcd_read(aux_dev->aux, *offset, localbuf, todo);
- if (res <= 0) {
- res = num_bytes_processed ? num_bytes_processed : res;
- goto out;
- }
- if (__copy_to_user(buf + num_bytes_processed, localbuf, res)) {
- res = num_bytes_processed ?
- num_bytes_processed : -EFAULT;
- goto out;
+ res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
+ if (res <= 0)
+ break;
+
+ if (copy_to_iter(buf, res, to) != res) {
+ res = -EFAULT;
+ break;
}
- bytes_pending -= res;
- *offset += res;
- num_bytes_processed += res;
- res = num_bytes_processed;
+
+ pos += res;
}
-out:
+ if (pos != iocb->ki_pos)
+ res = pos - iocb->ki_pos;
+ iocb->ki_pos = pos;
+
atomic_dec(&aux_dev->usecount);
wake_up_atomic_t(&aux_dev->usecount);
return res;
}
-static ssize_t auxdev_write(struct file *file, const char __user *buf,
- size_t count, loff_t *offset)
+static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
- size_t bytes_pending, num_bytes_processed = 0;
- struct drm_dp_aux_dev *aux_dev = file->private_data;
+ struct drm_dp_aux_dev *aux_dev = iocb->ki_filp->private_data;
+ loff_t pos = iocb->ki_pos;
ssize_t res = 0;
if (!atomic_inc_not_zero(&aux_dev->usecount))
return -ENODEV;
- bytes_pending = min((loff_t)count, AUX_MAX_OFFSET - *offset);
-
- if (!access_ok(VERIFY_READ, buf, bytes_pending)) {
- res = -EFAULT;
- goto out;
- }
+ iov_iter_truncate(from, AUX_MAX_OFFSET - pos);
- while (bytes_pending > 0) {
- uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
- ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));
+ while (iov_iter_count(from)) {
+ uint8_t buf[DP_AUX_MAX_PAYLOAD_BYTES];
+ ssize_t todo = min(iov_iter_count(from), sizeof(buf));
if (signal_pending(current)) {
- res = num_bytes_processed ?
- num_bytes_processed : -ERESTARTSYS;
- goto out;
+ res = -ERESTARTSYS;
+ break;
}
- if (__copy_from_user(localbuf,
- buf + num_bytes_processed, todo)) {
- res = num_bytes_processed ?
- num_bytes_processed : -EFAULT;
- goto out;
+ if (!copy_from_iter_full(buf, todo, from)) {
+ res = -EFAULT;
+ break;
}
- res = drm_dp_dpcd_write(aux_dev->aux, *offset, localbuf, todo);
- if (res <= 0) {
- res = num_bytes_processed ? num_bytes_processed : res;
- goto out;
- }
- bytes_pending -= res;
- *offset += res;
- num_bytes_processed += res;
- res = num_bytes_processed;
+ res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo);
+ if (res <= 0)
+ break;
+
+ pos += res;
}
-out:
+ if (pos != iocb->ki_pos)
+ res = pos - iocb->ki_pos;
+ iocb->ki_pos = pos;
+
atomic_dec(&aux_dev->usecount);
wake_up_atomic_t(&aux_dev->usecount);
return res;
@@ -251,8 +234,8 @@ static int auxdev_release(struct inode *inode, struct file *file)
static const struct file_operations auxdev_fops = {
.owner = THIS_MODULE,
.llseek = auxdev_llseek,
- .read = auxdev_read,
- .write = auxdev_write,
+ .read_iter = auxdev_read_iter,
+ .write_iter = auxdev_write_iter,
.open = auxdev_open,
.release = auxdev_release,
};
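Moving to .read_iter/.write_iter drops the manual access_ok()/__copy_*_user bookkeeping: iov_iter_truncate() clamps the request, copy_to_iter()/copy_from_iter_full() do the fault checking, and partial progress falls out of comparing the final position against iocb->ki_pos. A condensed sketch of the read side (DEV_MAX_OFFSET and device_read() are hypothetical):

	static ssize_t example_read_iter(struct kiocb *iocb, struct iov_iter *to)
	{
		loff_t pos = iocb->ki_pos;
		ssize_t res = 0;
		u8 chunk[16];

		iov_iter_truncate(to, DEV_MAX_OFFSET - pos);	/* clamp to device size */
		while (iov_iter_count(to)) {
			size_t todo = min(iov_iter_count(to), sizeof(chunk));

			res = device_read(pos, chunk, todo);	/* hypothetical helper */
			if (res <= 0)
				break;
			if (copy_to_iter(chunk, res, to) != res) {
				res = -EFAULT;
				break;
			}
			pos += res;
		}
		if (pos != iocb->ki_pos)	/* made progress: report bytes, not the error */
			res = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
		return res;
	}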
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 607ef3a97c42..af279844d7ce 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -832,6 +832,7 @@ unlock:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
goto retry;
}
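The added drm_atomic_state_clear() follows the deadlock-backoff contract: after -EDEADLK the atomic state must be cleared before drm_modeset_backoff() and the retry, otherwise stale state from the failed attempt is reused. Skeleton of the loop (do_commit() stands in for the real commit step):

	static int commit_with_backoff(struct drm_atomic_state *state,
				       struct drm_modeset_acquire_ctx *ctx)
	{
		int ret;

	retry:
		ret = do_commit(state);			/* hypothetical commit step */
		if (ret == -EDEADLK) {
			drm_atomic_state_clear(state);	/* drop stale state first */
			drm_modeset_backoff(ctx);	/* then wait and retry */
			goto retry;
		}
		return ret;
	}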
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 068b685608cf..4e906b82a170 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -152,6 +152,10 @@ static inline int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
#endif
+drm_ioctl_t drm_version;
+drm_ioctl_t drm_getunique;
+drm_ioctl_t drm_getclient;
+
/* drm_syncobj.c */
void drm_syncobj_open(struct drm_file *file_private);
void drm_syncobj_release(struct drm_file *file_private);
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index ae386783e3ea..f8e96e648acf 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -32,6 +32,9 @@
#include <linux/export.h>
#include <drm/drmP.h>
+#include "drm_legacy.h"
+#include "drm_internal.h"
+#include "drm_crtc_internal.h"
#define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t)
#define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t)
@@ -87,39 +90,31 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_version32_t v32;
- struct drm_version __user *version;
+ struct drm_version v;
int err;
if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
return -EFAULT;
- version = compat_alloc_user_space(sizeof(*version));
- if (!version)
- return -EFAULT;
- if (__put_user(v32.name_len, &version->name_len)
- || __put_user((void __user *)(unsigned long)v32.name,
- &version->name)
- || __put_user(v32.date_len, &version->date_len)
- || __put_user((void __user *)(unsigned long)v32.date,
- &version->date)
- || __put_user(v32.desc_len, &version->desc_len)
- || __put_user((void __user *)(unsigned long)v32.desc,
- &version->desc))
- return -EFAULT;
-
- err = drm_ioctl(file,
- DRM_IOCTL_VERSION, (unsigned long)version);
+ v = (struct drm_version) {
+ .name_len = v32.name_len,
+ .name = compat_ptr(v32.name),
+ .date_len = v32.date_len,
+ .date = compat_ptr(v32.date),
+ .desc_len = v32.desc_len,
+ .desc = compat_ptr(v32.desc),
+ };
+ err = drm_ioctl_kernel(file, drm_version, &v,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW);
if (err)
return err;
- if (__get_user(v32.version_major, &version->version_major)
- || __get_user(v32.version_minor, &version->version_minor)
- || __get_user(v32.version_patchlevel, &version->version_patchlevel)
- || __get_user(v32.name_len, &version->name_len)
- || __get_user(v32.date_len, &version->date_len)
- || __get_user(v32.desc_len, &version->desc_len))
- return -EFAULT;
-
+ v32.version_major = v.version_major;
+ v32.version_minor = v.version_minor;
+ v32.version_patchlevel = v.version_patchlevel;
+ v32.name_len = v.name_len;
+ v32.date_len = v.date_len;
+ v32.desc_len = v.desc_len;
if (copy_to_user((void __user *)arg, &v32, sizeof(v32)))
return -EFAULT;
return 0;
@@ -134,26 +129,21 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_unique32_t uq32;
- struct drm_unique __user *u;
+ struct drm_unique uq;
int err;
if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
return -EFAULT;
+ uq = (struct drm_unique){
+ .unique_len = uq32.unique_len,
+ .unique = compat_ptr(uq32.unique),
+ };
- u = compat_alloc_user_space(sizeof(*u));
- if (!u)
- return -EFAULT;
- if (__put_user(uq32.unique_len, &u->unique_len)
- || __put_user((void __user *)(unsigned long)uq32.unique,
- &u->unique))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
+ err = drm_ioctl_kernel(file, drm_getunique, &uq, DRM_UNLOCKED);
if (err)
return err;
- if (__get_user(uq32.unique_len, &u->unique_len))
- return -EFAULT;
+ uq32.unique_len = uq.unique_len;
if (copy_to_user((void __user *)arg, &uq32, sizeof(uq32)))
return -EFAULT;
return 0;
@@ -162,21 +152,8 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
static int compat_drm_setunique(struct file *file, unsigned int cmd,
unsigned long arg)
{
- drm_unique32_t uq32;
- struct drm_unique __user *u;
-
- if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
- return -EFAULT;
-
- u = compat_alloc_user_space(sizeof(*u));
- if (!u)
- return -EFAULT;
- if (__put_user(uq32.unique_len, &u->unique_len)
- || __put_user((void __user *)(unsigned long)uq32.unique,
- &u->unique))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
+ /* it's dead */
+ return -EINVAL;
}
typedef struct drm_map32 {
@@ -193,32 +170,23 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
{
drm_map32_t __user *argp = (void __user *)arg;
drm_map32_t m32;
- struct drm_map __user *map;
- int idx, err;
- void *handle;
-
- if (get_user(idx, &argp->offset))
- return -EFAULT;
+ struct drm_map map;
+ int err;
- map = compat_alloc_user_space(sizeof(*map));
- if (!map)
- return -EFAULT;
- if (__put_user(idx, &map->offset))
+ if (copy_from_user(&m32, argp, sizeof(m32)))
return -EFAULT;
- err = drm_ioctl(file, DRM_IOCTL_GET_MAP, (unsigned long)map);
+ map.offset = m32.offset;
+ err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, DRM_UNLOCKED);
if (err)
return err;
- if (__get_user(m32.offset, &map->offset)
- || __get_user(m32.size, &map->size)
- || __get_user(m32.type, &map->type)
- || __get_user(m32.flags, &map->flags)
- || __get_user(handle, &map->handle)
- || __get_user(m32.mtrr, &map->mtrr))
- return -EFAULT;
-
- m32.handle = (unsigned long)handle;
+ m32.offset = map.offset;
+ m32.size = map.size;
+ m32.type = map.type;
+ m32.flags = map.flags;
+ m32.handle = ptr_to_compat(map.handle);
+ m32.mtrr = map.mtrr;
if (copy_to_user(argp, &m32, sizeof(m32)))
return -EFAULT;
return 0;
@@ -230,35 +198,28 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
{
drm_map32_t __user *argp = (void __user *)arg;
drm_map32_t m32;
- struct drm_map __user *map;
+ struct drm_map map;
int err;
- void *handle;
if (copy_from_user(&m32, argp, sizeof(m32)))
return -EFAULT;
- map = compat_alloc_user_space(sizeof(*map));
- if (!map)
- return -EFAULT;
- if (__put_user(m32.offset, &map->offset)
- || __put_user(m32.size, &map->size)
- || __put_user(m32.type, &map->type)
- || __put_user(m32.flags, &map->flags))
- return -EFAULT;
+ map.offset = m32.offset;
+ map.size = m32.size;
+ map.type = m32.type;
+ map.flags = m32.flags;
- err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map);
+ err = drm_ioctl_kernel(file, drm_legacy_addmap_ioctl, &map,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
if (err)
return err;
- if (__get_user(m32.offset, &map->offset)
- || __get_user(m32.mtrr, &map->mtrr)
- || __get_user(handle, &map->handle))
- return -EFAULT;
-
- m32.handle = (unsigned long)handle;
- if (m32.handle != (unsigned long)handle)
+ m32.offset = map.offset;
+ m32.mtrr = map.mtrr;
+ m32.handle = ptr_to_compat(map.handle);
+ if (map.handle != compat_ptr(m32.handle))
pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n",
- handle, m32.type, m32.offset);
+ map.handle, m32.type, m32.offset);
if (copy_to_user(argp, &m32, sizeof(m32)))
return -EFAULT;
@@ -270,19 +231,13 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_map32_t __user *argp = (void __user *)arg;
- struct drm_map __user *map;
+ struct drm_map map;
u32 handle;
if (get_user(handle, &argp->handle))
return -EFAULT;
-
- map = compat_alloc_user_space(sizeof(*map));
- if (!map)
- return -EFAULT;
- if (__put_user((void *)(unsigned long)handle, &map->handle))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RM_MAP, (unsigned long)map);
+ map.handle = compat_ptr(handle);
+ return drm_ioctl_kernel(file, drm_legacy_rmmap_ioctl, &map, DRM_AUTH);
}
typedef struct drm_client32 {
@@ -299,29 +254,24 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
{
drm_client32_t c32;
drm_client32_t __user *argp = (void __user *)arg;
- struct drm_client __user *client;
- int idx, err;
+ struct drm_client client;
+ int err;
- if (get_user(idx, &argp->idx))
+ if (copy_from_user(&c32, argp, sizeof(c32)))
return -EFAULT;
- client = compat_alloc_user_space(sizeof(*client));
- if (!client)
- return -EFAULT;
- if (__put_user(idx, &client->idx))
- return -EFAULT;
+ client.idx = c32.idx;
- err = drm_ioctl(file, DRM_IOCTL_GET_CLIENT, (unsigned long)client);
+ err = drm_ioctl_kernel(file, drm_getclient, &client, DRM_UNLOCKED);
if (err)
return err;
- if (__get_user(c32.idx, &client->idx)
- || __get_user(c32.auth, &client->auth)
- || __get_user(c32.pid, &client->pid)
- || __get_user(c32.uid, &client->uid)
- || __get_user(c32.magic, &client->magic)
- || __get_user(c32.iocs, &client->iocs))
- return -EFAULT;
+ c32.idx = client.idx;
+ c32.auth = client.auth;
+ c32.pid = client.pid;
+ c32.uid = client.uid;
+ c32.magic = client.magic;
+ c32.iocs = client.iocs;
if (copy_to_user(argp, &c32, sizeof(c32)))
return -EFAULT;
@@ -339,28 +289,14 @@ typedef struct drm_stats32 {
static int compat_drm_getstats(struct file *file, unsigned int cmd,
unsigned long arg)
{
- drm_stats32_t s32;
drm_stats32_t __user *argp = (void __user *)arg;
- struct drm_stats __user *stats;
- int i, err;
-
- memset(&s32, 0, sizeof(drm_stats32_t));
- stats = compat_alloc_user_space(sizeof(*stats));
- if (!stats)
- return -EFAULT;
+ int err;
- err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
+ err = drm_ioctl_kernel(file, drm_noop, NULL, DRM_UNLOCKED);
if (err)
return err;
- if (__get_user(s32.count, &stats->count))
- return -EFAULT;
- for (i = 0; i < 15; ++i)
- if (__get_user(s32.data[i].value, &stats->data[i].value)
- || __get_user(s32.data[i].type, &stats->data[i].type))
- return -EFAULT;
-
- if (copy_to_user(argp, &s32, sizeof(s32)))
+ if (clear_user(argp, sizeof(drm_stats32_t)))
return -EFAULT;
return 0;
}
@@ -378,26 +314,28 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_desc32_t __user *argp = (void __user *)arg;
- struct drm_buf_desc __user *buf;
+ drm_buf_desc32_t desc32;
+ struct drm_buf_desc desc;
int err;
- unsigned long agp_start;
- buf = compat_alloc_user_space(sizeof(*buf));
- if (!buf || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
+ if (copy_from_user(&desc32, argp, sizeof(drm_buf_desc32_t)))
return -EFAULT;
- if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
- || __get_user(agp_start, &argp->agp_start)
- || __put_user(agp_start, &buf->agp_start))
- return -EFAULT;
+ desc = (struct drm_buf_desc){
+ desc32.count, desc32.size, desc32.low_mark, desc32.high_mark,
+ desc32.flags, desc32.agp_start
+ };
- err = drm_ioctl(file, DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
+ err = drm_ioctl_kernel(file, drm_legacy_addbufs, &desc,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
if (err)
return err;
- if (__copy_in_user(argp, buf, offsetof(drm_buf_desc32_t, agp_start))
- || __get_user(agp_start, &buf->agp_start)
- || __put_user(agp_start, &argp->agp_start))
+ desc32 = (drm_buf_desc32_t){
+ desc.count, desc.size, desc.low_mark, desc.high_mark,
+ desc.flags, desc.agp_start
+ };
+ if (copy_to_user(argp, &desc32, sizeof(drm_buf_desc32_t)))
return -EFAULT;
return 0;
@@ -408,21 +346,17 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd,
{
drm_buf_desc32_t b32;
drm_buf_desc32_t __user *argp = (void __user *)arg;
- struct drm_buf_desc __user *buf;
+ struct drm_buf_desc buf;
if (copy_from_user(&b32, argp, sizeof(b32)))
return -EFAULT;
- buf = compat_alloc_user_space(sizeof(*buf));
- if (!buf)
- return -EFAULT;
-
- if (__put_user(b32.size, &buf->size)
- || __put_user(b32.low_mark, &buf->low_mark)
- || __put_user(b32.high_mark, &buf->high_mark))
- return -EFAULT;
+ buf.size = b32.size;
+ buf.low_mark = b32.low_mark;
+ buf.high_mark = b32.high_mark;
- return drm_ioctl(file, DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
+ return drm_ioctl_kernel(file, drm_legacy_markbufs, &buf,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
typedef struct drm_buf_info32 {
@@ -430,52 +364,42 @@ typedef struct drm_buf_info32 {
u32 list;
} drm_buf_info32_t;
+static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from)
+{
+ drm_buf_info32_t *request = data;
+ drm_buf_desc32_t __user *to = compat_ptr(request->list);
+ drm_buf_desc32_t v = {.count = from->buf_count,
+ .size = from->buf_size,
+ .low_mark = from->low_mark,
+ .high_mark = from->high_mark};
+ return copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags));
+}
+
+static int drm_legacy_infobufs32(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_buf_info32_t *request = data;
+ return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf32);
+}
+
static int compat_drm_infobufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_info32_t req32;
drm_buf_info32_t __user *argp = (void __user *)arg;
- drm_buf_desc32_t __user *to;
- struct drm_buf_info __user *request;
- struct drm_buf_desc __user *list;
- size_t nbytes;
- int i, err;
- int count, actual;
+ int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
- count = req32.count;
- to = (drm_buf_desc32_t __user *) (unsigned long)req32.list;
- if (count < 0)
- count = 0;
- if (count > 0
- && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t)))
- return -EFAULT;
-
- nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
- request = compat_alloc_user_space(nbytes);
- if (!request)
- return -EFAULT;
- list = (struct drm_buf_desc *) (request + 1);
-
- if (__put_user(count, &request->count)
- || __put_user(list, &request->list))
- return -EFAULT;
+ if (req32.count < 0)
+ req32.count = 0;
- err = drm_ioctl(file, DRM_IOCTL_INFO_BUFS, (unsigned long)request);
+ err = drm_ioctl_kernel(file, drm_legacy_infobufs32, &req32, DRM_AUTH);
if (err)
return err;
- if (__get_user(actual, &request->count))
- return -EFAULT;
- if (count >= actual)
- for (i = 0; i < actual; ++i)
- if (__copy_in_user(&to[i], &list[i],
- offsetof(struct drm_buf_desc, flags)))
- return -EFAULT;
-
- if (__put_user(actual, &argp->count))
+ if (put_user(req32.count, &argp->count))
return -EFAULT;
return 0;
@@ -494,54 +418,52 @@ typedef struct drm_buf_map32 {
u32 list; /**< Buffer information */
} drm_buf_map32_t;
+static int map_one_buf32(void *data, int idx, unsigned long virtual,
+ struct drm_buf *buf)
+{
+ drm_buf_map32_t *request = data;
+ drm_buf_pub32_t __user *to = compat_ptr(request->list) + idx;
+ drm_buf_pub32_t v;
+
+ v.idx = buf->idx;
+ v.total = buf->total;
+ v.used = 0;
+ v.address = virtual + buf->offset;
+ if (copy_to_user(to, &v, sizeof(v)))
+ return -EFAULT;
+ return 0;
+}
+
+static int drm_legacy_mapbufs32(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_buf_map32_t *request = data;
+ void __user *v;
+ int err = __drm_legacy_mapbufs(dev, data, &request->count,
+ &v, map_one_buf32,
+ file_priv);
+ request->virtual = ptr_to_compat(v);
+ return err;
+}
+
static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_map32_t __user *argp = (void __user *)arg;
drm_buf_map32_t req32;
- drm_buf_pub32_t __user *list32;
- struct drm_buf_map __user *request;
- struct drm_buf_pub __user *list;
- int i, err;
- int count, actual;
- size_t nbytes;
- void __user *addr;
+ int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
- count = req32.count;
- list32 = (void __user *)(unsigned long)req32.list;
-
- if (count < 0)
+ if (req32.count < 0)
return -EINVAL;
- nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
- request = compat_alloc_user_space(nbytes);
- if (!request)
- return -EFAULT;
- list = (struct drm_buf_pub *) (request + 1);
-
- if (__put_user(count, &request->count)
- || __put_user(list, &request->list))
- return -EFAULT;
- err = drm_ioctl(file, DRM_IOCTL_MAP_BUFS, (unsigned long)request);
+ err = drm_ioctl_kernel(file, drm_legacy_mapbufs32, &req32, DRM_AUTH);
if (err)
return err;
- if (__get_user(actual, &request->count))
- return -EFAULT;
- if (count >= actual)
- for (i = 0; i < actual; ++i)
- if (__copy_in_user(&list32[i], &list[i],
- offsetof(struct drm_buf_pub, address))
- || __get_user(addr, &list[i].address)
- || __put_user((unsigned long)addr,
- &list32[i].address))
- return -EFAULT;
-
- if (__put_user(actual, &argp->count)
- || __get_user(addr, &request->virtual)
- || __put_user((unsigned long)addr, &argp->virtual))
+ if (put_user(req32.count, &argp->count)
+ || put_user(req32.virtual, &argp->virtual))
return -EFAULT;
return 0;
@@ -556,21 +478,15 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_free32_t req32;
- struct drm_buf_free __user *request;
+ struct drm_buf_free request;
drm_buf_free32_t __user *argp = (void __user *)arg;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request)
- return -EFAULT;
- if (__put_user(req32.count, &request->count)
- || __put_user((int __user *)(unsigned long)req32.list,
- &request->list))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_FREE_BUFS, (unsigned long)request);
+ request.count = req32.count;
+ request.list = compat_ptr(req32.list);
+ return drm_ioctl_kernel(file, drm_legacy_freebufs, &request, DRM_AUTH);
}
typedef struct drm_ctx_priv_map32 {
@@ -582,48 +498,36 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_ctx_priv_map32_t req32;
- struct drm_ctx_priv_map __user *request;
+ struct drm_ctx_priv_map request;
drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request)
- return -EFAULT;
- if (__put_user(req32.ctx_id, &request->ctx_id)
- || __put_user((void *)(unsigned long)req32.handle,
- &request->handle))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
+ request.ctx_id = req32.ctx_id;
+ request.handle = compat_ptr(req32.handle);
+ return drm_ioctl_kernel(file, drm_legacy_setsareactx, &request,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct drm_ctx_priv_map __user *request;
+ struct drm_ctx_priv_map req;
+ drm_ctx_priv_map32_t req32;
drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
int err;
- unsigned int ctx_id;
- void *handle;
-
- if (!access_ok(VERIFY_WRITE, argp, sizeof(*argp))
- || __get_user(ctx_id, &argp->ctx_id))
- return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request)
- return -EFAULT;
- if (__put_user(ctx_id, &request->ctx_id))
+ if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
- err = drm_ioctl(file, DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
+ req.ctx_id = req32.ctx_id;
+ err = drm_ioctl_kernel(file, drm_legacy_getsareactx, &req, DRM_AUTH);
if (err)
return err;
- if (__get_user(handle, &request->handle)
- || __put_user((unsigned long)handle, &argp->handle))
+ req32.handle = ptr_to_compat(req.handle);
+ if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;
return 0;
@@ -639,26 +543,20 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
{
drm_ctx_res32_t __user *argp = (void __user *)arg;
drm_ctx_res32_t res32;
- struct drm_ctx_res __user *res;
+ struct drm_ctx_res res;
int err;
if (copy_from_user(&res32, argp, sizeof(res32)))
return -EFAULT;
- res = compat_alloc_user_space(sizeof(*res));
- if (!res)
- return -EFAULT;
- if (__put_user(res32.count, &res->count)
- || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
- &res->contexts))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_RES_CTX, (unsigned long)res);
+ res.count = res32.count;
+ res.contexts = compat_ptr(res32.contexts);
+ err = drm_ioctl_kernel(file, drm_legacy_resctx, &res, DRM_AUTH);
if (err)
return err;
- if (__get_user(res32.count, &res->count)
- || __put_user(res32.count, &argp->count))
+ res32.count = res.count;
+ if (copy_to_user(argp, &res32, sizeof(res32)))
return -EFAULT;
return 0;
@@ -682,38 +580,26 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
{
drm_dma32_t d32;
drm_dma32_t __user *argp = (void __user *)arg;
- struct drm_dma __user *d;
+ struct drm_dma d;
int err;
if (copy_from_user(&d32, argp, sizeof(d32)))
return -EFAULT;
- d = compat_alloc_user_space(sizeof(*d));
- if (!d)
- return -EFAULT;
-
- if (__put_user(d32.context, &d->context)
- || __put_user(d32.send_count, &d->send_count)
- || __put_user((int __user *)(unsigned long)d32.send_indices,
- &d->send_indices)
- || __put_user((int __user *)(unsigned long)d32.send_sizes,
- &d->send_sizes)
- || __put_user(d32.flags, &d->flags)
- || __put_user(d32.request_count, &d->request_count)
- || __put_user((int __user *)(unsigned long)d32.request_indices,
- &d->request_indices)
- || __put_user((int __user *)(unsigned long)d32.request_sizes,
- &d->request_sizes))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_DMA, (unsigned long)d);
+ d.context = d32.context;
+ d.send_count = d32.send_count;
+ d.send_indices = compat_ptr(d32.send_indices);
+ d.send_sizes = compat_ptr(d32.send_sizes);
+ d.flags = d32.flags;
+ d.request_count = d32.request_count;
+ d.request_indices = compat_ptr(d32.request_indices);
+ d.request_sizes = compat_ptr(d32.request_sizes);
+ err = drm_ioctl_kernel(file, drm_legacy_dma_ioctl, &d, DRM_AUTH);
if (err)
return err;
- if (__get_user(d32.request_size, &d->request_size)
- || __get_user(d32.granted_count, &d->granted_count)
- || __put_user(d32.request_size, &argp->request_size)
- || __put_user(d32.granted_count, &argp->granted_count))
+ if (put_user(d.request_size, &argp->request_size)
+ || put_user(d.granted_count, &argp->granted_count))
return -EFAULT;
return 0;
@@ -728,17 +614,13 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_mode32_t __user *argp = (void __user *)arg;
- drm_agp_mode32_t m32;
- struct drm_agp_mode __user *mode;
-
- if (get_user(m32.mode, &argp->mode))
- return -EFAULT;
+ struct drm_agp_mode mode;
- mode = compat_alloc_user_space(sizeof(*mode));
- if (put_user(m32.mode, &mode->mode))
+ if (get_user(mode.mode, &argp->mode))
return -EFAULT;
- return drm_ioctl(file, DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
+ return drm_ioctl_kernel(file, drm_agp_enable_ioctl, &mode,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
typedef struct drm_agp_info32 {
@@ -760,28 +642,22 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd,
{
drm_agp_info32_t __user *argp = (void __user *)arg;
drm_agp_info32_t i32;
- struct drm_agp_info __user *info;
+ struct drm_agp_info info;
int err;
- info = compat_alloc_user_space(sizeof(*info));
- if (!info)
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
+ err = drm_ioctl_kernel(file, drm_agp_info_ioctl, &info, DRM_AUTH);
if (err)
return err;
- if (__get_user(i32.agp_version_major, &info->agp_version_major)
- || __get_user(i32.agp_version_minor, &info->agp_version_minor)
- || __get_user(i32.mode, &info->mode)
- || __get_user(i32.aperture_base, &info->aperture_base)
- || __get_user(i32.aperture_size, &info->aperture_size)
- || __get_user(i32.memory_allowed, &info->memory_allowed)
- || __get_user(i32.memory_used, &info->memory_used)
- || __get_user(i32.id_vendor, &info->id_vendor)
- || __get_user(i32.id_device, &info->id_device))
- return -EFAULT;
-
+ i32.agp_version_major = info.agp_version_major;
+ i32.agp_version_minor = info.agp_version_minor;
+ i32.mode = info.mode;
+ i32.aperture_base = info.aperture_base;
+ i32.aperture_size = info.aperture_size;
+ i32.memory_allowed = info.memory_allowed;
+ i32.memory_used = info.memory_used;
+ i32.id_vendor = info.id_vendor;
+ i32.id_device = info.id_device;
if (copy_to_user(argp, &i32, sizeof(i32)))
return -EFAULT;
@@ -800,26 +676,24 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
{
drm_agp_buffer32_t __user *argp = (void __user *)arg;
drm_agp_buffer32_t req32;
- struct drm_agp_buffer __user *request;
+ struct drm_agp_buffer request;
int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request
- || __put_user(req32.size, &request->size)
- || __put_user(req32.type, &request->type))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
+ request.size = req32.size;
+ request.type = req32.type;
+ err = drm_ioctl_kernel(file, drm_agp_alloc_ioctl, &request,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
if (err)
return err;
- if (__get_user(req32.handle, &request->handle)
- || __get_user(req32.physical, &request->physical)
- || copy_to_user(argp, &req32, sizeof(req32))) {
- drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
+ req32.handle = request.handle;
+ req32.physical = request.physical;
+ if (copy_to_user(argp, &req32, sizeof(req32))) {
+ drm_ioctl_kernel(file, drm_agp_free_ioctl, &request,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
return -EFAULT;
}
@@ -830,16 +704,13 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_buffer32_t __user *argp = (void __user *)arg;
- struct drm_agp_buffer __user *request;
- u32 handle;
+ struct drm_agp_buffer request;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request
- || get_user(handle, &argp->handle)
- || __put_user(handle, &request->handle))
+ if (get_user(request.handle, &argp->handle))
return -EFAULT;
- return drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
+ return drm_ioctl_kernel(file, drm_agp_free_ioctl, &request,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
typedef struct drm_agp_binding32 {
@@ -852,34 +723,28 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
{
drm_agp_binding32_t __user *argp = (void __user *)arg;
drm_agp_binding32_t req32;
- struct drm_agp_binding __user *request;
+ struct drm_agp_binding request;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request
- || __put_user(req32.handle, &request->handle)
- || __put_user(req32.offset, &request->offset))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_AGP_BIND, (unsigned long)request);
+ request.handle = req32.handle;
+ request.offset = req32.offset;
+ return drm_ioctl_kernel(file, drm_agp_bind_ioctl, &request,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_binding32_t __user *argp = (void __user *)arg;
- struct drm_agp_binding __user *request;
- u32 handle;
+ struct drm_agp_binding request;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request
- || get_user(handle, &argp->handle)
- || __put_user(handle, &request->handle))
+ if (get_user(request.handle, &argp->handle))
return -EFAULT;
- return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
+ return drm_ioctl_kernel(file, drm_agp_unbind_ioctl, &request,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
#endif /* CONFIG_AGP */
@@ -892,23 +757,19 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_scatter_gather32_t __user *argp = (void __user *)arg;
- struct drm_scatter_gather __user *request;
+ struct drm_scatter_gather request;
int err;
- unsigned long x;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
- || __get_user(x, &argp->size)
- || __put_user(x, &request->size))
+ if (get_user(request.size, &argp->size))
return -EFAULT;
- err = drm_ioctl(file, DRM_IOCTL_SG_ALLOC, (unsigned long)request);
+ err = drm_ioctl_kernel(file, drm_legacy_sg_alloc, &request,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
if (err)
return err;
/* XXX not sure about the handle conversion here... */
- if (__get_user(x, &request->handle)
- || __put_user(x >> PAGE_SHIFT, &argp->handle))
+ if (put_user(request.handle >> PAGE_SHIFT, &argp->handle))
return -EFAULT;
return 0;
@@ -918,19 +779,17 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_scatter_gather32_t __user *argp = (void __user *)arg;
- struct drm_scatter_gather __user *request;
+ struct drm_scatter_gather request;
unsigned long x;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
- || __get_user(x, &argp->handle)
- || __put_user(x << PAGE_SHIFT, &request->handle))
+ if (get_user(x, &argp->handle))
return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_SG_FREE, (unsigned long)request);
+ request.handle = x << PAGE_SHIFT;
+ return drm_ioctl_kernel(file, drm_legacy_sg_free, &request,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+#if defined(CONFIG_X86)
typedef struct drm_update_draw32 {
drm_drawable_t handle;
unsigned int type;
@@ -943,22 +802,11 @@ static int compat_drm_update_draw(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_update_draw32_t update32;
- struct drm_update_draw __user *request;
- int err;
-
if (copy_from_user(&update32, (void __user *)arg, sizeof(update32)))
return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request ||
- __put_user(update32.handle, &request->handle) ||
- __put_user(update32.type, &request->type) ||
- __put_user(update32.num, &request->num) ||
- __put_user(update32.data, &request->data))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
- return err;
+ return drm_ioctl_kernel(file, drm_noop, NULL,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
#endif
@@ -985,36 +833,30 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
{
drm_wait_vblank32_t __user *argp = (void __user *)arg;
drm_wait_vblank32_t req32;
- union drm_wait_vblank __user *request;
+ union drm_wait_vblank req;
int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request
- || __put_user(req32.request.type, &request->request.type)
- || __put_user(req32.request.sequence, &request->request.sequence)
- || __put_user(req32.request.signal, &request->request.signal))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
+ req.request.type = req32.request.type;
+ req.request.sequence = req32.request.sequence;
+ req.request.signal = req32.request.signal;
+ err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
if (err)
return err;
- if (__get_user(req32.reply.type, &request->reply.type)
- || __get_user(req32.reply.sequence, &request->reply.sequence)
- || __get_user(req32.reply.tval_sec, &request->reply.tval_sec)
- || __get_user(req32.reply.tval_usec, &request->reply.tval_usec))
- return -EFAULT;
-
+ req32.reply.type = req.reply.type;
+ req32.reply.sequence = req.reply.sequence;
+ req32.reply.tval_sec = req.reply.tval_sec;
+ req32.reply.tval_usec = req.reply.tval_usec;
if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;
return 0;
}
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+#if defined(CONFIG_X86)
typedef struct drm_mode_fb_cmd232 {
u32 fb_id;
u32 width;
@@ -1031,82 +873,67 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg;
- struct drm_mode_fb_cmd232 req32;
- struct drm_mode_fb_cmd2 __user *req64;
- int i;
+ struct drm_mode_fb_cmd2 req64;
int err;
- if (copy_from_user(&req32, argp, sizeof(req32)))
+ if (copy_from_user(&req64, argp,
+ offsetof(drm_mode_fb_cmd232_t, modifier)))
return -EFAULT;
- req64 = compat_alloc_user_space(sizeof(*req64));
-
- if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64))
- || __put_user(req32.width, &req64->width)
- || __put_user(req32.height, &req64->height)
- || __put_user(req32.pixel_format, &req64->pixel_format)
- || __put_user(req32.flags, &req64->flags))
+ if (copy_from_user(&req64.modifier, &argp->modifier,
+ sizeof(req64.modifier)))
return -EFAULT;
- for (i = 0; i < 4; i++) {
- if (__put_user(req32.handles[i], &req64->handles[i]))
- return -EFAULT;
- if (__put_user(req32.pitches[i], &req64->pitches[i]))
- return -EFAULT;
- if (__put_user(req32.offsets[i], &req64->offsets[i]))
- return -EFAULT;
- if (__put_user(req32.modifier[i], &req64->modifier[i]))
- return -EFAULT;
- }
-
- err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64);
+ err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64,
+ DRM_CONTROL_ALLOW|DRM_UNLOCKED);
if (err)
return err;
- if (__get_user(req32.fb_id, &req64->fb_id))
- return -EFAULT;
-
- if (copy_to_user(argp, &req32, sizeof(req32)))
+ if (put_user(req64.fb_id, &argp->fb_id))
return -EFAULT;
return 0;
}
#endif
-static drm_ioctl_compat_t *drm_compat_ioctls[] = {
- [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT32)] = compat_drm_getclient,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS32)] = compat_drm_getstats,
- [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE32)] = compat_drm_setunique,
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP32)] = compat_drm_addmap,
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS32)] = compat_drm_addbufs,
- [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS32)] = compat_drm_markbufs,
- [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS32)] = compat_drm_infobufs,
- [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS32)] = compat_drm_mapbufs,
- [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS32)] = compat_drm_freebufs,
- [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP32)] = compat_drm_rmmap,
- [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX32)] = compat_drm_setsareactx,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx,
- [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx,
- [DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma,
+static struct {
+ drm_ioctl_compat_t *fn;
+ char *name;
+} drm_compat_ioctls[] = {
+#define DRM_IOCTL32_DEF(n, f) [DRM_IOCTL_NR(n##32)] = {.fn = f, .name = #n}
+ DRM_IOCTL32_DEF(DRM_IOCTL_VERSION, compat_drm_version),
+ DRM_IOCTL32_DEF(DRM_IOCTL_GET_UNIQUE, compat_drm_getunique),
+ DRM_IOCTL32_DEF(DRM_IOCTL_GET_MAP, compat_drm_getmap),
+ DRM_IOCTL32_DEF(DRM_IOCTL_GET_CLIENT, compat_drm_getclient),
+ DRM_IOCTL32_DEF(DRM_IOCTL_GET_STATS, compat_drm_getstats),
+ DRM_IOCTL32_DEF(DRM_IOCTL_SET_UNIQUE, compat_drm_setunique),
+ DRM_IOCTL32_DEF(DRM_IOCTL_ADD_MAP, compat_drm_addmap),
+ DRM_IOCTL32_DEF(DRM_IOCTL_ADD_BUFS, compat_drm_addbufs),
+ DRM_IOCTL32_DEF(DRM_IOCTL_MARK_BUFS, compat_drm_markbufs),
+ DRM_IOCTL32_DEF(DRM_IOCTL_INFO_BUFS, compat_drm_infobufs),
+ DRM_IOCTL32_DEF(DRM_IOCTL_MAP_BUFS, compat_drm_mapbufs),
+ DRM_IOCTL32_DEF(DRM_IOCTL_FREE_BUFS, compat_drm_freebufs),
+ DRM_IOCTL32_DEF(DRM_IOCTL_RM_MAP, compat_drm_rmmap),
+ DRM_IOCTL32_DEF(DRM_IOCTL_SET_SAREA_CTX, compat_drm_setsareactx),
+ DRM_IOCTL32_DEF(DRM_IOCTL_GET_SAREA_CTX, compat_drm_getsareactx),
+ DRM_IOCTL32_DEF(DRM_IOCTL_RES_CTX, compat_drm_resctx),
+ DRM_IOCTL32_DEF(DRM_IOCTL_DMA, compat_drm_dma),
#if IS_ENABLED(CONFIG_AGP)
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable,
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info,
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc,
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE32)] = compat_drm_agp_free,
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND32)] = compat_drm_agp_bind,
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND32)] = compat_drm_agp_unbind,
+ DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ENABLE, compat_drm_agp_enable),
+ DRM_IOCTL32_DEF(DRM_IOCTL_AGP_INFO, compat_drm_agp_info),
+ DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ALLOC, compat_drm_agp_alloc),
+ DRM_IOCTL32_DEF(DRM_IOCTL_AGP_FREE, compat_drm_agp_free),
+ DRM_IOCTL32_DEF(DRM_IOCTL_AGP_BIND, compat_drm_agp_bind),
+ DRM_IOCTL32_DEF(DRM_IOCTL_AGP_UNBIND, compat_drm_agp_unbind),
#endif
- [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc,
- [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free,
+ DRM_IOCTL32_DEF(DRM_IOCTL_SG_ALLOC, compat_drm_sg_alloc),
+ DRM_IOCTL32_DEF(DRM_IOCTL_SG_FREE, compat_drm_sg_free),
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
- [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
+ DRM_IOCTL32_DEF(DRM_IOCTL_UPDATE_DRAW, compat_drm_update_draw),
#endif
- [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+ DRM_IOCTL32_DEF(DRM_IOCTL_WAIT_VBLANK, compat_drm_wait_vblank),
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
- [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
+ DRM_IOCTL32_DEF(DRM_IOCTL_MODE_ADDFB2, compat_drm_mode_addfb2),
#endif
};
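The table rework stores the stringified ioctl name next to each handler so drm_compat_ioctl() can log what it dispatched. For one entry, the macro expands roughly to:

	/* Sketch: what DRM_IOCTL32_DEF produces for a single table slot. */
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = {
		.fn   = compat_drm_version,
		.name = "DRM_IOCTL_VERSION",	/* stringified for the DRM_DEBUG line */
	},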
@@ -1127,6 +954,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
+ struct drm_file *file_priv = filp->private_data;
drm_ioctl_compat_t *fn;
int ret;
@@ -1137,13 +965,18 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr >= ARRAY_SIZE(drm_compat_ioctls))
return drm_ioctl(filp, cmd, arg);
- fn = drm_compat_ioctls[nr];
-
- if (fn != NULL)
- ret = (*fn) (filp, cmd, arg);
- else
- ret = drm_ioctl(filp, cmd, arg);
+ fn = drm_compat_ioctls[nr].fn;
+ if (!fn)
+ return drm_ioctl(filp, cmd, arg);
+ DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated,
+ drm_compat_ioctls[nr].name);
+ ret = (*fn)(filp, cmd, arg);
+ if (ret)
+ DRM_DEBUG("ret = %d\n", ret);
return ret;
}
EXPORT_SYMBOL(drm_compat_ioctl);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index f7f150e4a0c0..8bfeb32f8a10 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -107,7 +107,7 @@
*
* Copies the bus id from drm_device::unique into user space.
*/
-static int drm_getunique(struct drm_device *dev, void *data,
+int drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
@@ -172,7 +172,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
* Searches for the client with the specified index and copies its information
* into userspace
*/
-static int drm_getclient(struct drm_device *dev, void *data,
+int drm_getclient(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_client *client = data;
@@ -464,7 +464,7 @@ static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
*
* Fills in the version information in \p arg.
*/
-static int drm_version(struct drm_device *dev, void *data,
+int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_version *version = data;
@@ -709,6 +709,33 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
* the driver-specific IOCTLs are wired up.
*/
+long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
+ u32 flags)
+{
+ struct drm_file *file_priv = file->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
+ int retcode;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
+ retcode = drm_ioctl_permit(flags, file_priv);
+ if (unlikely(retcode))
+ return retcode;
+
+ /* Enforce sane locking for modern driver ioctls. */
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY) ||
+ (flags & DRM_UNLOCKED))
+ retcode = func(dev, kdata, file_priv);
+ else {
+ mutex_lock(&drm_global_mutex);
+ retcode = func(dev, kdata, file_priv);
+ mutex_unlock(&drm_global_mutex);
+ }
+ return retcode;
+}
+EXPORT_SYMBOL(drm_ioctl_kernel);
+
/**
* drm_ioctl - ioctl callback implementation for DRM drivers
* @filp: file this ioctl is called on
@@ -777,10 +804,6 @@ long drm_ioctl(struct file *filp,
goto err_i1;
}
- retcode = drm_ioctl_permit(ioctl->flags, file_priv);
- if (unlikely(retcode))
- goto err_i1;
-
if (ksize <= sizeof(stack_kdata)) {
kdata = stack_kdata;
} else {
@@ -799,16 +822,7 @@ long drm_ioctl(struct file *filp,
if (ksize > in_size)
memset(kdata + in_size, 0, ksize - in_size);
- /* Enforce sane locking for modern driver ioctls. */
- if (!drm_core_check_feature(dev, DRIVER_LEGACY) ||
- (ioctl->flags & DRM_UNLOCKED))
- retcode = func(dev, kdata, file_priv);
- else {
- mutex_lock(&drm_global_mutex);
- retcode = func(dev, kdata, file_priv);
- mutex_unlock(&drm_global_mutex);
- }
-
+ retcode = drm_ioctl_kernel(filp, func, kdata, ioctl->flags);
if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
retcode = -EFAULT;
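Factoring the permission check and the legacy-locking decision out of drm_ioctl() into drm_ioctl_kernel() lets in-kernel callers invoke an ioctl implementation on already-unpacked kernel data, skipping the copy_from_user()/copy_to_user() round trip. A minimal sketch of the calling convention, with a hypothetical handler that is not part of this patch:

static int example_noop(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return 0;	/* a real handler would operate on *data */
}

/* from in-kernel code that holds a struct file for the DRM device: */
ret = drm_ioctl_kernel(filp, example_noop, NULL, DRM_UNLOCKED);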
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index e4bb5ad747c8..280fbeb846ff 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -74,6 +74,13 @@ int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+int __drm_legacy_infobufs(struct drm_device *, void *, int *,
+ int (*)(void *, int, struct drm_buf_entry *));
+int __drm_legacy_mapbufs(struct drm_device *, void *, int *,
+ void __user **,
+ int (*)(void *, int, unsigned long, struct drm_buf *),
+ struct drm_file *);
+
#ifdef CONFIG_DRM_VM
void drm_legacy_vma_flush(struct drm_device *d);
#else
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index f64af5e06ac6..a5b38a80a99a 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -77,17 +77,15 @@ EXPORT_SYMBOL(drm_syncobj_find);
/**
* drm_syncobj_replace_fence - replace fence in a sync object.
- * @file_private: drm file private pointer.
* @syncobj: Sync object to replace fence in
* @fence: fence to install in sync file.
*
* This replaces the fence on a sync object.
*/
-void drm_syncobj_replace_fence(struct drm_file *file_private,
- struct drm_syncobj *syncobj,
+void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
struct dma_fence *fence)
{
- struct dma_fence *old_fence = NULL;
+ struct dma_fence *old_fence;
if (fence)
dma_fence_get(fence);
@@ -292,7 +290,7 @@ int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
return -ENOENT;
}
- drm_syncobj_replace_fence(file_private, syncobj, fence);
+ drm_syncobj_replace_fence(syncobj, fence);
dma_fence_put(fence);
drm_syncobj_put(syncobj);
return 0;
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 8099574c8a11..70f2b9593edc 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -277,7 +277,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
* Otherwise, reinitialization is delayed until the next vblank interrupt;
* assign 0 for now to mark the vblank timestamp as invalid.
*/
- if (!rc && in_vblank_irq)
+ if (!rc && !in_vblank_irq)
t_vblank = (struct timeval) {0, 0};
store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h
index e881482b5971..207f45c999c3 100644
--- a/drivers/gpu/drm/etnaviv/common.xml.h
+++ b/drivers/gpu/drm/etnaviv/common.xml.h
@@ -8,10 +8,38 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- state_hi.xml ( 24309 bytes, from 2015-12-12 09:02:53)
-- common.xml ( 18379 bytes, from 2015-12-12 09:02:53)
+- state.xml ( 19930 bytes, from 2017-03-09 15:43:43)
+- common.xml ( 23473 bytes, from 2017-03-09 15:43:43)
+- state_hi.xml ( 26403 bytes, from 2017-03-09 15:43:43)
+- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
+- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56)
+- state_3d.xml ( 66957 bytes, from 2017-03-09 15:43:43)
+- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56)
-Copyright (C) 2015
+Copyright (C) 2012-2017 by the following authors:
+- Wladimir J. van der Laan <laanwj@gmail.com>
+- Christian Gmeiner <christian.gmeiner@gmail.com>
+- Lucas Stach <l.stach@pengutronix.de>
+- Russell King <rmk@arm.linux.org.uk>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sub license,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
*/
@@ -162,129 +190,129 @@ Copyright (C) 2015
#define chipMinorFeatures1_FC_FLUSH_STALL 0x80000000
#define chipMinorFeatures2_LINE_LOOP 0x00000001
#define chipMinorFeatures2_LOGIC_OP 0x00000002
-#define chipMinorFeatures2_UNK2 0x00000004
+#define chipMinorFeatures2_SEAMLESS_CUBE_MAP 0x00000004
#define chipMinorFeatures2_SUPERTILED_TEXTURE 0x00000008
-#define chipMinorFeatures2_UNK4 0x00000010
+#define chipMinorFeatures2_LINEAR_PE 0x00000010
#define chipMinorFeatures2_RECT_PRIMITIVE 0x00000020
#define chipMinorFeatures2_COMPOSITION 0x00000040
#define chipMinorFeatures2_CORRECT_AUTO_DISABLE_COUNT 0x00000080
-#define chipMinorFeatures2_UNK8 0x00000100
-#define chipMinorFeatures2_UNK9 0x00000200
-#define chipMinorFeatures2_UNK10 0x00000400
+#define chipMinorFeatures2_PE_SWIZZLE 0x00000100
+#define chipMinorFeatures2_END_EVENT 0x00000200
+#define chipMinorFeatures2_S1S8 0x00000400
#define chipMinorFeatures2_HALTI1 0x00000800
-#define chipMinorFeatures2_UNK12 0x00001000
-#define chipMinorFeatures2_UNK13 0x00002000
-#define chipMinorFeatures2_UNK14 0x00004000
+#define chipMinorFeatures2_RGB888 0x00001000
+#define chipMinorFeatures2_TX__YUV_ASSEMBLER 0x00002000
+#define chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING 0x00004000
#define chipMinorFeatures2_EXTRA_TEXTURE_STATE 0x00008000
#define chipMinorFeatures2_FULL_DIRECTFB 0x00010000
#define chipMinorFeatures2_2D_TILING 0x00020000
#define chipMinorFeatures2_THREAD_WALKER_IN_PS 0x00040000
#define chipMinorFeatures2_TILE_FILLER 0x00080000
-#define chipMinorFeatures2_UNK20 0x00100000
+#define chipMinorFeatures2_YUV_STANDARD 0x00100000
#define chipMinorFeatures2_2D_MULTI_SOURCE_BLIT 0x00200000
-#define chipMinorFeatures2_UNK22 0x00400000
-#define chipMinorFeatures2_UNK23 0x00800000
-#define chipMinorFeatures2_UNK24 0x01000000
+#define chipMinorFeatures2_YUV_CONVERSION 0x00400000
+#define chipMinorFeatures2_FLUSH_FIXED_2D 0x00800000
+#define chipMinorFeatures2_INTERLEAVER 0x01000000
#define chipMinorFeatures2_MIXED_STREAMS 0x02000000
#define chipMinorFeatures2_2D_420_L2CACHE 0x04000000
-#define chipMinorFeatures2_UNK27 0x08000000
+#define chipMinorFeatures2_BUG_FIXES7 0x08000000
#define chipMinorFeatures2_2D_NO_INDEX8_BRUSH 0x10000000
#define chipMinorFeatures2_TEXTURE_TILED_READ 0x20000000
-#define chipMinorFeatures2_UNK30 0x40000000
-#define chipMinorFeatures2_UNK31 0x80000000
+#define chipMinorFeatures2_DECOMPRESS_Z16 0x40000000
+#define chipMinorFeatures2_BUG_FIXES8 0x80000000
#define chipMinorFeatures3_ROTATION_STALL_FIX 0x00000001
-#define chipMinorFeatures3_UNK1 0x00000002
+#define chipMinorFeatures3_OCL_ONLY 0x00000002
#define chipMinorFeatures3_2D_MULTI_SOURCE_BLT_EX 0x00000004
-#define chipMinorFeatures3_UNK3 0x00000008
-#define chipMinorFeatures3_UNK4 0x00000010
-#define chipMinorFeatures3_UNK5 0x00000020
-#define chipMinorFeatures3_UNK6 0x00000040
-#define chipMinorFeatures3_UNK7 0x00000080
+#define chipMinorFeatures3_INSTRUCTION_CACHE 0x00000008
+#define chipMinorFeatures3_GEOMETRY_SHADER 0x00000010
+#define chipMinorFeatures3_TEX_COMPRESSION_SUPERTILED 0x00000020
+#define chipMinorFeatures3_GENERICS 0x00000040
+#define chipMinorFeatures3_BUG_FIXES9 0x00000080
#define chipMinorFeatures3_FAST_MSAA 0x00000100
-#define chipMinorFeatures3_UNK9 0x00000200
+#define chipMinorFeatures3_WCLIP 0x00000200
#define chipMinorFeatures3_BUG_FIXES10 0x00000400
-#define chipMinorFeatures3_UNK11 0x00000800
+#define chipMinorFeatures3_UNIFIED_SAMPLERS 0x00000800
#define chipMinorFeatures3_BUG_FIXES11 0x00001000
-#define chipMinorFeatures3_UNK13 0x00002000
-#define chipMinorFeatures3_UNK14 0x00004000
-#define chipMinorFeatures3_UNK15 0x00008000
-#define chipMinorFeatures3_UNK16 0x00010000
-#define chipMinorFeatures3_UNK17 0x00020000
+#define chipMinorFeatures3_PERFORMANCE_COUNTERS 0x00002000
+#define chipMinorFeatures3_HAS_FAST_TRANSCENDENTALS 0x00004000
+#define chipMinorFeatures3_BUG_FIXES12 0x00008000
+#define chipMinorFeatures3_BUG_FIXES13 0x00010000
+#define chipMinorFeatures3_DE_ENHANCEMENTS1 0x00020000
#define chipMinorFeatures3_ACE 0x00040000
-#define chipMinorFeatures3_UNK19 0x00080000
-#define chipMinorFeatures3_UNK20 0x00100000
-#define chipMinorFeatures3_UNK21 0x00200000
+#define chipMinorFeatures3_TX_ENHANCEMENTS1 0x00080000
+#define chipMinorFeatures3_SH_ENHANCEMENTS1 0x00100000
+#define chipMinorFeatures3_SH_ENHANCEMENTS2 0x00200000
#define chipMinorFeatures3_UNK22 0x00400000
-#define chipMinorFeatures3_UNK23 0x00800000
+#define chipMinorFeatures3_2D_FC_SOURCE 0x00800000
#define chipMinorFeatures3_UNK24 0x01000000
#define chipMinorFeatures3_UNK25 0x02000000
#define chipMinorFeatures3_NEW_HZ 0x04000000
#define chipMinorFeatures3_UNK27 0x08000000
#define chipMinorFeatures3_UNK28 0x10000000
-#define chipMinorFeatures3_UNK29 0x20000000
+#define chipMinorFeatures3_SH_ENHANCEMENTS3 0x20000000
#define chipMinorFeatures3_UNK30 0x40000000
#define chipMinorFeatures3_UNK31 0x80000000
#define chipMinorFeatures4_UNK0 0x00000001
-#define chipMinorFeatures4_UNK1 0x00000002
-#define chipMinorFeatures4_UNK2 0x00000004
+#define chipMinorFeatures4_PE_ENHANCEMENTS2 0x00000002
+#define chipMinorFeatures4_FRUSTUM_CLIP_FIX 0x00000004
#define chipMinorFeatures4_UNK3 0x00000008
#define chipMinorFeatures4_UNK4 0x00000010
-#define chipMinorFeatures4_UNK5 0x00000020
-#define chipMinorFeatures4_UNK6 0x00000040
+#define chipMinorFeatures4_2D_GAMMA 0x00000020
+#define chipMinorFeatures4_SINGLE_BUFFER 0x00000040
#define chipMinorFeatures4_UNK7 0x00000080
#define chipMinorFeatures4_UNK8 0x00000100
#define chipMinorFeatures4_UNK9 0x00000200
#define chipMinorFeatures4_UNK10 0x00000400
-#define chipMinorFeatures4_UNK11 0x00000800
-#define chipMinorFeatures4_UNK12 0x00001000
-#define chipMinorFeatures4_UNK13 0x00002000
+#define chipMinorFeatures4_TX_LERP_PRECISION_FIX 0x00000800
+#define chipMinorFeatures4_2D_COLOR_SPACE_CONVERSION 0x00001000
+#define chipMinorFeatures4_TEXTURE_ASTC 0x00002000
#define chipMinorFeatures4_UNK14 0x00004000
#define chipMinorFeatures4_UNK15 0x00008000
#define chipMinorFeatures4_HALTI2 0x00010000
#define chipMinorFeatures4_UNK17 0x00020000
#define chipMinorFeatures4_SMALL_MSAA 0x00040000
#define chipMinorFeatures4_UNK19 0x00080000
-#define chipMinorFeatures4_UNK20 0x00100000
-#define chipMinorFeatures4_UNK21 0x00200000
-#define chipMinorFeatures4_UNK22 0x00400000
-#define chipMinorFeatures4_UNK23 0x00800000
-#define chipMinorFeatures4_UNK24 0x01000000
-#define chipMinorFeatures4_UNK25 0x02000000
-#define chipMinorFeatures4_UNK26 0x04000000
-#define chipMinorFeatures4_UNK27 0x08000000
+#define chipMinorFeatures4_NEW_RA 0x00100000
+#define chipMinorFeatures4_2D_OPF_YUV_OUTPUT 0x00200000
+#define chipMinorFeatures4_2D_MULTI_SOURCE_BLT_EX2 0x00400000
+#define chipMinorFeatures4_NO_USER_CSC 0x00800000
+#define chipMinorFeatures4_ZFIXES 0x01000000
+#define chipMinorFeatures4_BUG_FIXES18 0x02000000
+#define chipMinorFeatures4_2D_COMPRESSION 0x04000000
+#define chipMinorFeatures4_PROBE 0x08000000
#define chipMinorFeatures4_UNK28 0x10000000
-#define chipMinorFeatures4_UNK29 0x20000000
+#define chipMinorFeatures4_2D_SUPER_TILE_VERSION 0x20000000
#define chipMinorFeatures4_UNK30 0x40000000
#define chipMinorFeatures4_UNK31 0x80000000
#define chipMinorFeatures5_UNK0 0x00000001
#define chipMinorFeatures5_UNK1 0x00000002
#define chipMinorFeatures5_UNK2 0x00000004
#define chipMinorFeatures5_UNK3 0x00000008
-#define chipMinorFeatures5_UNK4 0x00000010
+#define chipMinorFeatures5_EEZ 0x00000010
#define chipMinorFeatures5_UNK5 0x00000020
#define chipMinorFeatures5_UNK6 0x00000040
#define chipMinorFeatures5_UNK7 0x00000080
#define chipMinorFeatures5_UNK8 0x00000100
#define chipMinorFeatures5_HALTI3 0x00000200
#define chipMinorFeatures5_UNK10 0x00000400
-#define chipMinorFeatures5_UNK11 0x00000800
+#define chipMinorFeatures5_2D_ONE_PASS_FILTER_TAP 0x00000800
#define chipMinorFeatures5_UNK12 0x00001000
-#define chipMinorFeatures5_UNK13 0x00002000
-#define chipMinorFeatures5_UNK14 0x00004000
+#define chipMinorFeatures5_SEPARATE_SRC_DST 0x00002000
+#define chipMinorFeatures5_HALTI4 0x00004000
#define chipMinorFeatures5_UNK15 0x00008000
-#define chipMinorFeatures5_UNK16 0x00010000
-#define chipMinorFeatures5_UNK17 0x00020000
+#define chipMinorFeatures5_ANDROID_ONLY 0x00010000
+#define chipMinorFeatures5_HAS_PRODUCTID 0x00020000
#define chipMinorFeatures5_UNK18 0x00040000
#define chipMinorFeatures5_UNK19 0x00080000
-#define chipMinorFeatures5_UNK20 0x00100000
+#define chipMinorFeatures5_PE_DITHER_FIX2 0x00100000
#define chipMinorFeatures5_UNK21 0x00200000
#define chipMinorFeatures5_UNK22 0x00400000
#define chipMinorFeatures5_UNK23 0x00800000
#define chipMinorFeatures5_UNK24 0x01000000
#define chipMinorFeatures5_UNK25 0x02000000
#define chipMinorFeatures5_UNK26 0x04000000
-#define chipMinorFeatures5_UNK27 0x08000000
-#define chipMinorFeatures5_UNK28 0x10000000
+#define chipMinorFeatures5_RS_DEPTHSTENCIL_NATIVE_SUPPORT 0x08000000
+#define chipMinorFeatures5_V2_MSAA_COMP_FIX 0x10000000
#define chipMinorFeatures5_UNK29 0x20000000
#define chipMinorFeatures5_UNK30 0x40000000
#define chipMinorFeatures5_UNK31 0x80000000
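The newly named bits are single-bit masks tested against the feature words read from the GPU at probe time; chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING, for instance, gates the new clock-scaling path in etnaviv_gpu.c further down. Sketch of the usage pattern:

if (gpu->identity.minor_features2 &
    chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) {
	/* core supports DFS; scale via clk_set_rate() */
}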
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 5255278dde56..91e17aeee1da 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -495,6 +495,7 @@ static struct drm_driver etnaviv_drm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
+ .gem_prime_res_obj = etnaviv_gem_prime_res_obj,
.gem_prime_pin = etnaviv_gem_prime_pin,
.gem_prime_unpin = etnaviv_gem_prime_unpin,
.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index e41f38667c1c..058389f93b69 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -80,6 +80,7 @@ void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma);
+struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj);
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index d6fb724fc3cc..9a3bea738330 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -411,16 +411,20 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct drm_device *dev = obj->dev;
bool write = !!(op & ETNA_PREP_WRITE);
- unsigned long remain =
- op & ETNA_PREP_NOSYNC ? 0 : etnaviv_timeout_to_jiffies(timeout);
- long lret;
-
- lret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
- write, true, remain);
- if (lret < 0)
- return lret;
- else if (lret == 0)
- return remain == 0 ? -EBUSY : -ETIMEDOUT;
+ int ret;
+
+ if (op & ETNA_PREP_NOSYNC) {
+ if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
+ write))
+ return -EBUSY;
+ } else {
+ unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
+
+ ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
+ write, true, remain);
+ if (ret <= 0)
+ return ret == 0 ? -ETIMEDOUT : ret;
+ }
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
if (!etnaviv_obj->sgt) {
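The rewrite separates the nonblocking probe from the bounded wait, so the NOSYNC case no longer has to be encoded as a zero timeout. A summary of the resulting return semantics, derived from the code above:

/*
 * ETNA_PREP_NOSYNC and BO still busy   -> -EBUSY
 * wait timed out (ret == 0)            -> -ETIMEDOUT
 * wait failed (ret < 0)                -> that errno
 * BO idle, or became idle in time      -> fall through to cache handling
 */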
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index c4a091e87426..e437fba1209d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -106,9 +106,10 @@ struct etnaviv_gem_submit {
struct etnaviv_gpu *gpu;
struct ww_acquire_ctx ticket;
struct dma_fence *fence;
+ u32 flags;
unsigned int nr_bos;
struct etnaviv_gem_submit_bo bos[0];
- u32 flags;
+ /* No new members here, the previous one is variable-length! */
};
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
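Moving flags ahead of bos[] matters because a submit is allocated with the BO array inline at the end of the struct; any member placed after the zero-length array would be overwritten by the BO entries. An illustrative sketch of the allocation pattern such a layout implies (not quoted from the driver):

submit = kzalloc(sizeof(*submit) + nr_bos * sizeof(submit->bos[0]),
		 GFP_KERNEL);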
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 367bf952f61a..e5da4f2300ba 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -150,3 +150,10 @@ fail:
return ERR_PTR(ret);
}
+
+struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+ return etnaviv_obj->resv;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index ee7069e93eda..5bd93169dac2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -172,7 +172,7 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
- bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
+ bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
explicit);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 9a9c40717801..ada45fdd0eae 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -412,13 +412,19 @@ static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
{
- unsigned int fscale = 1 << (6 - gpu->freq_scale);
- u32 clock;
-
- clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
- VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
+ if (gpu->identity.minor_features2 &
+ chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) {
+ clk_set_rate(gpu->clk_core,
+ gpu->base_rate_core >> gpu->freq_scale);
+ clk_set_rate(gpu->clk_shader,
+ gpu->base_rate_shader >> gpu->freq_scale);
+ } else {
+ unsigned int fscale = 1 << (6 - gpu->freq_scale);
+ u32 clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
+ VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
- etnaviv_gpu_load_clock(gpu, clock);
+ etnaviv_gpu_load_clock(gpu, clock);
+ }
}
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
@@ -523,9 +529,10 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
- /* Disable PA clock gating for GC400+ except for GC420 */
+ /* Disable PA clock gating for GC400+ cores without the bugfix, except GC420 */
if (gpu->identity.model >= chipModel_GC400 &&
- gpu->identity.model != chipModel_GC420)
+ gpu->identity.model != chipModel_GC420 &&
+ !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;
/*
@@ -541,6 +548,11 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
if (gpu->identity.revision < 0x5422)
pmc |= BIT(15); /* Unknown bit */
+ /* Disable TX clock gating on affected core revisions. */
+ if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
+ etnaviv_is_model_rev(gpu, GC2000, 0x5108))
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
+
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
@@ -1736,11 +1748,13 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
DBG("clk_core: %p", gpu->clk_core);
if (IS_ERR(gpu->clk_core))
gpu->clk_core = NULL;
+ gpu->base_rate_core = clk_get_rate(gpu->clk_core);
gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
DBG("clk_shader: %p", gpu->clk_shader);
if (IS_ERR(gpu->clk_shader))
gpu->clk_shader = NULL;
+ gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);
/* TODO: figure out max mapped size */
dev_set_drvdata(dev, gpu);
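With base_rate_core/base_rate_shader captured at probe time, the DFS path halves the clock rate per freq_scale step, while the legacy path narrows the FSCALE divider instead. A worked example of the two paths in etnaviv_gpu_update_clock(), using illustrative values:

/*
 * freq_scale == 0: clk_set_rate(core, base >> 0)  -- full rate
 *                  fscale = 1 << (6 - 0) = 64     -- FSCALE at maximum
 * freq_scale == 1: clk_set_rate(core, base >> 1)  -- half rate
 *                  fscale = 1 << (6 - 1) = 32     -- FSCALE halved
 */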
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 9227a9740447..689cb8f3680c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -152,6 +152,8 @@ struct etnaviv_gpu {
u32 hangcheck_dma_addr;
struct work_struct recover_work;
unsigned int freq_scale;
+ unsigned long base_rate_core;
+ unsigned long base_rate_shader;
};
static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index c77a5aced81a..d48fd7c918f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -181,8 +181,8 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
- if (index >= MAX_FB_BUFFER)
- return DMA_ERROR_CODE;
+ if (WARN_ON_ONCE(index >= MAX_FB_BUFFER))
+ return 0;
return exynos_fb->dma_addr[index];
}
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index a5cd5dacf055..e9e64e8e9765 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -21,6 +21,7 @@ config DRM_I915
select ACPI_BUTTON if ACPI
select SYNC_FILE
select IOSF_MBI
+ select CRC32
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 51241de5e7a7..713848c36349 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2536,6 +2536,11 @@ static int scan_workload(struct intel_vgpu_workload *workload)
gma_head == gma_tail)
return 0;
+ if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
@@ -2579,6 +2584,11 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.rb_va = wa_ctx->indirect_ctx.shadow_va;
s.workload = workload;
+ if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index e0261fcc5b50..2deb05f618fb 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -197,6 +197,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ if (IS_BROADWELL(dev_priv)) {
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) &=
+ ~PORT_CLK_SEL_MASK;
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |=
+ PORT_CLK_SEL_LCPLL_810;
+ }
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
@@ -211,6 +217,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ if (IS_BROADWELL(dev_priv)) {
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) &=
+ ~PORT_CLK_SEL_MASK;
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |=
+ PORT_CLK_SEL_LCPLL_810;
+ }
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
@@ -225,6 +237,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ if (IS_BROADWELL(dev_priv)) {
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) &=
+ ~PORT_CLK_SEL_MASK;
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |=
+ PORT_CLK_SEL_LCPLL_810;
+ }
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
@@ -244,6 +262,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
}
+
+ /* Clear the host CRT status so the guest cannot detect the host CRT. */
+ if (IS_BROADWELL(dev_priv))
+ vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 66374dba3b1a..6166e34d892b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2259,6 +2259,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
ret = setup_spt_oos(gvt);
if (ret) {
gvt_err("fail to initialize SPT oos\n");
+ dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ __free_page(gvt->gtt.scratch_ggtt_page);
return ret;
}
}
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 1414d7e6148d..17febe830ff6 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -367,21 +367,24 @@ static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- *(u32 *)p_data = (1 << 17);
- return 0;
-}
-
-static int dpy_reg_mmio_read_2(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes)
-{
- *(u32 *)p_data = 3;
- return 0;
-}
+ switch (offset) {
+ case 0xe651c:
+ case 0xe661c:
+ case 0xe671c:
+ case 0xe681c:
+ vgpu_vreg(vgpu, offset) = 1 << 17;
+ break;
+ case 0xe6c04:
+ vgpu_vreg(vgpu, offset) = 0x3;
+ break;
+ case 0xe6e1c:
+ vgpu_vreg(vgpu, offset) = 0x2f << 16;
+ break;
+ default:
+ return -EINVAL;
+ }
-static int dpy_reg_mmio_read_3(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes)
-{
- *(u32 *)p_data = (0x2f << 16);
+ read_vreg(vgpu, offset, p_data, bytes);
return 0;
}
@@ -1925,7 +1928,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_RO(PCH_ADPA, D_ALL, 0, ADPA_CRT_HOTPLUG_MONITOR_MASK, NULL, pch_adpa_mmio_write);
+ MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);
MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
@@ -2011,8 +2014,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read_2, NULL);
- MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read_3, NULL);
+ MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read, NULL);
MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
PORTA_HOTPLUG_STATUS_MASK
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 1ae0b4083ce1..fd0c85f9ef3c 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -232,16 +232,20 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
struct device *dev = mdev_dev(vgpu->vdev.mdev);
unsigned long gfn;
- mutex_lock(&vgpu->vdev.cache_lock);
- while ((node = rb_first(&vgpu->vdev.cache))) {
+ for (;;) {
+ mutex_lock(&vgpu->vdev.cache_lock);
+ node = rb_first(&vgpu->vdev.cache);
+ if (!node) {
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ break;
+ }
dma = rb_entry(node, struct gvt_dma, node);
gvt_dma_unmap_iova(vgpu, dma->iova);
gfn = dma->gfn;
-
- vfio_unpin_pages(dev, &gfn, 1);
__gvt_cache_remove_entry(vgpu, dma);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ vfio_unpin_pages(dev, &gfn, 1);
}
- mutex_unlock(&vgpu->vdev.cache_lock);
}
static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 488fdea348a9..0e2e36ad6196 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -174,15 +174,6 @@ static int shadow_context_status_change(struct notifier_block *nb,
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
- /* If the status is -EINPROGRESS means this workload
- * doesn't meet any issue during dispatching so when
- * get the SCHEDULE_OUT set the status to be zero for
- * good. If the status is NOT -EINPROGRESS means there
- * is something wrong happened during dispatching and
- * the status should not be set to zero
- */
- if (workload->status == -EINPROGRESS)
- workload->status = 0;
atomic_set(&workload->shadow_ctx_active, 0);
break;
default:
@@ -427,6 +418,18 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
+ /* If this request caused a GPU hang, req->fence.error will
+ * be set to -EIO. Propagate -EIO into the workload status so
+ * that a request which hung the GPU does not trigger a
+ * context switch interrupt to the guest.
+ */
+ if (likely(workload->status == -EINPROGRESS)) {
+ if (workload->req->fence.error == -EIO)
+ workload->status = -EIO;
+ else
+ workload->status = 0;
+ }
+
i915_gem_request_put(fetch_and_zero(&workload->req));
if (!workload->status && !vgpu->resetting) {
@@ -464,8 +467,6 @@ struct workload_thread_param {
int ring_id;
};
-static DEFINE_MUTEX(scheduler_mutex);
-
static int workload_thread(void *priv)
{
struct workload_thread_param *p = (struct workload_thread_param *)priv;
@@ -497,8 +498,6 @@ static int workload_thread(void *priv)
if (!workload)
break;
- mutex_lock(&scheduler_mutex);
-
gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
workload->ring_id, workload,
workload->vgpu->id);
@@ -537,9 +536,6 @@ complete:
FORCEWAKE_ALL);
intel_runtime_pm_put(gvt->dev_priv);
-
- mutex_unlock(&scheduler_mutex);
-
}
return 0;
}
@@ -620,7 +616,7 @@ err:
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
- i915_gem_context_put_unlocked(vgpu->shadow_ctx);
+ i915_gem_context_put(vgpu->shadow_ctx);
}
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4577b0af6886..2ef75c1a6119 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -292,6 +292,8 @@ static int per_file_stats(int id, void *ptr, void *data)
struct file_stats *stats = data;
struct i915_vma *vma;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
stats->count++;
stats->total += obj->base.size;
if (!obj->bind_count)
@@ -476,6 +478,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
struct drm_i915_gem_request *request;
struct task_struct *task;
+ mutex_lock(&dev->struct_mutex);
+
memset(&stats, 0, sizeof(stats));
stats.file_priv = file->driver_priv;
spin_lock(&file->table_lock);
@@ -487,7 +491,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
* still alive (e.g. get_pid(current) => fork() => exit()).
* Therefore, we need to protect this ->comm access using RCU.
*/
- mutex_lock(&dev->struct_mutex);
request = list_first_entry_or_null(&file_priv->mm.request_list,
struct drm_i915_gem_request,
client_link);
@@ -497,6 +500,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
PIDTYPE_PID);
print_file_stats(m, task ? task->comm : "<unknown>", stats);
rcu_read_unlock();
+
mutex_unlock(&dev->struct_mutex);
}
mutex_unlock(&dev->filelist_mutex);
@@ -1155,7 +1159,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
- if (IS_GEN9(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
reqf >>= 23;
else {
reqf &= ~GEN6_TURBO_DISABLE;
@@ -1177,7 +1181,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
- if (IS_GEN9(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
@@ -1206,7 +1210,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
dev_priv->rps.pm_intrmsk_mbz);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
- (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
+ (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
@@ -1237,18 +1241,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
- max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
+ max_freq *= (IS_GEN9_BC(dev_priv) ||
+ IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
- max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
+ max_freq *= (IS_GEN9_BC(dev_priv) ||
+ IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
- max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
+ max_freq *= (IS_GEN9_BC(dev_priv) ||
+ IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -1403,6 +1410,23 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
return 0;
}
+static int i915_reset_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct i915_gpu_error *error = &dev_priv->gpu_error;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
+
+ for_each_engine(engine, dev_priv, id) {
+ seq_printf(m, "%s = %u\n", engine->name,
+ i915_reset_engine_count(error, engine));
+ }
+
+ return 0;
+}
+
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1834,7 +1858,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
if (ret)
goto out;
- if (IS_GEN9_BC(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/* Convert GT frequency to 50 HZ units */
min_gpu_freq =
dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
@@ -1854,7 +1878,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
&ia_freq);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
intel_gpu_freq(dev_priv, (gpu_freq *
- (IS_GEN9_BC(dev_priv) ?
+ (IS_GEN9_BC(dev_priv) ||
+ IS_CANNONLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
@@ -1910,7 +1935,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
return ret;
#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (dev_priv->fbdev) {
+ if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
@@ -1966,7 +1991,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
seq_printf(m, "HW context %u ", ctx->hw_id);
if (ctx->pid) {
struct task_struct *task;
@@ -2072,7 +2097,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
if (ret)
return ret;
- list_for_each_entry(ctx, &dev_priv->context_list, link)
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link)
for_each_engine(engine, dev_priv, id)
i915_dump_lrc_obj(m, ctx, engine);
@@ -2306,6 +2331,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "GPU busy? %s [%d requests]\n",
yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
+ seq_printf(m, "Boosts outstanding? %d\n",
+ atomic_read(&dev_priv->rps.num_waiters));
seq_printf(m, "Frequency requested %d\n",
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
@@ -2319,22 +2346,20 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
mutex_lock(&dev->filelist_mutex);
- spin_lock(&dev_priv->rps.client_lock);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct drm_i915_file_private *file_priv = file->driver_priv;
struct task_struct *task;
rcu_read_lock();
task = pid_task(file->pid, PIDTYPE_PID);
- seq_printf(m, "%s [%d]: %d boosts%s\n",
+ seq_printf(m, "%s [%d]: %d boosts\n",
task ? task->comm : "<unknown>",
task ? task->pid : -1,
- file_priv->rps.boosts,
- list_empty(&file_priv->rps.link) ? "" : ", active");
+ atomic_read(&file_priv->rps.boosts));
rcu_read_unlock();
}
- seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts);
- spin_unlock(&dev_priv->rps.client_lock);
+ seq_printf(m, "Kernel (anonymous) boosts: %d\n",
+ atomic_read(&dev_priv->rps.boosts));
mutex_unlock(&dev->filelist_mutex);
if (INTEL_GEN(dev_priv) >= 6 &&
@@ -3083,7 +3108,7 @@ static void intel_connector_info(struct seq_file *m,
connector->display_info.cea_rev);
}
- if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+ if (!intel_encoder)
return;
switch (connector->connector_type) {
@@ -3285,6 +3310,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
static int i915_engine_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct i915_gpu_error *error = &dev_priv->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -3308,6 +3334,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
engine->hangcheck.seqno,
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
engine->timeline->inflight_seqnos);
+ seq_printf(m, "\tReset count: %d\n",
+ i915_reset_engine_count(error, engine));
rcu_read_lock();
@@ -3754,13 +3782,18 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
- if (connector->status == connector_status_connected &&
- connector->encoder != NULL) {
- intel_dp = enc_to_intel_dp(connector->encoder);
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
status = kstrtoint(input_buffer, 10, &val);
if (status < 0)
break;
@@ -3792,13 +3825,18 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
- if (connector->status == connector_status_connected &&
- connector->encoder != NULL) {
- intel_dp = enc_to_intel_dp(connector->encoder);
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
if (intel_dp->compliance.test_active)
seq_puts(m, "1");
else
@@ -3838,13 +3876,18 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
- if (connector->status == connector_status_connected &&
- connector->encoder != NULL) {
- intel_dp = enc_to_intel_dp(connector->encoder);
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
if (intel_dp->compliance.test_type ==
DP_TEST_LINK_EDID_READ)
seq_printf(m, "%lx",
@@ -3891,13 +3934,18 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
- if (connector->status == connector_status_connected &&
- connector->encoder != NULL) {
- intel_dp = enc_to_intel_dp(connector->encoder);
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
seq_printf(m, "%02lx", intel_dp->compliance.test_type);
} else
seq_puts(m, "0");
@@ -4820,6 +4868,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_hangcheck_info", i915_hangcheck_info, 0},
+ {"i915_reset_info", i915_reset_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 04d9bd84ee43..d310d8245dca 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -132,9 +132,13 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
} else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
ret = PCH_CPT;
- DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
+ DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
ret = PCH_LPT;
+ if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+ dev_priv->pch_id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+ else
+ dev_priv->pch_id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ret = PCH_SPT;
@@ -173,29 +177,25 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
- unsigned short id_ext = pch->device &
- INTEL_PCH_DEVICE_ID_MASK_EXT;
+
+ dev_priv->pch_id = id;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_IBX;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
WARN_ON(!IS_GEN5(dev_priv));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev_priv) ||
- IS_IVYBRIDGE(dev_priv)));
+ WARN_ON(!IS_GEN6(dev_priv) &&
+ !IS_IVYBRIDGE(dev_priv));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev_priv) ||
- IS_IVYBRIDGE(dev_priv)));
+ WARN_ON(!IS_GEN6(dev_priv) &&
+ !IS_IVYBRIDGE(dev_priv));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev_priv) &&
@@ -203,51 +203,60 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
WARN_ON(IS_HSW_ULT(dev_priv) ||
IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev_priv) &&
!IS_BROADWELL(dev_priv));
WARN_ON(!IS_HSW_ULT(dev_priv) &&
!IS_BDW_ULT(dev_priv));
+ } else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) {
+ /* WildcatPoint is LPT compatible */
+ dev_priv->pch_type = PCH_LPT;
+ DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) &&
+ !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) ||
+ IS_BDW_ULT(dev_priv));
+ } else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) {
+ /* WildcatPoint is LPT compatible */
+ dev_priv->pch_type = PCH_LPT;
+ DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) &&
+ !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) &&
+ !IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
- } else if (id_ext == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id_ext;
+ } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_KBP;
DRM_DEBUG_KMS("Found KabyPoint PCH\n");
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_CNP;
DRM_DEBUG_KMS("Found CannonPoint PCH\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) &&
!IS_COFFEELAKE(dev_priv));
- } else if (id_ext == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id_ext;
+ } else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CNP;
DRM_DEBUG_KMS("Found CannonPoint LP PCH\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) &&
!IS_COFFEELAKE(dev_priv));
- } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
- (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
- ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+ } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+ id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+ (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
pch->subsystem_vendor ==
PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
pch->subsystem_device ==
PCI_SUBDEVICE_ID_QEMU)) {
- dev_priv->pch_id = id;
dev_priv->pch_type =
intel_virt_detect_pch(dev_priv);
} else
@@ -331,6 +340,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
break;
case I915_PARAM_HAS_GPU_RESET:
value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
+ if (value && intel_has_reset_engine(dev_priv))
+ value = 2;
break;
case I915_PARAM_HAS_RESOURCE_STREAMER:
value = HAS_RESOURCE_STREAMER(dev_priv);
@@ -585,16 +596,18 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
+ flush_workqueue(dev_priv->wq);
+
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(dev_priv);
i915_gem_cleanup_engines(dev_priv);
- i915_gem_context_fini(dev_priv);
+ i915_gem_contexts_fini(dev_priv);
i915_gem_cleanup_userptr(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_drain_freed_objects(dev_priv);
- WARN_ON(!list_empty(&dev_priv->context_list));
+ WARN_ON(!list_empty(&dev_priv->contexts.list));
}
static int i915_load_modeset_init(struct drm_device *dev)
@@ -1132,10 +1145,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* and the registers being closely associated.
*
* According to chipset errata, on the 965GM, MSI interrupts may
- * be lost or delayed, but we use them anyways to avoid
- * stuck interrupts on some machines.
+ * be lost or delayed, and MSI support was defeatured. MSI interrupts seem to
+ * get lost on g4x as well, and interrupt delivery seems to stay
+ * properly dead afterwards. So we'll just disable them for all
+ * pre-gen5 chipsets.
*/
- if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 5) {
if (pci_enable_msi(pdev) < 0)
DRM_DEBUG_DRIVER("can't enable MSI");
}
@@ -1421,9 +1436,10 @@ static void i915_driver_release(struct drm_device *dev)
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
+ struct drm_i915_private *i915 = to_i915(dev);
int ret;
- ret = i915_gem_open(dev, file);
+ ret = i915_gem_open(i915, file);
if (ret)
return ret;
@@ -1453,7 +1469,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
mutex_lock(&dev->struct_mutex);
- i915_gem_context_close(dev, file);
+ i915_gem_context_close(file);
i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
@@ -1905,9 +1921,72 @@ wakeup:
error:
i915_gem_set_wedged(dev_priv);
+ i915_gem_retire_requests(dev_priv);
goto finish;
}
+/**
+ * i915_reset_engine - reset GPU engine to recover from a hang
+ * @engine: engine to reset
+ *
+ * Reset a specific GPU engine. Useful if a hang is detected.
+ * Returns zero on successful reset or otherwise an error code.
+ *
+ * Procedure is:
+ * - identifies the request that caused the hang and it is dropped
+ * - reset engine (which will force the engine to idle)
+ * - re-init/configure engine
+ */
+int i915_reset_engine(struct intel_engine_cs *engine)
+{
+ struct i915_gpu_error *error = &engine->i915->gpu_error;
+ struct drm_i915_gem_request *active_request;
+ int ret;
+
+ GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+
+ DRM_DEBUG_DRIVER("resetting %s\n", engine->name);
+
+ active_request = i915_gem_reset_prepare_engine(engine);
+ if (IS_ERR(active_request)) {
+ DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n");
+ ret = PTR_ERR(active_request);
+ goto out;
+ }
+
+ /*
+ * The request that caused the hang is stuck on elsp; we know the
+ * active request and can drop it, then adjust head to skip the
+ * offending request and resume executing the remaining requests
+ * in the queue.
+ */
+ i915_gem_reset_engine(engine, active_request);
+
+ /* Finally, reset just this engine. */
+ ret = intel_gpu_reset(engine->i915, intel_engine_flag(engine));
+
+ i915_gem_reset_finish_engine(engine);
+
+ if (ret) {
+ /* If we fail here, we expect to fall back to a global reset */
+ DRM_DEBUG_DRIVER("Failed to reset %s, ret=%d\n",
+ engine->name, ret);
+ goto out;
+ }
+
+ /*
+ * The engine and its registers (and workarounds in case of render)
+ * have been reset to their default values. Follow the init_ring
+ * process to program RING_MODE, HWSP and re-enable submission.
+ */
+ ret = engine->init_hw(engine);
+ if (ret)
+ goto out;
+
+ error->reset_engine_count[engine->id]++;
+out:
+ return ret;
+}
+
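Callers are expected to own the per-engine reset flag before calling in, which is what the GEM_BUG_ON at the top of i915_reset_engine() asserts. A sketch of the expected calling pattern (the hangcheck-side code is outside this hunk; the shape here is illustrative):

if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
		      &engine->i915->gpu_error.flags)) {
	ret = i915_reset_engine(engine);
	clear_bit(I915_RESET_ENGINE + engine->id,
		  &engine->i915->gpu_error.flags);
}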
static int i915_pm_suspend(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2981014fcfe2..7c6fab08a2e6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -80,8 +80,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20170619"
-#define DRIVER_TIMESTAMP 1497857498
+#define DRIVER_DATE "20170717"
+#define DRIVER_TIMESTAMP 1500275179
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -122,7 +122,7 @@ static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
return false;
}
-static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val)
+static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
{
uint_fixed_16_16_t fp;
@@ -132,17 +132,17 @@ static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val)
return fp;
}
-static inline uint32_t fixed_16_16_to_u32_round_up(uint_fixed_16_16_t fp)
+static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
{
return DIV_ROUND_UP(fp.val, 1 << 16);
}
-static inline uint32_t fixed_16_16_to_u32(uint_fixed_16_16_t fp)
+static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
{
return fp.val >> 16;
}
-static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1,
+static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
uint_fixed_16_16_t min2)
{
uint_fixed_16_16_t min;
@@ -151,7 +151,7 @@ static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1,
return min;
}
-static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1,
+static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
uint_fixed_16_16_t max2)
{
uint_fixed_16_16_t max;
@@ -160,6 +160,14 @@ static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1,
return max;
}
+static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
+{
+ uint_fixed_16_16_t fp;
+ WARN_ON(val >> 32);
+ fp.val = clamp_t(uint32_t, val, 0, ~0);
+ return fp;
+}
+
static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
uint_fixed_16_16_t d)
{
@@ -170,48 +178,30 @@ static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
uint_fixed_16_16_t mul)
{
uint64_t intermediate_val;
- uint32_t result;
intermediate_val = (uint64_t) val * mul.val;
intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
WARN_ON(intermediate_val >> 32);
- result = clamp_t(uint32_t, intermediate_val, 0, ~0);
- return result;
+ return clamp_t(uint32_t, intermediate_val, 0, ~0);
}
static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
uint_fixed_16_16_t mul)
{
uint64_t intermediate_val;
- uint_fixed_16_16_t fp;
intermediate_val = (uint64_t) val.val * mul.val;
intermediate_val = intermediate_val >> 16;
- WARN_ON(intermediate_val >> 32);
- fp.val = clamp_t(uint32_t, intermediate_val, 0, ~0);
- return fp;
+ return clamp_u64_to_fixed16(intermediate_val);
}
-static inline uint_fixed_16_16_t fixed_16_16_div(uint32_t val, uint32_t d)
+static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
{
- uint_fixed_16_16_t fp, res;
-
- fp = u32_to_fixed_16_16(val);
- res.val = DIV_ROUND_UP(fp.val, d);
- return res;
-}
-
-static inline uint_fixed_16_16_t fixed_16_16_div_u64(uint32_t val, uint32_t d)
-{
- uint_fixed_16_16_t res;
uint64_t interm_val;
interm_val = (uint64_t)val << 16;
interm_val = DIV_ROUND_UP_ULL(interm_val, d);
- WARN_ON(interm_val >> 32);
- res.val = (uint32_t) interm_val;
-
- return res;
+ return clamp_u64_to_fixed16(interm_val);
}
static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
@@ -225,16 +215,32 @@ static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
return clamp_t(uint32_t, interm_val, 0, ~0);
}
-static inline uint_fixed_16_16_t mul_u32_fixed_16_16(uint32_t val,
+static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
uint_fixed_16_16_t mul)
{
uint64_t intermediate_val;
- uint_fixed_16_16_t fp;
intermediate_val = (uint64_t) val * mul.val;
- WARN_ON(intermediate_val >> 32);
- fp.val = (uint32_t) intermediate_val;
- return fp;
+ return clamp_u64_to_fixed16(intermediate_val);
+}
+
+static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
+ uint_fixed_16_16_t add2)
+{
+ uint64_t interm_sum;
+
+ interm_sum = (uint64_t) add1.val + add2.val;
+ return clamp_u64_to_fixed16(interm_sum);
+}
+
+static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
+ uint32_t add2)
+{
+ uint64_t interm_sum;
+ uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);
+
+ interm_sum = (uint64_t) add1.val + interm_add2.val;
+ return clamp_u64_to_fixed16(interm_sum);
}
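
The fixed-point helpers above all share the 16.16 convention: the low 16 bits hold the fraction, so a real value x is stored as x * 65536. For illustration only, here is the same idea in plain userspace C, independent of the driver's uint_fixed_16_16_t, with the clamping behaviour of clamp_u64_to_fixed16() folded into the multiply:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t val; } fixed16;	/* 16 int bits . 16 frac bits */

static fixed16 u32_to_fixed16(uint32_t v)
{
	fixed16 fp = { .val = v << 16 };
	return fp;
}

static uint32_t fixed16_to_u32_round_up(fixed16 fp)
{
	return (fp.val + 0xffff) >> 16;	/* DIV_ROUND_UP(fp.val, 1 << 16) */
}

static fixed16 mul_fixed16(fixed16 a, fixed16 b)
{
	/* The 32x32 product carries 32 fraction bits; drop 16, then clamp. */
	uint64_t tmp = ((uint64_t)a.val * b.val) >> 16;
	fixed16 fp = { .val = tmp > UINT32_MAX ? UINT32_MAX : (uint32_t)tmp };
	return fp;
}

int main(void)
{
	fixed16 a = { .val = (3u << 16) | 0x8000 };	/* 3.5 in 16.16 */
	fixed16 b = u32_to_fixed16(2);			/* 2.0 */
	printf("%u\n", fixed16_to_u32_round_up(mul_fixed16(a, b))); /* 7 */
	return 0;
}
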
static inline const char *yesno(bool v)
@@ -584,8 +590,7 @@ struct drm_i915_file_private {
struct idr context_idr;
struct intel_rps_client {
- struct list_head link;
- unsigned boosts;
+ atomic_t boosts;
} rps;
unsigned int bsd_engine;
@@ -753,6 +758,7 @@ struct intel_csr {
func(has_csr); \
func(has_ddi); \
func(has_dp_mst); \
+ func(has_reset_engine); \
func(has_fbc); \
func(has_fpga_dbg); \
func(has_full_ppgtt); \
@@ -917,6 +923,7 @@ struct i915_gpu_state {
enum intel_engine_hangcheck_action hangcheck_action;
struct i915_address_space *vm;
int num_requests;
+ u32 reset_count;
/* position of active request inside the ring */
u32 rq_head, rq_post, rq_tail;
@@ -1149,8 +1156,8 @@ struct i915_psr {
enum intel_pch {
PCH_NONE = 0, /* No PCH present */
PCH_IBX, /* Ibexpeak PCH */
- PCH_CPT, /* Cougarpoint PCH */
- PCH_LPT, /* Lynxpoint PCH */
+ PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
+ PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
PCH_SPT, /* Sunrisepoint PCH */
PCH_KBP, /* Kabypoint PCH */
PCH_CNP, /* Cannonpoint PCH */
@@ -1166,6 +1173,7 @@ enum intel_sbi_destination {
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
+#define QUIRK_INCREASE_T12_DELAY (1<<6)
struct intel_fbdev;
struct intel_fbc_work;
@@ -1301,13 +1309,10 @@ struct intel_gen6_power_mgmt {
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
- spinlock_t client_lock;
- struct list_head clients;
- bool client_boost;
-
bool enabled;
struct delayed_work autoenable_work;
- unsigned boosts;
+ atomic_t num_waiters;
+ atomic_t boosts;
/* manual wa residency calculations */
struct intel_rps_ei ei;
@@ -1550,6 +1555,12 @@ struct i915_gpu_error {
* inspect the bit and do the reset directly, otherwise the worker
* waits for the struct_mutex.
*
+ * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
+ * acquire the struct_mutex to reset an engine, we need an explicit
+ * flag to prevent two concurrent reset attempts in the same engine.
+ * As the number of engines continues to grow, allocate the flags from
+ * the most significant bits.
+ *
* #I915_WEDGED - If reset fails and we can no longer use the GPU,
* we set the #I915_WEDGED bit. Prior to command submission, e.g.
* i915_gem_request_alloc(), this bit is checked and the sequence
@@ -1559,6 +1570,10 @@ struct i915_gpu_error {
#define I915_RESET_BACKOFF 0
#define I915_RESET_HANDOFF 1
#define I915_WEDGED (BITS_PER_LONG - 1)
+#define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES)
+
+ /** Number of times an engine has been reset */
+ u32 reset_engine_count[I915_NUM_ENGINES];
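
The resulting bit layout is easier to see with concrete numbers. A small standalone sketch (BITS_PER_LONG and the engine count here are illustrative stand-ins, not the driver's real per-platform values):

#include <stdio.h>

#define BITS_PER_LONG		64
#define I915_NUM_ENGINES	5	/* illustrative only */

#define I915_RESET_BACKOFF	0
#define I915_RESET_HANDOFF	1
#define I915_WEDGED		(BITS_PER_LONG - 1)
#define I915_RESET_ENGINE	(I915_WEDGED - I915_NUM_ENGINES)

int main(void)
{
	/* Global flags grow up from bit 0, per-engine flags down from
	 * the top, so the two ranges cannot collide as long as
	 * I915_RESET_HANDOFF stays below I915_RESET_ENGINE.
	 */
	for (int id = 0; id < I915_NUM_ENGINES; id++)
		printf("engine %d reset flag: bit %d\n",
		       id, I915_RESET_ENGINE + id);	/* bits 58..62 */
	printf("wedged flag: bit %d\n", I915_WEDGED);	/* bit 63 */
	return 0;
}
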
/**
* Waitqueue to signal when a hang is detected. Used for waiters
@@ -2236,13 +2251,6 @@ struct drm_i915_private {
DECLARE_HASHTABLE(mm_structs, 7);
struct mutex mm_lock;
- /* The hw wants to have a stable context identifier for the lifetime
- * of the context (for OA, PASID, faults, etc). This is limited
- * in execlists to 21 bits.
- */
- struct ida context_hw_ida;
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
-
/* Kernel Modesetting */
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
@@ -2319,7 +2327,18 @@ struct drm_i915_private {
*/
struct mutex av_mutex;
- struct list_head context_list;
+ struct {
+ struct list_head list;
+ struct llist_head free_list;
+ struct work_struct free_work;
+
+ /* The hw wants to have a stable context identifier for the
+ * lifetime of the context (for OA, PASID, faults, etc).
+ * This is limited in execlists to 21 bits.
+ */
+ struct ida hw_ida;
+#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+ } contexts;
u32 fdi_rx_config;
@@ -2994,16 +3013,17 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
-#define INTEL_PCH_DEVICE_ID_MASK 0xff00
-#define INTEL_PCH_DEVICE_ID_MASK_EXT 0xff80
+#define INTEL_PCH_DEVICE_ID_MASK 0xff80
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
+#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
-#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
@@ -3018,9 +3038,11 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev_priv) \
- ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+ ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
+ (dev_priv)->pch_id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
#define HAS_PCH_LPT_H(dev_priv) \
- ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
+ ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
+ (dev_priv)->pch_id == INTEL_PCH_WPT_DEVICE_ID_TYPE)
#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
@@ -3087,6 +3109,8 @@ extern void i915_driver_unload(struct drm_device *dev);
extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
extern void i915_reset(struct drm_i915_private *dev_priv);
+extern int i915_reset_engine(struct intel_engine_cs *engine);
+extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
@@ -3459,11 +3483,22 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
return READ_ONCE(error->reset_count);
}
+static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
+ struct intel_engine_cs *engine)
+{
+ return READ_ONCE(error->reset_engine_count[engine->id]);
+}
+
+struct drm_i915_gem_request *
+i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
void i915_gem_reset(struct drm_i915_private *dev_priv);
+void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
+void i915_gem_reset_engine(struct intel_engine_cs *engine,
+ struct drm_i915_gem_request *request);
void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
@@ -3497,7 +3532,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align);
-int i915_gem_open(struct drm_device *dev, struct drm_file *file);
+int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
@@ -3529,38 +3564,23 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
static inline struct i915_gem_context *
-i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
+__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
{
- struct i915_gem_context *ctx;
-
- lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
-
- ctx = idr_find(&file_priv->context_idr, id);
- if (!ctx)
- return ERR_PTR(-ENOENT);
-
- return ctx;
+ return idr_find(&file_priv->context_idr, id);
}
static inline struct i915_gem_context *
-i915_gem_context_get(struct i915_gem_context *ctx)
-{
- kref_get(&ctx->ref);
- return ctx;
-}
-
-static inline void i915_gem_context_put(struct i915_gem_context *ctx)
+i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- kref_put(&ctx->ref, i915_gem_context_free);
-}
+ struct i915_gem_context *ctx;
-static inline void i915_gem_context_put_unlocked(struct i915_gem_context *ctx)
-{
- struct mutex *lock = &ctx->i915->drm.struct_mutex;
+ rcu_read_lock();
+ ctx = __i915_gem_context_lookup_rcu(file_priv, id);
+ if (ctx && !kref_get_unless_zero(&ctx->ref))
+ ctx = NULL;
+ rcu_read_unlock();
- if (kref_put_mutex(&ctx->ref, i915_gem_context_free, lock))
- mutex_unlock(lock);
+ return ctx;
}
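
The new lookup relies on the standard RCU pattern: find the object with no lock held, then take a reference only if the refcount has not already dropped to zero. A userspace analogue of the kref_get_unless_zero() half, using C11 atomics (the RCU grace period itself is elided here):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_uint ref;
};

/* Mirrors kref_get_unless_zero(): succeed only while ref > 0. */
static bool get_unless_zero(struct obj *o)
{
	unsigned int v = atomic_load(&o->ref);

	while (v) {
		if (atomic_compare_exchange_weak(&o->ref, &v, v + 1))
			return true;	/* took a reference */
	}
	return false;	/* object already on its way to being freed */
}

int main(void)
{
	struct obj live = { .ref = 2 }, dying = { .ref = 0 };

	printf("live:  %d\n", get_unless_zero(&live));	/* 1 */
	printf("dying: %d\n", get_unless_zero(&dying));	/* 0 */
	return 0;
}
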
static inline struct intel_timeline *
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7dcac3bfb771..d6f9b4cb6e9b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -388,7 +388,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
*/
if (rps) {
if (INTEL_GEN(rq->i915) >= 6)
- gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
+ gen6_rps_boost(rq, rps);
else
rps = NULL;
}
@@ -399,22 +399,6 @@ out:
if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
i915_gem_request_retire_upto(rq);
- if (rps && i915_gem_request_global_seqno(rq) == intel_engine_last_submit(rq->engine)) {
- /* The GPU is now idle and this client has stalled.
- * Since no other client has submitted a request in the
- * meantime, assume that this client is the only one
- * supplying work to the GPU but is unable to keep that
- * work supplied because it is waiting. Since the GPU is
- * then never kept fully busy, RPS autoclocking will
- * keep the clocks relatively low, causing further delays.
- * Compensate by giving the synchronous client credit for
- * a waitboost next time.
- */
- spin_lock(&rq->i915->rps.client_lock);
- list_del_init(&rps->link);
- spin_unlock(&rq->i915->rps.client_lock);
- }
-
return timeout;
}
@@ -2434,8 +2418,9 @@ rebuild_st:
* again with !__GFP_NORETRY. However, we still
* want to fail this allocation rather than
* trigger the out-of-memory killer and for
- * this we want the future __GFP_MAYFAIL.
+ * this we want __GFP_RETRY_MAYFAIL.
*/
+ gfp |= __GFP_RETRY_MAYFAIL;
}
} while (1);
@@ -2831,46 +2816,64 @@ static bool engine_stalled(struct intel_engine_cs *engine)
return true;
}
+/*
+ * Ensure the irq handler finishes, and is not run again.
+ * Also return the active request so that we only search for it once.
+ */
+struct drm_i915_gem_request *
+i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_request *request = NULL;
+
+ /* Prevent the signaler thread from updating the request
+ * state (by calling dma_fence_signal) as we are processing
+ * the reset. The write from the GPU of the seqno is
+ * asynchronous and the signaler thread may see a different
+ * value to us and declare the request complete, even though
+ * the reset routine have picked that request as the active
+ * (incomplete) request. This conflict is not handled
+ * gracefully!
+ */
+ kthread_park(engine->breadcrumbs.signaler);
+
+ /* Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its engine->irq_tasklet *just* as we are
+ * calling engine->init_hw() and also writing the ELSP.
+ * Turning off the engine->irq_tasklet until the reset is over
+ * prevents the race.
+ */
+ tasklet_kill(&engine->irq_tasklet);
+ tasklet_disable(&engine->irq_tasklet);
+
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+
+ if (engine_stalled(engine)) {
+ request = i915_gem_find_active_request(engine);
+ if (request && request->fence.error == -EIO)
+ request = ERR_PTR(-EIO); /* Previous reset failed! */
+ }
+
+ return request;
+}
+
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ struct drm_i915_gem_request *request;
enum intel_engine_id id;
int err = 0;
- /* Ensure irq handler finishes, and not run again. */
for_each_engine(engine, dev_priv, id) {
- struct drm_i915_gem_request *request;
-
- /* Prevent the signaler thread from updating the request
- * state (by calling dma_fence_signal) as we are processing
- * the reset. The write from the GPU of the seqno is
- * asynchronous and the signaler thread may see a different
- * value to us and declare the request complete, even though
- * the reset routine have picked that request as the active
- * (incomplete) request. This conflict is not handled
- * gracefully!
- */
- kthread_park(engine->breadcrumbs.signaler);
-
- /* Prevent request submission to the hardware until we have
- * completed the reset in i915_gem_reset_finish(). If a request
- * is completed by one engine, it may then queue a request
- * to a second via its engine->irq_tasklet *just* as we are
- * calling engine->init_hw() and also writing the ELSP.
- * Turning off the engine->irq_tasklet until the reset is over
- * prevents the race.
- */
- tasklet_kill(&engine->irq_tasklet);
- tasklet_disable(&engine->irq_tasklet);
-
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
- if (engine_stalled(engine)) {
- request = i915_gem_find_active_request(engine);
- if (request && request->fence.error == -EIO)
- err = -EIO; /* Previous reset failed! */
+ request = i915_gem_reset_prepare_engine(engine);
+ if (IS_ERR(request)) {
+ err = PTR_ERR(request);
+ continue;
}
+
+ engine->hangcheck.active_request = request;
}
i915_gem_revoke_fences(dev_priv);
@@ -2924,7 +2927,7 @@ static void engine_skip_context(struct drm_i915_gem_request *request)
static bool i915_gem_reset_request(struct drm_i915_gem_request *request)
{
/* Read once and return the resolution */
- const bool guilty = engine_stalled(request->engine);
+ const bool guilty = !i915_gem_request_completed(request);
/* The guilty request will get skipped on a hung engine.
*
@@ -2958,11 +2961,9 @@ static bool i915_gem_reset_request(struct drm_i915_gem_request *request)
return guilty;
}
-static void i915_gem_reset_engine(struct intel_engine_cs *engine)
+void i915_gem_reset_engine(struct intel_engine_cs *engine,
+ struct drm_i915_gem_request *request)
{
- struct drm_i915_gem_request *request;
-
- request = i915_gem_find_active_request(engine);
if (request && i915_gem_reset_request(request)) {
DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
engine->name, request->global_seqno);
@@ -2988,7 +2989,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id) {
struct i915_gem_context *ctx;
- i915_gem_reset_engine(engine);
+ i915_gem_reset_engine(engine, engine->hangcheck.active_request);
ctx = fetch_and_zero(&engine->last_retired_context);
if (ctx)
engine->context_unpin(engine, ctx);
@@ -3004,6 +3005,12 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
}
}
+void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
+{
+ tasklet_enable(&engine->irq_tasklet);
+ kthread_unpark(engine->breadcrumbs.signaler);
+}
+
void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
@@ -3012,8 +3019,8 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
for_each_engine(engine, dev_priv, id) {
- tasklet_enable(&engine->irq_tasklet);
- kthread_unpark(engine->breadcrumbs.signaler);
+ engine->hangcheck.active_request = NULL;
+ i915_gem_reset_finish_engine(engine);
}
}
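
With prepare/reset/finish split out per engine, the whole-device path is just the three phases looped over every engine, so everything is quiesced before anything is reset. A toy, driver-agnostic skeleton of that ordering (the function bodies are printf stand-ins, not the driver's code):

#include <stdio.h>

static void *prepare_engine(int id)  { printf("park %d\n", id); return "rq"; }
static void reset_one(int id, void *rq) { printf("reset %d (%s)\n", id, (char *)rq); }
static void finish_engine(int id)    { printf("unpark %d\n", id); }

int main(void)
{
	/* Quiesce all engines first, then reset, then release: a request
	 * completing on one engine cannot resubmit work to another
	 * mid-reset.
	 */
	void *active[3];

	for (int id = 0; id < 3; id++)
		active[id] = prepare_engine(id);
	for (int id = 0; id < 3; id++)
		reset_one(id, active[id]);
	for (int id = 0; id < 3; id++)
		finish_engine(id);
	return 0;
}
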
@@ -3040,7 +3047,8 @@ static void engine_set_wedged(struct intel_engine_cs *engine)
/* Mark all executing requests as skipped */
spin_lock_irqsave(&engine->timeline->lock, flags);
list_for_each_entry(request, &engine->timeline->requests, link)
- dma_fence_set_error(&request->fence, -EIO);
+ if (!i915_gem_request_completed(request))
+ dma_fence_set_error(&request->fence, -EIO);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
/* Mark all pending requests as complete so that any concurrent
@@ -3070,6 +3078,13 @@ static void engine_set_wedged(struct intel_engine_cs *engine)
engine->execlist_first = NULL;
spin_unlock_irqrestore(&engine->timeline->lock, flags);
+
+ /* The port is checked prior to scheduling a tasklet, but
+ * just in case we have suspended the tasklet to do the
+ * wedging make sure that when it wakes, it decides there
+ * is no work to do by clearing the irq_posted bit.
+ */
+ clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
}
}
@@ -3079,6 +3094,7 @@ static int __i915_gem_set_wedged_BKL(void *data)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ set_bit(I915_WEDGED, &i915->gpu_error.flags);
for_each_engine(engine, i915, id)
engine_set_wedged(engine);
@@ -3087,20 +3103,7 @@ static int __i915_gem_set_wedged_BKL(void *data)
void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
{
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
- set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
-
- /* Retire completed requests first so the list of inflight/incomplete
- * requests is accurate and we don't try and mark successful requests
- * as in error during __i915_gem_set_wedged_BKL().
- */
- i915_gem_retire_requests(dev_priv);
-
stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
-
- i915_gem_context_lost(dev_priv);
-
- mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
}
bool i915_gem_unset_wedged(struct drm_i915_private *i915)
@@ -3155,6 +3158,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
* context and do not require stop_machine().
*/
intel_engines_reset_default_submission(i915);
+ i915_gem_contexts_lost(i915);
smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
clear_bit(I915_WEDGED, &i915->gpu_error.flags);
@@ -4564,7 +4568,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
goto err_unlock;
assert_kernel_context_is_current(dev_priv);
- i915_gem_context_lost(dev_priv);
+ i915_gem_contexts_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
intel_guc_suspend(dev_priv);
@@ -4578,8 +4582,6 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
while (flush_delayed_work(&dev_priv->gt.idle_work))
;
- i915_gem_drain_freed_objects(dev_priv);
-
/* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
@@ -4811,7 +4813,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
goto out_unlock;
- ret = i915_gem_context_init(dev_priv);
+ ret = i915_gem_contexts_init(dev_priv);
if (ret)
goto out_unlock;
@@ -4921,7 +4923,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
if (err)
goto err_priorities;
- INIT_LIST_HEAD(&dev_priv->context_list);
INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
init_llist_head(&dev_priv->mm.free_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
@@ -5037,15 +5038,9 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
list_for_each_entry(request, &file_priv->mm.request_list, client_link)
request->file_priv = NULL;
spin_unlock(&file_priv->mm.lock);
-
- if (!list_empty(&file_priv->rps.link)) {
- spin_lock(&to_i915(dev)->rps.client_lock);
- list_del(&file_priv->rps.link);
- spin_unlock(&to_i915(dev)->rps.client_lock);
- }
}
-int i915_gem_open(struct drm_device *dev, struct drm_file *file)
+int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
int ret;
@@ -5057,16 +5052,15 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
return -ENOMEM;
file->driver_priv = file_priv;
- file_priv->dev_priv = to_i915(dev);
+ file_priv->dev_priv = i915;
file_priv->file = file;
- INIT_LIST_HEAD(&file_priv->rps.link);
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
file_priv->bsd_engine = -1;
- ret = i915_gem_context_open(dev, file);
+ ret = i915_gem_context_open(i915, file);
if (ret)
kfree(file_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 39ed58a21fc1..1a87d04e7937 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -158,13 +158,11 @@ static void vma_lut_free(struct i915_gem_context *ctx)
kvfree(lut->ht);
}
-void i915_gem_context_free(struct kref *ctx_ref)
+static void i915_gem_context_free(struct i915_gem_context *ctx)
{
- struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
int i;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- trace_i915_context_free(ctx);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
vma_lut_free(ctx);
@@ -188,8 +186,54 @@ void i915_gem_context_free(struct kref *ctx_ref)
list_del(&ctx->link);
- ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
- kfree(ctx);
+ ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
+ kfree_rcu(ctx, rcu);
+}
+
+static void contexts_free(struct drm_i915_private *i915)
+{
+ struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
+ struct i915_gem_context *ctx, *cn;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ llist_for_each_entry_safe(ctx, cn, freed, free_link)
+ i915_gem_context_free(ctx);
+}
+
+static void contexts_free_first(struct drm_i915_private *i915)
+{
+ struct i915_gem_context *ctx;
+ struct llist_node *freed;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ freed = llist_del_first(&i915->contexts.free_list);
+ if (!freed)
+ return;
+
+ ctx = container_of(freed, typeof(*ctx), free_link);
+ i915_gem_context_free(ctx);
+}
+
+static void contexts_free_worker(struct work_struct *work)
+{
+ struct drm_i915_private *i915 =
+ container_of(work, typeof(*i915), contexts.free_work);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ contexts_free(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+}
+
+void i915_gem_context_release(struct kref *ref)
+{
+ struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
+ struct drm_i915_private *i915 = ctx->i915;
+
+ trace_i915_context_free(ctx);
+ if (llist_add(&ctx->free_link, &i915->contexts.free_list))
+ queue_work(i915->wq, &i915->contexts.free_work);
}
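
i915_gem_context_release() may now be the final unref in any context: it pushes the dead context onto a lock-free list and queues the worker only when llist_add() reports the list was previously empty, so the workqueue is kicked once per batch of frees. A userspace sketch of that idiom with C11 atomics (the worker is simulated by a direct drain() call):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

static _Atomic(struct node *) free_list;

/* Like llist_add(): returns true if the list was empty beforehand. */
static bool push(struct node *n)
{
	struct node *first = atomic_load(&free_list);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(&free_list, &first, n));
	return first == NULL;	/* caller queues the worker only then */
}

static void drain(void)	/* the "worker": runs with the big lock held */
{
	struct node *n = atomic_exchange(&free_list, NULL);

	while (n) {
		struct node *next = n->next;
		printf("freeing %d\n", n->id);
		free(n);
		n = next;
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		if (push(n))
			printf("list was empty: queue worker\n");
	}
	drain();
	return 0;
}
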
static void context_close(struct i915_gem_context *ctx)
@@ -205,7 +249,7 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
int ret;
- ret = ida_simple_get(&dev_priv->context_hw_ida,
+ ret = ida_simple_get(&dev_priv->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0) {
/* Contexts are only released when no longer active.
@@ -213,7 +257,7 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
* stale contexts and try again.
*/
i915_gem_retire_requests(dev_priv);
- ret = ida_simple_get(&dev_priv->context_hw_ida,
+ ret = ida_simple_get(&dev_priv->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0)
return ret;
@@ -265,7 +309,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
}
kref_init(&ctx->ref);
- list_add_tail(&ctx->link, &dev_priv->context_list);
+ list_add_tail(&ctx->link, &dev_priv->contexts.list);
ctx->i915 = dev_priv;
ctx->priority = I915_PRIORITY_NORMAL;
@@ -354,6 +398,9 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ /* Reap the most stale context */
+ contexts_free_first(dev_priv);
+
ctx = __create_hw_context(dev_priv, file_priv);
if (IS_ERR(ctx))
return ctx;
@@ -418,7 +465,7 @@ out:
return ctx;
}
-int i915_gem_context_init(struct drm_i915_private *dev_priv)
+int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
@@ -427,6 +474,10 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
if (WARN_ON(dev_priv->kernel_context))
return 0;
+ INIT_LIST_HEAD(&dev_priv->contexts.list);
+ INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
+ init_llist_head(&dev_priv->contexts.free_list);
+
if (intel_vgpu_active(dev_priv) &&
HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
if (!i915.enable_execlists) {
@@ -437,7 +488,7 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
/* Using the simple ida interface, the max is limited by sizeof(int) */
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
- ida_init(&dev_priv->context_hw_ida);
+ ida_init(&dev_priv->contexts.hw_ida);
ctx = i915_gem_create_context(dev_priv, NULL);
if (IS_ERR(ctx)) {
@@ -463,7 +514,7 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
return 0;
}
-void i915_gem_context_lost(struct drm_i915_private *dev_priv)
+void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -484,7 +535,7 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
if (!i915.enable_execlists) {
struct i915_gem_context *ctx;
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
if (!i915_gem_context_is_default(ctx))
continue;
@@ -503,18 +554,20 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
}
}
-void i915_gem_context_fini(struct drm_i915_private *dev_priv)
+void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
- struct i915_gem_context *dctx = dev_priv->kernel_context;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ struct i915_gem_context *ctx;
- GEM_BUG_ON(!i915_gem_context_is_kernel(dctx));
+ lockdep_assert_held(&i915->drm.struct_mutex);
- context_close(dctx);
- dev_priv->kernel_context = NULL;
+ /* Keep the context so that we can free it immediately ourselves */
+ ctx = i915_gem_context_get(fetch_and_zero(&i915->kernel_context));
+ GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+ context_close(ctx);
+ i915_gem_context_free(ctx);
- ida_destroy(&dev_priv->context_hw_ida);
+ /* Must free all deferred contexts (via flush_workqueue) first */
+ ida_destroy(&i915->contexts.hw_ida);
}
static int context_idr_cleanup(int id, void *p, void *data)
@@ -525,32 +578,32 @@ static int context_idr_cleanup(int id, void *p, void *data)
return 0;
}
-int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
+int i915_gem_context_open(struct drm_i915_private *i915,
+ struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_gem_context *ctx;
idr_init(&file_priv->context_idr);
- mutex_lock(&dev->struct_mutex);
- ctx = i915_gem_create_context(to_i915(dev), file_priv);
- mutex_unlock(&dev->struct_mutex);
-
- GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
-
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = i915_gem_create_context(i915, file_priv);
+ mutex_unlock(&i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
idr_destroy(&file_priv->context_idr);
return PTR_ERR(ctx);
}
+ GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
+
return 0;
}
-void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
+void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- lockdep_assert_held(&dev->struct_mutex);
+ lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
@@ -981,20 +1034,19 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
return -ENOENT;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
- if (IS_ERR(ctx)) {
- mutex_unlock(&dev->struct_mutex);
- return PTR_ERR(ctx);
- }
+ if (!ctx)
+ return -ENOENT;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ goto out;
__destroy_hw_context(ctx, file_priv);
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("HW context %d destroyed\n", args->ctx_id);
+out:
+ i915_gem_context_put(ctx);
return 0;
}
@@ -1004,17 +1056,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_context_param *args = data;
struct i915_gem_context *ctx;
- int ret;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ int ret = 0;
ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
- if (IS_ERR(ctx)) {
- mutex_unlock(&dev->struct_mutex);
- return PTR_ERR(ctx);
- }
+ if (!ctx)
+ return -ENOENT;
args->size = 0;
switch (args->param) {
@@ -1042,8 +1088,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
break;
}
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_context_put(ctx);
return ret;
}
@@ -1055,15 +1101,13 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
struct i915_gem_context *ctx;
int ret;
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+ if (!ctx)
+ return -ENOENT;
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
- return ret;
-
- ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
- if (IS_ERR(ctx)) {
- mutex_unlock(&dev->struct_mutex);
- return PTR_ERR(ctx);
- }
+ goto out;
switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
@@ -1101,6 +1145,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
}
mutex_unlock(&dev->struct_mutex);
+out:
+ i915_gem_context_put(ctx);
return ret;
}
@@ -1115,27 +1161,31 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
if (args->flags || args->pad)
return -EINVAL;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ ret = -ENOENT;
+ rcu_read_lock();
+ ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
+ if (!ctx)
+ goto out;
- ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
- if (IS_ERR(ctx)) {
- mutex_unlock(&dev->struct_mutex);
- return PTR_ERR(ctx);
- }
+ /*
+ * We opt for unserialised reads here. This may result in tearing
+ * in the extremely unlikely event of a GPU hang on this context
+ * as we are querying them. If we need that extra layer of protection,
+ * we should wrap the hangstats with a seqlock.
+ */
if (capable(CAP_SYS_ADMIN))
args->reset_count = i915_reset_count(&dev_priv->gpu_error);
else
args->reset_count = 0;
- args->batch_active = ctx->guilty_count;
- args->batch_pending = ctx->active_count;
+ args->batch_active = READ_ONCE(ctx->guilty_count);
+ args->batch_pending = READ_ONCE(ctx->active_count);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
+ ret = 0;
+out:
+ rcu_read_unlock();
+ return ret;
}
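
The comment above names the stronger alternative: wrapping the hangstats in a seqlock so that readers retry across a concurrent update instead of tolerating torn values. A minimal single-writer seqlock sketch, for illustration only (a real concurrent implementation needs proper write-side fencing; this demo drives the protocol single-threaded):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;		/* odd while a write is in progress */
static unsigned int guilty, active;

static void write_stats(unsigned int g, unsigned int a)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);
	guilty = g;
	active = a;
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);
}

static void read_stats(unsigned int *g, unsigned int *a)
{
	unsigned int s;

	do {	/* retry if a write was in flight or completed meanwhile */
		s = atomic_load_explicit(&seq, memory_order_acquire);
		*g = guilty;
		*a = active;
	} while ((s & 1) ||
		 s != atomic_load_explicit(&seq, memory_order_acquire));
}

int main(void)
{
	unsigned int g, a;

	write_stats(1, 2);
	read_stats(&g, &a);
	printf("guilty=%u active=%u\n", g, a);
	return 0;
}
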
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 82c99ba92ad3..04320f80f9f4 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -86,6 +86,7 @@ struct i915_gem_context {
/** link: place in &drm_i915_private.contexts.list */
struct list_head link;
+ struct llist_node free_link;
/**
* @ref: reference count
@@ -99,6 +100,11 @@ struct i915_gem_context {
struct kref ref;
/**
+ * @rcu: rcu_head for deferred freeing.
+ */
+ struct rcu_head rcu;
+
+ /**
* @flags: small set of booleans
*/
unsigned long flags;
@@ -273,14 +279,18 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
}
/* i915_gem_context.c */
-int __must_check i915_gem_context_init(struct drm_i915_private *dev_priv);
-void i915_gem_context_lost(struct drm_i915_private *dev_priv);
-void i915_gem_context_fini(struct drm_i915_private *dev_priv);
-int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
+int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
+void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
+void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
+
+int i915_gem_context_open(struct drm_i915_private *i915,
+ struct drm_file *file);
+void i915_gem_context_close(struct drm_file *file);
+
int i915_switch_context(struct drm_i915_gem_request *req);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
-void i915_gem_context_free(struct kref *ctx_ref);
+
+void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);
@@ -295,4 +305,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+static inline struct i915_gem_context *
+i915_gem_context_get(struct i915_gem_context *ctx)
+{
+ kref_get(&ctx->ref);
+ return ctx;
+}
+
+static inline void i915_gem_context_put(struct i915_gem_context *ctx)
+{
+ kref_put(&ctx->ref, i915_gem_context_release);
+}
+
#endif /* !__I915_GEM_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index eb46dfa374a7..929f275e67aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -288,20 +288,26 @@ static int eb_create(struct i915_execbuffer *eb)
* direct lookup.
*/
do {
+ unsigned int flags;
+
+ /* While we can still reduce the allocation size, suppress
+ * the warning and allow the allocation to fail quietly.
+ * On the last pass though, we want to try as hard
+ * as possible to perform the allocation and warn
+ * if it fails.
+ */
+ flags = GFP_TEMPORARY;
+ if (size > 1)
+ flags |= __GFP_NORETRY | __GFP_NOWARN;
+
eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
- GFP_TEMPORARY |
- __GFP_NORETRY |
- __GFP_NOWARN);
+ flags);
if (eb->buckets)
break;
} while (--size);
- if (unlikely(!eb->buckets)) {
- eb->buckets = kzalloc(sizeof(struct hlist_head),
- GFP_TEMPORARY);
- if (unlikely(!eb->buckets))
- return -ENOMEM;
- }
+ if (unlikely(!size))
+ return -ENOMEM;
eb->lut_size = size;
} else {
@@ -452,7 +458,7 @@ eb_add_vma(struct i915_execbuffer *eb,
return err;
}
- if (eb->lut_size >= 0) {
+ if (eb->lut_size > 0) {
vma->exec_handle = entry->handle;
hlist_add_head(&vma->exec_node,
&eb->buckets[hash_32(entry->handle,
@@ -669,16 +675,17 @@ static int eb_select_context(struct i915_execbuffer *eb)
struct i915_gem_context *ctx;
ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
- if (unlikely(IS_ERR(ctx)))
- return PTR_ERR(ctx);
+ if (unlikely(!ctx))
+ return -ENOENT;
if (unlikely(i915_gem_context_is_banned(ctx))) {
DRM_DEBUG("Context %u tried to submit while banned\n",
ctx->user_handle);
+ i915_gem_context_put(ctx);
return -EIO;
}
- eb->ctx = i915_gem_context_get(ctx);
+ eb->ctx = ctx;
eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
eb->context_flags = 0;
@@ -878,6 +885,7 @@ static void eb_release_vmas(const struct i915_execbuffer *eb)
GEM_BUG_ON(vma->exec_entry != entry);
vma->exec_entry = NULL;
+ __exec_to_vma(entry) = 0;
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
__eb_unreserve_vma(vma, entry);
@@ -893,7 +901,7 @@ static void eb_release_vmas(const struct i915_execbuffer *eb)
static void eb_reset_vmas(const struct i915_execbuffer *eb)
{
eb_release_vmas(eb);
- if (eb->lut_size >= 0)
+ if (eb->lut_size > 0)
memset(eb->buckets, 0,
sizeof(struct hlist_head) << eb->lut_size);
}
@@ -902,7 +910,7 @@ static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
- if (eb->lut_size >= 0)
+ if (eb->lut_size > 0)
kfree(eb->buckets);
}
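
The reworked eb_create() earlier in this hunk folds the old fallback into the loop: every pass except the final size==1 attempt carries __GFP_NORETRY | __GFP_NOWARN, so only the last-ditch allocation may warn or reclaim hard. The same halve-until-it-fits shape in plain C, with calloc standing in for kzalloc and the flag handling reduced to a comment:

#include <stdio.h>
#include <stdlib.h>

static void *alloc_hash_table(unsigned int *size_out)
{
	unsigned int size = 12;			/* start at 4096 buckets */

	do {
		/* In the kernel: GFP_TEMPORARY, plus __GFP_NORETRY |
		 * __GFP_NOWARN while size > 1, so the cheap oversized
		 * attempts fail quietly.
		 */
		void *table = calloc(1u << size, sizeof(void *));
		if (table) {
			*size_out = size;
			return table;
		}
	} while (--size);

	return NULL;				/* even 2 buckets failed */
}

int main(void)
{
	unsigned int size;
	void *table = alloc_hash_table(&size);

	if (!table)
		return 1;
	printf("got 2^%u buckets\n", size);
	free(table);
	return 0;
}
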
@@ -1199,7 +1207,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
reservation_object_unlock(batch->resv);
i915_vma_unpin(batch);
- i915_vma_move_to_active(vma, rq, true);
+ i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
reservation_object_lock(vma->resv, NULL);
reservation_object_add_excl_fence(vma->resv, &rq->fence);
reservation_object_unlock(vma->resv);
@@ -2127,7 +2135,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;
- eb.ctx = NULL;
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
if (USES_FULL_PPGTT(eb.i915))
eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
@@ -2179,8 +2186,15 @@ i915_gem_do_execbuffer(struct drm_device *dev,
}
}
- if (eb_create(&eb))
- return -ENOMEM;
+ err = eb_create(&eb);
+ if (err)
+ goto err_out_fence;
+
+ GEM_BUG_ON(!eb.lut_size);
+
+ err = eb_select_context(&eb);
+ if (unlikely(err))
+ goto err_destroy;
/*
* Take a local wakeref for preparing to dispatch the execbuf as
@@ -2190,14 +2204,11 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* 100ms.
*/
intel_runtime_pm_get(eb.i915);
+
err = i915_mutex_lock_interruptible(dev);
if (err)
goto err_rpm;
- err = eb_select_context(&eb);
- if (unlikely(err))
- goto err_unlock;
-
err = eb_relocate(&eb);
if (err)
/*
@@ -2333,12 +2344,13 @@ err_batch_unpin:
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
- i915_gem_context_put(eb.ctx);
-err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
intel_runtime_pm_put(eb.i915);
+ i915_gem_context_put(eb.ctx);
+err_destroy:
eb_destroy(&eb);
+err_out_fence:
if (out_fence_fd != -1)
put_unused_fd(out_fence_fd);
err_in_fence:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 61fc7e90a7da..10aa7762d9a6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -207,8 +207,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
if (vma->obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
- vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
- cache_level, pte_flags);
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
return 0;
}
@@ -907,37 +906,35 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
}
static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
- struct sg_table *pages,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = {
- .sg = pages->sgl,
+ .sg = vma->pages->sgl,
.dma = sg_dma_address(iter.sg),
.max = iter.dma + iter.sg->length,
};
- struct gen8_insert_pte idx = gen8_insert_pte(start);
+ struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
cache_level);
}
static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
- struct sg_table *pages,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = {
- .sg = pages->sgl,
+ .sg = vma->pages->sgl,
.dma = sg_dma_address(iter.sg),
.max = iter.dma + iter.sg->length,
};
struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
- struct gen8_insert_pte idx = gen8_insert_pte(start);
+ struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
&idx, cache_level))
@@ -1621,13 +1618,12 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
}
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
- struct sg_table *pages,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- unsigned first_entry = start >> PAGE_SHIFT;
+ unsigned first_entry = vma->node.start >> PAGE_SHIFT;
unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES;
const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
@@ -1635,7 +1631,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
gen6_pte_t *vaddr;
vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
- iter.sg = pages->sgl;
+ iter.sg = vma->pages->sgl;
iter.dma = sg_dma_address(iter.sg);
iter.max = iter.dma + iter.sg->length;
do {
@@ -2090,8 +2086,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
- struct sg_table *st,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level level,
u32 unused)
{
@@ -2102,8 +2097,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
dma_addr_t addr;
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
- gtt_entries += start >> PAGE_SHIFT;
- for_each_sgt_dma(addr, sgt_iter, st)
+ gtt_entries += vma->node.start >> PAGE_SHIFT;
+ for_each_sgt_dma(addr, sgt_iter, vma->pages)
gen8_set_pte(gtt_entries++, pte_encode | addr);
wmb();
@@ -2137,17 +2132,16 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
- struct sg_table *st,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level level,
u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
- unsigned int i = start >> PAGE_SHIFT;
+ unsigned int i = vma->node.start >> PAGE_SHIFT;
struct sgt_iter iter;
dma_addr_t addr;
- for_each_sgt_dma(addr, iter, st)
+ for_each_sgt_dma(addr, iter, vma->pages)
iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
wmb();
@@ -2229,8 +2223,7 @@ static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
struct insert_entries {
struct i915_address_space *vm;
- struct sg_table *st;
- u64 start;
+ struct i915_vma *vma;
enum i915_cache_level level;
};
@@ -2238,19 +2231,18 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
struct insert_entries *arg = _arg;
- gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
+ gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
bxt_vtd_ggtt_wa(arg->vm);
return 0;
}
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
- struct sg_table *st,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level level,
u32 unused)
{
- struct insert_entries arg = { vm, st, start, level };
+ struct insert_entries arg = { vm, vma, level };
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}
@@ -2316,15 +2308,15 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
}
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
- struct sg_table *pages,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
{
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
+ intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
+ flags);
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -2353,8 +2345,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
pte_flags |= PTE_READ_ONLY;
intel_runtime_pm_get(i915);
- vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
- cache_level, pte_flags);
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
intel_runtime_pm_put(i915);
/*
@@ -2407,16 +2398,13 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
goto err_pages;
}
- appgtt->base.insert_entries(&appgtt->base,
- vma->pages, vma->node.start,
- cache_level, pte_flags);
+ appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
+ pte_flags);
}
if (flags & I915_VMA_GLOBAL_BIND) {
intel_runtime_pm_get(i915);
- vma->vm->insert_entries(vma->vm,
- vma->pages, vma->node.start,
- cache_level, pte_flags);
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
intel_runtime_pm_put(i915);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 1b2a56c3e5d3..b4e3aa7c0ce1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -313,8 +313,7 @@ struct i915_address_space {
enum i915_cache_level cache_level,
u32 flags);
void (*insert_entries)(struct i915_address_space *vm,
- struct sg_table *st,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
void (*cleanup)(struct i915_address_space *vm);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 8c59c79cbd8b..483af8921060 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -384,7 +384,11 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
engine->context_unpin(engine, engine->last_retired_context);
engine->last_retired_context = request->ctx;
- dma_fence_signal(&request->fence);
+ spin_lock_irq(&request->lock);
+ if (request->waitboost)
+ atomic_dec(&request->i915->rps.num_waiters);
+ dma_fence_signal_locked(&request->fence);
+ spin_unlock_irq(&request->lock);
i915_priotree_fini(request->i915, &request->priotree);
i915_gem_request_put(request);
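
The retire path above closes the loop on waitboosting: a boosted request bumps rps.num_waiters once, and retiring it drops the count, so gen6_pm_rps_work() can read a single atomic instead of walking every engine's waiters. A hedged sketch of that accounting (names illustrative, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int num_waiters;

struct request { bool waitboost; };

static void boost(struct request *rq)
{
	if (!rq->waitboost) {		/* count each request once */
		rq->waitboost = true;
		atomic_fetch_add(&num_waiters, 1);
	}
}

static void retire(struct request *rq)
{
	if (rq->waitboost)
		atomic_fetch_sub(&num_waiters, 1);
}

int main(void)
{
	struct request rq = { false };

	boost(&rq);
	boost(&rq);	/* no double count */
	printf("client_boost=%d\n", atomic_load(&num_waiters) > 0); /* 1 */
	retire(&rq);
	printf("client_boost=%d\n", atomic_load(&num_waiters) > 0); /* 0 */
	return 0;
}
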
@@ -639,6 +643,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
req->file_priv = NULL;
req->batch = NULL;
req->capture_list = NULL;
+ req->waitboost = false;
/*
* Reserve space in the ring buffer for all the commands required to
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 7b7c84369d78..49a4c8994ff0 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -129,7 +129,7 @@ struct drm_i915_gem_request {
* It is used by the driver to then queue the request for execution.
*/
struct i915_sw_fence submit;
- wait_queue_t submitq;
+ wait_queue_entry_t submitq;
wait_queue_head_t execute;
/* A list of everyone we wait upon, and everyone who waits upon us.
@@ -184,6 +184,8 @@ struct drm_i915_gem_request {
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
+ bool waitboost;
+
/** engine->request_list entry for this request */
struct list_head link;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index e18f350bc364..ae70283470a6 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -463,6 +463,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " hangcheck action timestamp: %lu, %u ms ago\n",
ee->hangcheck_timestamp,
jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));
+ err_printf(m, " engine reset count: %u\n", ee->reset_count);
error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
@@ -1236,6 +1237,8 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
ee->hangcheck_action = engine->hangcheck.action;
ee->hangcheck_stalled = engine->hangcheck.stalled;
+ ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
+ engine);
if (USES_PPGTT(dev_priv)) {
int i;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index bce2d1feceb1..eb4f1dca2077 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1091,18 +1091,6 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
return events;
}
-static bool any_waiters(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id)
- if (intel_engine_has_waiter(engine))
- return true;
-
- return false;
-}
-
static void gen6_pm_rps_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
@@ -1114,7 +1102,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->rps.interrupts_enabled) {
pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
- client_boost = fetch_and_zero(&dev_priv->rps.client_boost);
+ client_boost = atomic_read(&dev_priv->rps.num_waiters);
}
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1131,7 +1119,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
new_delay = dev_priv->rps.cur_freq;
min = dev_priv->rps.min_freq_softlimit;
max = dev_priv->rps.max_freq_softlimit;
- if (client_boost || any_waiters(dev_priv))
+ if (client_boost)
max = dev_priv->rps.max_freq;
if (client_boost && new_delay < dev_priv->rps.boost_freq) {
new_delay = dev_priv->rps.boost_freq;
@@ -1144,7 +1132,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
if (new_delay >= dev_priv->rps.max_freq_softlimit)
adj = 0;
- } else if (client_boost || any_waiters(dev_priv)) {
+ } else if (client_boost) {
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
@@ -2599,60 +2587,93 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
return ret;
}
+struct wedge_me {
+ struct delayed_work work;
+ struct drm_i915_private *i915;
+ const char *name;
+};
+
+static void wedge_me(struct work_struct *work)
+{
+ struct wedge_me *w = container_of(work, typeof(*w), work.work);
+
+ dev_err(w->i915->drm.dev,
+ "%s timed out, cancelling all in-flight rendering.\n",
+ w->name);
+ i915_gem_set_wedged(w->i915);
+}
+
+static void __init_wedge(struct wedge_me *w,
+ struct drm_i915_private *i915,
+ long timeout,
+ const char *name)
+{
+ w->i915 = i915;
+ w->name = name;
+
+ INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
+ schedule_delayed_work(&w->work, timeout);
+}
+
+static void __fini_wedge(struct wedge_me *w)
+{
+ cancel_delayed_work_sync(&w->work);
+ destroy_delayed_work_on_stack(&w->work);
+ w->i915 = NULL;
+}
+
+#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
+ for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \
+ (W)->i915; \
+ __fini_wedge((W)))
+
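The macro above is the for-loop-as-scope idiom: the init clause arms the watchdog, the body runs exactly once, and the increment clause disarms it while also clearing the loop condition. Note that a break inside the body would skip the cleanup, which is one reason the guarded block is kept simple. The trick in isolation, as a runnable toy:

#include <stdio.h>

struct guard { int armed; };

static void arm(struct guard *g)    { g->armed = 1; printf("armed\n"); }
static void disarm(struct guard *g) { g->armed = 0; printf("disarmed\n"); }

/* Body runs exactly once: disarm() clears .armed, ending the loop. */
#define with_guard(g) \
	for (arm(g); (g)->armed; disarm(g))

int main(void)
{
	struct guard g;

	with_guard(&g) {
		printf("protected section\n");
	}
	return 0;
}
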
/**
- * i915_reset_and_wakeup - do process context error handling work
+ * i915_reset_device - do process context error handling work
* @dev_priv: i915 device private
*
* Fire an error uevent so userspace can see that a hang or error
* was detected.
*/
-static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
+static void i915_reset_device(struct drm_i915_private *dev_priv)
{
struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
+ struct wedge_me w;
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
- intel_prepare_reset(dev_priv);
+ /* Use a watchdog to ensure that our reset completes */
+ i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
+ intel_prepare_reset(dev_priv);
- set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
- wake_up_all(&dev_priv->gpu_error.wait_queue);
+ /* Signal that locked waiters should reset the GPU */
+ set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
+ wake_up_all(&dev_priv->gpu_error.wait_queue);
- do {
- /*
- * All state reset _must_ be completed before we update the
- * reset counter, for otherwise waiters might miss the reset
- * pending state and not properly drop locks, resulting in
- * deadlocks with the reset work.
+ /* Wait for anyone holding the lock to wake up, without
+ * blocking indefinitely on struct_mutex.
*/
- if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
- i915_reset(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- }
-
- /* We need to wait for anyone holding the lock to wakeup */
- } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
- I915_RESET_HANDOFF,
- TASK_UNINTERRUPTIBLE,
- HZ));
+ do {
+ if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
+ i915_reset(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ }
+ } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
+ I915_RESET_HANDOFF,
+ TASK_UNINTERRUPTIBLE,
+ 1));
- intel_finish_reset(dev_priv);
+ intel_finish_reset(dev_priv);
+ }
if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
kobject_uevent_env(kobj,
KOBJ_CHANGE, reset_done_event);
-
- /*
- * Note: The wake_up also serves as a memory barrier so that
- * waiters see the updated value of the dev_priv->gpu_error.
- */
- clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
- wake_up_all(&dev_priv->gpu_error.reset_queue);
}
static inline void
@@ -2722,6 +2743,8 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
u32 engine_mask,
const char *fmt, ...)
{
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
va_list args;
char error_msg[80];
@@ -2741,14 +2764,56 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
i915_capture_error_state(dev_priv, engine_mask, error_msg);
i915_clear_error_registers(dev_priv);
+ /*
+ * Try engine reset when available. We fall back to full reset if
+ * the engine reset fails.
+ */
+ if (intel_has_reset_engine(dev_priv)) {
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ BUILD_BUG_ON(I915_RESET_HANDOFF >= I915_RESET_ENGINE);
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &dev_priv->gpu_error.flags))
+ continue;
+
+ if (i915_reset_engine(engine) == 0)
+ engine_mask &= ~intel_engine_flag(engine);
+
+ clear_bit(I915_RESET_ENGINE + engine->id,
+ &dev_priv->gpu_error.flags);
+ wake_up_bit(&dev_priv->gpu_error.flags,
+ I915_RESET_ENGINE + engine->id);
+ }
+ }
+
if (!engine_mask)
goto out;
- if (test_and_set_bit(I915_RESET_BACKOFF,
- &dev_priv->gpu_error.flags))
+ /* Full reset needs the mutex, so stop any other user from attempting one. */
+ if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
+ wait_event(dev_priv->gpu_error.reset_queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &dev_priv->gpu_error.flags));
goto out;
+ }
+
+ /* Prevent any other reset-engine attempt. */
+ for_each_engine(engine, dev_priv, tmp) {
+ while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &dev_priv->gpu_error.flags))
+ wait_on_bit(&dev_priv->gpu_error.flags,
+ I915_RESET_ENGINE + engine->id,
+ TASK_UNINTERRUPTIBLE);
+ }
- i915_reset_and_wakeup(dev_priv);
+ i915_reset_device(dev_priv);
+
+ for_each_engine(engine, dev_priv, tmp) {
+ clear_bit(I915_RESET_ENGINE + engine->id,
+ &dev_priv->gpu_error.flags);
+ }
+
+ clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
+ wake_up_all(&dev_priv->gpu_error.reset_queue);
out:
intel_runtime_pm_put(dev_priv);
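
The flag choreography above is built entirely from test_and_set_bit()/clear_bit(): per-engine resets exclude each other through their own bits, and the full-reset path claims every engine bit before touching the device. A userspace analogue with C11 atomics (wait_on_bit() is reduced to a spin here for brevity):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong flags;

static bool test_and_set(int bit)
{
	unsigned long old = atomic_fetch_or(&flags, 1ul << bit);
	return old & (1ul << bit);	/* true: someone else owns it */
}

static void clear(int bit)
{
	atomic_fetch_and(&flags, ~(1ul << bit));
}

int main(void)
{
	/* Engine 0's reset path: skip if another reset already owns it. */
	if (!test_and_set(0)) {
		printf("resetting engine 0\n");
		clear(0);
	}

	/* Full-reset path: claim every engine bit before proceeding.
	 * In the kernel, a failed test_and_set would wait_on_bit()
	 * rather than spin.
	 */
	for (int bit = 0; bit < 2; bit++)
		while (test_and_set(bit))
			;	/* spin: stand-in for wait_on_bit() */
	printf("device reset with all engines held\n");
	return 0;
}
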
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index b6a7e363d076..88b9d3e6713a 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -46,7 +46,7 @@ struct i915_params i915 __read_mostly = {
.prefault_disable = 0,
.load_detect_test = 0,
.force_reset_modeset_test = 0,
- .reset = true,
+ .reset = 2,
.error_capture = true,
.invert_brightness = 0,
.disable_display = 0,
@@ -63,8 +63,9 @@ struct i915_params i915 __read_mostly = {
.huc_firmware_path = NULL,
.enable_dp_mst = true,
.inject_load_failure = 0,
- .enable_dpcd_backlight = false,
+ .enable_dpcd_backlight = -1,
.enable_gvt = false,
+ .enable_dbc = true,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -115,8 +116,8 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-module_param_named_unsafe(reset, i915.reset, bool, 0600);
-MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+module_param_named_unsafe(reset, i915.reset, int, 0600);
+MODULE_PARM_DESC(reset, "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
module_param_named(error_capture, i915.error_capture, bool, 0600);
@@ -246,10 +247,15 @@ MODULE_PARM_DESC(enable_dp_mst,
module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
MODULE_PARM_DESC(inject_load_failure,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
-module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600);
+module_param_named_unsafe(enable_dpcd_backlight, i915.enable_dpcd_backlight, int, 0600);
MODULE_PARM_DESC(enable_dpcd_backlight,
- "Enable support for DPCD backlight control (default:false)");
+ "Enable support for DPCD backlight control "
+ "(-1:auto (default), 0:force disable, 1:force enabled if supported");
module_param_named(enable_gvt, i915.enable_gvt, bool, 0400);
MODULE_PARM_DESC(enable_gvt,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
+
+module_param_named_unsafe(enable_dbc, i915.enable_dbc, bool, 0600);
+MODULE_PARM_DESC(enable_dbc,
+ "Enable support for dynamic backlight control (default:true)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 34148cc8637c..057e203e6bda 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -51,7 +51,9 @@
func(int, use_mmio_flip); \
func(int, mmio_debug); \
func(int, edp_vswing); \
+ func(int, reset); \
func(unsigned int, inject_load_failure); \
+ func(int, enable_dpcd_backlight); \
/* leave bools at the end to not create holes */ \
func(bool, alpha_support); \
func(bool, enable_cmd_parser); \
@@ -60,14 +62,13 @@
func(bool, prefault_disable); \
func(bool, load_detect_test); \
func(bool, force_reset_modeset_test); \
- func(bool, reset); \
func(bool, error_capture); \
func(bool, disable_display); \
func(bool, verbose_state_checks); \
func(bool, nuclear_pageflip); \
func(bool, enable_dp_mst); \
- func(bool, enable_dpcd_backlight); \
- func(bool, enable_gvt)
+ func(bool, enable_gvt); \
+ func(bool, enable_dbc)
#define MEMBER(T, member) T member
struct i915_params {
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 506ec32b9e53..a1e6b696bcfa 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -310,7 +310,8 @@ static const struct intel_device_info intel_haswell_info = {
BDW_COLORS, \
.has_logical_ring_contexts = 1, \
.has_full_48bit_ppgtt = 1, \
- .has_64bit_reloc = 1
+ .has_64bit_reloc = 1, \
+ .has_reset_engine = 1
#define BDW_PLATFORM \
BDW_FEATURES, \
@@ -342,6 +343,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_gmch_display = 1,
.has_aliasing_ppgtt = 1,
.has_full_ppgtt = 1,
+ .has_reset_engine = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS,
@@ -387,6 +389,7 @@ static const struct intel_device_info intel_skylake_gt3_info = {
.has_aliasing_ppgtt = 1, \
.has_full_ppgtt = 1, \
.has_full_48bit_ppgtt = 1, \
+ .has_reset_engine = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS, \
BDW_COLORS
@@ -446,6 +449,7 @@ static const struct intel_device_info intel_cannonlake_info = {
.gen = 10,
.ddb_size = 1024,
.has_csr = 1,
+ .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
};
/*
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 38c44407bafc..d9f77a4d85db 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1746,7 +1746,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
goto out;
/* Update all contexts now that we've stalled the submission. */
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
struct intel_context *ce = &ctx->engine[RCS];
u32 *regs;
@@ -2067,10 +2067,6 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return ret;
}
- ret = alloc_oa_buffer(dev_priv);
- if (ret)
- goto err_oa_buf_alloc;
-
/* PRM - observability performance counters:
*
* OACONTROL, performance counter enable, note:
@@ -2086,6 +2082,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
intel_runtime_pm_get(dev_priv);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ ret = alloc_oa_buffer(dev_priv);
+ if (ret)
+ goto err_oa_buf_alloc;
+
ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv);
if (ret)
goto err_enable;
@@ -2097,11 +2097,11 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return 0;
err_enable:
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
free_oa_buffer(dev_priv);
err_oa_buf_alloc:
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_put(dev_priv);
if (stream->ctx)
oa_put_render_ctx_id(stream);
@@ -2444,7 +2444,7 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
list_del(&stream->link);
if (stream->ctx)
- i915_gem_context_put_unlocked(stream->ctx);
+ i915_gem_context_put(stream->ctx);
kfree(stream);
}
@@ -2633,7 +2633,7 @@ err_alloc:
kfree(stream);
err_ctx:
if (specific_ctx)
- i915_gem_context_put_unlocked(specific_ctx);
+ i915_gem_context_put(specific_ctx);
err:
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index bd535f12db18..c712d01f92ab 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1764,8 +1764,11 @@ enum skl_disp_power_wells {
_CNL_PORT_TX_DW2_LN0_AE, \
_CNL_PORT_TX_DW2_LN0_F)
#define SWING_SEL_UPPER(x) ((x >> 3) << 15)
+#define SWING_SEL_UPPER_MASK (1 << 15)
#define SWING_SEL_LOWER(x) ((x & 0x7) << 11)
+#define SWING_SEL_LOWER_MASK (0x7 << 11)
#define RCOMP_SCALAR(x) ((x) << 0)
+#define RCOMP_SCALAR_MASK (0xFF << 0)
#define _CNL_PORT_TX_DW4_GRP_AE 0x162350
#define _CNL_PORT_TX_DW4_GRP_B 0x1623D0
@@ -1795,8 +1798,11 @@ enum skl_disp_power_wells {
_CNL_PORT_TX_DW4_LN0_F)
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
+#define POST_CURSOR_1_MASK (0x3F << 12)
#define POST_CURSOR_2(x) ((x) << 6)
+#define POST_CURSOR_2_MASK (0x3F << 6)
#define CURSOR_COEFF(x) ((x) << 0)
+#define CURSOR_COEFF_MASK (0x3F << 0)
#define _CNL_PORT_TX_DW5_GRP_AE 0x162354
#define _CNL_PORT_TX_DW5_GRP_B 0x1623D4
@@ -1825,7 +1831,9 @@ enum skl_disp_power_wells {
#define TX_TRAINING_EN (1 << 31)
#define TAP3_DISABLE (1 << 29)
#define SCALING_MODE_SEL(x) ((x) << 18)
+#define SCALING_MODE_SEL_MASK (0x7 << 18)
#define RTERM_SELECT(x) ((x) << 3)
+#define RTERM_SELECT_MASK (0x7 << 3)
#define _CNL_PORT_TX_DW7_GRP_AE 0x16235C
#define _CNL_PORT_TX_DW7_GRP_B 0x1623DC
@@ -1852,6 +1860,7 @@ enum skl_disp_power_wells {
_CNL_PORT_TX_DW7_LN0_AE, \
_CNL_PORT_TX_DW7_LN0_F)
#define N_SCALAR(x) ((x) << 24)
+#define N_SCALAR_MASK (0x7F << 24)
/* The spec defines this only for BXT PHY0, but let's assume that this
* would exist for PHY1 too if it had a second channel.
@@ -3513,7 +3522,7 @@ enum skl_disp_power_wells {
#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
-#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
+#define GT_INTERVAL_FROM_US(dev_priv, us) (INTEL_GEN(dev_priv) >= 9 ? \
(IS_GEN9_LP(dev_priv) ? \
INTERVAL_0_833_US(us) : \
INTERVAL_1_33_US(us)) : \
@@ -3522,7 +3531,7 @@ enum skl_disp_power_wells {
#define INTERVAL_1_28_TO_US(interval) (((interval) << 7) / 100)
#define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3)
#define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6)
-#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \
+#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (INTEL_GEN(dev_priv) >= 9 ? \
(IS_GEN9_LP(dev_priv) ? \
INTERVAL_0_833_TO_US(interval) : \
INTERVAL_1_33_TO_US(interval)) : \
@@ -8334,6 +8343,7 @@ enum {
#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25)
#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25)
#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
+#define DPLL_CFGCR0_DCO_FRAC_SHIFT (10)
#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
#define CNL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR0, _CNL_DPLL1_CFGCR0)
@@ -8341,6 +8351,7 @@ enum {
#define _CNL_DPLL0_CFGCR1 0x6C004
#define _CNL_DPLL1_CFGCR1 0x6C084
#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
+#define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
#define DPLL_CFGCR1_KDIV_MASK (7 << 6)
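A few hunks up, GT_INTERVAL_FROM_US and GT_PM_INTERVAL_TO_US switch from IS_GEN9() to INTEL_GEN() >= 9 so gen10 reuses the gen9 encodings. A quick userspace round trip through the 1.33 us macros those definitions use:

#include <stdio.h>

#define INTERVAL_1_33_US(us)            (((us) * 3) >> 2)
#define INTERVAL_1_33_TO_US(interval)   (((interval) << 2) / 3)

int main(void)
{
        unsigned int us = 100;
        unsigned int ticks = INTERVAL_1_33_US(us);    /* 75 hw ticks */

        printf("%u us -> %u ticks -> %u us\n",
               us, ticks, INTERVAL_1_33_TO_US(ticks));
        return 0;
}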
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 474d23c0c0ce..f29540f922af 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -125,7 +125,7 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
struct list_head *continuation)
{
wait_queue_head_t *x = &fence->wait;
- wait_queue_t *pos, *next;
+ wait_queue_entry_t *pos, *next;
unsigned long flags;
debug_fence_deactivate(fence);
@@ -133,31 +133,30 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
/*
* To prevent unbounded recursion as we traverse the graph of
- * i915_sw_fences, we move the task_list from this, the next ready
- * fence, to the tail of the original fence's task_list
+ * i915_sw_fences, we move the entry list from this, the next ready
+ * fence, to the tail of the original fence's entry list
* (and so added to the list to be woken).
*/
spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
if (continuation) {
- list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
+ list_for_each_entry_safe(pos, next, &x->head, entry) {
if (pos->func == autoremove_wake_function)
pos->func(pos, TASK_NORMAL, 0, continuation);
else
- list_move_tail(&pos->task_list, continuation);
+ list_move_tail(&pos->entry, continuation);
}
} else {
LIST_HEAD(extra);
do {
- list_for_each_entry_safe(pos, next,
- &x->task_list, task_list)
+ list_for_each_entry_safe(pos, next, &x->head, entry)
pos->func(pos, TASK_NORMAL, 0, &extra);
if (list_empty(&extra))
break;
- list_splice_tail_init(&extra, &x->task_list);
+ list_splice_tail_init(&extra, &x->head);
} while (1);
}
spin_unlock_irqrestore(&x->lock, flags);
@@ -222,9 +221,9 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence)
i915_sw_fence_complete(fence);
}
-static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
+static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
{
- list_del(&wq->task_list);
+ list_del(&wq->entry);
__i915_sw_fence_complete(wq->private, key);
if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
@@ -235,7 +234,7 @@ static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *
static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
const struct i915_sw_fence * const signaler)
{
- wait_queue_t *wq;
+ wait_queue_entry_t *wq;
if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
return false;
@@ -243,7 +242,7 @@ static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
if (fence == signaler)
return true;
- list_for_each_entry(wq, &fence->wait.task_list, task_list) {
+ list_for_each_entry(wq, &fence->wait.head, entry) {
if (wq->func != i915_sw_fence_wake)
continue;
@@ -256,12 +255,12 @@ static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
{
- wait_queue_t *wq;
+ wait_queue_entry_t *wq;
if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
return;
- list_for_each_entry(wq, &fence->wait.task_list, task_list) {
+ list_for_each_entry(wq, &fence->wait.head, entry) {
if (wq->func != i915_sw_fence_wake)
continue;
@@ -288,7 +287,7 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *signaler,
- wait_queue_t *wq, gfp_t gfp)
+ wait_queue_entry_t *wq, gfp_t gfp)
{
unsigned long flags;
int pending;
@@ -318,7 +317,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
pending |= I915_SW_FENCE_FLAG_ALLOC;
}
- INIT_LIST_HEAD(&wq->task_list);
+ INIT_LIST_HEAD(&wq->entry);
wq->flags = pending;
wq->func = i915_sw_fence_wake;
wq->private = fence;
@@ -327,7 +326,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
spin_lock_irqsave(&signaler->wait.lock, flags);
if (likely(!i915_sw_fence_done(signaler))) {
- __add_wait_queue_tail(&signaler->wait, wq);
+ __add_wait_queue_entry_tail(&signaler->wait, wq);
pending = 1;
} else {
i915_sw_fence_wake(wq, 0, 0, NULL);
@@ -340,7 +339,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *signaler,
- wait_queue_t *wq)
+ wait_queue_entry_t *wq)
{
return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
}
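The i915_sw_fence changes above are mechanical fallout from the 4.13 core waitqueue rename (wait_queue_t becoming wait_queue_entry_t, with the task_list members renamed). A simplified userspace mock of the renamed types, only to show where the old names went; these are stand-ins, not the real kernel definitions:

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* was: wait_queue_t, with a ->task_list member */
typedef struct wait_queue_entry {
        unsigned int flags;
        void *private;
        int (*func)(struct wait_queue_entry *wq, unsigned mode,
                    int flags, void *key);
        struct list_head entry;          /* renamed from task_list */
} wait_queue_entry_t;

/* the head keeps its own list; ->task_list became ->head */
struct wait_queue_head {
        struct list_head head;           /* renamed from task_list */
};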
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 1d3b6051daaf..fe2ef4dadfc6 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -65,7 +65,7 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence);
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *after,
- wait_queue_t *wq);
+ wait_queue_entry_t *wq);
int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
struct i915_sw_fence *after,
gfp_t gfp);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 1eef3fae4db3..7fcf00622c4c 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -96,7 +96,7 @@ static struct attribute *rc6_attrs[] = {
NULL
};
-static struct attribute_group rc6_attr_group = {
+static const struct attribute_group rc6_attr_group = {
.name = power_group_name,
.attrs = rc6_attrs
};
@@ -107,7 +107,7 @@ static struct attribute *rc6p_attrs[] = {
NULL
};
-static struct attribute_group rc6p_attr_group = {
+static const struct attribute_group rc6p_attr_group = {
.name = power_group_name,
.attrs = rc6p_attrs
};
@@ -117,7 +117,7 @@ static struct attribute *media_rc6_attrs[] = {
NULL
};
-static struct attribute_group media_rc6_attr_group = {
+static const struct attribute_group media_rc6_attr_group = {
.name = power_group_name,
.attrs = media_rc6_attrs
};
@@ -209,7 +209,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
memcpy(*remap_info + (offset/4), buf, count);
/* NB: We defer the remapping until we switch to the context */
- list_for_each_entry(ctx, &dev_priv->context_list, link)
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link)
ctx->remap_slice |= (1<<slice);
ret = count;
@@ -253,7 +253,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else {
u32 rpstat = I915_READ(GEN6_RPSTAT1);
- if (IS_GEN9(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 532c709febbd..958be0a95960 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -579,11 +579,17 @@ err_unpin:
static void i915_vma_destroy(struct i915_vma *vma)
{
+ int i;
+
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(i915_vma_is_active(vma));
GEM_BUG_ON(!i915_vma_is_closed(vma));
GEM_BUG_ON(vma->fence);
+ for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
+ GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
+ GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
+
list_del(&vma->vm_link);
if (!i915_vma_is_ggtt(vma))
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
@@ -672,12 +678,16 @@ int i915_vma_unbind(struct i915_vma *vma)
break;
}
+ if (!ret) {
+ ret = i915_gem_active_retire(&vma->last_fence,
+ &vma->vm->i915->drm.struct_mutex);
+ }
+
__i915_vma_unpin(vma);
if (ret)
return ret;
-
- GEM_BUG_ON(i915_vma_is_active(vma));
}
+ GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_pinned(vma))
return -EBUSY;
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index eb638a1e69d2..42fb436f6cdc 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -15,13 +15,9 @@ static struct intel_dsm_priv {
acpi_handle dhandle;
} intel_dsm_priv;
-static const u8 intel_dsm_guid[] = {
- 0xd3, 0x73, 0xd8, 0x7e,
- 0xd0, 0xc2,
- 0x4f, 0x4e,
- 0xa8, 0x54,
- 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
-};
+static const guid_t intel_dsm_guid =
+ GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f,
+ 0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c);
static char *intel_dsm_port_name(u8 id)
{
@@ -80,7 +76,7 @@ static void intel_dsm_platform_mux_info(void)
int i;
union acpi_object *pkg, *connector_count;
- pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, intel_dsm_guid,
+ pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, &intel_dsm_guid,
INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
NULL, ACPI_TYPE_PACKAGE);
if (!pkg) {
@@ -118,7 +114,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
if (!dhandle)
return false;
- if (!acpi_check_dsm(dhandle, intel_dsm_guid, INTEL_DSM_REVISION_ID,
+ if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID,
1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
DRM_DEBUG_KMS("no _DSM method for intel device\n");
return false;
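The guid_t conversion above is byte-for-byte equivalent to the old raw array because GUID_INIT() stores its first three fields little-endian and the trailing eight bytes verbatim. A userspace re-check of that equivalence (it assumes a little-endian host, which is what the memcpy trick relies on):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const uint8_t old[16] = { 0xd3, 0x73, 0xd8, 0x7e, 0xd0, 0xc2,
                                  0x4f, 0x4e, 0xa8, 0x54,
                                  0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c };
        uint32_t a = 0x7ed873d3;
        uint16_t b = 0xc2d0, c = 0x4e4f;
        uint8_t g[16];

        memcpy(g, &a, 4);                 /* little-endian on an LE host */
        memcpy(g + 4, &b, 2);
        memcpy(g + 6, &c, 2);
        memcpy(g + 8, old + 8, 8);        /* trailing bytes are verbatim */
        printf("match: %d\n", memcmp(g, old, 16) == 0);   /* match: 1 */
        return 0;
}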
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 4325cb0a04f5..ee76fab7bb6f 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -114,6 +114,8 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_plane_state *state = &intel_state->base;
struct intel_plane *intel_plane = to_intel_plane(plane);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
int ret;
/*
@@ -173,6 +175,19 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ /*
+ * Y-tiling is not supported in IF-ID Interlace mode in
+ * GEN9 and above.
+ */
+ if (state->fb && INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (state->fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ state->fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
+ DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
+ return -EINVAL;
+ }
+ }
+
/* FIXME pre-g4x don't work like this */
if (intel_state->base.visible)
crtc_state->active_planes |= BIT(intel_plane->id);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 639d45c1dd2e..82b144cdfa1d 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1187,6 +1187,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (is_dvi) {
info->alternate_ddc_pin = ddc_pin;
+ /*
+ * All VBTs that we have got so far for B stepping have this
+ * information wrong for Port D. So, let's just ignore it for now.
+ */
+ if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0) &&
+ port == PORT_D) {
+ info->alternate_ddc_pin = 0;
+ }
+
sanitize_ddc_pin(dev_priv, port);
}
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index b8914db7d2e1..1241e5891b29 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -491,6 +491,14 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
int cdclk = cdclk_state->cdclk;
u32 val, cmd;
+ /* There are cases where we can end up here with power domains
+ * off and a CDCLK frequency other than the minimum, like when
+ * issuing a modeset without actually changing any display after
+ * a system suspend. So grab the PIPE-A domain, which covers
+ * the HW blocks needed for the following programming.
+ */
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
cmd = 2;
else if (cdclk == 266667)
@@ -549,6 +557,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
intel_update_cdclk(dev_priv);
vlv_program_pfi_credits(dev_priv);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
static void chv_set_cdclk(struct drm_i915_private *dev_priv,
@@ -568,6 +578,14 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
return;
}
+ /* There are cases where we can end up here with power domains
+ * off and a CDCLK frequency other than the minimum, like when
+ * issuing a modeset without actually changing any display after
+ * a system suspend. So grab the PIPE-A domain, which covers
+ * the HW blocks needed for the following programming.
+ */
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+
/*
* Specs are full of misinformation, but testing on actual
* hardware has shown that we just need to write the desired
@@ -590,6 +608,8 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
intel_update_cdclk(dev_priv);
vlv_program_pfi_credits(dev_priv);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
static int bdw_calc_cdclk(int max_pixclk)
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 306c6b06b330..f85d57555957 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -615,7 +615,7 @@ void intel_color_init(struct drm_crtc *crtc)
IS_BROXTON(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = broadwell_load_luts;
- } else if (IS_GEMINILAKE(dev_priv)) {
+ } else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = glk_load_luts;
} else {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index db8093863f0c..efb13582dc73 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1103,6 +1103,62 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
return dco_freq / (p0 * p1 * p2 * 5);
}
+static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ uint32_t pll_id)
+{
+ uint32_t cfgcr0, cfgcr1;
+ uint32_t p0, p1, p2, dco_freq, ref_clock;
+
+ cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
+ cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
+
+ p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+ p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
+
+ if (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+ p1 = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ DPLL_CFGCR1_QDIV_RATIO_SHIFT;
+ else
+ p1 = 1;
+
+ switch (p0) {
+ case DPLL_CFGCR1_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR1_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR1_PDIV_5:
+ p0 = 5;
+ break;
+ case DPLL_CFGCR1_PDIV_7:
+ p0 = 7;
+ break;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR1_KDIV_1:
+ p2 = 1;
+ break;
+ case DPLL_CFGCR1_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR1_KDIV_4:
+ p2 = 4;
+ break;
+ }
+
+ ref_clock = dev_priv->cdclk.hw.ref;
+
+ dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock;
+
+ dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ DPLL_CFGCR0_DCO_FRAC_SHIFT) * ref_clock) / 0x8000;
+
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
+
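For reference, the arithmetic cnl_calc_wrpll_link() performs, redone standalone. The inputs below (integer 337, fraction 0x4000, 24000 kHz reference, pdiv=2/qdiv=1/kdiv=2) are made-up example values, not anything read from an actual register:

#include <stdint.h>
#include <stdio.h>

static unsigned int dco_to_port_clock(unsigned int integer, unsigned int frac,
                                      unsigned int ref_khz, unsigned int p0,
                                      unsigned int p1, unsigned int p2)
{
        uint64_t dco = (uint64_t)integer * ref_khz +
                       ((uint64_t)frac * ref_khz) / 0x8000;

        return dco / (p0 * p1 * p2 * 5);
}

int main(void)
{
        /* 337 + 0x4000/0x8000 = 337.5 DCO cycles per 24 MHz ref cycle:
         * dco = 8,100,000 kHz; / (2 * 1 * 2 * 5) = 405,000 kHz. */
        printf("%u kHz\n", dco_to_port_clock(337, 0x4000, 24000, 2, 1, 2));
        return 0;
}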
static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
{
int dotclock;
@@ -1124,6 +1180,59 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
+static void cnl_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int link_clock = 0;
+ uint32_t cfgcr0, pll_id;
+
+ pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+
+ cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
+
+ if (cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
+ link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ } else {
+ link_clock = cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
+
+ switch (link_clock) {
+ case DPLL_CFGCR0_LINK_RATE_810:
+ link_clock = 81000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_2700:
+ link_clock = 270000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_3240:
+ link_clock = 324000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_4050:
+ link_clock = 405000;
+ break;
+ default:
+ WARN(1, "Unsupported link rate\n");
+ break;
+ }
+ link_clock *= 2;
+ }
+
+ pipe_config->port_clock = link_clock;
+
+ ddi_dotclock_get(pipe_config);
+}
+
static void skl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -1267,6 +1376,8 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
skl_ddi_clock_get(encoder, pipe_config);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_clock_get(encoder, pipe_config);
+ else if (IS_CANNONLAKE(dev_priv))
+ cnl_ddi_clock_get(encoder, pipe_config);
}
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@@ -1813,11 +1924,14 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
/* Set PORT_TX_DW5 Scaling Mode Sel to 010b. */
val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+ val &= ~SCALING_MODE_SEL_MASK;
val |= SCALING_MODE_SEL(2);
I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
/* Program PORT_TX_DW2 */
val = I915_READ(CNL_PORT_TX_DW2_LN0(port));
+ val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+ RCOMP_SCALAR_MASK);
val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
/* Rcomp scalar is fixed as 0x98 for every table entry */
@@ -1828,6 +1942,8 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
/* We cannot write to GRP. It would overwrite individual loadgen */
for (ln = 0; ln < 4; ln++) {
val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
+ val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+ CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
@@ -1837,12 +1953,14 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
/* Program PORT_TX_DW5 */
/* All DW5 values are fixed for every table entry */
val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+ val &= ~RTERM_SELECT_MASK;
val |= RTERM_SELECT(6);
val |= TAP3_DISABLE;
I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
/* Program PORT_TX_DW7 */
val = I915_READ(CNL_PORT_TX_DW7_LN0(port));
+ val &= ~N_SCALAR_MASK;
val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
I915_WRITE(CNL_PORT_TX_DW7_GRP(port), val);
}
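The new *_MASK macros from the i915_reg.h hunks exist so this sequence can do a true read-modify-write instead of OR-ing into whatever the register already held. A toy illustration with one of the masks:

#include <stdint.h>
#include <stdio.h>

#define SCALING_MODE_SEL(x)     ((x) << 18)
#define SCALING_MODE_SEL_MASK   (0x7 << 18)

int main(void)
{
        uint32_t val = 0xffffffff;         /* pretend stale register value */

        val &= ~SCALING_MODE_SEL_MASK;     /* clear the field first ... */
        val |= SCALING_MODE_SEL(2);        /* ... then set the new value */
        printf("%u\n", (val & SCALING_MODE_SEL_MASK) >> 18);   /* prints 2 */
        return 0;
}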
@@ -1861,9 +1979,12 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
if ((intel_dp) && (type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP)) {
width = intel_dp->lane_count;
rate = intel_dp->link_rate;
- } else {
+ } else if (type == INTEL_OUTPUT_HDMI) {
width = 4;
/* Rate is always below 6 GHz for HDMI */
+ } else {
+ MISSING_CASE(type);
+ return;
}
/*
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 77d3214e1a77..5f91ddc78c7a 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -363,7 +363,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
*/
if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
- (dev_priv->pch_type == PCH_CPT &&
+ (HAS_PCH_CPT(dev_priv) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
DRM_INFO("Display fused off, disabling\n");
info->num_pipes = 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index da5d49407594..e92fd14c06c7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3311,7 +3311,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl = PLANE_CTL_ENABLE;
- if (!IS_GEMINILAKE(dev_priv)) {
+ if (!IS_GEMINILAKE(dev_priv) && !IS_CANNONLAKE(dev_priv)) {
plane_ctl |=
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE |
@@ -3367,7 +3367,7 @@ static void skylake_update_primary_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (IS_GEMINILAKE(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
PLANE_COLOR_PIPE_GAMMA_ENABLE |
PLANE_COLOR_PIPE_CSC_ENABLE |
@@ -4612,6 +4612,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
&crtc_state->scaler_state;
struct intel_crtc *intel_crtc =
to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
int need_scaling;
/*
@@ -4622,6 +4625,18 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
need_scaling = src_w != dst_w || src_h != dst_h;
/*
+ * Scaling/fitting not supported in IF-ID mode in GEN9+
+ * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
+ * Once NV12 is enabled, handle it here while allocating scaler
+ * for NV12.
+ */
+ if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
+ need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
+ return -EINVAL;
+ }
+
+ /*
* if plane is being disabled, scaler is no longer required, or force detach
* - free the scaler bound to this plane/crtc
* - in order to do this, update crtc->scaler_usage
@@ -14773,6 +14788,17 @@ static void quirk_backlight_present(struct drm_device *dev)
DRM_INFO("applying backlight present quirk\n");
}
+/* Toshiba Satellite P50-C-18C requires the T12 delay to be at least 800ms,
+ * which is 300 ms greater than the eDP spec T12 minimum.
+ */
+static void quirk_increase_t12_delay(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
+ DRM_INFO("Applying T12 delay quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -14856,6 +14882,9 @@ static struct intel_quirk intel_quirks[] = {
/* Dell Chromebook 11 (2015 version) */
{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
+
+ /* Toshiba Satellite P50-C-18C */
+ { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
};
static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 64fa774c855b..2d42d09428c9 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4418,8 +4418,6 @@ static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
u32 bit;
switch (port->port) {
- case PORT_A:
- return true;
case PORT_B:
bit = SDE_PORTB_HOTPLUG;
break;
@@ -4443,8 +4441,6 @@ static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
u32 bit;
switch (port->port) {
- case PORT_A:
- return true;
case PORT_B:
bit = SDE_PORTB_HOTPLUG_CPT;
break;
@@ -4454,12 +4450,28 @@ static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
case PORT_D:
bit = SDE_PORTD_HOTPLUG_CPT;
break;
+ default:
+ MISSING_CASE(port->port);
+ return false;
+ }
+
+ return I915_READ(SDEISR) & bit;
+}
+
+static bool spt_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ u32 bit;
+
+ switch (port->port) {
+ case PORT_A:
+ bit = SDE_PORTA_HOTPLUG_SPT;
+ break;
case PORT_E:
bit = SDE_PORTE_HOTPLUG_SPT;
break;
default:
- MISSING_CASE(port->port);
- return false;
+ return cpt_digital_port_connected(dev_priv, port);
}
return I915_READ(SDEISR) & bit;
@@ -4511,6 +4523,42 @@ static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
+static bool ilk_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ if (port->port == PORT_A)
+ return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
+ else
+ return ibx_digital_port_connected(dev_priv, port);
+}
+
+static bool snb_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ if (port->port == PORT_A)
+ return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
+ else
+ return cpt_digital_port_connected(dev_priv, port);
+}
+
+static bool ivb_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ if (port->port == PORT_A)
+ return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
+ else
+ return cpt_digital_port_connected(dev_priv, port);
+}
+
+static bool bdw_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ if (port->port == PORT_A)
+ return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
+ else
+ return cpt_digital_port_connected(dev_priv, port);
+}
+
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *intel_dig_port)
{
@@ -4547,16 +4595,25 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
- if (HAS_PCH_IBX(dev_priv))
- return ibx_digital_port_connected(dev_priv, port);
- else if (HAS_PCH_SPLIT(dev_priv))
- return cpt_digital_port_connected(dev_priv, port);
+ if (HAS_GMCH_DISPLAY(dev_priv)) {
+ if (IS_GM45(dev_priv))
+ return gm45_digital_port_connected(dev_priv, port);
+ else
+ return g4x_digital_port_connected(dev_priv, port);
+ }
+
+ if (IS_GEN5(dev_priv))
+ return ilk_digital_port_connected(dev_priv, port);
+ else if (IS_GEN6(dev_priv))
+ return snb_digital_port_connected(dev_priv, port);
+ else if (IS_GEN7(dev_priv))
+ return ivb_digital_port_connected(dev_priv, port);
+ else if (IS_GEN8(dev_priv))
+ return bdw_digital_port_connected(dev_priv, port);
else if (IS_GEN9_LP(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
- else if (IS_GM45(dev_priv))
- return gm45_digital_port_connected(dev_priv, port);
else
- return g4x_digital_port_connected(dev_priv, port);
+ return spt_digital_port_connected(dev_priv, port);
}
static struct edid *
@@ -5121,12 +5178,8 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
PANEL_POWER_DOWN_DELAY_SHIFT;
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) {
- u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
- BXT_POWER_CYCLE_DELAY_SHIFT;
- if (tmp > 0)
- seq->t11_t12 = (tmp - 1) * 1000;
- else
- seq->t11_t12 = 0;
+ seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
+ BXT_POWER_CYCLE_DELAY_SHIFT) * 1000;
} else {
seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
@@ -5177,6 +5230,21 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
intel_pps_dump_state("cur", &cur);
vbt = dev_priv->vbt.edp.pps;
+ /* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
+ * of 500ms appears to be too short. Occasionally the panel
+ * just fails to power back on. Increasing the delay to 800ms
+ * seems sufficient to avoid this problem.
+ */
+ if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
+ vbt.t11_t12 = max_t(u16, vbt.t11_t12, 800 * 10);
+ DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
+ vbt.t11_t12);
+ }
+ /* T11_T12 delay is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). The sw vbt
+ * table, however, multiplies it by 1000 to express it in units of
+ * 100usec, too. */
+ vbt.t11_t12 += 100 * 10;
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
* our hw here, which are all in 100usec. */
@@ -5280,7 +5348,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) {
pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
- pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
+ pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
<< BXT_POWER_CYCLE_DELAY_SHIFT);
} else {
pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
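Putting this hunk's unit conversions together: the quirk floors T11_T12 at 800 ms in the driver's 100 us units, the 100 ms hardware zero-base is added, and the BXT/CNP register write divides back down to 100 ms units. A worked check:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int t11_t12 = 800 * 10;   /* 800 ms quirk, in 100 us units */

        t11_t12 += 100 * 10;               /* hw field is 100 ms zero-based */
        /* BXT/CNP power-cycle-delay field, back in 100 ms units: prints 9 */
        printf("%u\n", DIV_ROUND_UP(t11_t12, 1000));
        return 0;
}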
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index 228ca06d9f0b..b25cd88fc1c5 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -98,13 +98,105 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
}
}
+/*
+ * Set the PWM frequency divider to match the desired frequency in the VBT.
+ * The PWM frequency is calculated as 27 MHz / (F x P).
+ * - Where F = PWM Frequency Pre-Divider value programmed by field 7:0 of the
+ * EDP_BACKLIGHT_FREQ_SET register (DPCD Address 00728h)
+ * - Where P = 2^Pn, where Pn is the value programmed by field 4:0 of the
+ * EDP_PWMGEN_BIT_COUNT register (DPCD Address 00724h)
+ */
+static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
+ u8 pn, pn_min, pn_max;
+
+ /* Find desired value of (F x P)
+ * Note that, if F x P is out of the supported range, the maximum or
+ * minimum value will be applied automatically, so no need to check that.
+ */
+ freq = dev_priv->vbt.backlight.pwm_freq_hz;
+ DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
+ if (!freq) {
+ DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+ return false;
+ }
+
+ fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq);
+
+ /* Use highest possible value of Pn for more granularity of brightness
+ * adjustment while satisfying the conditions below.
+ * - Pn is in the range of Pn_min and Pn_max
+ * - F is in the range of 1 and 255
+ * - FxP is within 25% of desired value.
+ * Note: 25% is an arbitrary value and may need some tweaking.
+ */
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
+ DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
+ return false;
+ }
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
+ DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
+ return false;
+ }
+ pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+ pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+
+ fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
+ fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
+ if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
+ DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
+ return false;
+ }
+
+ for (pn = pn_max; pn >= pn_min; pn--) {
+ f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
+ fxp_actual = f << pn;
+ if (fxp_min <= fxp_actual && fxp_actual <= fxp_max)
+ break;
+ }
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
+ DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+ return false;
+ }
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) {
+ DRM_DEBUG_KMS("Failed to write aux backlight freq\n");
+ return false;
+ }
+ return true;
+}
+
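A userspace dry run of the Pn search above. The 200 Hz VBT frequency and the Pn capability range (4..11) are assumed example inputs; in the driver they come from the VBT and the panel's DPCD registers:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
{
        return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
        unsigned int freq = 200;                     /* pretend VBT value */
        unsigned int fxp = DIV_ROUND_CLOSEST(27000000u, freq);   /* 135000 */
        unsigned int pn_min = 4, pn_max = 11;        /* pretend DPCD caps */
        unsigned int fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
        unsigned int fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
        unsigned int pn;

        /* Largest Pn whose clamped F keeps F*P within +/-25% of the target:
         * prints "Pn=11 F=66 F*P=135168". */
        for (pn = pn_max; pn >= pn_min; pn--) {
                unsigned int f = clamp_u(DIV_ROUND_CLOSEST(fxp, 1u << pn),
                                         1, 255);
                unsigned int actual = f << pn;

                if (fxp_min <= actual && actual <= fxp_max) {
                        printf("Pn=%u F=%u F*P=%u\n", pn, f, actual);
                        break;
                }
        }
        return 0;
}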
+/*
+ * Set the minimum / maximum dynamic brightness percentage. This value is
+ * expressed as a percentage of normal brightness in 5% increments.
+ */
+static bool
+intel_dp_aux_set_dynamic_backlight_percent(struct intel_dp *intel_dp,
+ u32 min, u32 max)
+{
+ u8 dbc[] = { DIV_ROUND_CLOSEST(min, 5), DIV_ROUND_CLOSEST(max, 5) };
+
+ if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET,
+ dbc, sizeof(dbc)) < 0) {
+ DRM_DEBUG_KMS("Failed to write aux DBC brightness level\n");
+ return false;
+ }
+ return true;
+}
+
static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
- uint8_t dpcd_buf = 0;
- uint8_t edp_backlight_mode = 0;
+ uint8_t dpcd_buf, new_dpcd_buf, edp_backlight_mode;
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
@@ -113,18 +205,15 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
return;
}
+ new_dpcd_buf = dpcd_buf;
edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
switch (edp_backlight_mode) {
case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM:
case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET:
case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT:
- dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
- dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
- if (drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf) < 0) {
- DRM_DEBUG_KMS("Failed to write aux backlight mode\n");
- }
+ new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
+ new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
break;
/* Do nothing when it is already DPCD mode */
@@ -133,6 +222,25 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
break;
}
+ if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP)
+ if (intel_dp_aux_set_pwm_freq(connector))
+ new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
+
+ if (i915.enable_dbc &&
+ (intel_dp->edp_dpcd[2] & DP_EDP_DYNAMIC_BACKLIGHT_CAP)) {
+ if (intel_dp_aux_set_dynamic_backlight_percent(intel_dp, 0, 100)) {
+ new_dpcd_buf |= DP_EDP_DYNAMIC_BACKLIGHT_ENABLE;
+ DRM_DEBUG_KMS("Enable dynamic brightness.\n");
+ }
+ }
+
+ if (new_dpcd_buf != dpcd_buf) {
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) {
+ DRM_DEBUG_KMS("Failed to write aux backlight mode\n");
+ }
+ }
+
set_aux_backlight_enable(intel_dp, true);
intel_dp_aux_set_backlight(conn_state, connector->panel.backlight.level);
}
@@ -169,15 +277,66 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
/* Check the eDP Display control capabilities registers to determine if
* the panel can support backlight control over the aux channel
*/
- if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
- (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) &&
- !(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
+ if ((intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP) &&
+ (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)) {
DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
return true;
}
return false;
}
+/*
+ * Heuristic function for deciding whether we should use AUX for backlight
+ * adjustment or not.
+ *
+ * We should use AUX for backlight brightness adjustment if the panel doesn't
+ * support this via the PWM pin, or if using AUX is better than using PWM.
+ *
+ * The heuristic for deciding that the AUX pin is better than the PWM pin is
+ * that the panel supports any of the features listed here:
+ * - Regional backlight brightness adjustment
+ * - Backlight PWM frequency set
+ * - More than 8 bits resolution of brightness level
+ * - Backlight enablement via AUX and not by BL_ENABLE pin
+ *
+ * If none of the above are true, assume that using the PWM pin is better.
+ */
+static bool
+intel_dp_aux_display_control_heuristic(struct intel_connector *connector)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ uint8_t reg_val;
+
+ /* Panel doesn't support adjusting backlight brightness via the PWM pin */
+ if (!(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))
+ return true;
+
+ /* Panel supports regional backlight brightness adjustment */
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_GENERAL_CAP_3,
+ &reg_val) != 1) {
+ DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+ DP_EDP_GENERAL_CAP_3);
+ return false;
+ }
+ if (reg_val > 0)
+ return true;
+
+ /* Panel supports backlight PWM frequency set */
+ if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP)
+ return true;
+
+ /* Panel supports more than 8 bits resolution of brightness level */
+ if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
+ return true;
+
+ /* Panel supports enabling backlight via AUX but not by BL_ENABLE pin */
+ if ((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
+ !(intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP))
+ return true;
+
+ return false;
+}
+
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
@@ -188,6 +347,10 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
if (!intel_dp_aux_display_control_capable(intel_connector))
return -ENODEV;
+ if (i915.enable_dpcd_backlight == -1 &&
+ !intel_dp_aux_display_control_heuristic(intel_connector))
+ return -ENODEV;
+
panel->backlight.setup = intel_dp_aux_setup_backlight;
panel->backlight.enable = intel_dp_aux_enable_backlight;
panel->backlight.disable = intel_dp_aux_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d93efb49a2e2..d17a32437f07 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1858,9 +1858,8 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct drm_i915_private *dev_priv,
- struct intel_rps_client *rps,
- unsigned long submitted);
+void gen6_rps_boost(struct drm_i915_gem_request *rq,
+ struct intel_rps_client *rps);
void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index a4487c5b7e37..24db316e0fd1 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -149,6 +149,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
switch (INTEL_GEN(dev_priv)) {
default:
MISSING_CASE(INTEL_GEN(dev_priv));
+ case 10:
case 9:
return GEN9_LR_CONTEXT_RENDER_SIZE;
case 8:
@@ -291,11 +292,9 @@ cleanup:
*/
int intel_engines_init(struct drm_i915_private *dev_priv)
{
- struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
struct intel_engine_cs *engine;
enum intel_engine_id id, err_id;
- unsigned int mask = 0;
- int err = 0;
+ int err;
for_each_engine(engine, dev_priv, id) {
const struct engine_class_info *class_info =
@@ -306,40 +305,30 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
init = class_info->init_execlists;
else
init = class_info->init_legacy;
- if (!init) {
- kfree(engine);
- dev_priv->engine[id] = NULL;
- continue;
- }
+
+ err = -EINVAL;
+ err_id = id;
+
+ if (GEM_WARN_ON(!init))
+ goto cleanup;
err = init(engine);
- if (err) {
- err_id = id;
+ if (err)
goto cleanup;
- }
GEM_BUG_ON(!engine->submit_request);
- mask |= ENGINE_MASK(id);
}
- /*
- * Catch failures to update intel_engines table when the new engines
- * are added to the driver by a warning and disabling the forgotten
- * engines.
- */
- if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
- device_info->ring_mask = mask;
-
- device_info->num_rings = hweight32(mask);
-
return 0;
cleanup:
for_each_engine(engine, dev_priv, id) {
- if (id >= err_id)
+ if (id >= err_id) {
kfree(engine);
- else
+ dev_priv->engine[id] = NULL;
+ } else {
dev_priv->gt.cleanup_engine(engine);
+ }
}
return err;
}
@@ -821,9 +810,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
- /* WaDisableKillLogic:bxt,skl,kbl,cfl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- ECOCHK_DIS_TLB);
+ /* WaDisableKillLogic:bxt,skl,kbl */
+ if (!IS_COFFEELAKE(dev_priv))
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ ECOCHK_DIS_TLB);
/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
@@ -894,10 +884,9 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT);
- /* WaDisableHDCInvalidation:skl,bxt,kbl */
- if (!IS_COFFEELAKE(dev_priv))
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
+ /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ BDW_DISABLE_HDC_INVALIDATION);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
if (IS_SKYLAKE(dev_priv) ||
@@ -1340,6 +1329,7 @@ void intel_engines_mark_idle(struct drm_i915_private *i915)
for_each_engine(engine, i915, id) {
intel_engine_disarm_breadcrumbs(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
+ tasklet_kill(&engine->irq_tasklet);
engine->no_priolist = false;
}
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index ec6198040381..b953365a3eec 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -542,13 +542,14 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
drm_fb_helper_fini(&ifbdev->helper);
- if (ifbdev->fb) {
+ if (ifbdev->vma) {
mutex_lock(&ifbdev->helper.dev->struct_mutex);
intel_unpin_fb_vma(ifbdev->vma);
mutex_unlock(&ifbdev->helper.dev->struct_mutex);
+ }
+ if (ifbdev->fb)
drm_framebuffer_remove(&ifbdev->fb->base);
- }
kfree(ifbdev);
}
@@ -772,7 +773,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
struct intel_fbdev *ifbdev = dev_priv->fbdev;
struct fb_info *info;
- if (!ifbdev || !ifbdev->fb)
+ if (!ifbdev || !ifbdev->vma)
return;
info = ifbdev->helper.fbdev;
@@ -819,7 +820,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
- if (ifbdev && ifbdev->fb)
+ if (ifbdev)
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
@@ -831,7 +832,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
return;
intel_fbdev_sync(ifbdev);
- if (!ifbdev->fb)
+ if (!ifbdev->vma)
return;
if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7404cf2aac28..699868d81de8 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2071,7 +2071,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
* So to avoid that we reset the context images upon resume. For
* simplicity, we just zero everything out.
*/
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
for_each_engine(engine, dev_priv, id) {
struct intel_context *ce = &ctx->engine[engine->id];
u32 *reg;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 48ea0fca1f72..ee2a349cfe68 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3837,7 +3837,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
uint_fixed_16_16_t downscale_h, downscale_w;
if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
- return u32_to_fixed_16_16(0);
+ return u32_to_fixed16(0);
/* n.b., src is 16.16 fixed point, dst is whole integer */
if (plane->id == PLANE_CURSOR) {
@@ -3861,10 +3861,10 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
dst_h = drm_rect_height(&pstate->base.dst);
}
- fp_w_ratio = fixed_16_16_div(src_w, dst_w);
- fp_h_ratio = fixed_16_16_div(src_h, dst_h);
- downscale_w = max_fixed_16_16(fp_w_ratio, u32_to_fixed_16_16(1));
- downscale_h = max_fixed_16_16(fp_h_ratio, u32_to_fixed_16_16(1));
+ fp_w_ratio = div_fixed16(src_w, dst_w);
+ fp_h_ratio = div_fixed16(src_h, dst_h);
+ downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
+ downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
return mul_fixed16(downscale_w, downscale_h);
}
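The intel_pm.c hunks here are renames of the driver's 16.16 fixed-point helpers (fixed_16_16_div becoming div_fixed16, u32_to_fixed_16_16 becoming u32_to_fixed16, and so on). A simplified userspace mock of the renamed API, minus the kernel versions' overflow handling:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t val; } fp16;   /* stand-in for uint_fixed_16_16_t */

static fp16 u32_to_fixed16(uint32_t v)
{
        return (fp16){ v << 16 };
}

static fp16 div_fixed16(uint32_t n, uint32_t d)
{
        return (fp16){ (uint32_t)(((uint64_t)n << 16) / d) };
}

static fp16 max_fixed16(fp16 a, fp16 b)
{
        return a.val > b.val ? a : b;
}

static uint32_t fixed16_to_u32_round_up(fp16 v)
{
        return (v.val + 0xffff) >> 16;
}

int main(void)
{
        fp16 ratio = div_fixed16(1920, 1280);          /* 1.5 in 16.16 */
        fp16 downscale = max_fixed16(ratio, u32_to_fixed16(1));

        printf("%u\n", fixed16_to_u32_round_up(downscale));  /* prints 2 */
        return 0;
}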
@@ -3872,7 +3872,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
static uint_fixed_16_16_t
skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
{
- uint_fixed_16_16_t pipe_downscale = u32_to_fixed_16_16(1);
+ uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1);
if (!crtc_state->base.enable)
return pipe_downscale;
@@ -3891,10 +3891,10 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
if (!dst_w || !dst_h)
return pipe_downscale;
- fp_w_ratio = fixed_16_16_div(src_w, dst_w);
- fp_h_ratio = fixed_16_16_div(src_h, dst_h);
- downscale_w = max_fixed_16_16(fp_w_ratio, u32_to_fixed_16_16(1));
- downscale_h = max_fixed_16_16(fp_h_ratio, u32_to_fixed_16_16(1));
+ fp_w_ratio = div_fixed16(src_w, dst_w);
+ fp_h_ratio = div_fixed16(src_h, dst_h);
+ downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
+ downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
pipe_downscale = mul_fixed16(downscale_w, downscale_h);
}
@@ -3913,14 +3913,14 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
int crtc_clock, dotclk;
uint32_t pipe_max_pixel_rate;
uint_fixed_16_16_t pipe_downscale;
- uint_fixed_16_16_t max_downscale = u32_to_fixed_16_16(1);
+ uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
if (!cstate->base.enable)
return 0;
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
uint_fixed_16_16_t plane_downscale;
- uint_fixed_16_16_t fp_9_div_8 = fixed_16_16_div(9, 8);
+ uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
int bpp;
if (!intel_wm_plane_visible(cstate,
@@ -3938,7 +3938,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
plane_downscale = mul_fixed16(plane_downscale,
fp_9_div_8);
- max_downscale = max_fixed_16_16(plane_downscale, max_downscale);
+ max_downscale = max_fixed16(plane_downscale, max_downscale);
}
pipe_downscale = skl_pipe_downscale_amount(cstate);
@@ -4276,7 +4276,7 @@ static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp,
return FP_16_16_MAX;
wm_intermediate_val = latency * pixel_rate * cpp;
- ret = fixed_16_16_div_u64(wm_intermediate_val, 1000 * 512);
+ ret = div_fixed16(wm_intermediate_val, 1000 * 512);
return ret;
}
@@ -4294,7 +4294,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
wm_intermediate_val = latency * pixel_rate;
wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
pipe_htotal * 1000);
- ret = mul_u32_fixed_16_16(wm_intermediate_val, plane_blocks_per_line);
+ ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
return ret;
}
@@ -4306,15 +4306,15 @@ intel_get_linetime_us(struct intel_crtc_state *cstate)
uint_fixed_16_16_t linetime_us;
if (!cstate->base.active)
- return u32_to_fixed_16_16(0);
+ return u32_to_fixed16(0);
pixel_rate = cstate->pixel_rate;
if (WARN_ON(pixel_rate == 0))
- return u32_to_fixed_16_16(0);
+ return u32_to_fixed16(0);
crtc_htotal = cstate->base.adjusted_mode.crtc_htotal;
- linetime_us = fixed_16_16_div_u64(crtc_htotal * 1000, pixel_rate);
+ linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
return linetime_us;
}
@@ -4361,7 +4361,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint32_t plane_bytes_per_line;
uint32_t res_blocks, res_lines;
uint8_t cpp;
- uint32_t width = 0, height = 0;
+ uint32_t width = 0;
uint32_t plane_pixel_rate;
uint_fixed_16_16_t y_tile_minimum;
uint32_t y_min_scanlines;
@@ -4390,7 +4390,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (plane->id == PLANE_CURSOR) {
width = intel_pstate->base.crtc_w;
- height = intel_pstate->base.crtc_h;
} else {
/*
* Src coordinates are already rotated by 270 degrees for
@@ -4398,16 +4397,13 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
* GTT mapping), hence no need to account for rotation here.
*/
width = drm_rect_width(&intel_pstate->base.src) >> 16;
- height = drm_rect_height(&intel_pstate->base.src) >> 16;
}
- cpp = fb->format->cpp[0];
+ cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] :
+ fb->format->cpp[0];
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
if (drm_rotation_90_or_270(pstate->rotation)) {
- int cpp = (fb->format->format == DRM_FORMAT_NV12) ?
- fb->format->cpp[1] :
- fb->format->cpp[0];
switch (cpp) {
case 1:
@@ -4434,14 +4430,14 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (y_tiled) {
interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line *
y_min_scanlines, 512);
- plane_blocks_per_line = fixed_16_16_div(interm_pbpl,
+ plane_blocks_per_line = div_fixed16(interm_pbpl,
y_min_scanlines);
} else if (x_tiled) {
interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512);
- plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
+ plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
} else {
interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
- plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
+ plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
}
method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
@@ -4450,35 +4446,35 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
latency,
plane_blocks_per_line);
- y_tile_minimum = mul_u32_fixed_16_16(y_min_scanlines,
- plane_blocks_per_line);
+ y_tile_minimum = mul_u32_fixed16(y_min_scanlines,
+ plane_blocks_per_line);
if (y_tiled) {
- selected_result = max_fixed_16_16(method2, y_tile_minimum);
+ selected_result = max_fixed16(method2, y_tile_minimum);
} else {
uint32_t linetime_us;
- linetime_us = fixed_16_16_to_u32_round_up(
+ linetime_us = fixed16_to_u32_round_up(
intel_get_linetime_us(cstate));
if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
(plane_bytes_per_line / 512 < 1))
selected_result = method2;
else if ((ddb_allocation && ddb_allocation /
- fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1)
- selected_result = min_fixed_16_16(method1, method2);
+ fixed16_to_u32_round_up(plane_blocks_per_line)) >= 1)
+ selected_result = min_fixed16(method1, method2);
else if (latency >= linetime_us)
- selected_result = min_fixed_16_16(method1, method2);
+ selected_result = min_fixed16(method1, method2);
else
selected_result = method1;
}
- res_blocks = fixed_16_16_to_u32_round_up(selected_result) + 1;
+ res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
res_lines = div_round_up_fixed16(selected_result,
plane_blocks_per_line);
if (level >= 1 && level <= 7) {
if (y_tiled) {
- res_blocks += fixed_16_16_to_u32_round_up(y_tile_minimum);
+ res_blocks += fixed16_to_u32_round_up(y_tile_minimum);
res_lines += y_min_scanlines;
} else {
res_blocks++;
@@ -4563,8 +4559,7 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
if (is_fixed16_zero(linetime_us))
return 0;
- linetime_wm = fixed_16_16_to_u32_round_up(mul_u32_fixed_16_16(8,
- linetime_us));
+ linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
/* Display WA #1135: bxt. */
if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled)
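The renamed helpers in the hunks above all operate on a 16.16 fixed-point representation (16 integer bits, 16 fractional bits). A minimal userspace sketch of what div_fixed16(), u32_to_fixed16(), mul_u32_fixed16() and fixed16_to_u32_round_up() compute — the names mirror the kernel's new API, but the bodies here are illustrative only, without the kernel's overflow checks:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t val; } fixed16_t;     /* 16.16 fixed point */

static fixed16_t u32_to_fixed16(uint32_t v)
{
        /* sketch: assumes v fits in 16 integer bits */
        fixed16_t f = { .val = v << 16 };
        return f;
}

static fixed16_t div_fixed16(uint32_t num, uint32_t den)
{
        /* widen to 64 bits so the pre-shift cannot overflow */
        fixed16_t f = { .val = (uint32_t)(((uint64_t)num << 16) / den) };
        return f;
}

static fixed16_t mul_u32_fixed16(uint32_t a, fixed16_t b)
{
        /* integer * 16.16 keeps 16 fractional bits */
        fixed16_t f = { .val = (uint32_t)((uint64_t)a * b.val) };
        return f;
}

static uint32_t fixed16_to_u32_round_up(fixed16_t f)
{
        return (f.val + 0xffff) >> 16;
}

int main(void)
{
        fixed16_t blocks = div_fixed16(300, 7);  /* 300/7 = 42.857... */

        (void)u32_to_fixed16; (void)mul_u32_fixed16;
        printf("%u\n", fixed16_to_u32_round_up(blocks));  /* prints 43 */
        return 0;
}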
@@ -5852,7 +5847,7 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
- if (IS_GEN9(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
limits = (dev_priv->rps.max_freq_softlimit) << 23;
if (val <= dev_priv->rps.min_freq_softlimit)
limits |= (dev_priv->rps.min_freq_softlimit) << 14;
@@ -5994,7 +5989,7 @@ static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
if (val != dev_priv->rps.cur_freq) {
gen6_set_rps_thresholds(dev_priv, val);
- if (IS_GEN9(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
I915_WRITE(GEN6_RPNSWREQ,
GEN9_FREQUENCY(val));
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -6126,47 +6121,35 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
gen6_sanitize_rps_pm_mask(dev_priv, ~0));
}
mutex_unlock(&dev_priv->rps.hw_lock);
-
- spin_lock(&dev_priv->rps.client_lock);
- while (!list_empty(&dev_priv->rps.clients))
- list_del_init(dev_priv->rps.clients.next);
- spin_unlock(&dev_priv->rps.client_lock);
}
-void gen6_rps_boost(struct drm_i915_private *dev_priv,
- struct intel_rps_client *rps,
- unsigned long submitted)
+void gen6_rps_boost(struct drm_i915_gem_request *rq,
+ struct intel_rps_client *rps)
{
+ struct drm_i915_private *i915 = rq->i915;
+ bool boost;
+
/* This is intentionally racy! We peek at the state here, then
* validate inside the RPS worker.
*/
- if (!(dev_priv->gt.awake &&
- dev_priv->rps.enabled &&
- dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
+ if (!i915->rps.enabled)
return;
- /* Force a RPS boost (and don't count it against the client) if
- * the GPU is severely congested.
- */
- if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
- rps = NULL;
-
- spin_lock(&dev_priv->rps.client_lock);
- if (rps == NULL || list_empty(&rps->link)) {
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->rps.interrupts_enabled) {
- dev_priv->rps.client_boost = true;
- schedule_work(&dev_priv->rps.work);
- }
- spin_unlock_irq(&dev_priv->irq_lock);
-
- if (rps != NULL) {
- list_add(&rps->link, &dev_priv->rps.clients);
- rps->boosts++;
- } else
- dev_priv->rps.boosts++;
+ boost = false;
+ spin_lock_irq(&rq->lock);
+ if (!rq->waitboost && !i915_gem_request_completed(rq)) {
+ atomic_inc(&i915->rps.num_waiters);
+ rq->waitboost = true;
+ boost = true;
}
- spin_unlock(&dev_priv->rps.client_lock);
+ spin_unlock_irq(&rq->lock);
+ if (!boost)
+ return;
+
+ if (READ_ONCE(i915->rps.cur_freq) < i915->rps.boost_freq)
+ schedule_work(&i915->rps.work);
+
+ atomic_inc(rps ? &rps->boosts : &i915->rps.boosts);
}
int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
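The reworked gen6_rps_boost() above replaces the per-client boost list with a once-per-request flag plus a global atomic waiter count. A self-contained sketch of the same claim-once-then-count pattern; the locking is reduced to a plain pthread mutex here (an assumed simplification, not the kernel's irq-safe spinlock):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct request {
        pthread_mutex_t lock;
        bool completed;
        bool waitboost;         /* claimed at most once per request */
};

static atomic_int num_waiters;

/* Returns true only for the first caller to boost an incomplete request. */
static bool boost_request(struct request *rq)
{
        bool boost = false;

        pthread_mutex_lock(&rq->lock);
        if (!rq->waitboost && !rq->completed) {
                atomic_fetch_add(&num_waiters, 1);
                rq->waitboost = true;
                boost = true;
        }
        pthread_mutex_unlock(&rq->lock);
        return boost;
}

int main(void)
{
        struct request rq = { PTHREAD_MUTEX_INITIALIZER, false, false };

        /* second call is a no-op: the boost was already claimed */
        printf("%d %d\n", boost_request(&rq), boost_request(&rq)); /* 1 0 */
        printf("waiters=%d\n", atomic_load(&num_waiters));         /* 1 */
        return 0;
}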
@@ -6365,7 +6348,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
- IS_GEN9_BC(dev_priv)) {
+ IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
u32 ddcc_status = 0;
if (sandybridge_pcode_read(dev_priv,
@@ -6378,7 +6361,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
dev_priv->rps.max_freq);
}
- if (IS_GEN9_BC(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/* Store the frequency values in 16.66 MHZ units, which is
* the natural hardware unit for SKL
*/
@@ -6684,7 +6667,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
/* convert DDR frequency from units of 266.6MHz to bandwidth */
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
- if (IS_GEN9_BC(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/* Convert GT frequency to 50 HZ units */
min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
@@ -6702,7 +6685,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
int diff = max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
- if (IS_GEN9_BC(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/*
* ring_freq = 2 * GT. ring_freq is in 100MHz units
* No floor required for ring frequency on SKL.
@@ -7833,7 +7816,7 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
} else if (INTEL_GEN(dev_priv) >= 9) {
gen9_enable_rc6(dev_priv);
gen9_enable_rps(dev_priv);
- if (IS_GEN9_BC(dev_priv))
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
gen6_update_ring_freq(dev_priv);
} else if (IS_BROADWELL(dev_priv)) {
gen8_enable_rps(dev_priv);
@@ -9078,7 +9061,7 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- if (IS_GEN9(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
GEN9_FREQ_SCALER);
else if (IS_CHERRYVIEW(dev_priv))
@@ -9091,7 +9074,7 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- if (IS_GEN9(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
GT_FREQUENCY_MULTIPLIER);
else if (IS_CHERRYVIEW(dev_priv))
@@ -9113,7 +9096,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
struct drm_i915_gem_request *req = boost->req;
if (!i915_gem_request_completed(req))
- gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
+ gen6_rps_boost(req, NULL);
i915_gem_request_put(req);
kfree(boost);
@@ -9142,11 +9125,10 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
mutex_init(&dev_priv->rps.hw_lock);
- spin_lock_init(&dev_priv->rps.client_lock);
INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
__intel_autoenable_gt_powersave);
- INIT_LIST_HEAD(&dev_priv->rps.clients);
+ atomic_set(&dev_priv->rps.num_waiters, 0);
dev_priv->pm.suspended = false;
atomic_set(&dev_priv->pm.wakeref_count, 0);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index acd1da9b62a3..5224b7abb8a3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2140,7 +2140,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
- num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
+ num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
if (INTEL_GEN(dev_priv) >= 8) {
engine->emit_breadcrumb_sz += num_rings * 6;
} else {
@@ -2184,8 +2184,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
engine->semaphore.signal = gen8_rcs_signal;
- num_rings =
- hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
+ num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
engine->emit_breadcrumb_sz += num_rings * 8;
}
} else if (INTEL_GEN(dev_priv) >= 6) {
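Both ringbuffer hunks replace hweight32(ring_mask) - 1 with a precomputed num_rings - 1. hweight32() is simply a population count, so the two are equivalent as long as num_rings is kept in sync with the mask — a quick standalone check, with __builtin_popcount() standing in for hweight32():

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t ring_mask = 0x7;       /* e.g. three engine bits set */
        int num_rings = 3;              /* assumed kept in sync with the mask */

        assert(__builtin_popcount(ring_mask) == num_rings);
        assert(__builtin_popcount(ring_mask) - 1 == num_rings - 1);
        return 0;
}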
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6aa20ac8cde3..d33c93444c0d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -121,6 +121,7 @@ struct intel_engine_hangcheck {
unsigned long action_timestamp;
int deadlock;
struct intel_instdone instdone;
+ struct drm_i915_gem_request *active_request;
bool stalled;
};
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index efe80ed5fd4d..f630d632a976 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -341,6 +341,59 @@ static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}
+static void gen9_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int id = power_well->id;
+
+	/* Timeout for PW1: 10 us, AUX: not specified, other PWs: 20 us. */
+ WARN_ON(intel_wait_for_register(dev_priv,
+ HSW_PWR_WELL_DRIVER,
+ SKL_POWER_WELL_STATE(id),
+ SKL_POWER_WELL_STATE(id),
+ 1));
+}
+
+static u32 gen9_power_well_requesters(struct drm_i915_private *dev_priv, int id)
+{
+ u32 req_mask = SKL_POWER_WELL_REQ(id);
+ u32 ret;
+
+ ret = I915_READ(HSW_PWR_WELL_BIOS) & req_mask ? 1 : 0;
+ ret |= I915_READ(HSW_PWR_WELL_DRIVER) & req_mask ? 2 : 0;
+ ret |= I915_READ(HSW_PWR_WELL_KVMR) & req_mask ? 4 : 0;
+ ret |= I915_READ(HSW_PWR_WELL_DEBUG) & req_mask ? 8 : 0;
+
+ return ret;
+}
+
+static void gen9_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int id = power_well->id;
+ bool disabled;
+ u32 reqs;
+
+ /*
+	 * Bspec doesn't require waiting for PWs to get disabled, but do so
+	 * anyway as a paranoia check. The known cases where a PW will be
+	 * forced on:
+ * - a KVMR request on any power well via the KVMR request register
+ * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
+ * DEBUG request registers
+ * Skip the wait in case any of the request bits are set and print a
+ * diagnostic message.
+ */
+ wait_for((disabled = !(I915_READ(HSW_PWR_WELL_DRIVER) &
+ SKL_POWER_WELL_STATE(id))) ||
+ (reqs = gen9_power_well_requesters(dev_priv, id)), 1);
+ if (disabled)
+ return;
+
+ DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
+ power_well->name,
+ !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
+}
+
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
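gen9_power_well_requesters() above packs the four possible requesters into bits 0..3 (BIOS, driver, KVMR, debug), which the DRM_DEBUG_KMS call then unpacks with !!(reqs & bit). A standalone sketch of the same encode/decode (bit assignments mirror the function above; register reads are replaced by a constant):

#include <stdint.h>
#include <stdio.h>

#define REQ_BIOS   (1u << 0)
#define REQ_DRIVER (1u << 1)
#define REQ_KVMR   (1u << 2)
#define REQ_DEBUG  (1u << 3)

static void report_forced_on(const char *name, uint32_t reqs)
{
        /* !! collapses each masked bit to 0 or 1 for printing */
        printf("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
               name,
               !!(reqs & REQ_BIOS), !!(reqs & REQ_DRIVER),
               !!(reqs & REQ_KVMR), !!(reqs & REQ_DEBUG));
}

int main(void)
{
        /* e.g. a DMC request shows up via the BIOS and DEBUG registers */
        report_forced_on("PW1", REQ_BIOS | REQ_DEBUG);
        return 0;
}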
@@ -549,7 +602,9 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
"DC9 already programmed to be enabled.\n");
WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
"DC5 still not disabled to enable DC9.\n");
- WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
+ WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER) &
+ SKL_POWER_WELL_REQ(SKL_DISP_PW_2),
+ "Power well 2 on.\n");
WARN_ONCE(intel_irqs_enabled(dev_priv),
"Interrupts not disabled yet.\n");
@@ -744,45 +799,6 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
-static void
-gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum skl_disp_power_wells power_well_id = power_well->id;
- u32 val;
- u32 mask;
-
- mask = SKL_POWER_WELL_REQ(power_well_id);
-
- val = I915_READ(HSW_PWR_WELL_KVMR);
- if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
- power_well->name))
- I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);
-
- val = I915_READ(HSW_PWR_WELL_BIOS);
- val |= I915_READ(HSW_PWR_WELL_DEBUG);
-
- if (!(val & mask))
- return;
-
- /*
- * DMC is known to force on the request bits for power well 1 on SKL
- * and BXT and the misc IO power well on SKL but we don't expect any
- * other request bits to be set, so WARN for those.
- */
- if (power_well_id == SKL_DISP_PW_1 ||
- (IS_GEN9_BC(dev_priv) &&
- power_well_id == SKL_DISP_PW_MISC_IO))
- DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
- "by DMC\n", power_well->name);
- else
- WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
- power_well->name);
-
- I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
- I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
-}
-
static void skl_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
@@ -846,6 +862,8 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
check_fuse_status = true;
}
+
+ gen9_wait_for_power_well_enable(dev_priv, power_well);
} else {
if (enable_requested) {
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
@@ -853,14 +871,9 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
}
- gen9_sanitize_power_well_requests(dev_priv, power_well);
+ gen9_wait_for_power_well_disable(dev_priv, power_well);
}
- if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
- 1))
- DRM_ERROR("%s %s timeout\n",
- power_well->name, enable ? "enable" : "disable");
-
if (check_fuse_status) {
if (power_well->id == SKL_DISP_PW_1) {
if (intel_wait_for_register(dev_priv,
@@ -2479,7 +2492,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int requested_dc;
int max_dc;
- if (IS_GEN9_BC(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
max_dc = 2;
mask = 0;
} else if (IS_GEN9_LP(dev_priv)) {
@@ -2694,13 +2707,18 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
- intel_power_well_disable(dev_priv, well);
-
+ /*
+	 * BSpec says to keep the MISC IO power well enabled here and to only
+	 * remove our request for power well 1.
+	 * Note that even though the driver's request is removed, power well 1
+	 * may stay enabled after this due to DMC's own request on it.
+ */
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock);
+
+ usleep_range(10, 30); /* 10 us delay per Bspec */
}
void bxt_display_core_init(struct drm_i915_private *dev_priv,
@@ -2751,13 +2769,19 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
/* The spec doesn't call for removing the reset handshake flag */
- /* Disable PG1 */
+ /*
+ * Disable PW1 (PG1).
+	 * Note that even though the driver's request is removed, power well 1
+ * may stay enabled after this due to DMC's own request on it.
+ */
mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock);
+
+ usleep_range(10, 30); /* 10 us delay per Bspec */
}
#define CNL_PROCMON_IDX(val) \
@@ -2821,7 +2845,10 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
val |= CL_POWER_DOWN_ENABLE;
I915_WRITE(CNL_PORT_CL1CM_DW5, val);
- /* 4. Enable Power Well 1 (PG1) and Aux IO Power */
+ /*
+ * 4. Enable Power Well 1 (PG1).
+ * The AUX IO power wells will be enabled on demand.
+ */
mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_enable(dev_priv, well);
@@ -2853,12 +2880,18 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
/* 3. Disable CD clock */
cnl_uninit_cdclk(dev_priv);
- /* 4. Disable Power Well 1 (PG1) and Aux IO Power */
+ /*
+ * 4. Disable Power Well 1 (PG1).
+ * The AUX IO power wells are toggled on demand, so they are already
+ * disabled at this point.
+ */
mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock);
+ usleep_range(10, 30); /* 10 us delay per Bspec */
+
/* 5. Disable Comp */
val = I915_READ(CHICKEN_MISC_2);
val |= COMP_PWR_DOWN;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 85d9ff361e74..e58a47db9a9d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1344,7 +1344,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_CPT)
+ if (HAS_PCH_CPT(dev_priv))
sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 0c650c2cbca8..94f9a1332dbf 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -262,7 +262,7 @@ skl_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (IS_GEMINILAKE(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
PLANE_COLOR_PIPE_GAMMA_ENABLE |
PLANE_COLOR_PIPE_CSC_ENABLE |
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 9882724bc2b6..deb4430541cf 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -643,7 +643,7 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{ .start = (s), .end = (e), .domains = (d) }
#define HAS_FWTABLE(dev_priv) \
- (IS_GEN9(dev_priv) || \
+ (INTEL_GEN(dev_priv) >= 9 || \
IS_CHERRYVIEW(dev_priv) || \
IS_VALLEYVIEW(dev_priv))
@@ -1072,7 +1072,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
}
- if (IS_GEN9(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
@@ -1719,6 +1719,17 @@ bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
return intel_get_gpu_reset(dev_priv) != NULL;
}
+/*
+ * When GuC submission is enabled, GuC manages ELSP and can initiate the
+ * engine reset too. For now, fall back to a full GPU reset whenever
+ * GuC submission is enabled.
+ */
+bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
+{
+ return (dev_priv->info.has_reset_engine &&
+ !dev_priv->guc.execbuf_client &&
+ i915.reset >= 2);
+}
+
int intel_guc_reset(struct drm_i915_private *dev_priv)
{
int ret;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index d15cc9d3a5cd..89dc25a5a53b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -246,9 +246,9 @@ static int igt_dmabuf_export_vmap(void *arg)
i915_gem_object_put(obj);
ptr = dma_buf_vmap(dmabuf);
- if (IS_ERR(ptr)) {
- err = PTR_ERR(ptr);
- pr_err("dma_buf_vmap failed with err=%d\n", err);
+ if (!ptr) {
+ pr_err("dma_buf_vmap failed\n");
+ err = -ENOMEM;
goto out;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 50710e3f1caa..6b132caffa18 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -197,6 +197,9 @@ static int lowlevel_hole(struct drm_i915_private *i915,
{
I915_RND_STATE(seed_prng);
unsigned int size;
+ struct i915_vma mock_vma;
+
+ memset(&mock_vma, 0, sizeof(struct i915_vma));
/* Keep creating larger objects until one cannot fit into the hole */
for (size = 12; (hole_end - hole_start) >> size; size++) {
@@ -255,8 +258,11 @@ static int lowlevel_hole(struct drm_i915_private *i915,
vm->allocate_va_range(vm, addr, BIT_ULL(size)))
break;
- vm->insert_entries(vm, obj->mm.pages, addr,
- I915_CACHE_NONE, 0);
+ mock_vma.pages = obj->mm.pages;
+ mock_vma.node.size = BIT_ULL(size);
+ mock_vma.node.start = addr;
+
+ vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
}
count = n;
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index fb9072d5877f..2e86ec136b35 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -186,16 +186,20 @@ static int igt_vma_create(void *arg)
goto end;
}
- list_for_each_entry_safe(ctx, cn, &contexts, link)
+ list_for_each_entry_safe(ctx, cn, &contexts, link) {
+ list_del_init(&ctx->link);
mock_context_close(ctx);
+ }
}
end:
/* Final pass to lookup all created contexts */
err = create_vmas(i915, &objects, &contexts);
out:
- list_for_each_entry_safe(ctx, cn, &contexts, link)
+ list_for_each_entry_safe(ctx, cn, &contexts, link) {
+ list_del_init(&ctx->link);
mock_context_close(ctx);
+ }
list_for_each_entry_safe(obj, on, &objects, st_link)
i915_gem_object_put(obj);
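Both loops now unlink each context with list_del_init() before closing it, so a later pass over the (now empty) list cannot touch a freed entry. A minimal sketch of the same teardown pattern with a tiny circular doubly linked list — hand-rolled here instead of the kernel's <linux/list.h>:

#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
}

static void list_del_init(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        list_init(n);   /* entry is now self-linked: safe to unlink again */
}

struct ctx { struct list_head link; };

int main(void)
{
        struct list_head contexts;
        struct list_head *pos, *tmp;

        list_init(&contexts);
        for (int i = 0; i < 3; i++) {
                struct ctx *c = malloc(sizeof(*c));
                list_add_tail(&c->link, &contexts);
        }

        /* safe teardown: remember the successor, unlink, then free */
        for (pos = contexts.next; pos != &contexts; pos = tmp) {
                tmp = pos->next;
                list_del_init(pos);
                free((struct ctx *)pos);  /* link is the first member */
        }

        printf("empty=%d\n", contexts.next == &contexts); /* empty=1 */
        return 0;
}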
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index aa31d6c0cdfb..7096c3911cd3 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -316,6 +316,56 @@ static int igt_global_reset(void *arg)
GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ wake_up_all(&i915->gpu_error.reset_queue);
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ err = -EIO;
+
+ return err;
+}
+
+static int igt_reset_engine(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int reset_count, reset_engine_count;
+ int err = 0;
+
+	/* Check that we can issue a per-engine reset on all engines */
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ for_each_engine(engine, i915, id) {
+ set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags);
+ reset_count = i915_reset_count(&i915->gpu_error);
+ reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
+ engine);
+
+ err = i915_reset_engine(engine);
+ if (err) {
+ pr_err("i915_reset_engine failed\n");
+ break;
+ }
+
+ if (i915_reset_count(&i915->gpu_error) != reset_count) {
+ pr_err("Full GPU reset recorded! (engine reset expected)\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (i915_reset_engine_count(&i915->gpu_error, engine) ==
+ reset_engine_count) {
+ pr_err("No %s engine reset recorded!\n", engine->name);
+ err = -EINVAL;
+ break;
+ }
+
+ clear_bit(I915_RESET_ENGINE + engine->id,
+ &i915->gpu_error.flags);
+ }
+
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
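The new igt_reset_engine() selftest snapshots both reset counters up front and asserts on their deltas afterwards: the per-engine counter must advance while the global one must not. The bare skeleton of that assertion style, as a standalone sketch with stubbed counters:

#include <stdio.h>

static unsigned int global_resets, engine_resets;

static void reset_engine(void) { engine_resets++; }    /* stub */

int main(void)
{
        unsigned int g = global_resets, e = engine_resets;

        reset_engine();

        /* the selftest's invariant: engine counter moved, global did not */
        if (global_resets != g)
                printf("Full GPU reset recorded! (engine reset expected)\n");
        else if (engine_resets == e)
                printf("No engine reset recorded!\n");
        else
                printf("ok\n");
        return 0;
}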
@@ -404,6 +454,7 @@ fini:
unlock:
mutex_unlock(&i915->drm.struct_mutex);
clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ wake_up_all(&i915->gpu_error.reset_queue);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
@@ -519,11 +570,117 @@ fini:
unlock:
mutex_unlock(&i915->drm.struct_mutex);
clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ wake_up_all(&i915->gpu_error.reset_queue);
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return -EIO;
+
+ return err;
+}
+
+static int igt_render_engine_reset_fallback(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine = i915->engine[RCS];
+ struct hang h;
+ struct drm_i915_gem_request *rq;
+ unsigned int reset_count, reset_engine_count;
+ int err = 0;
+
+	/* Check that a failed engine reset falls back to a full GPU reset */
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ mutex_lock(&i915->drm.struct_mutex);
+
+ err = hang_init(&h, i915);
+ if (err)
+ goto err_unlock;
+
+ rq = hang_create_request(&h, engine, i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_fini;
+ }
+
+ i915_gem_request_get(rq);
+ __i915_add_request(rq, true);
+
+	/* make the engine reset fail */
+ rq->fence.error = -EIO;
+
+ if (!wait_for_hang(&h, rq)) {
+ pr_err("Failed to start request %x\n", rq->fence.seqno);
+ err = -EIO;
+ goto err_request;
+ }
+
+ reset_engine_count = i915_reset_engine_count(&i915->gpu_error, engine);
+ reset_count = fake_hangcheck(rq);
+
+ /* unlock since we'll call handle_error */
+ mutex_unlock(&i915->drm.struct_mutex);
+ clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ wake_up_all(&i915->gpu_error.reset_queue);
+
+ i915_handle_error(i915, intel_engine_flag(engine), "live test");
+
+ if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+ reset_engine_count) {
+ pr_err("render engine reset recorded! (full reset expected)\n");
+ err = -EINVAL;
+ goto out_rq;
+ }
+
+ if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ pr_err("No full GPU reset recorded!\n");
+ err = -EINVAL;
+ goto out_rq;
+ }
+
+ /*
+	 * Because fence.error = -EIO causes the full reset to set the wedged
+	 * flag, do one more full reset to re-enable the hw.
+ */
+ if (i915_terminally_wedged(&i915->gpu_error)) {
+ set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ rq->fence.error = 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
+ i915_reset(i915);
+ GEM_BUG_ON(test_bit(I915_RESET_HANDOFF,
+ &i915->gpu_error.flags));
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ pr_err("No full GPU reset recorded!\n");
+ err = -EINVAL;
+ goto out_rq;
+ }
+ }
+
+out_rq:
+ i915_gem_request_put(rq);
+ hang_fini(&h);
+out_backoff:
+ clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ wake_up_all(&i915->gpu_error.reset_queue);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
return err;
+
+err_request:
+ i915_gem_request_put(rq);
+err_fini:
+ hang_fini(&h);
+err_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ goto out_backoff;
}
int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
@@ -531,8 +688,10 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(igt_hang_sanitycheck),
SUBTEST(igt_global_reset),
+ SUBTEST(igt_reset_engine),
SUBTEST(igt_wait_reset),
SUBTEST(igt_reset_queue),
+ SUBTEST(igt_render_engine_reset_fallback),
};
if (!intel_has_gpu_reset(i915))
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index f8b9cc212b02..9c7c68181f82 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -48,7 +48,7 @@ mock_context(struct drm_i915_private *i915,
if (!ctx->vma_lut.ht)
goto err_free;
- ret = ida_simple_get(&i915->context_hw_ida,
+ ret = ida_simple_get(&i915->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0)
goto err_vma_ht;
@@ -86,3 +86,12 @@ void mock_context_close(struct i915_gem_context *ctx)
i915_gem_context_put(ctx);
}
+
+void mock_init_contexts(struct drm_i915_private *i915)
+{
+ INIT_LIST_HEAD(&i915->contexts.list);
+ ida_init(&i915->contexts.hw_ida);
+
+ INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
+ init_llist_head(&i915->contexts.free_list);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.h b/drivers/gpu/drm/i915/selftests/mock_context.h
index 2427e5c0916a..383941a61124 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.h
+++ b/drivers/gpu/drm/i915/selftests/mock_context.h
@@ -25,6 +25,8 @@
#ifndef __MOCK_CONTEXT_H
#define __MOCK_CONTEXT_H
+void mock_init_contexts(struct drm_i915_private *i915);
+
struct i915_gem_context *
mock_context(struct drm_i915_private *i915,
const char *name);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 627e2aa09766..47613d20bba8 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -57,11 +57,12 @@ static void mock_device_release(struct drm_device *dev)
cancel_delayed_work_sync(&i915->gt.retire_work);
cancel_delayed_work_sync(&i915->gt.idle_work);
+ flush_workqueue(i915->wq);
mutex_lock(&i915->drm.struct_mutex);
for_each_engine(engine, i915, id)
mock_engine_free(engine);
- i915_gem_context_fini(i915);
+ i915_gem_contexts_fini(i915);
mutex_unlock(&i915->drm.struct_mutex);
drain_workqueue(i915->wq);
@@ -160,7 +161,7 @@ struct drm_i915_private *mock_gem_device(void)
INIT_LIST_HEAD(&i915->mm.unbound_list);
INIT_LIST_HEAD(&i915->mm.bound_list);
- ida_init(&i915->context_hw_ida);
+ mock_init_contexts(i915);
INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler);
INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index a61309c7cb3e..f2118cf535a0 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -33,8 +33,7 @@ static void mock_insert_page(struct i915_address_space *vm,
}
static void mock_insert_entries(struct i915_address_space *vm,
- struct sg_table *st,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level level, u32 flags)
{
}
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index bf2e5be1ab30..e37b55a23a65 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -1,4 +1,5 @@
-mediatek-drm-y := mtk_disp_ovl.o \
+mediatek-drm-y := mtk_disp_color.o \
+ mtk_disp_ovl.o \
mtk_disp_rdma.o \
mtk_drm_crtc.o \
mtk_drm_ddp.o \
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
new file mode 100644
index 000000000000..ef79a6d55646
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_COLOR_CFG_MAIN 0x0400
+#define DISP_COLOR_START_MT2701 0x0f00
+#define DISP_COLOR_START_MT8173 0x0c00
+#define DISP_COLOR_START(comp) ((comp)->data->color_offset)
+#define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50)
+#define DISP_COLOR_HEIGHT(comp) (DISP_COLOR_START(comp) + 0x54)
+
+#define COLOR_BYPASS_ALL BIT(7)
+#define COLOR_SEQ_SEL BIT(13)
+
+struct mtk_disp_color_data {
+ unsigned int color_offset;
+};
+
+/**
+ * struct mtk_disp_color - DISP_COLOR driver structure
+ * @ddp_comp: structure containing type enum and hardware resources
+ * @crtc: associated crtc to report irq events to
+ * @data: per-SoC data selecting the DISP_COLOR register offset
+ */
+struct mtk_disp_color {
+ struct mtk_ddp_comp ddp_comp;
+ struct drm_crtc *crtc;
+ const struct mtk_disp_color_data *data;
+};
+
+static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
+{
+ return container_of(comp, struct mtk_disp_color, ddp_comp);
+}
+
+static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc)
+{
+ struct mtk_disp_color *color = comp_to_color(comp);
+
+ writel(w, comp->regs + DISP_COLOR_WIDTH(color));
+ writel(h, comp->regs + DISP_COLOR_HEIGHT(color));
+}
+
+static void mtk_color_start(struct mtk_ddp_comp *comp)
+{
+ struct mtk_disp_color *color = comp_to_color(comp);
+
+ writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
+ comp->regs + DISP_COLOR_CFG_MAIN);
+ writel(0x1, comp->regs + DISP_COLOR_START(color));
+}
+
+static const struct mtk_ddp_comp_funcs mtk_disp_color_funcs = {
+ .config = mtk_color_config,
+ .start = mtk_color_start,
+};
+
+static int mtk_disp_color_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct mtk_disp_color *priv = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register component %s: %d\n",
+ dev->of_node->full_name, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mtk_disp_color_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct mtk_disp_color *priv = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+
+ mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
+}
+
+static const struct component_ops mtk_disp_color_component_ops = {
+ .bind = mtk_disp_color_bind,
+ .unbind = mtk_disp_color_unbind,
+};
+
+static int mtk_disp_color_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_disp_color *priv;
+ int comp_id;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_COLOR);
+ if (comp_id < 0) {
+ dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
+ return comp_id;
+ }
+
+ ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
+ &mtk_disp_color_funcs);
+ if (ret) {
+ dev_err(dev, "Failed to initialize component: %d\n", ret);
+ return ret;
+ }
+
+ priv->data = of_device_get_match_data(dev);
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = component_add(dev, &mtk_disp_color_component_ops);
+ if (ret)
+ dev_err(dev, "Failed to add component: %d\n", ret);
+
+ return ret;
+}
+
+static int mtk_disp_color_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &mtk_disp_color_component_ops);
+
+ return 0;
+}
+
+static const struct mtk_disp_color_data mt2701_color_driver_data = {
+ .color_offset = DISP_COLOR_START_MT2701,
+};
+
+static const struct mtk_disp_color_data mt8173_color_driver_data = {
+ .color_offset = DISP_COLOR_START_MT8173,
+};
+
+static const struct of_device_id mtk_disp_color_driver_dt_match[] = {
+ { .compatible = "mediatek,mt2701-disp-color",
+ .data = &mt2701_color_driver_data},
+ { .compatible = "mediatek,mt8173-disp-color",
+ .data = &mt8173_color_driver_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_disp_color_driver_dt_match);
+
+struct platform_driver mtk_disp_color_driver = {
+ .probe = mtk_disp_color_probe,
+ .remove = mtk_disp_color_remove,
+ .driver = {
+ .name = "mediatek-disp-color",
+ .owner = THIS_MODULE,
+ .of_match_table = mtk_disp_color_driver_dt_match,
+ },
+};
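The new standalone driver keeps the MT2701/MT8173 difference out of the code path by hanging a per-SoC data struct off the OF match table and fetching it with of_device_get_match_data(). A userspace approximation of the same lookup, with plain strings standing in for OF compatibles:

#include <stdio.h>
#include <string.h>

struct color_data { unsigned int color_offset; };

static const struct color_data mt2701_data = { .color_offset = 0x0f00 };
static const struct color_data mt8173_data = { .color_offset = 0x0c00 };

struct of_match { const char *compatible; const void *data; };

static const struct of_match match_table[] = {
        { "mediatek,mt2701-disp-color", &mt2701_data },
        { "mediatek,mt8173-disp-color", &mt8173_data },
        { NULL, NULL },
};

/* stand-in for of_device_get_match_data(): walk the table */
static const void *get_match_data(const char *compatible)
{
        for (const struct of_match *m = match_table; m->compatible; m++)
                if (!strcmp(m->compatible, compatible))
                        return m->data;
        return NULL;
}

int main(void)
{
        const struct color_data *d =
                get_match_data("mediatek,mt8173-disp-color");

        printf("DISP_COLOR_START = 0x%04x\n", d->color_offset); /* 0x0c00 */
        return 0;
}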
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index a14d7d64d7b1..35bc5babdbf7 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -42,9 +42,12 @@
#define OVL_RDMA_MEM_GMC 0x40402020
#define OVL_CON_BYTE_SWAP BIT(24)
+#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
#define OVL_CON_CLRFMT_RGB (1 << 12)
#define OVL_CON_CLRFMT_RGBA8888 (2 << 12)
#define OVL_CON_CLRFMT_ARGB8888 (3 << 12)
+#define OVL_CON_CLRFMT_UYVY (4 << 12)
+#define OVL_CON_CLRFMT_YUYV (5 << 12)
#define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
0 : OVL_CON_CLRFMT_RGB)
#define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
@@ -176,6 +179,10 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP;
+ case DRM_FORMAT_UYVY:
+ return OVL_CON_CLRFMT_UYVY | OVL_CON_MTX_YUV_TO_RGB;
+ case DRM_FORMAT_YUYV:
+ return OVL_CON_CLRFMT_YUYV | OVL_CON_MTX_YUV_TO_RGB;
}
}
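ovl_fmt_convert() maps a DRM fourcc to the OVL_CON_* register bits, and the two new YUV cases additionally OR in the YUV-to-RGB matrix select. A compact sketch of that mapping style — the register values mirror the #defines above, but the fourcc codes are constructed inline rather than taken from drm_fourcc.h:

#include <stdint.h>
#include <stdio.h>

#define FOURCC(a, b, c, d) \
        ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
         ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

#define FMT_UYVY FOURCC('U', 'Y', 'V', 'Y')
#define FMT_YUYV FOURCC('Y', 'U', 'Y', 'V')

#define CON_MTX_YUV_TO_RGB (6u << 16)   /* color matrix select */
#define CON_CLRFMT_UYVY    (4u << 12)
#define CON_CLRFMT_YUYV    (5u << 12)

static uint32_t fmt_convert(uint32_t fourcc)
{
        switch (fourcc) {
        case FMT_UYVY:
                return CON_CLRFMT_UYVY | CON_MTX_YUV_TO_RGB;
        case FMT_YUYV:
                return CON_CLRFMT_YUYV | CON_MTX_YUV_TO_RGB;
        default:
                return 0;       /* unhandled format */
        }
}

int main(void)
{
        printf("0x%08x\n", fmt_convert(FMT_YUYV)); /* 0x00065000 */
        return 0;
}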
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index fc65c57dda8c..1f0ef17aa455 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -561,6 +561,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
sizeof(*mtk_crtc->ddp_comp),
GFP_KERNEL);
+ if (!mtk_crtc->ddp_comp)
+ return -ENOMEM;
mtk_crtc->mutex = mtk_disp_mutex_get(priv->mutex_dev, pipe);
if (IS_ERR(mtk_crtc->mutex)) {
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 8b52416b6e41..07d7ea2268ef 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -38,13 +38,6 @@
#define DISP_REG_UFO_START 0x0000
-#define DISP_COLOR_CFG_MAIN 0x0400
-#define DISP_COLOR_START_MT2701 0x0f00
-#define DISP_COLOR_START_MT8173 0x0c00
-#define DISP_COLOR_START(comp) ((comp)->data->color_offset)
-#define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50)
-#define DISP_COLOR_HEIGHT(comp) (DISP_COLOR_START(comp) + 0x54)
-
#define DISP_AAL_EN 0x0000
#define DISP_AAL_SIZE 0x0030
@@ -55,9 +48,6 @@
#define LUT_10BIT_MASK 0x03ff
-#define COLOR_BYPASS_ALL BIT(7)
-#define COLOR_SEQ_SEL BIT(13)
-
#define OD_RELAYMODE BIT(0)
#define UFO_BYPASS BIT(2)
@@ -82,20 +72,6 @@
#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4)
#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0)
-struct mtk_disp_color_data {
- unsigned int color_offset;
-};
-
-struct mtk_disp_color {
- struct mtk_ddp_comp ddp_comp;
- const struct mtk_disp_color_data *data;
-};
-
-static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
-{
- return container_of(comp, struct mtk_disp_color, ddp_comp);
-}
-
void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
unsigned int CFG)
{
@@ -119,25 +95,6 @@ void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
}
}
-static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
-{
- struct mtk_disp_color *color = comp_to_color(comp);
-
- writel(w, comp->regs + DISP_COLOR_WIDTH(color));
- writel(h, comp->regs + DISP_COLOR_HEIGHT(color));
-}
-
-static void mtk_color_start(struct mtk_ddp_comp *comp)
-{
- struct mtk_disp_color *color = comp_to_color(comp);
-
- writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
- comp->regs + DISP_COLOR_CFG_MAIN);
- writel(0x1, comp->regs + DISP_COLOR_START(color));
-}
-
static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc)
@@ -229,11 +186,6 @@ static const struct mtk_ddp_comp_funcs ddp_gamma = {
.stop = mtk_gamma_stop,
};
-static const struct mtk_ddp_comp_funcs ddp_color = {
- .config = mtk_color_config,
- .start = mtk_color_start,
-};
-
static const struct mtk_ddp_comp_funcs ddp_od = {
.config = mtk_od_config,
.start = mtk_od_start,
@@ -268,8 +220,8 @@ struct mtk_ddp_comp_match {
static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, &ddp_aal },
[DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
- [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color },
- [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color },
+ [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL },
+ [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL },
[DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
[DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL },
[DDP_COMPONENT_DSI1] = { MTK_DSI, 1, NULL },
@@ -286,22 +238,6 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
};
-static const struct mtk_disp_color_data mt2701_color_driver_data = {
- .color_offset = DISP_COLOR_START_MT2701,
-};
-
-static const struct mtk_disp_color_data mt8173_color_driver_data = {
- .color_offset = DISP_COLOR_START_MT8173,
-};
-
-static const struct of_device_id mtk_disp_color_driver_dt_match[] = {
- { .compatible = "mediatek,mt2701-disp-color",
- .data = &mt2701_color_driver_data},
- { .compatible = "mediatek,mt8173-disp-color",
- .data = &mt8173_color_driver_data},
- {},
-};
-
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type)
{
@@ -324,23 +260,11 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
enum mtk_ddp_comp_type type;
struct device_node *larb_node;
struct platform_device *larb_pdev;
- const struct of_device_id *match;
- struct mtk_disp_color *color;
if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
return -EINVAL;
type = mtk_ddp_matches[comp_id].type;
- if (type == MTK_DISP_COLOR) {
- devm_kfree(dev, comp);
- color = devm_kzalloc(dev, sizeof(*color), GFP_KERNEL);
- if (!color)
- return -ENOMEM;
-
- match = of_match_node(mtk_disp_color_driver_dt_match, node);
- color->data = match->data;
- comp = &color->ddp_comp;
- }
comp->id = comp_id;
comp->funcs = funcs ?: mtk_ddp_matches[comp_id].funcs;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index b2596f35104b..c8163525d444 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -443,11 +443,12 @@ static int mtk_drm_probe(struct platform_device *pdev)
private->comp_node[comp_id] = of_node_get(node);
/*
- * Currently only the OVL, RDMA, DSI, and DPI blocks have
+ * Currently only the COLOR, OVL, RDMA, DSI, and DPI blocks have
* separate component platform drivers and initialize their own
* DDP component structure. The others are initialized here.
*/
- if (comp_type == MTK_DISP_OVL ||
+ if (comp_type == MTK_DISP_COLOR ||
+ comp_type == MTK_DISP_OVL ||
comp_type == MTK_DISP_RDMA ||
comp_type == MTK_DSI ||
comp_type == MTK_DPI) {
@@ -570,6 +571,7 @@ static struct platform_driver mtk_drm_platform_driver = {
static struct platform_driver * const mtk_drm_drivers[] = {
&mtk_ddp_driver,
+ &mtk_disp_color_driver,
&mtk_disp_ovl_driver,
&mtk_disp_rdma_driver,
&mtk_dpi_driver,
@@ -580,33 +582,14 @@ static struct platform_driver * const mtk_drm_drivers[] = {
static int __init mtk_drm_init(void)
{
- int ret;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mtk_drm_drivers); i++) {
- ret = platform_driver_register(mtk_drm_drivers[i]);
- if (ret < 0) {
- pr_err("Failed to register %s driver: %d\n",
- mtk_drm_drivers[i]->driver.name, ret);
- goto err;
- }
- }
-
- return 0;
-
-err:
- while (--i >= 0)
- platform_driver_unregister(mtk_drm_drivers[i]);
-
- return ret;
+ return platform_register_drivers(mtk_drm_drivers,
+ ARRAY_SIZE(mtk_drm_drivers));
}
static void __exit mtk_drm_exit(void)
{
- int i;
-
- for (i = ARRAY_SIZE(mtk_drm_drivers) - 1; i >= 0; i--)
- platform_driver_unregister(mtk_drm_drivers[i]);
+ platform_unregister_drivers(mtk_drm_drivers,
+ ARRAY_SIZE(mtk_drm_drivers));
}
module_init(mtk_drm_init);
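platform_register_drivers() is effectively the library version of the loop being deleted here (and of the identical one in mtk_hdmi.c below): register each driver in order and, on failure, unregister the already-registered ones in reverse. A generic sketch of that register/unwind idiom:

#include <stdio.h>

struct driver {
        const char *name;
        int (*reg)(void);
        void (*unreg)(void);
};

static int register_all(struct driver *drv, int n)
{
        int i, ret;

        for (i = 0; i < n; i++) {
                ret = drv[i].reg();
                if (ret < 0)
                        goto err;
        }
        return 0;

err:
        /* unwind in reverse order, skipping the one that failed */
        while (--i >= 0)
                drv[i].unreg();
        return ret;
}

static int ok_reg(void) { return 0; }
static int bad_reg(void) { return -1; }
static void unreg(void) { puts("unregistered"); }

int main(void)
{
        struct driver drivers[] = {
                { "a", ok_reg, unreg },
                { "b", ok_reg, unreg },
                { "c", bad_reg, unreg },
        };

        /* prints "unregistered" twice: a and b are unwound */
        return register_all(drivers, 3) ? 1 : 0;
}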
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index aef8747d810b..c3378c452c0a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -59,6 +59,7 @@ struct mtk_drm_private {
};
extern struct platform_driver mtk_ddp_driver;
+extern struct platform_driver mtk_disp_color_driver;
extern struct platform_driver mtk_disp_ovl_driver;
extern struct platform_driver mtk_disp_rdma_driver;
extern struct platform_driver mtk_dpi_driver;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index e405e89ed5e5..1a59b9ab4aa8 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -28,6 +28,8 @@ static const u32 formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
};
static void mtk_plane_reset(struct drm_plane *plane)
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index b5cc6e12334c..97253c8f813b 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -930,7 +930,7 @@ static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data)
DRM_INFO("type is 0x02, try again\n");
break;
default:
- DRM_INFO("type(0x%x) cannot be non-recognite\n", type);
+ DRM_INFO("type(0x%x) not recognized\n", type);
break;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 5c0d02444bd3..252d373990bf 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1778,33 +1778,14 @@ static struct platform_driver * const mtk_hdmi_drivers[] = {
static int __init mtk_hdmitx_init(void)
{
- int ret;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mtk_hdmi_drivers); i++) {
- ret = platform_driver_register(mtk_hdmi_drivers[i]);
- if (ret < 0) {
- pr_err("Failed to register %s driver: %d\n",
- mtk_hdmi_drivers[i]->driver.name, ret);
- goto err;
- }
- }
-
- return 0;
-
-err:
- while (--i >= 0)
- platform_driver_unregister(mtk_hdmi_drivers[i]);
-
- return ret;
+ return platform_register_drivers(mtk_hdmi_drivers,
+ ARRAY_SIZE(mtk_hdmi_drivers));
}
static void __exit mtk_hdmitx_exit(void)
{
- int i;
-
- for (i = ARRAY_SIZE(mtk_hdmi_drivers) - 1; i >= 0; i--)
- platform_driver_unregister(mtk_hdmi_drivers[i]);
+ platform_unregister_drivers(mtk_hdmi_drivers,
+ ARRAY_SIZE(mtk_hdmi_drivers));
}
module_init(mtk_hdmitx_init);
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index 45cf363d25ad..a45bb22275a7 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -159,6 +159,8 @@ extern int mga_dma_bootstrap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_dma_init(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int mga_getparam(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern int mga_dma_flush(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_dma_reset(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 729bfd56b55f..245fb2e359cf 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -61,46 +61,25 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_mga_init32_t init32;
- drm_mga_init_t __user *init;
- int err = 0, i;
+ drm_mga_init_t init;
if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
return -EFAULT;
- init = compat_alloc_user_space(sizeof(*init));
- if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
- || __put_user(init32.func, &init->func)
- || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
- || __put_user(init32.chipset, &init->chipset)
- || __put_user(init32.sgram, &init->sgram)
- || __put_user(init32.maccess, &init->maccess)
- || __put_user(init32.fb_cpp, &init->fb_cpp)
- || __put_user(init32.front_offset, &init->front_offset)
- || __put_user(init32.front_pitch, &init->front_pitch)
- || __put_user(init32.back_offset, &init->back_offset)
- || __put_user(init32.back_pitch, &init->back_pitch)
- || __put_user(init32.depth_cpp, &init->depth_cpp)
- || __put_user(init32.depth_offset, &init->depth_offset)
- || __put_user(init32.depth_pitch, &init->depth_pitch)
- || __put_user(init32.fb_offset, &init->fb_offset)
- || __put_user(init32.mmio_offset, &init->mmio_offset)
- || __put_user(init32.status_offset, &init->status_offset)
- || __put_user(init32.warp_offset, &init->warp_offset)
- || __put_user(init32.primary_offset, &init->primary_offset)
- || __put_user(init32.buffers_offset, &init->buffers_offset))
- return -EFAULT;
-
- for (i = 0; i < MGA_NR_TEX_HEAPS; i++) {
- err |=
- __put_user(init32.texture_offset[i],
- &init->texture_offset[i]);
- err |=
- __put_user(init32.texture_size[i], &init->texture_size[i]);
- }
- if (err)
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_MGA_INIT, (unsigned long)init);
+ init.func = init32.func;
+ init.sarea_priv_offset = init32.sarea_priv_offset;
+ memcpy(&init.chipset, &init32.chipset,
+ offsetof(drm_mga_init_t, fb_offset) -
+ offsetof(drm_mga_init_t, chipset));
+ init.fb_offset = init32.fb_offset;
+ init.mmio_offset = init32.mmio_offset;
+ init.status_offset = init32.status_offset;
+ init.warp_offset = init32.warp_offset;
+ init.primary_offset = init32.primary_offset;
+ init.buffers_offset = init32.buffers_offset;
+
+ return drm_ioctl_kernel(file, mga_dma_init, &init,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
typedef struct drm_mga_getparam32 {
@@ -112,19 +91,14 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_mga_getparam32_t getparam32;
- drm_mga_getparam_t __user *getparam;
+ drm_mga_getparam_t getparam;
if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
return -EFAULT;
- getparam = compat_alloc_user_space(sizeof(*getparam));
- if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam))
- || __put_user(getparam32.param, &getparam->param)
- || __put_user((void __user *)(unsigned long)getparam32.value,
- &getparam->value))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
+ getparam.param = getparam32.param;
+ getparam.value = compat_ptr(getparam32.value);
+ return drm_ioctl_kernel(file, mga_getparam, &getparam, DRM_AUTH);
}
typedef struct drm_mga_drm_bootstrap32 {
@@ -141,48 +115,33 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_mga_dma_bootstrap32_t dma_bootstrap32;
- drm_mga_dma_bootstrap_t __user *dma_bootstrap;
+ drm_mga_dma_bootstrap_t dma_bootstrap;
int err;
if (copy_from_user(&dma_bootstrap32, (void __user *)arg,
sizeof(dma_bootstrap32)))
return -EFAULT;
- dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap));
- if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap))
- || __put_user(dma_bootstrap32.texture_handle,
- &dma_bootstrap->texture_handle)
- || __put_user(dma_bootstrap32.texture_size,
- &dma_bootstrap->texture_size)
- || __put_user(dma_bootstrap32.primary_size,
- &dma_bootstrap->primary_size)
- || __put_user(dma_bootstrap32.secondary_bin_count,
- &dma_bootstrap->secondary_bin_count)
- || __put_user(dma_bootstrap32.secondary_bin_size,
- &dma_bootstrap->secondary_bin_size)
- || __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
- || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
- return -EFAULT;
+ dma_bootstrap.texture_handle = dma_bootstrap32.texture_handle;
+ dma_bootstrap.texture_size = dma_bootstrap32.texture_size;
+ dma_bootstrap.primary_size = dma_bootstrap32.primary_size;
+ dma_bootstrap.secondary_bin_count = dma_bootstrap32.secondary_bin_count;
+ dma_bootstrap.secondary_bin_size = dma_bootstrap32.secondary_bin_size;
+ dma_bootstrap.agp_mode = dma_bootstrap32.agp_mode;
+ dma_bootstrap.agp_size = dma_bootstrap32.agp_size;
- err = drm_ioctl(file, DRM_IOCTL_MGA_DMA_BOOTSTRAP,
- (unsigned long)dma_bootstrap);
+ err = drm_ioctl_kernel(file, mga_dma_bootstrap, &dma_bootstrap,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
if (err)
return err;
- if (__get_user(dma_bootstrap32.texture_handle,
- &dma_bootstrap->texture_handle)
- || __get_user(dma_bootstrap32.texture_size,
- &dma_bootstrap->texture_size)
- || __get_user(dma_bootstrap32.primary_size,
- &dma_bootstrap->primary_size)
- || __get_user(dma_bootstrap32.secondary_bin_count,
- &dma_bootstrap->secondary_bin_count)
- || __get_user(dma_bootstrap32.secondary_bin_size,
- &dma_bootstrap->secondary_bin_size)
- || __get_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
- || __get_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
- return -EFAULT;
-
+ dma_bootstrap32.texture_handle = dma_bootstrap.texture_handle;
+ dma_bootstrap32.texture_size = dma_bootstrap.texture_size;
+ dma_bootstrap32.primary_size = dma_bootstrap.primary_size;
+ dma_bootstrap32.secondary_bin_count = dma_bootstrap.secondary_bin_count;
+ dma_bootstrap32.secondary_bin_size = dma_bootstrap.secondary_bin_size;
+ dma_bootstrap32.agp_mode = dma_bootstrap.agp_mode;
+ dma_bootstrap32.agp_size = dma_bootstrap.agp_size;
if (copy_to_user((void __user *)arg, &dma_bootstrap32,
sizeof(dma_bootstrap32)))
return -EFAULT;
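The rewritten compat handlers stop round-tripping through a userspace bounce buffer (compat_alloc_user_space() plus __put_user()/__get_user()) and instead convert the 32-bit layout to the native struct field by field on the kernel stack, handing the result to drm_ioctl_kernel(). The core of that conversion, sketched with plain stdint types — the struct layouts are invented for illustration, and compat_ptr() is reduced to a simple widening cast:

#include <stdint.h>
#include <stdio.h>

/* 32-bit userspace layout: pointers arrive as 32-bit handles */
struct getparam32 {
        int32_t  param;
        uint32_t value;         /* user pointer, truncated to 32 bits */
};

/* native 64-bit layout */
struct getparam {
        int32_t  param;
        void    *value;
};

/* stand-in for the kernel's compat_ptr(): widen a 32-bit user pointer */
static void *compat_ptr(uint32_t uptr)
{
        return (void *)(uintptr_t)uptr;
}

int main(void)
{
        struct getparam32 g32 = { .param = 4, .value = 0x1000 };
        struct getparam g = {
                .param = g32.param,
                .value = compat_ptr(g32.value),
        };

        printf("param=%d value=%p\n", g.param, g.value);
        return 0;
}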
@@ -190,10 +149,14 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
return 0;
}
-drm_ioctl_compat_t *mga_compat_ioctls[] = {
- [DRM_MGA_INIT] = compat_mga_init,
- [DRM_MGA_GETPARAM] = compat_mga_getparam,
- [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
+static struct {
+ drm_ioctl_compat_t *fn;
+ char *name;
+} mga_compat_ioctls[] = {
+#define DRM_IOCTL32_DEF(n, f) [DRM_##n] = {.fn = f, .name = #n}
+ DRM_IOCTL32_DEF(MGA_INIT, compat_mga_init),
+ DRM_IOCTL32_DEF(MGA_GETPARAM, compat_mga_getparam),
+ DRM_IOCTL32_DEF(MGA_DMA_BOOTSTRAP, compat_mga_dma_bootstrap),
};
/**
@@ -208,19 +171,27 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
+ struct drm_file *file_priv = filp->private_data;
drm_ioctl_compat_t *fn = NULL;
int ret;
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
-
- if (fn != NULL)
- ret = (*fn) (filp, cmd, arg);
- else
- ret = drm_ioctl(filp, cmd, arg);
-
+ if (nr >= DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
+ return drm_ioctl(filp, cmd, arg);
+
+ fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE].fn;
+ if (!fn)
+ return drm_ioctl(filp, cmd, arg);
+
+ DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated,
+ mga_compat_ioctls[nr - DRM_COMMAND_BASE].name);
+ ret = (*fn) (filp, cmd, arg);
+ if (ret)
+ DRM_DEBUG("ret = %d\n", ret);
return ret;
}
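DRM_IOCTL32_DEF pairs each compat handler with its stringified name via designated array initializers, which is what lets the DRM_DEBUG print above show a symbolic ioctl name and leaves unhandled slots NULL for the drm_ioctl() fallback. The same trick in isolation, with invented command numbers:

#include <stdio.h>

typedef int (*handler_t)(void);

static int do_init(void) { return 0; }
static int do_getparam(void) { return 0; }

enum { CMD_INIT = 0, CMD_GETPARAM = 3, CMD_MAX };

static struct {
        handler_t fn;
        const char *name;
} handlers[CMD_MAX] = {
#define IOCTL_DEF(n, f) [CMD_##n] = { .fn = f, .name = #n }
        IOCTL_DEF(INIT, do_init),
        IOCTL_DEF(GETPARAM, do_getparam),
#undef IOCTL_DEF
};

int main(void)
{
        for (int i = 0; i < CMD_MAX; i++)
                if (handlers[i].fn)     /* gaps in the array stay NULL */
                        printf("%d -> %s\n", i, handlers[i].name);
        return 0;
}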
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index 792f924496fc..e5f6b735f575 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1005,7 +1005,7 @@ static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *fil
return 0;
}
-static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_getparam_t *param = data;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 39468c218027..7459ef9943ec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -60,15 +60,13 @@ bool nouveau_is_v1_dsm(void) {
}
#ifdef CONFIG_VGA_SWITCHEROO
-static const char nouveau_dsm_muid[] = {
- 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
- 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
-};
+static const guid_t nouveau_dsm_muid =
+ GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48,
+ 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4);
-static const char nouveau_op_dsm_muid[] = {
- 0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47,
- 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0,
-};
+static const guid_t nouveau_op_dsm_muid =
+ GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B,
+ 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0);
static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
{
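GUID_INIT() stores its first three fields little-endian, so 0x9D95A0A0 becomes the bytes A0 A0 95 9D — exactly the leading bytes of the raw array being deleted above. A standalone check of that correspondence, using a hand-rolled guid layout assumed to match the kernel's guid_t:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct guid {
        uint8_t b[16];
};

/* first three fields little-endian, last eight bytes verbatim */
#define GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)      \
        { { (a) & 0xff, ((a) >> 8) & 0xff,                      \
            ((a) >> 16) & 0xff, ((a) >> 24) & 0xff,             \
            (b) & 0xff, ((b) >> 8) & 0xff,                      \
            (c) & 0xff, ((c) >> 8) & 0xff,                      \
            (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) } }

int main(void)
{
        /* the nouveau _DSM MUID, as a guid and as the old raw byte array */
        static const struct guid muid =
                GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48,
                          0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4);
        static const uint8_t raw[16] = {
                0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
                0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
        };

        printf("match=%d\n", memcmp(muid.b, raw, 16) == 0); /* match=1 */
        return 0;
}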
@@ -86,7 +84,7 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
args_buff[i] = (arg >> i * 8) & 0xFF;
*result = 0;
- obj = acpi_evaluate_dsm_typed(handle, nouveau_op_dsm_muid, 0x00000100,
+ obj = acpi_evaluate_dsm_typed(handle, &nouveau_op_dsm_muid, 0x00000100,
func, &argv4, ACPI_TYPE_BUFFER);
if (!obj) {
acpi_handle_info(handle, "failed to evaluate _DSM\n");
@@ -138,7 +136,7 @@ static int nouveau_dsm(acpi_handle handle, int func, int arg)
.integer.value = arg,
};
- obj = acpi_evaluate_dsm_typed(handle, nouveau_dsm_muid, 0x00000102,
+ obj = acpi_evaluate_dsm_typed(handle, &nouveau_dsm_muid, 0x00000102,
func, &argv4, ACPI_TYPE_INTEGER);
if (!obj) {
acpi_handle_info(handle, "failed to evaluate _DSM\n");
@@ -259,7 +257,7 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
if (!acpi_has_method(dhandle, "_DSM"))
return;
- supports_mux = acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
+ supports_mux = acpi_check_dsm(dhandle, &nouveau_dsm_muid, 0x00000102,
1 << NOUVEAU_DSM_POWER);
optimus_funcs = nouveau_dsm_get_optimus_functions(dhandle);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index e3e2f5e83815..f44682d62f75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -81,10 +81,9 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
{
struct nvkm_subdev *subdev = &mxm->subdev;
struct nvkm_device *device = subdev->device;
- static char muid[] = {
- 0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
- 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
- };
+ static guid_t muid =
+ GUID_INIT(0x4004A400, 0x917D, 0x4CF2,
+ 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65);
u32 mxms_args[] = { 0x00000000 };
union acpi_object argv4 = {
.buffer.type = ACPI_TYPE_BUFFER,
@@ -105,7 +104,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
* unless you pass in exactly the version it supports..
*/
rev = (version & 0xf0) << 4 | (version & 0x0f);
- obj = acpi_evaluate_dsm(handle, muid, rev, 0x00000010, &argv4);
+ obj = acpi_evaluate_dsm(handle, &muid, rev, 0x00000010, &argv4);
if (!obj) {
nvkm_debug(subdev, "DSM MXMS failed\n");
return false;
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index a5d3cd3ecb5f..4acbb944bcd2 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -105,7 +105,6 @@ radeon-y += \
vce_v2_0.o \
radeon_kfd.o
-radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
radeon-$(CONFIG_ACPI) += radeon_acpi.o
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index fa4f8f008e4d..e67ed383e11b 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -31,6 +31,7 @@
#include "radeon_asic.h"
#include "atom.h"
#include <linux/backlight.h>
+#include <linux/dmi.h>
extern int atom_debug;
@@ -2184,9 +2185,17 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
goto assigned;
}
- /* on DCE32 and encoder can driver any block so just crtc id */
+ /*
+ * On DCE32 any encoder can drive any block so usually just use crtc id,
+ * but Apple thinks different at least on iMac10,1, so there use linkb,
+ * otherwise the internal eDP panel will stay dark.
+ */
if (ASIC_IS_DCE32(rdev)) {
- enc_idx = radeon_crtc->crtc_id;
+ if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
+ enc_idx = (dig->linkb) ? 1 : 0;
+ else
+ enc_idx = radeon_crtc->crtc_id;
+
goto assigned;
}
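
The new branch above keys the encoder index off the DMI product name with a one-off dmi_match(). When a quirk list like this is expected to grow, the usual kernel pattern is a dmi_system_id table checked once at init; a hedged sketch with illustrative names, nothing below is from this commit:

/* Table-driven alternative to an inline dmi_match(); the callback and
 * flag names here are made up for illustration. */
#include <linux/dmi.h>

static bool use_linkb_quirk;

static int encoder_quirk_cb(const struct dmi_system_id *id)
{
	use_linkb_quirk = true;
	return 1;
}

static const struct dmi_system_id encoder_quirks[] = {
	{
		.callback = encoder_quirk_cb,
		.ident = "Apple iMac10,1",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "iMac10,1"),
		},
	},
	{ }
};

/* Somewhere in driver init: dmi_check_system(encoder_quirks); */
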
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 68be1bfa22b9..5008f3d4cccc 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -377,7 +377,7 @@ struct radeon_fence {
unsigned ring;
bool is_vm_update;
- wait_queue_t fence_wake;
+ wait_queue_entry_t fence_wake;
};
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0a6444d72000..997131d58c7f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -113,7 +113,6 @@ static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
-#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
struct radeon_px_quirk {
u32 chip_vendor;
@@ -140,8 +139,6 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
* https://bugs.freedesktop.org/show_bug.cgi?id=101491
*/
{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
- /* macbook pro 8.2 */
- { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
{ 0, 0, 0, 0, 0 },
};
@@ -1245,25 +1242,17 @@ static void radeon_check_arguments(struct radeon_device *rdev)
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct radeon_device *rdev = dev->dev_private;
if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
- unsigned d3_delay = dev->pdev->d3_delay;
-
pr_info("radeon: switched on\n");
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
- dev->pdev->d3_delay = 20;
-
radeon_resume_kms(dev, true, true);
- dev->pdev->d3_delay = d3_delay;
-
dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
} else {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ff2641cbf172..b401f1689bc1 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -38,6 +38,7 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
+#include <linux/compat.h>
#include <drm/drm_gem.h>
#include <drm/drm_fb_helper.h>
@@ -150,8 +151,6 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *);
void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
@@ -509,6 +508,21 @@ long radeon_drm_ioctl(struct file *filp,
return ret;
}
+#ifdef CONFIG_COMPAT
+static long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+ int ret;
+
+ if (nr < DRM_COMMAND_BASE)
+ return drm_compat_ioctl(filp, cmd, arg);
+
+ ret = radeon_drm_ioctl(filp, cmd, arg);
+
+ return ret;
+}
+#endif
+
static const struct dev_pm_ops radeon_pm_ops = {
.suspend = radeon_pmops_suspend,
.resume = radeon_pmops_resume,
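
With radeon_ioc32.c gone (deleted below), the only compat handling the KMS driver still needs is this passthrough: ioctl numbers below DRM_COMMAND_BASE are core DRM calls that need drm_compat_ioctl()'s 32-bit translation, while radeon's own KMS ioctls use fixed-size, compat-safe structs and can be handed straight to radeon_drm_ioctl(). The handler is wired up through the driver's file_operations; a sketch of that wiring, with the field subset illustrative:

/* Hedged sketch of how the now-static handler is hooked up; the real
 * radeon fops in this file carries more fields than shown here. */
static const struct file_operations radeon_driver_kms_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = radeon_drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = radeon_kms_compat_ioctl,
#endif
	/* ... mmap, poll, read, llseek ... */
};
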
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index ef09f0a63754..e86f2bd38410 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -158,7 +158,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
* for the fence locking itself, so unlocked variants are used for
* fence_signal, and remove_wait_queue.
*/
-static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
+static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, int flags, void *key)
{
struct radeon_fence *fence;
u64 seq;
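
The wait_queue_t renames here and in radeon.h above track the tree-wide wait-queue rework this merge pulls in: the per-waiter element is now wait_queue_entry_t (the head keeps its wait_queue_head_t name), and custom wake callbacks take the new type, as radeon_fence_check_signaled now does. A hedged sketch of registering such a callback; names other than the wait/wake API itself are made up:

/* Registering a custom wake function on a wait queue head with the
 * renamed types. */
#include <linux/wait.h>

static int my_wake_fn(wait_queue_entry_t *wait, unsigned mode,
		      int flags, void *key)
{
	/* Runs in wake-up context; often just records completion. */
	return default_wake_function(wait, mode, flags, key);
}

static void my_register(wait_queue_head_t *wqh, wait_queue_entry_t *wait)
{
	init_waitqueue_func_entry(wait, my_wake_fn);
	add_wait_queue(wqh, wait);
}
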
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
deleted file mode 100644
index 0b98ea134579..000000000000
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ /dev/null
@@ -1,424 +0,0 @@
-/**
- * \file radeon_ioc32.c
- *
- * 32-bit ioctl compatibility routines for the Radeon DRM.
- *
- * \author Paul Mackerras <paulus@samba.org>
- *
- * Copyright (C) Paul Mackerras 2005
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-#include <linux/compat.h>
-
-#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
-#include "radeon_drv.h"
-
-typedef struct drm_radeon_init32 {
- int func;
- u32 sarea_priv_offset;
- int is_pci;
- int cp_mode;
- int gart_size;
- int ring_size;
- int usec_timeout;
-
- unsigned int fb_bpp;
- unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
- unsigned int depth_bpp;
- unsigned int depth_offset, depth_pitch;
-
- u32 fb_offset;
- u32 mmio_offset;
- u32 ring_offset;
- u32 ring_rptr_offset;
- u32 buffers_offset;
- u32 gart_textures_offset;
-} drm_radeon_init32_t;
-
-static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_init32_t init32;
- drm_radeon_init_t __user *init;
-
- if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
- return -EFAULT;
-
- init = compat_alloc_user_space(sizeof(*init));
- if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
- || __put_user(init32.func, &init->func)
- || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
- || __put_user(init32.is_pci, &init->is_pci)
- || __put_user(init32.cp_mode, &init->cp_mode)
- || __put_user(init32.gart_size, &init->gart_size)
- || __put_user(init32.ring_size, &init->ring_size)
- || __put_user(init32.usec_timeout, &init->usec_timeout)
- || __put_user(init32.fb_bpp, &init->fb_bpp)
- || __put_user(init32.front_offset, &init->front_offset)
- || __put_user(init32.front_pitch, &init->front_pitch)
- || __put_user(init32.back_offset, &init->back_offset)
- || __put_user(init32.back_pitch, &init->back_pitch)
- || __put_user(init32.depth_bpp, &init->depth_bpp)
- || __put_user(init32.depth_offset, &init->depth_offset)
- || __put_user(init32.depth_pitch, &init->depth_pitch)
- || __put_user(init32.fb_offset, &init->fb_offset)
- || __put_user(init32.mmio_offset, &init->mmio_offset)
- || __put_user(init32.ring_offset, &init->ring_offset)
- || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
- || __put_user(init32.buffers_offset, &init->buffers_offset)
- || __put_user(init32.gart_textures_offset,
- &init->gart_textures_offset))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
-}
-
-typedef struct drm_radeon_clear32 {
- unsigned int flags;
- unsigned int clear_color;
- unsigned int clear_depth;
- unsigned int color_mask;
- unsigned int depth_mask; /* misnamed field: should be stencil */
- u32 depth_boxes;
-} drm_radeon_clear32_t;
-
-static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_clear32_t clr32;
- drm_radeon_clear_t __user *clr;
-
- if (copy_from_user(&clr32, (void __user *)arg, sizeof(clr32)))
- return -EFAULT;
-
- clr = compat_alloc_user_space(sizeof(*clr));
- if (!access_ok(VERIFY_WRITE, clr, sizeof(*clr))
- || __put_user(clr32.flags, &clr->flags)
- || __put_user(clr32.clear_color, &clr->clear_color)
- || __put_user(clr32.clear_depth, &clr->clear_depth)
- || __put_user(clr32.color_mask, &clr->color_mask)
- || __put_user(clr32.depth_mask, &clr->depth_mask)
- || __put_user((void __user *)(unsigned long)clr32.depth_boxes,
- &clr->depth_boxes))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
-}
-
-typedef struct drm_radeon_stipple32 {
- u32 mask;
-} drm_radeon_stipple32_t;
-
-static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_stipple32_t __user *argp = (void __user *)arg;
- drm_radeon_stipple_t __user *request;
- u32 mask;
-
- if (get_user(mask, &argp->mask))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user((unsigned int __user *)(unsigned long)mask,
- &request->mask))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
-}
-
-typedef struct drm_radeon_tex_image32 {
- unsigned int x, y; /* Blit coordinates */
- unsigned int width, height;
- u32 data;
-} drm_radeon_tex_image32_t;
-
-typedef struct drm_radeon_texture32 {
- unsigned int offset;
- int pitch;
- int format;
- int width; /* Texture image coordinates */
- int height;
- u32 image;
-} drm_radeon_texture32_t;
-
-static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_texture32_t req32;
- drm_radeon_texture_t __user *request;
- drm_radeon_tex_image32_t img32;
- drm_radeon_tex_image_t __user *image;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
- if (req32.image == 0)
- return -EINVAL;
- if (copy_from_user(&img32, (void __user *)(unsigned long)req32.image,
- sizeof(img32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request) + sizeof(*image));
- if (!access_ok(VERIFY_WRITE, request,
- sizeof(*request) + sizeof(*image)))
- return -EFAULT;
- image = (drm_radeon_tex_image_t __user *) (request + 1);
-
- if (__put_user(req32.offset, &request->offset)
- || __put_user(req32.pitch, &request->pitch)
- || __put_user(req32.format, &request->format)
- || __put_user(req32.width, &request->width)
- || __put_user(req32.height, &request->height)
- || __put_user(image, &request->image)
- || __put_user(img32.x, &image->x)
- || __put_user(img32.y, &image->y)
- || __put_user(img32.width, &image->width)
- || __put_user(img32.height, &image->height)
- || __put_user((const void __user *)(unsigned long)img32.data,
- &image->data))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
-}
-
-typedef struct drm_radeon_vertex2_32 {
- int idx; /* Index of vertex buffer */
- int discard; /* Client finished with buffer? */
- int nr_states;
- u32 state;
- int nr_prims;
- u32 prim;
-} drm_radeon_vertex2_32_t;
-
-static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_vertex2_32_t req32;
- drm_radeon_vertex2_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.idx, &request->idx)
- || __put_user(req32.discard, &request->discard)
- || __put_user(req32.nr_states, &request->nr_states)
- || __put_user((void __user *)(unsigned long)req32.state,
- &request->state)
- || __put_user(req32.nr_prims, &request->nr_prims)
- || __put_user((void __user *)(unsigned long)req32.prim,
- &request->prim))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
-}
-
-typedef struct drm_radeon_cmd_buffer32 {
- int bufsz;
- u32 buf;
- int nbox;
- u32 boxes;
-} drm_radeon_cmd_buffer32_t;
-
-static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_cmd_buffer32_t req32;
- drm_radeon_cmd_buffer_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.bufsz, &request->bufsz)
- || __put_user((void __user *)(unsigned long)req32.buf,
- &request->buf)
- || __put_user(req32.nbox, &request->nbox)
- || __put_user((void __user *)(unsigned long)req32.boxes,
- &request->boxes))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
-}
-
-typedef struct drm_radeon_getparam32 {
- int param;
- u32 value;
-} drm_radeon_getparam32_t;
-
-static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_getparam32_t req32;
- drm_radeon_getparam_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.param, &request->param)
- || __put_user((void __user *)(unsigned long)req32.value,
- &request->value))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
-}
-
-typedef struct drm_radeon_mem_alloc32 {
- int region;
- int alignment;
- int size;
- u32 region_offset; /* offset from start of fb or GART */
-} drm_radeon_mem_alloc32_t;
-
-static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_mem_alloc32_t req32;
- drm_radeon_mem_alloc_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.region, &request->region)
- || __put_user(req32.alignment, &request->alignment)
- || __put_user(req32.size, &request->size)
- || __put_user((int __user *)(unsigned long)req32.region_offset,
- &request->region_offset))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
-}
-
-typedef struct drm_radeon_irq_emit32 {
- u32 irq_seq;
-} drm_radeon_irq_emit32_t;
-
-static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_irq_emit32_t req32;
- drm_radeon_irq_emit_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user((int __user *)(unsigned long)req32.irq_seq,
- &request->irq_seq))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
-}
-
-/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
-#if defined (CONFIG_X86_64) || defined(CONFIG_IA64)
-typedef struct drm_radeon_setparam32 {
- int param;
- u64 value;
-} __attribute__((packed)) drm_radeon_setparam32_t;
-
-static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_setparam32_t req32;
- drm_radeon_setparam_t __user *request;
-
- if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.param, &request->param)
- || __put_user((void __user *)(unsigned long)req32.value,
- &request->value))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
-}
-#else
-#define compat_radeon_cp_setparam NULL
-#endif /* X86_64 || IA64 */
-
-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
- [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
- [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
- [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
- [DRM_RADEON_TEXTURE] = compat_radeon_cp_texture,
- [DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2,
- [DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf,
- [DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam,
- [DRM_RADEON_SETPARAM] = compat_radeon_cp_setparam,
- [DRM_RADEON_ALLOC] = compat_radeon_mem_alloc,
- [DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit,
-};
-
-/**
- * Called whenever a 32-bit process running under a 64-bit kernel
- * performs an ioctl on /dev/dri/card<n>.
- *
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
- */
-long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- unsigned int nr = DRM_IOCTL_NR(cmd);
- drm_ioctl_compat_t *fn = NULL;
- int ret;
-
- if (nr < DRM_COMMAND_BASE)
- return drm_compat_ioctl(filp, cmd, arg);
-
- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
-
- if (fn != NULL)
- ret = (*fn) (filp, cmd, arg);
- else
- ret = drm_ioctl(filp, cmd, arg);
-
- return ret;
-}
-
-long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- unsigned int nr = DRM_IOCTL_NR(cmd);
- int ret;
-
- if (nr < DRM_COMMAND_BASE)
- return drm_compat_ioctl(filp, cmd, arg);
-
- ret = radeon_drm_ioctl(filp, cmd, arg);
-
- return ret;
-}
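
Everything deleted above translated the legacy, pre-KMS radeon ioctls (CP_INIT, CLEAR, and friends) through compat_alloc_user_space() trampolines; with the userspace-modesetting path long removed from this driver, the shims were dead code, and only the trivial KMS passthrough survives, now static in radeon_drv.c. Where a genuine 32-bit layout difference must still be handled, the pattern that has been replacing such trampolines converts in kernel space instead; a hedged sketch with illustrative struct and handler names:

/*
 * Not from this commit: copy the 32-bit layout in, convert in kernel
 * space, and call the native handler directly, with no user-space
 * bounce buffer. All foo_* names are made up.
 */
struct foo_args {
	int param;
	void __user *value;
};

struct foo_args32 {
	s32 param;
	u32 value;		/* 32-bit user pointer */
};

static long foo_do_ioctl(struct file *file, struct foo_args *a); /* native */

static long foo_compat_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct foo_args32 a32;
	struct foo_args a;

	if (copy_from_user(&a32, compat_ptr(arg), sizeof(a32)))
		return -EFAULT;

	a.param = a32.param;
	a.value = compat_ptr(a32.value);

	return foo_do_ioctl(file, &a);
}
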
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 14fa1f8351e8..9b0b0588bbed 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -1195,7 +1195,7 @@ static int cdn_dp_probe(struct platform_device *pdev)
continue;
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
- if (!dp)
+ if (!port)
return -ENOMEM;
port->extcon = extcon;
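
The one-line fix above corrects a copy-paste bug: the freshly allocated port was tested through the unrelated dp pointer, so an allocation failure would slip past the check and oops on port->extcon a line later. The idiom being restored, always testing the pointer the allocator just returned:

/* sizeof(*port) also keeps the size correct if the type of 'port'
 * ever changes. */
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
if (!port)
	return -ENOMEM;
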
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 47905faf5586..c7e96b82cf63 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -45,13 +45,13 @@ struct rockchip_crtc_state {
*
* @crtc: array of enabled CRTCs, used to map from "pipe" to drm_crtc.
* @num_pipe: number of pipes for this device.
+ * @mm_lock: protect drm_mm on multi-threads.
*/
struct rockchip_drm_private {
struct drm_fb_helper fbdev_helper;
struct drm_gem_object *fbdev_bo;
struct drm_atomic_state *state;
struct iommu_domain *domain;
- /* protect drm_mm on multi-threads */
struct mutex mm_lock;
struct drm_mm mm;
struct list_head psr_list;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index df9e57064f19..b74ac717e56a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -29,12 +29,11 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
ssize_t ret;
mutex_lock(&private->mm_lock);
-
ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
rk_obj->base.size, PAGE_SIZE,
0, 0);
-
mutex_unlock(&private->mm_lock);
+
if (ret < 0) {
DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
return ret;
@@ -56,7 +55,9 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
return 0;
err_remove_node:
+ mutex_lock(&private->mm_lock);
drm_mm_remove_node(&rk_obj->mm);
+ mutex_unlock(&private->mm_lock);
return ret;
}
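
This hunk closes a small race: the error path removed the drm_mm node without holding mm_lock, while every other drm_mm access in the driver takes it, hence the @mm_lock kerneldoc added in the header above. One way to keep that invariant visible is a locked helper; a hedged sketch, not code from this commit:

/* Serializing all drm_mm accesses behind the driver's mutex, as the
 * hunk above now does on the error path as well. */
static void rk_mm_remove_locked(struct rockchip_drm_private *priv,
				struct drm_mm_node *node)
{
	mutex_lock(&priv->mm_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&priv->mm_lock);
}
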
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a6d7fcb99c0b..22b57020790d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1353,7 +1353,6 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
mem_type);
return ret;
}
- dma_fence_put(man->move);
man->use_type = false;
man->has_type = false;
@@ -1369,6 +1368,9 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
ret = (*man->func->takedown)(man);
}
+ dma_fence_put(man->move);
+ man->move = NULL;
+
return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);
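
Two things move in this hunk: the reference drop now happens after eviction and takedown, so anything those steps still wait on sees a live fence, and the pointer is cleared afterwards. Since dma_fence_put() is a no-op on NULL, the put-and-clear idiom makes a repeated teardown pass harmless; a hedged sketch of the idiom in isolation:

/* dma_fence_put() tolerates NULL, so clearing the pointer means a
 * second pass through teardown cannot drop the reference twice. */
static void drop_move_fence(struct ttm_mem_type_manager *man)
{
	dma_fence_put(man->move);	/* no-op if already NULL */
	man->move = NULL;
}
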
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 13db8a2851ed..1f013d45c9e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
list_for_each_entry_safe(entry, next, &man->list, head)
vmw_cmdbuf_res_free(man, entry);
+ drm_ht_remove(&man->resources);
kfree(man);
}
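
The added drm_ht_remove() frees the bucket array that drm_ht_create() allocated when the manager was set up; without it, kfree(man) leaked that allocation on every manager destroy. The paired lifetime, as a hedged sketch rather than the literal code:

/* A table created at init time must be torn down before the containing
 * manager is freed, as the hunk above now does. */
static void man_destroy(struct vmw_cmdbuf_res_manager *man)
{
	drm_ht_remove(&man->resources);	/* frees the bucket array */
	kfree(man);
}
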
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 92f1452dad57..76875f6299b8 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -417,7 +417,7 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
{
struct vga_device *vgadev, *conflict;
unsigned long flags;
- wait_queue_t wait;
+ wait_queue_entry_t wait;
int rc = 0;
vga_check_first_use();