author    Linus Torvalds <torvalds@linux-foundation.org>  2018-02-01 17:48:47 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-02-01 17:48:47 -0800
commit    4bf772b14675411a69b3c807f73006de0fe4b649 (patch)
tree      b841e3ba0e3429695589cb0ab73871fa12f42c38 /drivers/gpu/drm/i915
parent    3879ae653a3e98380fe2daf653338830b7ca0097 (diff)
parent    24b8ef699e8221d2b7f813adaab13eec053e1507 (diff)
Merge tag 'drm-for-v4.16' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "This seems to have been a comparatively quieter merge window, I
  assume due to holidays etc. The "biggest" change is AMD header
  cleanups, which merge/remove a bunch of them. The AMD gpu scheduler
  is now being made generic with the etnaviv driver wanting to reuse
  the code, hopefully other drivers can go in the same direction.

  Otherwise it's the usual lots of stuff in i915/amdgpu, not so much
  stuff elsewhere.

  Core:
   - Add .last_close and .output_poll_changed helpers to reduce driver
     footprints
   - Fix plane clipping
   - Improved debug printing support
   - Add panel orientation property
   - Update edid derived properties at edid setting
   - Reduction in fbdev driver footprint
   - Move amdgpu scheduler into core for other drivers to use

  i915:
   - Selftest and IGT improvements
   - Fast boot prep work on IPS, pipe config
   - HW workarounds for Cannonlake, Geminilake
   - Cannonlake clock and HDMI2.0 fixes
   - GPU cache invalidation and context switch improvements
   - Display planes cleanup
   - New PMU interface for perf queries
   - New firmware support for KBL/SKL
   - Geminilake HW workaround for performance
   - Coffeelake stolen memory improvements
   - GPU reset robustness work
   - Cannonlake horizontal plane flipping
   - GVT work

  amdgpu/radeon:
   - RV and Vega header file cleanups (lots of lines gone!)
   - TTM operation context support
   - 48-bit GPUVM support for Vega/RV
   - ECC support for Vega
   - Resizeable BAR support
   - Multi-display sync support
   - Enable swapout for reserved BOs during allocation
   - S3 fixes on Raven
   - GPU reset cleanup and fixes
   - 2+1 level GPU page table

  amdkfd:
   - GFX7/8 SDMA user queues support
   - Hardware scheduling for multiple processes
   - dGPU prep work

  rcar:
   - Added R8A7743/5 support
   - System suspend/resume support

  sun4i:
   - Multi-plane support for YUV formats
   - A83T and LVDS support

  msm:
   - Devfreq support for GPU

  tegra:
   - Prep work for adding Tegra186 support
   - Tegra186 HDMI support
   - HDMI2.0 and zpos support by using generic helpers

  tilcdc:
   - Misc fixes

  omapdrm:
   - Support memory bandwidth limits
   - DSI command mode panel cleanups
   - DMM error handling

  exynos:
   - drop the old IPP subdriver

  etnaviv:
   - Occlusion query fixes
   - Job handling fixes
   - Prep work for hooking in gpu scheduler

  armada:
   - Move closer to atomic modesetting
   - Allow disabling primary plane if overlay is full screen

  imx:
   - Format modifier support
   - Add tile prefetch to PRE
   - Runtime PM support for PRG

  ast:
   - fix LUT loading"

* tag 'drm-for-v4.16' of git://people.freedesktop.org/~airlied/linux: (1471 commits)
  drm/ast: Load lut in crtc_commit
  drm: Check for lessee in DROP_MASTER ioctl
  drm: fix gpu scheduler link order
  drm/amd/display: Demote error print to debug print when ATOM impl missing
  dma-buf: fix reservation_object_wait_timeout_rcu once more v2
  drm/amdgpu: Avoid leaking PM domain on driver unbind (v2)
  drm/amd/amdgpu: Add Polaris version check
  drm/amdgpu: Reenable manual GPU reset from sysfs
  drm/amdgpu: disable MMHUB power gating on raven
  drm/ttm: Don't unreserve swapped BOs that were previously reserved
  drm/ttm: Don't add swapped BOs to swap-LRU list
  drm/amdgpu: only check for ECC on Vega10
  drm/amd/powerplay: Fix smu_table_entry.handle type
  drm/ttm: add VADDR_FLAG_UPDATED_COUNT to correctly update dma_page global count
  drm: Fix PANEL_ORIENTATION_QUIRKS breaking the Kconfig DRM menuconfig
  drm/radeon: fill in rb backend map on evergreen/ni.
  drm/amdgpu/gfx9: fix ngg enablement to clear gds reserved memory (v2)
  drm/ttm: only free pages rather than update global memory count together
  drm/amdgpu: fix CPU based VM updates
  drm/amdgpu: fix typo in amdgpu_vce_validate_bo
  ...
Diffstat (limited to 'drivers/gpu/drm/i915')
 drivers/gpu/drm/i915/Kconfig.debug | 29
 drivers/gpu/drm/i915/Makefile | 32
 drivers/gpu/drm/i915/gvt/Makefile | 3
 drivers/gpu/drm/i915/gvt/cfg_space.c | 3
 drivers/gpu/drm/i915/gvt/cmd_parser.c | 264
 drivers/gpu/drm/i915/gvt/debug.h | 24
 drivers/gpu/drm/i915/gvt/debugfs.c | 212
 drivers/gpu/drm/i915/gvt/display.c | 80
 drivers/gpu/drm/i915/gvt/display.h | 2
 drivers/gpu/drm/i915/gvt/dmabuf.c | 537
 drivers/gpu/drm/i915/gvt/dmabuf.h | 67
 drivers/gpu/drm/i915/gvt/edid.c | 22
 drivers/gpu/drm/i915/gvt/execlist.c | 498
 drivers/gpu/drm/i915/gvt/execlist.h | 8
 drivers/gpu/drm/i915/gvt/fb_decoder.c | 514
 drivers/gpu/drm/i915/gvt/fb_decoder.h | 169
 drivers/gpu/drm/i915/gvt/firmware.c | 26
 drivers/gpu/drm/i915/gvt/gtt.c | 298
 drivers/gpu/drm/i915/gvt/gtt.h | 45
 drivers/gpu/drm/i915/gvt/gvt.c | 153
 drivers/gpu/drm/i915/gvt/gvt.h | 127
 drivers/gpu/drm/i915/gvt/handlers.c | 842
 drivers/gpu/drm/i915/gvt/hypercall.h | 3
 drivers/gpu/drm/i915/gvt/kvmgt.c | 301
 drivers/gpu/drm/i915/gvt/mmio.c | 89
 drivers/gpu/drm/i915/gvt/mmio.h | 9
 drivers/gpu/drm/i915/gvt/mmio_context.c | 412
 drivers/gpu/drm/i915/gvt/mmio_context.h (renamed from drivers/gpu/drm/i915/gvt/render.h) | 9
 drivers/gpu/drm/i915/gvt/mpt.h | 79
 drivers/gpu/drm/i915/gvt/opregion.c | 360
 drivers/gpu/drm/i915/gvt/reg.h | 6
 drivers/gpu/drm/i915/gvt/render.c | 405
 drivers/gpu/drm/i915/gvt/sched_policy.c | 5
 drivers/gpu/drm/i915/gvt/sched_policy.h | 2
 drivers/gpu/drm/i915/gvt/scheduler.c | 670
 drivers/gpu/drm/i915/gvt/scheduler.h | 35
 drivers/gpu/drm/i915/gvt/trace.h | 15
 drivers/gpu/drm/i915/gvt/vgpu.c | 73
 drivers/gpu/drm/i915/i915_cmd_parser.c | 20
 drivers/gpu/drm/i915/i915_debugfs.c | 438
 drivers/gpu/drm/i915/i915_drv.c | 182
 drivers/gpu/drm/i915/i915_drv.h | 643
 drivers/gpu/drm/i915/i915_gem.c | 538
 drivers/gpu/drm/i915/i915_gem.h | 12
 drivers/gpu/drm/i915/i915_gem_clflush.c | 2
 drivers/gpu/drm/i915/i915_gem_context.c | 368
 drivers/gpu/drm/i915/i915_gem_context.h | 4
 drivers/gpu/drm/i915/i915_gem_evict.c | 4
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 85
 drivers/gpu/drm/i915/i915_gem_gtt.c | 192
 drivers/gpu/drm/i915/i915_gem_gtt.h | 19
 drivers/gpu/drm/i915/i915_gem_internal.c | 4
 drivers/gpu/drm/i915/i915_gem_object.h | 13
 drivers/gpu/drm/i915/i915_gem_render_state.c | 139
 drivers/gpu/drm/i915/i915_gem_render_state.h | 4
 drivers/gpu/drm/i915/i915_gem_request.c | 68
 drivers/gpu/drm/i915/i915_gem_request.h | 2
 drivers/gpu/drm/i915/i915_gem_shrinker.c | 138
 drivers/gpu/drm/i915/i915_gem_stolen.c | 323
 drivers/gpu/drm/i915/i915_gem_tiling.c | 19
 drivers/gpu/drm/i915/i915_gem_timeline.c | 11
 drivers/gpu/drm/i915/i915_gem_timeline.h | 2
 drivers/gpu/drm/i915/i915_gpu_error.c | 195
 drivers/gpu/drm/i915/i915_irq.c | 14
 drivers/gpu/drm/i915/i915_memcpy.c | 7
 drivers/gpu/drm/i915/i915_oa_bdw.c | 4
 drivers/gpu/drm/i915/i915_oa_bxt.c | 4
 drivers/gpu/drm/i915/i915_oa_cflgt2.c | 4
 drivers/gpu/drm/i915/i915_oa_cflgt3.c | 109
 drivers/gpu/drm/i915/i915_oa_cflgt3.h | 34
 drivers/gpu/drm/i915/i915_oa_chv.c | 4
 drivers/gpu/drm/i915/i915_oa_cnl.c | 121
 drivers/gpu/drm/i915/i915_oa_cnl.h | 34
 drivers/gpu/drm/i915/i915_oa_glk.c | 4
 drivers/gpu/drm/i915/i915_oa_hsw.c | 4
 drivers/gpu/drm/i915/i915_oa_kblgt2.c | 4
 drivers/gpu/drm/i915/i915_oa_kblgt3.c | 4
 drivers/gpu/drm/i915/i915_oa_sklgt2.c | 4
 drivers/gpu/drm/i915/i915_oa_sklgt3.c | 4
 drivers/gpu/drm/i915/i915_oa_sklgt4.c | 4
 drivers/gpu/drm/i915/i915_params.c | 59
 drivers/gpu/drm/i915/i915_params.h | 14
 drivers/gpu/drm/i915/i915_pci.c | 4
 drivers/gpu/drm/i915/i915_perf.c | 153
 drivers/gpu/drm/i915/i915_pmu.c | 865
 drivers/gpu/drm/i915/i915_pmu.h | 111
 drivers/gpu/drm/i915/i915_reg.h | 93
 drivers/gpu/drm/i915/i915_selftest.h | 2
 drivers/gpu/drm/i915/i915_sw_fence.c | 3
 drivers/gpu/drm/i915/i915_sysfs.c | 33
 drivers/gpu/drm/i915/i915_trace.h | 40
 drivers/gpu/drm/i915/i915_utils.h | 22
 drivers/gpu/drm/i915/i915_vma.c | 40
 drivers/gpu/drm/i915/i915_vma.h | 35
 drivers/gpu/drm/i915/intel_audio.c | 202
 drivers/gpu/drm/i915/intel_bios.c | 24
 drivers/gpu/drm/i915/intel_breadcrumbs.c | 105
 drivers/gpu/drm/i915/intel_cdclk.c | 393
 drivers/gpu/drm/i915/intel_color.c | 4
 drivers/gpu/drm/i915/intel_crt.c | 46
 drivers/gpu/drm/i915/intel_csr.c | 13
 drivers/gpu/drm/i915/intel_ddi.c | 347
 drivers/gpu/drm/i915/intel_device_info.c | 200
 drivers/gpu/drm/i915/intel_device_info.h | 183
 drivers/gpu/drm/i915/intel_display.c | 1001
 drivers/gpu/drm/i915/intel_display.h | 321
 drivers/gpu/drm/i915/intel_dp.c | 594
 drivers/gpu/drm/i915/intel_dp_mst.c | 79
 drivers/gpu/drm/i915/intel_dpio_phy.c | 122
 drivers/gpu/drm/i915/intel_dpll_mgr.c | 107
 drivers/gpu/drm/i915/intel_drv.h | 80
 drivers/gpu/drm/i915/intel_dsi.c | 47
 drivers/gpu/drm/i915/intel_dvo.c | 2
 drivers/gpu/drm/i915/intel_engine_cs.c | 475
 drivers/gpu/drm/i915/intel_fbc.c | 48
 drivers/gpu/drm/i915/intel_fbdev.c | 3
 drivers/gpu/drm/i915/intel_guc.c | 152
 drivers/gpu/drm/i915/intel_guc.h | 21
 drivers/gpu/drm/i915/intel_guc_ct.c | 1
 drivers/gpu/drm/i915/intel_guc_fw.c | 245
 drivers/gpu/drm/i915/intel_guc_fw.h | 2
 drivers/gpu/drm/i915/intel_guc_fwif.h | 40
 drivers/gpu/drm/i915/intel_guc_log.c | 29
 drivers/gpu/drm/i915/intel_guc_reg.h (renamed from drivers/gpu/drm/i915/i915_guc_reg.h) | 14
 drivers/gpu/drm/i915/intel_guc_submission.c (renamed from drivers/gpu/drm/i915/i915_guc_submission.c) | 751
 drivers/gpu/drm/i915/intel_guc_submission.h (renamed from drivers/gpu/drm/i915/i915_guc_submission.h) | 17
 drivers/gpu/drm/i915/intel_gvt.c | 7
 drivers/gpu/drm/i915/intel_hangcheck.c | 61
 drivers/gpu/drm/i915/intel_hdmi.c | 114
 drivers/gpu/drm/i915/intel_huc.c | 83
 drivers/gpu/drm/i915/intel_huc.h | 6
 drivers/gpu/drm/i915/intel_i2c.c | 61
 drivers/gpu/drm/i915/intel_lrc.c | 283
 drivers/gpu/drm/i915/intel_lrc.h | 5
 drivers/gpu/drm/i915/intel_lvds.c | 2
 drivers/gpu/drm/i915/intel_modes.c | 1
 drivers/gpu/drm/i915/intel_opregion.c | 6
 drivers/gpu/drm/i915/intel_opregion.h | 106
 drivers/gpu/drm/i915/intel_overlay.c | 4
 drivers/gpu/drm/i915/intel_panel.c | 88
 drivers/gpu/drm/i915/intel_pipe_crc.c | 6
 drivers/gpu/drm/i915/intel_pm.c | 437
 drivers/gpu/drm/i915/intel_psr.c | 4
 drivers/gpu/drm/i915/intel_ringbuffer.c | 747
 drivers/gpu/drm/i915/intel_ringbuffer.h | 263
 drivers/gpu/drm/i915/intel_runtime_pm.c | 10
 drivers/gpu/drm/i915/intel_sdvo.c | 6
 drivers/gpu/drm/i915/intel_sprite.c | 13
 drivers/gpu/drm/i915/intel_tv.c | 6
 drivers/gpu/drm/i915/intel_uc.c | 267
 drivers/gpu/drm/i915/intel_uc.h | 23
 drivers/gpu/drm/i915/intel_uc_fw.c | 6
 drivers/gpu/drm/i915/intel_uc_fw.h | 7
 drivers/gpu/drm/i915/intel_uncore.c | 172
 drivers/gpu/drm/i915/intel_vbt_defs.h | 8
 drivers/gpu/drm/i915/selftests/huge_pages.c | 181
 drivers/gpu/drm/i915/selftests/i915_gem_coherency.c | 16
 drivers/gpu/drm/i915/selftests/i915_gem_context.c | 33
 drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 2
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 52
 drivers/gpu/drm/i915/selftests/i915_gem_object.c | 2
 drivers/gpu/drm/i915/selftests/i915_gem_request.c | 22
 drivers/gpu/drm/i915/selftests/i915_gem_timeline.c | 2
 drivers/gpu/drm/i915/selftests/i915_live_selftests.h | 1
 drivers/gpu/drm/i915/selftests/i915_random.c | 2
 drivers/gpu/drm/i915/selftests/i915_syncmap.c | 6
 drivers/gpu/drm/i915/selftests/i915_vma.c | 2
 drivers/gpu/drm/i915/selftests/intel_guc.c | 353
 drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 331
 drivers/gpu/drm/i915/selftests/intel_uncore.c | 11
 drivers/gpu/drm/i915/selftests/mock_engine.c | 41
 drivers/gpu/drm/i915/selftests/mock_gem_device.c | 14
 drivers/gpu/drm/i915/selftests/mock_gtt.c | 4
173 files changed, 13654 insertions(+), 7581 deletions(-)
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index aed7d207ea84..108d21f34777 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -18,6 +18,7 @@ config DRM_I915_WERROR
config DRM_I915_DEBUG
bool "Enable additional driver debugging"
depends on DRM_I915
+ select DEBUG_FS
select PREEMPT_COUNT
select I2C_CHARDEV
select DRM_DP_AUX_CHARDEV
@@ -49,6 +50,20 @@ config DRM_I915_DEBUG_GEM
If in doubt, say "N".
+config DRM_I915_TRACE_GEM
+ bool "Insert extra ftrace output from the GEM internals"
+ depends on DRM_I915_DEBUG_GEM
+ select TRACING
+ default n
+ help
+ Enable additional and verbose debugging output that will spam
+ ordinary tests, but may be vital for post-mortem debugging when
+ used with /proc/sys/kernel/ftrace_dump_on_oops
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_SW_FENCE_DEBUG_OBJECTS
bool "Enable additional driver debugging for fence objects"
depends on DRM_I915
@@ -90,6 +105,20 @@ config DRM_I915_SELFTEST
If in doubt, say "N".
+config DRM_I915_SELFTEST_BROKEN
+ bool "Enable broken and dangerous selftests"
+ depends on DRM_I915_SELFTEST
+ depends on BROKEN
+ default n
+ help
+ This option enables the execution of selftests that are "dangerous"
+ and may trigger unintended HW side-effects as they break strict
+ rules given in the HW specification. For science.
+
+ Recommended for masochistic driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_LOW_LEVEL_TRACEPOINTS
bool "Enable low level request tracing events"
depends on DRM_I915
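
The new DRM_I915_TRACE_GEM option above selects TRACING so that verbose GEM-internal output can be compiled out entirely in production builds. As a minimal sketch of the usual consumption pattern for such an option (the GEM_TRACE macro and call site below are illustrative; the series' own hooks are in the i915_gem.h changes, not shown in full here):

    #include <linux/kernel.h>

    /* Sketch only: compile-time gating of trace_printk() behind the new
     * Kconfig symbol. Messages land in the ftrace ring buffer and are
     * dumped on oops when /proc/sys/kernel/ftrace_dump_on_oops is set. */
    #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
    #define GEM_TRACE(...) trace_printk(__VA_ARGS__)
    #else
    #define GEM_TRACE(...) do { } while (0)
    #endif

    static void example_retire(u32 seqno)
    {
            GEM_TRACE("retiring request, seqno=%u\n", seqno);
    }
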
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 2acf3b3c5f9d..091aef281963 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,7 +3,26 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
+# Add a set of useful warning flags and enable -Werror for CI to prevent
+# trivial mistakes from creeping in. We have to do this piecemeal as we reject
+# any patch that isn't warning clean, so when turning on -Wall -Wextra (or
+# W=1) we need to filter out dubious warnings. Still it is in our interest
+# to keep running locally with W=1 C=1 until we are completely clean.
+#
+# Note the danger in using -Wall -Wextra is that when CI updates gcc we
+# will most likely get a sudden build breakage... Hopefully we will fix
+# new warnings before CI updates!
+subdir-ccflags-y := -Wall -Wextra
+subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
+subdir-ccflags-y += $(call cc-disable-warning, type-limits)
+subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
+subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
+subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
+
+# Fine grained warnings disable
+CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
+CFLAGS_intel_fbdev.o = $(call cc-disable-warning, override-init)
+
subdir-ccflags-y += \
$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
@@ -27,6 +46,7 @@ i915-y := i915_drv.o \
i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
+i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# GEM code
i915-y += i915_cmd_parser.o \
@@ -64,10 +84,10 @@ i915-y += intel_uc.o \
intel_uc_fw.o \
intel_guc.o \
intel_guc_ct.o \
- intel_guc_log.o \
intel_guc_fw.o \
- intel_huc.o \
- i915_guc_submission.o
+ intel_guc_log.o \
+ intel_guc_submission.o \
+ intel_huc.o
# autogenerated null render state
i915-y += intel_renderstate_gen6.o \
@@ -144,7 +164,9 @@ i915-y += i915_perf.o \
i915_oa_kblgt2.o \
i915_oa_kblgt3.o \
i915_oa_glk.o \
- i915_oa_cflgt2.o
+ i915_oa_cflgt2.o \
+ i915_oa_cflgt3.o \
+ i915_oa_cnl.o
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
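
For context on the warning list above: -Wextra enables -Wunused-parameter, which fires on the fixed-signature callbacks that kernel APIs are full of, hence its place on the disable list. A hedged illustration (example_notifier is hypothetical, not from this patch):

    #include <linux/notifier.h>

    /* 'nb' and 'data' are required by the notifier_call signature but
     * unused here; -Wunused-parameter would warn even though the code
     * is fine, which is why the i915 Makefile disables that warning. */
    static int example_notifier(struct notifier_block *nb,
                                unsigned long action, void *data)
    {
            return action ? NOTIFY_OK : NOTIFY_DONE;
    }
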
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index 2641ba510a61..347116faa558 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -2,7 +2,8 @@
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
- execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
+ execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
+ fb_decoder.o dmabuf.o
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 4ce2e6bd0680..97bfc00d2a82 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -335,7 +335,8 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
case INTEL_GVT_PCI_OPREGION:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
- ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
+ ret = intel_vgpu_opregion_base_write_handler(vgpu,
+ *(u32 *)p_data);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 49af94627c8a..c8454ac43fae 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -709,18 +709,13 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
print_opcode(cmd_val(s, 0), s->ring_id);
- /* print the whole page to trace */
- pr_err(" ip_va=%p: %08x %08x %08x %08x\n",
- s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
- cmd_val(s, 2), cmd_val(s, 3));
-
s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
while (cnt < 1024) {
- pr_err("ip_va=%p: ", s->ip_va);
+ gvt_dbg_cmd("ip_va=%p: ", s->ip_va);
for (i = 0; i < 8; i++)
- pr_err("%08x ", cmd_val(s, i));
- pr_err("\n");
+ gvt_dbg_cmd("%08x ", cmd_val(s, i));
+ gvt_dbg_cmd("\n");
s->ip_va += 8 * sizeof(u32);
cnt += 8;
@@ -825,11 +820,26 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
offset, data);
- return -EINVAL;
+ return -EPERM;
}
return 0;
}
+static inline bool is_mocs_mmio(unsigned int offset)
+{
+ return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
+ ((offset >= 0xb020) && (offset <= 0xb0a0));
+}
+
+static int mocs_cmd_reg_handler(struct parser_exec_state *s,
+ unsigned int offset, unsigned int index)
+{
+ if (!is_mocs_mmio(offset))
+ return -EINVAL;
+ vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1);
+ return 0;
+}
+
static int cmd_reg_handler(struct parser_exec_state *s,
unsigned int offset, unsigned int index, char *cmd)
{
@@ -839,7 +849,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
if (offset + 4 > gvt->device_info.mmio_size) {
gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
cmd, offset);
- return -EINVAL;
+ return -EFAULT;
}
if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
@@ -853,10 +863,14 @@ static int cmd_reg_handler(struct parser_exec_state *s,
return 0;
}
- if (is_force_nonpriv_mmio(offset) &&
- force_nonpriv_reg_handler(s, offset, index))
+ if (is_mocs_mmio(offset) &&
+ mocs_cmd_reg_handler(s, offset, index))
return -EINVAL;
+ if (is_force_nonpriv_mmio(offset) &&
+ force_nonpriv_reg_handler(s, offset, index))
+ return -EPERM;
+
if (offset == i915_mmio_reg_offset(DERRMR) ||
offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
@@ -894,11 +908,14 @@ static int cmd_handler_lri(struct parser_exec_state *s)
i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
- ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+ ret |= (cmd_reg_inhibit(s, i)) ?
+ -EBADRQC : 0;
}
if (ret)
break;
ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
+ if (ret)
+ break;
}
return ret;
}
@@ -912,11 +929,15 @@ static int cmd_handler_lrr(struct parser_exec_state *s)
if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
ret |= ((cmd_reg_inhibit(s, i) ||
(cmd_reg_inhibit(s, i + 1)))) ?
- -EINVAL : 0;
+ -EBADRQC : 0;
if (ret)
break;
ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
+ if (ret)
+ break;
ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
+ if (ret)
+ break;
}
return ret;
}
@@ -934,15 +955,19 @@ static int cmd_handler_lrm(struct parser_exec_state *s)
for (i = 1; i < cmd_len;) {
if (IS_BROADWELL(gvt->dev_priv))
- ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+ ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
if (ret)
break;
ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
+ if (ret)
+ break;
if (cmd_val(s, 0) & (1 << 22)) {
gma = cmd_gma(s, i + 1);
if (gmadr_bytes == 8)
gma |= (cmd_gma_hi(s, i + 2)) << 32;
ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+ if (ret)
+ break;
}
i += gmadr_dw_number(s) + 1;
}
@@ -958,11 +983,15 @@ static int cmd_handler_srm(struct parser_exec_state *s)
for (i = 1; i < cmd_len;) {
ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
+ if (ret)
+ break;
if (cmd_val(s, 0) & (1 << 22)) {
gma = cmd_gma(s, i + 1);
if (gmadr_bytes == 8)
gma |= (cmd_gma_hi(s, i + 2)) << 32;
ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+ if (ret)
+ break;
}
i += gmadr_dw_number(s) + 1;
}
@@ -1116,7 +1145,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
v = (dword0 & GENMASK(21, 19)) >> 19;
if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
- return -EINVAL;
+ return -EBADRQC;
info->pipe = gen8_plane_code[v].pipe;
info->plane = gen8_plane_code[v].plane;
@@ -1136,7 +1165,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
info->surf_reg = SPRSURF(info->pipe);
} else {
WARN_ON(1);
- return -EINVAL;
+ return -EBADRQC;
}
return 0;
}
@@ -1185,7 +1214,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
default:
gvt_vgpu_err("unknown plane code %d\n", plane);
- return -EINVAL;
+ return -EBADRQC;
}
info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
@@ -1210,13 +1239,13 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
return 0;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
- tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
+ stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
+ tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
} else {
- stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
+ stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) &
GENMASK(15, 6)) >> 6;
- tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
+ tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
}
if (stride != info->stride_val)
@@ -1235,21 +1264,21 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
struct intel_vgpu *vgpu = s->vgpu;
- set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
+ set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
+ set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
- set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
+ set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
info->tile_val << 10);
} else {
- set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
+ set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(15, 6),
info->stride_val << 6);
- set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
+ set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(10, 10),
info->tile_val << 10);
}
- vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, info->event);
return 0;
}
@@ -1348,10 +1377,13 @@ static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
{
unsigned long addr;
unsigned long gma_high, gma_low;
- int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ struct intel_vgpu *vgpu = s->vgpu;
+ int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd;
- if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
+ if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) {
+ gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes);
return INTEL_GVT_INVALID_ADDR;
+ }
gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
if (gmadr_bytes == 4) {
@@ -1374,16 +1406,16 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
if (op_size > max_surface_size) {
gvt_vgpu_err("command address audit fail name %s\n",
s->info->name);
- return -EINVAL;
+ return -EFAULT;
}
if (index_mode) {
- if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
- ret = -EINVAL;
+ if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+ ret = -EFAULT;
goto err;
}
} else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
- ret = -EINVAL;
+ ret = -EFAULT;
goto err;
}
@@ -1439,7 +1471,7 @@ static inline int unexpected_cmd(struct parser_exec_state *s)
gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
- return -EINVAL;
+ return -EBADRQC;
}
static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
@@ -1545,10 +1577,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
return -EFAULT;
}
- offset = gma & (GTT_PAGE_SIZE - 1);
+ offset = gma & (I915_GTT_PAGE_SIZE - 1);
- copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
- GTT_PAGE_SIZE - offset : end_gma - gma;
+ copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
+ I915_GTT_PAGE_SIZE - offset : end_gma - gma;
intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
@@ -1576,110 +1608,113 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
return 1;
}
-static int find_bb_size(struct parser_exec_state *s)
+static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
{
unsigned long gma = 0;
struct cmd_info *info;
- int bb_size = 0;
uint32_t cmd_len = 0;
- bool met_bb_end = false;
+ bool bb_end = false;
struct intel_vgpu *vgpu = s->vgpu;
u32 cmd;
+ *bb_size = 0;
+
/* get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
- cmd = cmd_val(s, 0);
+ if (gma == INTEL_GVT_INVALID_ADDR)
+ return -EFAULT;
+ cmd = cmd_val(s, 0);
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
- return -EINVAL;
+ return -EBADRQC;
}
do {
- copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
- gma, gma + 4, &cmd);
+ if (copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+ gma, gma + 4, &cmd) < 0)
+ return -EFAULT;
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
- return -EINVAL;
+ return -EBADRQC;
}
if (info->opcode == OP_MI_BATCH_BUFFER_END) {
- met_bb_end = true;
+ bb_end = true;
} else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
- if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
+ if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)
/* chained batch buffer */
- met_bb_end = true;
- }
+ bb_end = true;
}
cmd_len = get_cmd_length(info, cmd) << 2;
- bb_size += cmd_len;
+ *bb_size += cmd_len;
gma += cmd_len;
+ } while (!bb_end);
- } while (!met_bb_end);
-
- return bb_size;
+ return 0;
}
static int perform_bb_shadow(struct parser_exec_state *s)
{
- struct intel_shadow_bb_entry *entry_obj;
struct intel_vgpu *vgpu = s->vgpu;
+ struct intel_vgpu_shadow_bb *bb;
unsigned long gma = 0;
- int bb_size;
- void *dst = NULL;
+ unsigned long bb_size;
int ret = 0;
/* get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
+ if (gma == INTEL_GVT_INVALID_ADDR)
+ return -EFAULT;
- /* get the size of the batch buffer */
- bb_size = find_bb_size(s);
- if (bb_size < 0)
- return -EINVAL;
+ ret = find_bb_size(s, &bb_size);
+ if (ret)
+ return ret;
- /* allocate shadow batch buffer */
- entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
- if (entry_obj == NULL)
+ bb = kzalloc(sizeof(*bb), GFP_KERNEL);
+ if (!bb)
return -ENOMEM;
- entry_obj->obj =
- i915_gem_object_create(s->vgpu->gvt->dev_priv,
- roundup(bb_size, PAGE_SIZE));
- if (IS_ERR(entry_obj->obj)) {
- ret = PTR_ERR(entry_obj->obj);
- goto free_entry;
+ bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
+ roundup(bb_size, PAGE_SIZE));
+ if (IS_ERR(bb->obj)) {
+ ret = PTR_ERR(bb->obj);
+ goto err_free_bb;
}
- entry_obj->len = bb_size;
- INIT_LIST_HEAD(&entry_obj->list);
- dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
- if (IS_ERR(dst)) {
- ret = PTR_ERR(dst);
- goto put_obj;
- }
+ ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush);
+ if (ret)
+ goto err_free_obj;
- ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
- if (ret) {
- gvt_vgpu_err("failed to set shadow batch to CPU\n");
- goto unmap_src;
+ bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
+ if (IS_ERR(bb->va)) {
+ ret = PTR_ERR(bb->va);
+ goto err_finish_shmem_access;
}
- entry_obj->va = dst;
- entry_obj->bb_start_cmd_va = s->ip_va;
+ if (bb->clflush & CLFLUSH_BEFORE) {
+ drm_clflush_virt_range(bb->va, bb->obj->base.size);
+ bb->clflush &= ~CLFLUSH_BEFORE;
+ }
- /* copy batch buffer to shadow batch buffer*/
ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
gma, gma + bb_size,
- dst);
+ bb->va);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
- goto unmap_src;
+ ret = -EFAULT;
+ goto err_unmap;
}
- list_add(&entry_obj->list, &s->workload->shadow_bb);
+ INIT_LIST_HEAD(&bb->list);
+ list_add(&bb->list, &s->workload->shadow_bb);
+
+ bb->accessing = true;
+ bb->bb_start_cmd_va = s->ip_va;
+
/*
* ip_va saves the virtual address of the shadow batch buffer, while
* ip_gma saves the graphics address of the original batch buffer.
@@ -1688,17 +1723,17 @@ static int perform_bb_shadow(struct parser_exec_state *s)
* buffer's gma in pair. After all, we don't want to pin the shadow
* buffer here (too early).
*/
- s->ip_va = dst;
+ s->ip_va = bb->va;
s->ip_gma = gma;
-
return 0;
-
-unmap_src:
- i915_gem_object_unpin_map(entry_obj->obj);
-put_obj:
- i915_gem_object_put(entry_obj->obj);
-free_entry:
- kfree(entry_obj);
+err_unmap:
+ i915_gem_object_unpin_map(bb->obj);
+err_finish_shmem_access:
+ i915_gem_obj_finish_shmem_access(bb->obj);
+err_free_obj:
+ i915_gem_object_put(bb->obj);
+err_free_bb:
+ kfree(bb);
return ret;
}
@@ -1710,13 +1745,13 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
- return -EINVAL;
+ return -EFAULT;
}
second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
- return -EINVAL;
+ return -EFAULT;
}
s->saved_buf_addr_type = s->buf_addr_type;
@@ -1740,7 +1775,6 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
if (ret < 0)
return ret;
}
-
return ret;
}
@@ -2430,7 +2464,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (info == NULL) {
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
- return -EINVAL;
+ return -EBADRQC;
}
s->info = info;
@@ -2465,6 +2499,10 @@ static inline bool gma_out_of_range(unsigned long gma,
return (gma > gma_tail) && (gma < gma_head);
}
+/* Keep a consistent return type, e.g. -EBADRQC for an unknown
+ * cmd, -EFAULT for an invalid address, -EPERM for a nonpriv
+ * violation; these later serve as input to the VM health status.
+ */
static int command_scan(struct parser_exec_state *s,
unsigned long rb_head, unsigned long rb_tail,
unsigned long rb_start, unsigned long rb_len)
@@ -2487,7 +2525,7 @@ static int command_scan(struct parser_exec_state *s,
s->ip_gma, rb_start,
gma_bottom);
parser_exec_state_dump(s);
- return -EINVAL;
+ return -EFAULT;
}
if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
gvt_vgpu_err("ip_gma %lx out of range."
@@ -2516,7 +2554,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
int ret = 0;
/* ring base is page aligned */
- if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
+ if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
return -EINVAL;
gma_head = workload->rb_start + workload->rb_head;
@@ -2565,7 +2603,8 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx);
/* ring base is page aligned */
- if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
+ if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
+ I915_GTT_PAGE_SIZE)))
return -EINVAL;
ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
@@ -2604,6 +2643,7 @@ out:
static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
void *shadow_ring_buffer_va;
int ring_id = workload->ring_id;
@@ -2619,19 +2659,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
- if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
- void *va = vgpu->reserve_ring_buffer_va[ring_id];
+ if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
+ void *p;
+
/* realloc the new ring buffer if needed */
- vgpu->reserve_ring_buffer_va[ring_id] =
- krealloc(va, workload->rb_len, GFP_KERNEL);
- if (!vgpu->reserve_ring_buffer_va[ring_id]) {
- gvt_vgpu_err("fail to alloc reserve ring buffer\n");
+ p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
+ GFP_KERNEL);
+ if (!p) {
+ gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
return -ENOMEM;
}
- vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
+ s->ring_scan_buffer[ring_id] = p;
+ s->ring_scan_buffer_size[ring_id] = workload->rb_len;
}
- shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];
+ shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
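
A recurring theme in the cmd_parser.c hunks above is replacing blanket -EINVAL returns with distinct codes, per the comment added before command_scan(): -EBADRQC for an unknown command, -EFAULT for a bad address, -EPERM for a privilege violation. A sketch of how a consumer could fold these into a per-vGPU health decision (scan_error_is_guest_fault() is hypothetical; the actual consumer is the vGPU failsafe handling elsewhere in gvt):

    #include <linux/errno.h>
    #include <linux/types.h>

    /* Hypothetical consumer: classify a command-scan failure. */
    static bool scan_error_is_guest_fault(int err)
    {
            switch (err) {
            case -EBADRQC:  /* malformed or unknown command */
            case -EFAULT:   /* address outside audited ranges */
            case -EPERM:    /* touched a privileged register */
                    return true;   /* guest misbehaviour: enter failsafe */
            default:
                    return false;  /* e.g. -ENOMEM: host-side, not the guest */
            }
    }
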
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
index b0cff4dc2684..c6027125c1ec 100644
--- a/drivers/gpu/drm/i915/gvt/debug.h
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -25,41 +25,41 @@
#define __GVT_DEBUG_H__
#define gvt_err(fmt, args...) \
- DRM_ERROR("gvt: "fmt, ##args)
+ pr_err("gvt: "fmt, ##args)
#define gvt_vgpu_err(fmt, args...) \
do { \
if (IS_ERR_OR_NULL(vgpu)) \
- DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \
+ pr_err("gvt: "fmt, ##args); \
else \
- DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
+ pr_err("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
} while (0)
#define gvt_dbg_core(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
+ pr_debug("gvt: core: "fmt, ##args)
#define gvt_dbg_irq(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: irq: "fmt, ##args)
+ pr_debug("gvt: irq: "fmt, ##args)
#define gvt_dbg_mm(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: mm: "fmt, ##args)
+ pr_debug("gvt: mm: "fmt, ##args)
#define gvt_dbg_mmio(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: mmio: "fmt, ##args)
+ pr_debug("gvt: mmio: "fmt, ##args)
#define gvt_dbg_dpy(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: dpy: "fmt, ##args)
+ pr_debug("gvt: dpy: "fmt, ##args)
#define gvt_dbg_el(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args)
+ pr_debug("gvt: el: "fmt, ##args)
#define gvt_dbg_sched(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args)
+ pr_debug("gvt: sched: "fmt, ##args)
#define gvt_dbg_render(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args)
+ pr_debug("gvt: render: "fmt, ##args)
#define gvt_dbg_cmd(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)
+ pr_debug("gvt: cmd: "fmt, ##args)
#endif
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
new file mode 100644
index 000000000000..32a66dfdf112
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright(c) 2011-2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/debugfs.h>
+#include <linux/list_sort.h>
+#include "i915_drv.h"
+#include "gvt.h"
+
+struct mmio_diff_param {
+ struct intel_vgpu *vgpu;
+ int total;
+ int diff;
+ struct list_head diff_mmio_list;
+};
+
+struct diff_mmio {
+ struct list_head node;
+ u32 offset;
+ u32 preg;
+ u32 vreg;
+};
+
+/* Compare two diff_mmio items. */
+static int mmio_offset_compare(void *priv,
+ struct list_head *a, struct list_head *b)
+{
+ struct diff_mmio *ma;
+ struct diff_mmio *mb;
+
+ ma = container_of(a, struct diff_mmio, node);
+ mb = container_of(b, struct diff_mmio, node);
+ if (ma->offset < mb->offset)
+ return -1;
+ else if (ma->offset > mb->offset)
+ return 1;
+ return 0;
+}
+
+static inline int mmio_diff_handler(struct intel_gvt *gvt,
+ u32 offset, void *data)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct mmio_diff_param *param = data;
+ struct diff_mmio *node;
+ u32 preg, vreg;
+
+ preg = I915_READ_NOTRACE(_MMIO(offset));
+ vreg = vgpu_vreg(param->vgpu, offset);
+
+ if (preg != vreg) {
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ node->offset = offset;
+ node->preg = preg;
+ node->vreg = vreg;
+ list_add(&node->node, &param->diff_mmio_list);
+ param->diff++;
+ }
+ param->total++;
+ return 0;
+}
+
+/* Show all tracked MMIOs whose host and vGPU values differ. */
+static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
+{
+ struct intel_vgpu *vgpu = s->private;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct mmio_diff_param param = {
+ .vgpu = vgpu,
+ .total = 0,
+ .diff = 0,
+ };
+ struct diff_mmio *node, *next;
+
+ INIT_LIST_HEAD(&param.diff_mmio_list);
+
+ mutex_lock(&gvt->lock);
+ spin_lock_bh(&gvt->scheduler.mmio_context_lock);
+
+ mmio_hw_access_pre(gvt->dev_priv);
+ /* Collect all the differing MMIOs into the list. */
+ intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
+ mmio_hw_access_post(gvt->dev_priv);
+
+ spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
+ mutex_unlock(&gvt->lock);
+
+ /* Sort in ascending order by MMIO offset. */
+ list_sort(NULL, &param.diff_mmio_list, mmio_offset_compare);
+
+ seq_printf(s, "%-8s %-8s %-8s %-8s\n", "Offset", "HW", "vGPU", "Diff");
+ list_for_each_entry_safe(node, next, &param.diff_mmio_list, node) {
+ u32 diff = node->preg ^ node->vreg;
+
+ seq_printf(s, "%08x %08x %08x %*pbl\n",
+ node->offset, node->preg, node->vreg,
+ 32, &diff);
+ list_del(&node->node);
+ kfree(node);
+ }
+ seq_printf(s, "Total: %d, Diff: %d\n", param.total, param.diff);
+ return 0;
+}
+
+static int vgpu_mmio_diff_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vgpu_mmio_diff_show, inode->i_private);
+}
+
+static const struct file_operations vgpu_mmio_diff_fops = {
+ .open = vgpu_mmio_diff_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
+{
+ struct dentry *ent;
+ char name[10] = "";
+
+ sprintf(name, "vgpu%d", vgpu->id);
+ vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
+ if (!vgpu->debugfs)
+ return -ENOMEM;
+
+ ent = debugfs_create_bool("active", 0444, vgpu->debugfs,
+ &vgpu->active);
+ if (!ent)
+ return -ENOMEM;
+
+ ent = debugfs_create_file("mmio_diff", 0444, vgpu->debugfs,
+ vgpu, &vgpu_mmio_diff_fops);
+ if (!ent)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * intel_gvt_debugfs_remove_vgpu - remove debugfs entries of a vGPU
+ * @vgpu: a vGPU
+ */
+void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
+{
+ debugfs_remove_recursive(vgpu->debugfs);
+ vgpu->debugfs = NULL;
+}
+
+/**
+ * intel_gvt_debugfs_init - register gvt debugfs root entry
+ * @gvt: GVT device
+ *
+ * Returns:
+ * zero on success, negative if failed.
+ */
+int intel_gvt_debugfs_init(struct intel_gvt *gvt)
+{
+ struct drm_minor *minor = gvt->dev_priv->drm.primary;
+ struct dentry *ent;
+
+ gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
+ if (!gvt->debugfs_root) {
+ gvt_err("Cannot create debugfs dir\n");
+ return -ENOMEM;
+ }
+
+ ent = debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root,
+ &gvt->mmio.num_tracked_mmio);
+ if (!ent)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * intel_gvt_debugfs_clean - remove debugfs entries
+ * @gvt: GVT device
+ */
+void intel_gvt_debugfs_clean(struct intel_gvt *gvt)
+{
+ debugfs_remove_recursive(gvt->debugfs_root);
+ gvt->debugfs_root = NULL;
+}
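
The Diff column in vgpu_mmio_diff_show() uses the %*pbl printk extension, which renders a bitmap as a list of set bit positions, so the output directly names the bits that differ between the host register and the vGPU copy. A standalone sketch of the same formatting (show_diff_bits() is illustrative):

    #include <linux/seq_file.h>
    #include <linux/types.h>

    /* With preg=0x50 and vreg=0x10, diff=0x40, and "%*pbl" with a field
     * width of 32 (the number of valid bits) prints "6": bit 6 differs. */
    static void show_diff_bits(struct seq_file *s, u32 preg, u32 vreg)
    {
            u32 diff = preg ^ vreg;

            seq_printf(s, "%08x %08x %*pbl\n", preg, vreg, 32, &diff);
    }
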
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 309f3fa6794a..dd96ffc878ac 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -59,7 +59,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
+ if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
return 0;
if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
@@ -67,14 +67,14 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
return 1;
}
-static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
+int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
- if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
+ if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
return 1;
if (edp_pipe_is_enabled(vgpu) &&
@@ -169,105 +169,105 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
+ vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
+ vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
- vgpu_vreg(vgpu, SKL_FUSE_STATUS) |=
+ vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
SKL_FUSE_DOWNLOAD_STATUS |
SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
- vgpu_vreg(vgpu, LCPLL1_CTL) |=
+ vgpu_vreg_t(vgpu, LCPLL1_CTL) |=
LCPLL_PLL_ENABLE |
LCPLL_PLL_LOCK;
- vgpu_vreg(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
+ vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
- vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) &=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) &=
~PORT_CLK_SEL_MASK;
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) |=
PORT_CLK_SEL_LCPLL_810;
}
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) &=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) &=
~PORT_CLK_SEL_MASK;
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) |=
PORT_CLK_SEL_LCPLL_810;
}
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
- vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) &=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) &=
~PORT_CLK_SEL_MASK;
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) |=
PORT_CLK_SEL_LCPLL_810;
}
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
- vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
}
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
if (IS_BROADWELL(dev_priv))
- vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_PORT_DP_A_HOTPLUG;
else
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
}
/* Clear host CRT status, so guest couldn't detect this host CRT. */
if (IS_BROADWELL(dev_priv))
- vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+ vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
- vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
+ vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -369,12 +369,12 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
if (!pipe_is_enabled(vgpu, pipe))
continue;
- vgpu_vreg(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, event);
}
if (pipe_is_enabled(vgpu, pipe)) {
- vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
}
}
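
The bulk of the display.c churn is mechanical: reads and writes through typed i915_reg_t definitions (PIPECONF, SDEISR, DDI_BUF_CTL, ...) move from vgpu_vreg() to the new vgpu_vreg_t() accessor, leaving vgpu_vreg() for raw u32 offsets. Approximately, the split looks like this (a sketch; the real macros live in gvt.h, which is not shown in full in this section):

    /* vgpu_vreg_t() takes a typed register and resolves its offset;
     * vgpu_vreg() keeps taking a plain u32 offset. */
    #define vgpu_vreg_t(vgpu, reg) \
            (*(u32 *)((vgpu)->mmio.vreg + i915_mmio_reg_offset(reg)))
    #define vgpu_vreg(vgpu, offset) \
            (*(u32 *)((vgpu)->mmio.vreg + (offset)))
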
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index d73de22102e2..b46b86892d58 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -179,4 +179,6 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
+int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe);
+
#endif
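
pipe_is_enabled() loses its static qualifier and gains a declaration here, presumably for the new framebuffer-decoding code added in this series. Callers must respect its tri-state return: 1 for enabled, 0 for disabled, negative for an invalid pipe. A hypothetical caller sketch (decode_plane_on_pipe() is illustrative only):

    static int decode_plane_on_pipe(struct intel_vgpu *vgpu, int pipe)
    {
            int ret = pipe_is_enabled(vgpu, pipe);

            if (ret < 0)
                    return ret;      /* invalid pipe */
            if (!ret)
                    return -ENODEV;  /* pipe disabled: nothing to decode */
            /* ... read out the plane registers for this pipe ... */
            return 0;
    }
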
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
new file mode 100644
index 000000000000..2ab584f97dfb
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -0,0 +1,537 @@
+/*
+ * Copyright 2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Xiaoguang Chen
+ * Tina Zhang <tina.zhang@intel.com>
+ */
+
+#include <linux/dma-buf.h>
+#include <drm/drmP.h>
+#include <linux/vfio.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
+
+static int vgpu_gem_get_pages(
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int i, ret;
+ gen8_pte_t __iomem *gtt_entries;
+ struct intel_vgpu_fb_info *fb_info;
+
+ fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
+ if (WARN_ON(!fb_info))
+ return -ENODEV;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (unlikely(!st))
+ return -ENOMEM;
+
+ ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
+ if (ret) {
+ kfree(st);
+ return ret;
+ }
+ gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
+ (fb_info->start >> PAGE_SHIFT);
+ for_each_sg(st->sgl, sg, fb_info->size, i) {
+ sg->offset = 0;
+ sg->length = PAGE_SIZE;
+ sg_dma_address(sg) =
+ GEN8_DECODE_PTE(readq(&gtt_entries[i]));
+ sg_dma_len(sg) = PAGE_SIZE;
+ }
+
+ __i915_gem_object_set_pages(obj, st, PAGE_SIZE);
+
+ return 0;
+}
+
+static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+static void dmabuf_gem_object_free(struct kref *kref)
+{
+ struct intel_vgpu_dmabuf_obj *obj =
+ container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
+ struct intel_vgpu *vgpu = obj->vgpu;
+ struct list_head *pos;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+
+ if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
+ list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
+ dmabuf_obj = container_of(pos,
+ struct intel_vgpu_dmabuf_obj, list);
+ if (dmabuf_obj == obj) {
+ intel_gvt_hypervisor_put_vfio_device(vgpu);
+ idr_remove(&vgpu->object_idr,
+ dmabuf_obj->dmabuf_id);
+ kfree(dmabuf_obj->info);
+ kfree(dmabuf_obj);
+ list_del(pos);
+ break;
+ }
+ }
+ } else {
+ /* Free the orphan dmabuf_objs here */
+ kfree(obj->info);
+ kfree(obj);
+ }
+}
+
+
+static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
+{
+ kref_get(&obj->kref);
+}
+
+static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
+{
+ kref_put(&obj->kref, dmabuf_gem_object_free);
+}
+
+static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
+{
+
+ struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
+ struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
+ struct intel_vgpu *vgpu = obj->vgpu;
+
+ if (vgpu) {
+ mutex_lock(&vgpu->dmabuf_lock);
+ gem_obj->base.dma_buf = NULL;
+ dmabuf_obj_put(obj);
+ mutex_unlock(&vgpu->dmabuf_lock);
+ } else {
+ /* vgpu is NULL, as it has been removed already */
+ gem_obj->base.dma_buf = NULL;
+ dmabuf_obj_put(obj);
+ }
+}
+
+static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
+ .flags = I915_GEM_OBJECT_IS_PROXY,
+ .get_pages = vgpu_gem_get_pages,
+ .put_pages = vgpu_gem_put_pages,
+ .release = vgpu_gem_release,
+};
+
+static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
+ struct intel_vgpu_fb_info *info)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_alloc(dev_priv);
+ if (obj == NULL)
+ return NULL;
+
+ drm_gem_private_object_init(dev, &obj->base,
+ info->size << PAGE_SHIFT);
+ i915_gem_object_init(obj, &intel_vgpu_gem_ops);
+
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain = 0;
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ unsigned int tiling_mode = 0;
+ unsigned int stride = 0;
+
+ switch (info->drm_format_mod << 10) {
+ case PLANE_CTL_TILED_LINEAR:
+ tiling_mode = I915_TILING_NONE;
+ break;
+ case PLANE_CTL_TILED_X:
+ tiling_mode = I915_TILING_X;
+ stride = info->stride;
+ break;
+ case PLANE_CTL_TILED_Y:
+ tiling_mode = I915_TILING_Y;
+ stride = info->stride;
+ break;
+ default:
+ gvt_dbg_core("not supported tiling mode\n");
+ }
+ obj->tiling_and_stride = tiling_mode | stride;
+ } else {
+ obj->tiling_and_stride = info->drm_format_mod ?
+ I915_TILING_X : 0;
+ }
+
+ return obj;
+}
+
+static int vgpu_get_plane_info(struct drm_device *dev,
+ struct intel_vgpu *vgpu,
+ struct intel_vgpu_fb_info *info,
+ int plane_id)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_vgpu_primary_plane_format p;
+ struct intel_vgpu_cursor_plane_format c;
+ int ret;
+
+ if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
+ ret = intel_vgpu_decode_primary_plane(vgpu, &p);
+ if (ret)
+ return ret;
+ info->start = p.base;
+ info->start_gpa = p.base_gpa;
+ info->width = p.width;
+ info->height = p.height;
+ info->stride = p.stride;
+ info->drm_format = p.drm_format;
+ info->drm_format_mod = p.tiled;
+ info->size = (((p.stride * p.height * p.bpp) / 8) +
+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
+ ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
+ if (ret)
+ return ret;
+ info->start = c.base;
+ info->start_gpa = c.base_gpa;
+ info->width = c.width;
+ info->height = c.height;
+ info->stride = c.width * (c.bpp / 8);
+ info->drm_format = c.drm_format;
+ info->drm_format_mod = 0;
+ info->x_pos = c.x_pos;
+ info->y_pos = c.y_pos;
+
+ /* Deliver an invalid cursor hotspot value to the host until
+ * we find a way to get the cursor hotspot info from the
+ * guest OS.
+ */
+ info->x_hot = UINT_MAX;
+ info->y_hot = UINT_MAX;
+ info->size = (((info->stride * c.height * c.bpp) / 8)
+ + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ } else {
+ gvt_vgpu_err("invalid plane id:%d\n", plane_id);
+ return -EINVAL;
+ }
+
+ if (info->size == 0) {
+ gvt_vgpu_err("fb size is zero\n");
+ return -EINVAL;
+ }
+
+ if (info->start & (PAGE_SIZE - 1)) {
+ gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
+ return -EFAULT;
+ }
+ if (((info->start >> PAGE_SHIFT) + info->size) >
+ ggtt_total_entries(&dev_priv->ggtt)) {
+ gvt_vgpu_err("Invalid GTT offset or size\n");
+ return -EFAULT;
+ }
+
+ if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
+ gvt_vgpu_err("invalid gma addr\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
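
The size arithmetic above is the usual round-up-to-pages idiom: add
PAGE_SIZE - 1 to a byte count, then shift right by PAGE_SHIFT, leaving
info->size as a page count (vgpu_create_gem() shifts it back up into
bytes). The kernel helper DIV_ROUND_UP expresses the same thing; the
helper name below is illustrative, not from this patch:

    #include <linux/kernel.h>       /* DIV_ROUND_UP */
    #include <linux/mm.h>           /* PAGE_SIZE, PAGE_SHIFT */

    static inline unsigned long example_bytes_to_pages(unsigned long bytes)
    {
            /* same result as (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT */
            return DIV_ROUND_UP(bytes, PAGE_SIZE);
    }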
+
+static struct intel_vgpu_dmabuf_obj *
+pick_dmabuf_by_info(struct intel_vgpu *vgpu,
+ struct intel_vgpu_fb_info *latest_info)
+{
+ struct list_head *pos;
+ struct intel_vgpu_fb_info *fb_info;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
+ struct intel_vgpu_dmabuf_obj *ret = NULL;
+
+ list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
+ dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
+ list);
+ if ((dmabuf_obj == NULL) ||
+ (dmabuf_obj->info == NULL))
+ continue;
+
+ fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
+ if ((fb_info->start == latest_info->start) &&
+ (fb_info->start_gpa == latest_info->start_gpa) &&
+ (fb_info->size == latest_info->size) &&
+ (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
+ (fb_info->drm_format == latest_info->drm_format) &&
+ (fb_info->width == latest_info->width) &&
+ (fb_info->height == latest_info->height)) {
+ ret = dmabuf_obj;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static struct intel_vgpu_dmabuf_obj *
+pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
+{
+ struct list_head *pos;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
+ struct intel_vgpu_dmabuf_obj *ret = NULL;
+
+ list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
+ dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
+ list);
+ if (!dmabuf_obj)
+ continue;
+
+ if (dmabuf_obj->dmabuf_id == id) {
+ ret = dmabuf_obj;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
+ struct intel_vgpu_fb_info *fb_info)
+{
+ gvt_dmabuf->drm_format = fb_info->drm_format;
+ gvt_dmabuf->width = fb_info->width;
+ gvt_dmabuf->height = fb_info->height;
+ gvt_dmabuf->stride = fb_info->stride;
+ gvt_dmabuf->size = fb_info->size;
+ gvt_dmabuf->x_pos = fb_info->x_pos;
+ gvt_dmabuf->y_pos = fb_info->y_pos;
+ gvt_dmabuf->x_hot = fb_info->x_hot;
+ gvt_dmabuf->y_hot = fb_info->y_hot;
+}
+
+int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
+{
+ struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct vfio_device_gfx_plane_info *gfx_plane_info = args;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+ struct intel_vgpu_fb_info fb_info;
+ int ret = 0;
+
+ if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
+ VFIO_GFX_PLANE_TYPE_PROBE))
+ return ret;
+ else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
+ (!gfx_plane_info->flags))
+ return -EINVAL;
+
+ ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
+ gfx_plane_info->drm_plane_type);
+ if (ret != 0)
+ goto out;
+
+ mutex_lock(&vgpu->dmabuf_lock);
+ /* If exists, pick up the exposed dmabuf_obj */
+ dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
+ if (dmabuf_obj) {
+ update_fb_info(gfx_plane_info, &fb_info);
+ gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;
+
+ /* This buffer may be released between query_plane ioctl and
+ * get_dmabuf ioctl. Add the refcount to make sure it won't
+ * be released between the two ioctls.
+ */
+ if (!dmabuf_obj->initref) {
+ dmabuf_obj->initref = true;
+ dmabuf_obj_get(dmabuf_obj);
+ }
+ ret = 0;
+ gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
+ vgpu->id, kref_read(&dmabuf_obj->kref),
+ gfx_plane_info->dmabuf_id);
+ mutex_unlock(&vgpu->dmabuf_lock);
+ goto out;
+ }
+
+ mutex_unlock(&vgpu->dmabuf_lock);
+
+ /* Need to allocate a new one */
+ dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
+ if (unlikely(!dmabuf_obj)) {
+ gvt_vgpu_err("alloc dmabuf_obj failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
+ GFP_KERNEL);
+ if (unlikely(!dmabuf_obj->info)) {
+ gvt_vgpu_err("allocate intel vgpu fb info failed\n");
+ ret = -ENOMEM;
+ goto out_free_dmabuf;
+ }
+ memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));
+
+ ((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;
+
+ dmabuf_obj->vgpu = vgpu;
+
+ ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
+ if (ret < 0)
+ goto out_free_info;
+ gfx_plane_info->dmabuf_id = ret;
+ dmabuf_obj->dmabuf_id = ret;
+
+ dmabuf_obj->initref = true;
+
+ kref_init(&dmabuf_obj->kref);
+
+ mutex_lock(&vgpu->dmabuf_lock);
+ if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
+ gvt_vgpu_err("get vfio device failed\n");
+ mutex_unlock(&vgpu->dmabuf_lock);
+ goto out_free_info;
+ }
+ mutex_unlock(&vgpu->dmabuf_lock);
+
+ update_fb_info(gfx_plane_info, &fb_info);
+
+ INIT_LIST_HEAD(&dmabuf_obj->list);
+ mutex_lock(&vgpu->dmabuf_lock);
+ list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
+ mutex_unlock(&vgpu->dmabuf_lock);
+
+ gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
+ __func__, kref_read(&dmabuf_obj->kref), ret);
+
+ return 0;
+
+out_free_info:
+ kfree(dmabuf_obj->info);
+out_free_dmabuf:
+ kfree(dmabuf_obj);
+out:
+ /* ENODEV means plane isn't ready, which might be a normal case. */
+ return (ret == -ENODEV) ? 0 : ret;
+}
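
For context, this function backs the VFIO_DEVICE_QUERY_GFX_PLANE ioctl,
and the initref dance above keeps the dmabuf_obj alive between the
query and a later VFIO_DEVICE_GET_GFX_DMABUF call. A hedged userspace
sketch of the calling side, assuming an open VFIO device fd; the
plane-type constant is defined locally because the kernel's enum
drm_plane_type is not exported to userspace, and error handling is
trimmed:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    #define EXAMPLE_DRM_PLANE_TYPE_PRIMARY 1 /* matches enum drm_plane_type */

    static int example_query_primary_plane(int device_fd,
                    struct vfio_device_gfx_plane_info *info)
    {
            memset(info, 0, sizeof(*info));
            info->argsz = sizeof(*info);
            info->flags = VFIO_GFX_PLANE_TYPE_DMABUF;
            info->drm_plane_type = EXAMPLE_DRM_PLANE_TYPE_PRIMARY;

            return ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, info);
    }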
+
+/* To associate an exposed dmabuf with the dmabuf_obj */
+int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
+{
+ struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+ struct drm_i915_gem_object *obj;
+ struct dma_buf *dmabuf;
+ int dmabuf_fd;
+ int ret = 0;
+
+ mutex_lock(&vgpu->dmabuf_lock);
+
+ dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
+ if (dmabuf_obj == NULL) {
+ gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ obj = vgpu_create_gem(dev, dmabuf_obj->info);
+ if (obj == NULL) {
+ gvt_vgpu_err("create gvt gem obj failed:%d\n", vgpu->id);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ obj->gvt_info = dmabuf_obj->info;
+
+ dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
+ if (IS_ERR(dmabuf)) {
+ gvt_vgpu_err("export dma-buf failed\n");
+ ret = PTR_ERR(dmabuf);
+ goto out_free_gem;
+ }
+ obj->base.dma_buf = dmabuf;
+
+ i915_gem_object_put(obj);
+
+ ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
+ if (ret < 0) {
+ gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
+ goto out_free_dmabuf;
+ }
+ dmabuf_fd = ret;
+
+ dmabuf_obj_get(dmabuf_obj);
+
+ if (dmabuf_obj->initref) {
+ dmabuf_obj->initref = false;
+ dmabuf_obj_put(dmabuf_obj);
+ }
+
+ mutex_unlock(&vgpu->dmabuf_lock);
+
+ gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
+ " file count: %ld, GEM ref: %d\n",
+ vgpu->id, dmabuf_obj->dmabuf_id,
+ kref_read(&dmabuf_obj->kref),
+ dmabuf_fd,
+ file_count(dmabuf->file),
+ kref_read(&obj->base.refcount));
+
+ return dmabuf_fd;
+
+out_free_dmabuf:
+ dma_buf_put(dmabuf);
+out_free_gem:
+ i915_gem_object_put(obj);
+out:
+ mutex_unlock(&vgpu->dmabuf_lock);
+ return ret;
+}
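
A hedged follow-up to the query sketch earlier: userspace exchanges the
dmabuf_id returned by the query for a dma-buf file descriptor, which is
the ioctl's return value on success:

    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    static int example_get_gfx_dmabuf(int device_fd, __u32 dmabuf_id)
    {
            /* on success returns a dma-buf fd; -1 with errno on failure */
            return ioctl(device_fd, VFIO_DEVICE_GET_GFX_DMABUF, &dmabuf_id);
    }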
+
+void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+
+ mutex_lock(&vgpu->dmabuf_lock);
+ list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
+ dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
+ list);
+ dmabuf_obj->vgpu = NULL;
+
+ idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
+ intel_gvt_hypervisor_put_vfio_device(vgpu);
+ list_del(pos);
+
+ /* dmabuf_obj might be freed in dmabuf_obj_put */
+ if (dmabuf_obj->initref) {
+ dmabuf_obj->initref = false;
+ dmabuf_obj_put(dmabuf_obj);
+ }
+
+ }
+ mutex_unlock(&vgpu->dmabuf_lock);
+}
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.h b/drivers/gpu/drm/i915/gvt/dmabuf.h
new file mode 100644
index 000000000000..5f8f03fb1d1b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Xiaoguang Chen
+ * Tina Zhang <tina.zhang@intel.com>
+ */
+
+#ifndef _GVT_DMABUF_H_
+#define _GVT_DMABUF_H_
+#include <linux/vfio.h>
+
+struct intel_vgpu_fb_info {
+ __u64 start;
+ __u64 start_gpa;
+ __u64 drm_format_mod;
+ __u32 drm_format; /* drm format of plane */
+ __u32 width; /* width of plane */
+ __u32 height; /* height of plane */
+ __u32 stride; /* stride of plane */
+ __u32 size; /* size of plane in pages, rounded up to a page boundary */
+ __u32 x_pos; /* horizontal position of cursor plane */
+ __u32 y_pos; /* vertical position of cursor plane */
+ __u32 x_hot; /* horizontal position of cursor hotspot */
+ __u32 y_hot; /* vertical position of cursor hotspot */
+ struct intel_vgpu_dmabuf_obj *obj;
+};
+
+/**
+ * struct intel_vgpu_dmabuf_obj - Intel vGPU device buffer object
+ */
+struct intel_vgpu_dmabuf_obj {
+ struct intel_vgpu *vgpu;
+ struct intel_vgpu_fb_info *info;
+ __u32 dmabuf_id;
+ struct kref kref;
+ bool initref;
+ struct list_head list;
+};
+
+int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args);
+int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id);
+void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 42cd09ec63fa..f61337632969 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -95,9 +95,9 @@ static inline int get_port_from_gmbus0(u32 gmbus0)
static void reset_gmbus_controller(struct intel_vgpu *vgpu)
{
- vgpu_vreg(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
if (!vgpu->display.i2c_edid.edid_available)
- vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
}
@@ -123,16 +123,16 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
vgpu->display.i2c_edid.state = I2C_GMBUS;
vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
- vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
- vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
if (intel_vgpu_has_monitor_on_port(vgpu, port) &&
!intel_vgpu_port_is_dp(vgpu, port)) {
vgpu->display.i2c_edid.port = port;
vgpu->display.i2c_edid.edid_available = true;
- vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
} else
- vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
return 0;
}
@@ -159,8 +159,8 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
* 2) HW_RDY bit asserted
*/
if (wvalue & GMBUS_SW_CLR_INT) {
- vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
- vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
}
/* For virtualization, we suppose that HW is always ready,
@@ -208,7 +208,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
* visible in gmbus interface)
*/
i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
- vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
}
break;
case NIDX_NS_W:
@@ -220,7 +220,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
* START (-->INDEX) -->DATA
*/
i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
- vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
break;
default:
gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
@@ -256,7 +256,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
u32 reg_data = 0;
/* Data can only be received if the previous settings are correct */
- if (vgpu_vreg(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
+ if (vgpu_vreg_t(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
if (byte_left <= 0) {
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 940cdaaa3f24..769c1c24ae75 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -46,8 +46,6 @@
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
((a)->lrca == (b)->lrca))
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
-
static int context_switch_events[] = {
[RCS] = RCS_AS_CONTEXT_SWITCH,
[BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -135,6 +133,8 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 write_pointer;
u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
+ unsigned long hwsp_gpa;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
_EL_OFFSET_STATUS_PTR);
@@ -160,6 +160,20 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
ctx_status_ptr.write_ptr = write_pointer;
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
+ /* Update the CSB and CSB write pointer in HWSP */
+ hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ vgpu->hws_pga[ring_id]);
+ if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
+ intel_gvt_hypervisor_write_gpa(vgpu,
+ hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 +
+ write_pointer * 8,
+ status, 8);
+ intel_gvt_hypervisor_write_gpa(vgpu,
+ hwsp_gpa +
+ intel_hws_csb_write_index(dev_priv) * 4,
+ &write_pointer, 4);
+ }
+
gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
vgpu->id, write_pointer, offset, status->ldw, status->udw);
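
The guest-physical addresses computed above follow the hardware status
page layout: each context-status-buffer entry is two dwords (8 bytes)
starting at dword index I915_HWS_CSB_BUF0_INDEX, and the CSB write
pointer occupies its own dword. A sketch of the same arithmetic, with
an illustrative helper name:

    /* byte address of CSB entry "wp" within the guest HWSP */
    static inline unsigned long example_csb_entry_gpa(unsigned long hwsp_gpa,
                    u32 wp)
    {
            return hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + wp * 8;
    }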
@@ -358,218 +372,47 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
return 0;
}
-static void free_workload(struct intel_vgpu_workload *workload)
-{
- intel_vgpu_unpin_mm(workload->shadow_mm);
- intel_gvt_mm_unreference(workload->shadow_mm);
- kmem_cache_free(workload->vgpu->workloads, workload);
-}
-
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
-static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
-{
- const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
- struct intel_shadow_bb_entry *entry_obj;
-
- /* pin the gem object to ggtt */
- list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
- struct i915_vma *vma;
-
- vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
- if (IS_ERR(vma)) {
- return PTR_ERR(vma);
- }
-
- /* FIXME: we are not tracking our pinned VMA leaving it
- * up to the core to fix up the stray pin_count upon
- * free.
- */
-
- /* update the relocate gma with shadow batch buffer*/
- entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
- if (gmadr_bytes == 8)
- entry_obj->bb_start_cmd_va[2] = 0;
- }
- return 0;
-}
-
-static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
-{
- struct intel_vgpu_workload *workload = container_of(wa_ctx,
- struct intel_vgpu_workload,
- wa_ctx);
- int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
- struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->engine[ring_id].state->obj;
- struct execlist_ring_context *shadow_ring_context;
- struct page *page;
-
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap_atomic(page);
-
- shadow_ring_context->bb_per_ctx_ptr.val =
- (shadow_ring_context->bb_per_ctx_ptr.val &
- (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
- shadow_ring_context->rcs_indirect_ctx.val =
- (shadow_ring_context->rcs_indirect_ctx.val &
- (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
-
- kunmap_atomic(shadow_ring_context);
- return 0;
-}
-
-static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
-{
- struct i915_vma *vma;
- unsigned char *per_ctx_va =
- (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
- wa_ctx->indirect_ctx.size;
-
- if (wa_ctx->indirect_ctx.size == 0)
- return 0;
-
- vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
- 0, CACHELINE_BYTES, 0);
- if (IS_ERR(vma)) {
- return PTR_ERR(vma);
- }
-
- /* FIXME: we are not tracking our pinned VMA leaving it
- * up to the core to fix up the stray pin_count upon
- * free.
- */
-
- wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
-
- wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
- memset(per_ctx_va, 0, CACHELINE_BYTES);
-
- update_wa_ctx_2_shadow_ctx(wa_ctx);
- return 0;
-}
-
-static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
-{
- /* release all the shadow batch buffer */
- if (!list_empty(&workload->shadow_bb)) {
- struct intel_shadow_bb_entry *entry_obj =
- list_first_entry(&workload->shadow_bb,
- struct intel_shadow_bb_entry,
- list);
- struct intel_shadow_bb_entry *temp;
-
- list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
- list) {
- i915_gem_object_unpin_map(entry_obj->obj);
- i915_gem_object_put(entry_obj->obj);
- list_del(&entry_obj->list);
- kfree(entry_obj);
- }
- }
-}
-
static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
struct execlist_ctx_descriptor_format ctx[2];
int ring_id = workload->ring_id;
int ret;
- ret = intel_vgpu_pin_mm(workload->shadow_mm);
- if (ret) {
- gvt_vgpu_err("fail to vgpu pin mm\n");
- goto out;
- }
-
- ret = intel_vgpu_sync_oos_pages(workload->vgpu);
- if (ret) {
- gvt_vgpu_err("fail to vgpu sync oos pages\n");
- goto err_unpin_mm;
- }
-
- ret = intel_vgpu_flush_post_shadow(workload->vgpu);
- if (ret) {
- gvt_vgpu_err("fail to flush post shadow\n");
- goto err_unpin_mm;
- }
-
- ret = intel_gvt_generate_request(workload);
- if (ret) {
- gvt_vgpu_err("fail to generate request\n");
- goto err_unpin_mm;
- }
-
- ret = prepare_shadow_batch_buffer(workload);
- if (ret) {
- gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
- goto err_unpin_mm;
- }
-
- ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
- if (ret) {
- gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
- goto err_shadow_batch;
- }
-
if (!workload->emulate_schedule_in)
return 0;
- ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
- ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+ ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+ ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
- ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
- if (!ret)
- goto out;
- else
+ ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
+ if (ret) {
gvt_vgpu_err("fail to emulate execlist schedule in\n");
-
- release_shadow_wa_ctx(&workload->wa_ctx);
-err_shadow_batch:
- release_shadow_batch_buffer(workload);
-err_unpin_mm:
- intel_vgpu_unpin_mm(workload->shadow_mm);
-out:
- return ret;
+ return ret;
+ }
+ return 0;
}
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
int ring_id = workload->ring_id;
- struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
struct intel_vgpu_workload *next_workload;
struct list_head *next = workload_q_head(vgpu, ring_id)->next;
bool lite_restore = false;
- int ret;
+ int ret = 0;
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
- if (!workload->status) {
- release_shadow_batch_buffer(workload);
- release_shadow_wa_ctx(&workload->wa_ctx);
- }
-
- if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
- /* If workload->status is not successful, the GPU has hung or
- * something went wrong in i915/GVT, and GVT won't inject a
- * context switch interrupt to the guest. To the guest, this
- * error is effectively a vGPU hang, so we should emulate one.
- * If there are pending workloads already submitted by the
- * guest, we should clean them up the way real hardware does.
- *
- * If we are in the middle of an engine reset, the pending
- * workloads won't be submitted to the GPU and will be cleaned
- * up later during the reset, so doing the workload cleanup
- * here has no impact.
- */
- clean_workloads(vgpu, ENGINE_MASK(ring_id));
+ if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
goto out;
- }
if (!list_empty(workload_q_head(vgpu, ring_id))) {
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
@@ -584,213 +427,60 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
if (lite_restore) {
gvt_dbg_el("next context == current - no schedule-out\n");
- free_workload(workload);
- return 0;
+ goto out;
}
ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
- if (ret)
- goto err;
out:
- free_workload(workload);
- return 0;
-err:
- free_workload(workload);
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+ intel_vgpu_destroy_workload(workload);
return ret;
}
-#define RING_CTX_OFF(x) \
- offsetof(struct execlist_ring_context, x)
-
-static void read_guest_pdps(struct intel_vgpu *vgpu,
- u64 ring_context_gpa, u32 pdp[8])
-{
- u64 gpa;
- int i;
-
- gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
-
- for (i = 0; i < 8; i++)
- intel_gvt_hypervisor_read_gpa(vgpu,
- gpa + i * 8, &pdp[7 - i], 4);
-}
-
-static int prepare_mm(struct intel_vgpu_workload *workload)
-{
- struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
- struct intel_vgpu_mm *mm;
- struct intel_vgpu *vgpu = workload->vgpu;
- int page_table_level;
- u32 pdp[8];
-
- if (desc->addressing_mode == 1) { /* legacy 32-bit */
- page_table_level = 3;
- } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
- page_table_level = 4;
- } else {
- gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
- return -EINVAL;
- }
-
- read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
-
- mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
- if (mm) {
- intel_gvt_mm_reference(mm);
- } else {
-
- mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
- pdp, page_table_level, 0);
- if (IS_ERR(mm)) {
- gvt_vgpu_err("fail to create mm object.\n");
- return PTR_ERR(mm);
- }
- }
- workload->shadow_mm = mm;
- return 0;
-}
-
-#define get_last_workload(q) \
- (list_empty(q) ? NULL : container_of(q->prev, \
- struct intel_vgpu_workload, list))
-
static int submit_context(struct intel_vgpu *vgpu, int ring_id,
struct execlist_ctx_descriptor_format *desc,
bool emulate_schedule_in)
{
- struct list_head *q = workload_q_head(vgpu, ring_id);
- struct intel_vgpu_workload *last_workload = get_last_workload(q);
+ struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_workload *workload = NULL;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u64 ring_context_gpa;
- u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
- int ret;
-
- ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
- (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
- if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
- return -EINVAL;
- }
-
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
- RING_CTX_OFF(ring_header.val), &head, 4);
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
- RING_CTX_OFF(ring_tail.val), &tail, 4);
+ workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
+ if (IS_ERR(workload))
+ return PTR_ERR(workload);
- head &= RB_HEAD_OFF_MASK;
- tail &= RB_TAIL_OFF_MASK;
-
- if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
- gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
- gvt_dbg_el("ctx head %x real head %lx\n", head,
- last_workload->rb_tail);
- /*
- * cannot use guest context head pointer here,
- * as it might not be updated at this time
- */
- head = last_workload->rb_tail;
- }
-
- gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
-
- workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
- if (!workload)
- return -ENOMEM;
-
- /* record some ring buffer register values for scan and shadow */
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
- RING_CTX_OFF(rb_start.val), &start, 4);
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
- RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
- RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
-
- INIT_LIST_HEAD(&workload->list);
- INIT_LIST_HEAD(&workload->shadow_bb);
-
- init_waitqueue_head(&workload->shadow_ctx_status_wq);
- atomic_set(&workload->shadow_ctx_active, 0);
-
- workload->vgpu = vgpu;
- workload->ring_id = ring_id;
- workload->ctx_desc = *desc;
- workload->ring_context_gpa = ring_context_gpa;
- workload->rb_head = head;
- workload->rb_tail = tail;
- workload->rb_start = start;
- workload->rb_ctl = ctl;
workload->prepare = prepare_execlist_workload;
workload->complete = complete_execlist_workload;
- workload->status = -EINPROGRESS;
workload->emulate_schedule_in = emulate_schedule_in;
- workload->shadowed = false;
-
- if (ring_id == RCS) {
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
- RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
- RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
-
- workload->wa_ctx.indirect_ctx.guest_gma =
- indirect_ctx & INDIRECT_CTX_ADDR_MASK;
- workload->wa_ctx.indirect_ctx.size =
- (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
- CACHELINE_BYTES;
- workload->wa_ctx.per_ctx.guest_gma =
- per_ctx & PER_CTX_ADDR_MASK;
- workload->wa_ctx.per_ctx.valid = per_ctx & 1;
- }
if (emulate_schedule_in)
- workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;
-
- gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
- workload, ring_id, head, tail, start, ctl);
+ workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
emulate_schedule_in);
- ret = prepare_mm(workload);
- if (ret) {
- kmem_cache_free(vgpu->workloads, workload);
- return ret;
- }
-
- /* Only scan and shadow the first workload in the queue
- * as there is only one pre-allocated buf-obj for shadow.
- */
- if (list_empty(workload_q_head(vgpu, ring_id))) {
- intel_runtime_pm_get(dev_priv);
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_gvt_scan_and_shadow_workload(workload);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
- }
-
- queue_workload(workload);
+ intel_vgpu_queue_workload(workload);
return 0;
}
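
The prepare/complete hooks assigned above define the workload's
lifecycle contract with the GVT scheduler: prepare runs before the
workload is handed to hardware, complete runs once it retires. A hedged
sketch of how a scheduler might drive the two callbacks (the helper
name is illustrative; the real dispatch lives in scheduler.c, which is
not part of this hunk):

    static int example_run_workload(struct intel_vgpu_workload *workload)
    {
            int ret = 0;

            if (workload->prepare)
                    ret = workload->prepare(workload);
            if (ret)
                    return ret;

            /* ... actual hardware submission happens here ... */

            if (workload->complete)
                    ret = workload->complete(workload);
            return ret;
    }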
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
{
- struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
- struct execlist_ctx_descriptor_format desc[2];
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct execlist_ctx_descriptor_format *desc[2];
int i, ret;
- desc[0] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
- desc[1] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
+ desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
+ desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
- if (!desc[0].valid) {
+ if (!desc[0]->valid) {
gvt_vgpu_err("invalid elsp submission, desc0 is invalid\n");
goto inv_desc;
}
for (i = 0; i < ARRAY_SIZE(desc); i++) {
- if (!desc[i].valid)
+ if (!desc[i]->valid)
continue;
- if (!desc[i].privilege_access) {
+ if (!desc[i]->privilege_access) {
gvt_vgpu_err("unexpected GGTT elsp submission\n");
goto inv_desc;
}
@@ -798,9 +488,9 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
/* submit workload */
for (i = 0; i < ARRAY_SIZE(desc); i++) {
- if (!desc[i].valid)
+ if (!desc[i]->valid)
continue;
- ret = submit_context(vgpu, ring_id, &desc[i], i == 0);
+ ret = submit_context(vgpu, ring_id, desc[i], i == 0);
if (ret) {
gvt_vgpu_err("failed to submit desc %d\n", i);
return ret;
@@ -811,13 +501,14 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
inv_desc:
gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n",
- desc[0].udw, desc[0].ldw, desc[1].udw, desc[1].ldw);
+ desc[0]->udw, desc[0]->ldw, desc[1]->udw, desc[1]->ldw);
return -EINVAL;
}
static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
{
- struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 ctx_status_ptr_reg;
@@ -837,91 +528,40 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine;
- struct intel_vgpu_workload *pos, *n;
- unsigned int tmp;
-
- /* free the unsubmitted workloads in the queues. */
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
- list_for_each_entry_safe(pos, n,
- &vgpu->workload_q_head[engine->id], list) {
- list_del_init(&pos->list);
- free_workload(pos);
- }
-
- clear_bit(engine->id, vgpu->shadow_ctx_desc_updated);
- }
-}
-
-void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
+static void clean_execlist(struct intel_vgpu *vgpu)
{
enum intel_engine_id i;
struct intel_engine_cs *engine;
- clean_workloads(vgpu, ALL_ENGINES);
- kmem_cache_destroy(vgpu->workloads);
-
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- kfree(vgpu->reserve_ring_buffer_va[i]);
- vgpu->reserve_ring_buffer_va[i] = NULL;
- vgpu->reserve_ring_buffer_size[i] = 0;
- }
+ struct intel_vgpu_submission *s = &vgpu->submission;
-}
-
-#define RESERVE_RING_BUFFER_SIZE ((1 * PAGE_SIZE)/8)
-int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
-{
- enum intel_engine_id i;
- struct intel_engine_cs *engine;
-
- /* each ring has a virtual execlist engine */
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- init_vgpu_execlist(vgpu, i);
- INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
- }
-
- vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
- sizeof(struct intel_vgpu_workload), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
-
- if (!vgpu->workloads)
- return -ENOMEM;
-
- /* each ring has a shadow ring buffer until vgpu destroyed */
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- vgpu->reserve_ring_buffer_va[i] =
- kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
- if (!vgpu->reserve_ring_buffer_va[i]) {
- gvt_vgpu_err("fail to alloc reserve ring buffer\n");
- goto out;
- }
- vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
+ kfree(s->ring_scan_buffer[i]);
+ s->ring_scan_buffer[i] = NULL;
+ s->ring_scan_buffer_size[i] = 0;
}
- return 0;
-out:
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- if (vgpu->reserve_ring_buffer_size[i]) {
- kfree(vgpu->reserve_ring_buffer_va[i]);
- vgpu->reserve_ring_buffer_va[i] = NULL;
- vgpu->reserve_ring_buffer_size[i] = 0;
- }
- }
- return -ENOMEM;
}
-void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
+static void reset_execlist(struct intel_vgpu *vgpu,
unsigned long engine_mask)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
unsigned int tmp;
- clean_workloads(vgpu, engine_mask);
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine->id);
}
+
+static int init_execlist(struct intel_vgpu *vgpu)
+{
+ reset_execlist(vgpu, ALL_ENGINES);
+ return 0;
+}
+
+const struct intel_vgpu_submission_ops intel_vgpu_execlist_submission_ops = {
+ .name = "execlist",
+ .init = init_execlist,
+ .reset = reset_execlist,
+ .clean = clean_execlist,
+};
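
This ops table makes execlist emulation one pluggable submission
backend among potentially several. A hedged sketch of how a backend
might be selected, assuming struct intel_vgpu_submission carries an ops
pointer (the helper name is illustrative, not from this hunk):

    static int example_select_submission(struct intel_vgpu *vgpu,
                    const struct intel_vgpu_submission_ops *ops)
    {
            int ret;

            ret = ops->init(vgpu);  /* signature per init_execlist() above */
            if (ret)
                    return ret;

            vgpu->submission.ops = ops;     /* assumed field */
            return 0;
    }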
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 7eced40a1e30..427e40e64d41 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -37,10 +37,6 @@
struct execlist_ctx_descriptor_format {
union {
- u32 udw;
- u32 context_id;
- };
- union {
u32 ldw;
struct {
u32 valid : 1;
@@ -54,6 +50,10 @@ struct execlist_ctx_descriptor_format {
u32 lrca : 20;
};
};
+ union {
+ u32 udw;
+ u32 context_id;
+ };
};
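
Moving the udw/context_id union after ldw makes the struct's in-memory
layout match the order in which the ELSP dwords are buffered, which is
what lets execlist.c overlay a descriptor directly onto the dword array
via get_desc_from_elsp_dwords(). A restatement of that cast as a
function, with an illustrative name:

    static struct execlist_ctx_descriptor_format *
    example_desc_from_dwords(u32 *elsp_data, int index)
    {
            /* two dwords per descriptor: ldw first, then udw */
            return (struct execlist_ctx_descriptor_format *)
                    &elsp_data[index * 2];
    }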
struct execlist_status_format {
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
new file mode 100644
index 000000000000..6b50fe78dc1b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -0,0 +1,514 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Bing Niu <bing.niu@intel.com>
+ * Xu Han <xu.han@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Xiaoguang Chen <xiaoguang.chen@intel.com>
+ * Yang Liu <yang2.liu@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#include <uapi/drm/drm_fourcc.h>
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define PRIMARY_FORMAT_NUM 16
+struct pixel_format {
+ int drm_format; /* Pixel format in DRM definition */
+ int bpp; /* Bits per pixel, 0 indicates invalid */
+ char *desc; /* The description */
+};
+
+static struct pixel_format bdw_pixel_formats[] = {
+ {DRM_FORMAT_C8, 8, "8-bit Indexed"},
+ {DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"},
+ {DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"},
+ {DRM_FORMAT_XBGR2101010, 32, "32-bit RGBX (2:10:10:10 MSB-X:B:G:R)"},
+
+ {DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"},
+ {DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"},
+
+ /* unsupported formats have bpp set to 0 */
+ {0, 0, NULL},
+};
+
+static struct pixel_format skl_pixel_formats[] = {
+ {DRM_FORMAT_YUYV, 16, "16-bit packed YUYV (8:8:8:8 MSB-V:Y2:U:Y1)"},
+ {DRM_FORMAT_UYVY, 16, "16-bit packed UYVY (8:8:8:8 MSB-Y2:V:Y1:U)"},
+ {DRM_FORMAT_YVYU, 16, "16-bit packed YVYU (8:8:8:8 MSB-U:Y2:V:Y1)"},
+ {DRM_FORMAT_VYUY, 16, "16-bit packed VYUY (8:8:8:8 MSB-Y2:U:Y1:V)"},
+
+ {DRM_FORMAT_C8, 8, "8-bit Indexed"},
+ {DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"},
+ {DRM_FORMAT_ABGR8888, 32, "32-bit RGBA (8:8:8:8 MSB-A:B:G:R)"},
+ {DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"},
+
+ {DRM_FORMAT_ARGB8888, 32, "32-bit BGRA (8:8:8:8 MSB-A:R:G:B)"},
+ {DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"},
+ {DRM_FORMAT_XBGR2101010, 32, "32-bit RGBX (2:10:10:10 MSB-X:B:G:R)"},
+ {DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"},
+
+ /* unsupported formats have bpp set to 0 */
+ {0, 0, NULL},
+};
+
+static int bdw_format_to_drm(int format)
+{
+ int bdw_pixel_formats_index = 6;
+
+ switch (format) {
+ case DISPPLANE_8BPP:
+ bdw_pixel_formats_index = 0;
+ break;
+ case DISPPLANE_BGRX565:
+ bdw_pixel_formats_index = 1;
+ break;
+ case DISPPLANE_BGRX888:
+ bdw_pixel_formats_index = 2;
+ break;
+ case DISPPLANE_RGBX101010:
+ bdw_pixel_formats_index = 3;
+ break;
+ case DISPPLANE_BGRX101010:
+ bdw_pixel_formats_index = 4;
+ break;
+ case DISPPLANE_RGBX888:
+ bdw_pixel_formats_index = 5;
+ break;
+
+ default:
+ break;
+ }
+
+ return bdw_pixel_formats_index;
+}
+
+static int skl_format_to_drm(int format, bool rgb_order, bool alpha,
+ int yuv_order)
+{
+ int skl_pixel_formats_index = 12;
+
+ switch (format) {
+ case PLANE_CTL_FORMAT_INDEXED:
+ skl_pixel_formats_index = 4;
+ break;
+ case PLANE_CTL_FORMAT_RGB_565:
+ skl_pixel_formats_index = 5;
+ break;
+ case PLANE_CTL_FORMAT_XRGB_8888:
+ if (rgb_order)
+ skl_pixel_formats_index = alpha ? 6 : 7;
+ else
+ skl_pixel_formats_index = alpha ? 8 : 9;
+ break;
+ case PLANE_CTL_FORMAT_XRGB_2101010:
+ skl_pixel_formats_index = rgb_order ? 10 : 11;
+ break;
+ case PLANE_CTL_FORMAT_YUV422:
+ skl_pixel_formats_index = yuv_order >> 16;
+ if (skl_pixel_formats_index > 3)
+ return -EINVAL;
+ break;
+
+ default:
+ break;
+ }
+
+ return skl_pixel_formats_index;
+}
+
+static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
+ u32 tiled, int stride_mask, int bpp)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
+ u32 stride = stride_reg;
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ switch (tiled) {
+ case PLANE_CTL_TILED_LINEAR:
+ stride = stride_reg * 64;
+ break;
+ case PLANE_CTL_TILED_X:
+ stride = stride_reg * 512;
+ break;
+ case PLANE_CTL_TILED_Y:
+ stride = stride_reg * 128;
+ break;
+ case PLANE_CTL_TILED_YF:
+ if (bpp == 8)
+ stride = stride_reg * 64;
+ else if (bpp == 16 || bpp == 32 || bpp == 64)
+ stride = stride_reg * 128;
+ else
+ gvt_dbg_core("skl: unsupported bpp:%d\n", bpp);
+ break;
+ default:
+ gvt_dbg_core("skl: unsupported tile format:%x\n",
+ tiled);
+ }
+ }
+
+ return stride;
+}
+
+static int get_active_pipe(struct intel_vgpu *vgpu)
+{
+ int i;
+
+ for (i = 0; i < I915_MAX_PIPES; i++)
+ if (pipe_is_enabled(vgpu, i))
+ break;
+
+ return i;
+}
+
+/**
+ * intel_vgpu_decode_primary_plane - Decode primary plane
+ * @vgpu: input vgpu
+ * @plane: primary plane to save decoded info
+ * This function is called to decode the primary plane.
+ *
+ * Returns:
+ * 0 on success, non-zero if failed.
+ */
+int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_primary_plane_format *plane)
+{
+ u32 val, fmt;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ int pipe;
+
+ pipe = get_active_pipe(vgpu);
+ if (pipe >= I915_MAX_PIPES)
+ return -ENODEV;
+
+ val = vgpu_vreg_t(vgpu, DSPCNTR(pipe));
+ plane->enabled = !!(val & DISPLAY_PLANE_ENABLE);
+ if (!plane->enabled)
+ return -ENODEV;
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
+ _PLANE_CTL_TILED_SHIFT;
+ fmt = skl_format_to_drm(
+ val & PLANE_CTL_FORMAT_MASK,
+ val & PLANE_CTL_ORDER_RGBX,
+ val & PLANE_CTL_ALPHA_MASK,
+ val & PLANE_CTL_YUV422_ORDER_MASK);
+
+ if (fmt >= ARRAY_SIZE(skl_pixel_formats)) {
+ gvt_vgpu_err("Out-of-bounds pixel format index\n");
+ return -EINVAL;
+ }
+
+ plane->bpp = skl_pixel_formats[fmt].bpp;
+ plane->drm_format = skl_pixel_formats[fmt].drm_format;
+ } else {
+ plane->tiled = !!(val & DISPPLANE_TILED);
+ fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
+ plane->bpp = bdw_pixel_formats[fmt].bpp;
+ plane->drm_format = bdw_pixel_formats[fmt].drm_format;
+ }
+
+ if (!plane->bpp) {
+ gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt);
+ return -EINVAL;
+ }
+
+ plane->hw_format = fmt;
+
+ plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
+ if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
+ if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
+ (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) ?
+ (_PRI_PLANE_STRIDE_MASK >> 6) :
+ _PRI_PLANE_STRIDE_MASK, plane->bpp);
+
+ plane->width = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
+ _PIPE_H_SRCSZ_SHIFT;
+ plane->width += 1;
+ plane->height = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) &
+ _PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT;
+ plane->height += 1; /* raw height is one minus the real value */
+
+ val = vgpu_vreg_t(vgpu, DSPTILEOFF(pipe));
+ plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >>
+ _PRI_PLANE_X_OFF_SHIFT;
+ plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >>
+ _PRI_PLANE_Y_OFF_SHIFT;
+
+ return 0;
+}
+
+#define CURSOR_FORMAT_NUM (1 << 6)
+struct cursor_mode_format {
+ int drm_format; /* Pixel format in DRM definition */
+ u8 bpp; /* Bits per pixel; 0 indicates invalid */
+ u32 width; /* In pixel */
+ u32 height; /* In lines */
+ char *desc; /* The description */
+};
+
+static struct cursor_mode_format cursor_pixel_formats[] = {
+ {DRM_FORMAT_ARGB8888, 32, 128, 128, "128x128 32bpp ARGB"},
+ {DRM_FORMAT_ARGB8888, 32, 256, 256, "256x256 32bpp ARGB"},
+ {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},
+ {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},
+
+ /* unsupported formats have bpp set to 0 */
+ {0, 0, 0, 0, NULL},
+};
+
+static int cursor_mode_to_drm(int mode)
+{
+ int cursor_pixel_formats_index = 4;
+
+ switch (mode) {
+ case CURSOR_MODE_128_ARGB_AX:
+ cursor_pixel_formats_index = 0;
+ break;
+ case CURSOR_MODE_256_ARGB_AX:
+ cursor_pixel_formats_index = 1;
+ break;
+ case CURSOR_MODE_64_ARGB_AX:
+ cursor_pixel_formats_index = 2;
+ break;
+ case CURSOR_MODE_64_32B_AX:
+ cursor_pixel_formats_index = 3;
+ break;
+
+ default:
+ break;
+ }
+
+ return cursor_pixel_formats_index;
+}
+
+/**
+ * intel_vgpu_decode_cursor_plane - Decode cursor plane
+ * @vgpu: input vgpu
+ * @plane: cursor plane to save decoded info
+ * This function is called to decode the cursor plane.
+ *
+ * Returns:
+ * 0 on success, non-zero if failed.
+ */
+int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_cursor_plane_format *plane)
+{
+ u32 val, mode, index;
+ u32 alpha_plane, alpha_force;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ int pipe;
+
+ pipe = get_active_pipe(vgpu);
+ if (pipe >= I915_MAX_PIPES)
+ return -ENODEV;
+
+ val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
+ mode = val & CURSOR_MODE;
+ plane->enabled = (mode != CURSOR_MODE_DISABLE);
+ if (!plane->enabled)
+ return -ENODEV;
+
+ index = cursor_mode_to_drm(mode);
+
+ if (!cursor_pixel_formats[index].bpp) {
+ gvt_vgpu_err("Non-supported cursor mode (0x%x)\n", mode);
+ return -EINVAL;
+ }
+ plane->mode = mode;
+ plane->bpp = cursor_pixel_formats[index].bpp;
+ plane->drm_format = cursor_pixel_formats[index].drm_format;
+ plane->width = cursor_pixel_formats[index].width;
+ plane->height = cursor_pixel_formats[index].height;
+
+ alpha_plane = (val & _CURSOR_ALPHA_PLANE_MASK) >>
+ _CURSOR_ALPHA_PLANE_SHIFT;
+ alpha_force = (val & _CURSOR_ALPHA_FORCE_MASK) >>
+ _CURSOR_ALPHA_FORCE_SHIFT;
+ if (alpha_plane || alpha_force)
+ gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n",
+ alpha_plane, alpha_force);
+
+ plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
+ if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
+ if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ val = vgpu_vreg_t(vgpu, CURPOS(pipe));
+ plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT;
+ plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT;
+ plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
+ plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;
+
+ return 0;
+}
+
+#define SPRITE_FORMAT_NUM (1 << 3)
+
+static struct pixel_format sprite_pixel_formats[SPRITE_FORMAT_NUM] = {
+ [0x0] = {DRM_FORMAT_YUV422, 16, "YUV 16-bit 4:2:2 packed"},
+ [0x1] = {DRM_FORMAT_XRGB2101010, 32, "RGB 32-bit 2:10:10:10"},
+ [0x2] = {DRM_FORMAT_XRGB8888, 32, "RGB 32-bit 8:8:8:8"},
+ [0x4] = {DRM_FORMAT_AYUV, 32,
+ "YUV 32-bit 4:4:4 packed (8:8:8:8 MSB-X:Y:U:V)"},
+};
+
+/**
+ * intel_vgpu_decode_sprite_plane - Decode sprite plane
+ * @vgpu: input vgpu
+ * @plane: sprite plane to save decoded info
+ * This function is called to decode the sprite plane.
+ *
+ * Returns:
+ * 0 on success, non-zero if failed.
+ */
+int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_sprite_plane_format *plane)
+{
+ u32 val, fmt;
+ u32 color_order, yuv_order;
+ int drm_format;
+ int pipe;
+
+ pipe = get_active_pipe(vgpu);
+ if (pipe >= I915_MAX_PIPES)
+ return -ENODEV;
+
+ val = vgpu_vreg_t(vgpu, SPRCTL(pipe));
+ plane->enabled = !!(val & SPRITE_ENABLE);
+ if (!plane->enabled)
+ return -ENODEV;
+
+ plane->tiled = !!(val & SPRITE_TILED);
+ color_order = !!(val & SPRITE_RGB_ORDER_RGBX);
+ yuv_order = (val & SPRITE_YUV_BYTE_ORDER_MASK) >>
+ _SPRITE_YUV_ORDER_SHIFT;
+
+ fmt = (val & SPRITE_PIXFORMAT_MASK) >> _SPRITE_FMT_SHIFT;
+ if (!sprite_pixel_formats[fmt].bpp) {
+ gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt);
+ return -EINVAL;
+ }
+ plane->hw_format = fmt;
+ plane->bpp = sprite_pixel_formats[fmt].bpp;
+ drm_format = sprite_pixel_formats[fmt].drm_format;
+
+ /* The RGB components of an RGBxxx buffer may be ordered RGB or
+ * BGR, depending on the state of the color_order field.
+ */
+ if (!color_order) {
+ if (drm_format == DRM_FORMAT_XRGB2101010)
+ drm_format = DRM_FORMAT_XBGR2101010;
+ else if (drm_format == DRM_FORMAT_XRGB8888)
+ drm_format = DRM_FORMAT_XBGR8888;
+ }
+
+ if (drm_format == DRM_FORMAT_YUV422) {
+ switch (yuv_order) {
+ case 0:
+ drm_format = DRM_FORMAT_YUYV;
+ break;
+ case 1:
+ drm_format = DRM_FORMAT_UYVY;
+ break;
+ case 2:
+ drm_format = DRM_FORMAT_YVYU;
+ break;
+ case 3:
+ drm_format = DRM_FORMAT_VYUY;
+ break;
+ default:
+ /* yuv_order has only 2 bits */
+ break;
+ }
+ }
+
+ plane->drm_format = drm_format;
+
+ plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
+ if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
+ if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ plane->stride = vgpu_vreg_t(vgpu, SPRSTRIDE(pipe)) &
+ _SPRITE_STRIDE_MASK;
+
+ val = vgpu_vreg_t(vgpu, SPRSIZE(pipe));
+ plane->height = (val & _SPRITE_SIZE_HEIGHT_MASK) >>
+ _SPRITE_SIZE_HEIGHT_SHIFT;
+ plane->width = (val & _SPRITE_SIZE_WIDTH_MASK) >>
+ _SPRITE_SIZE_WIDTH_SHIFT;
+ plane->height += 1; /* raw height is one minus the real value */
+ plane->width += 1; /* raw width is one minus the real value */
+
+ val = vgpu_vreg_t(vgpu, SPRPOS(pipe));
+ plane->x_pos = (val & _SPRITE_POS_X_MASK) >> _SPRITE_POS_X_SHIFT;
+ plane->y_pos = (val & _SPRITE_POS_Y_MASK) >> _SPRITE_POS_Y_SHIFT;
+
+ val = vgpu_vreg_t(vgpu, SPROFFSET(pipe));
+ plane->x_offset = (val & _SPRITE_OFFSET_START_X_MASK) >>
+ _SPRITE_OFFSET_START_X_SHIFT;
+ plane->y_offset = (val & _SPRITE_OFFSET_START_Y_MASK) >>
+ _SPRITE_OFFSET_START_Y_SHIFT;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
new file mode 100644
index 000000000000..cb055f3c81a2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Bing Niu <bing.niu@intel.com>
+ * Xu Han <xu.han@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Xiaoguang Chen <xiaoguang.chen@intel.com>
+ * Yang Liu <yang2.liu@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#ifndef _GVT_FB_DECODER_H_
+#define _GVT_FB_DECODER_H_
+
+#define _PLANE_CTL_FORMAT_SHIFT 24
+#define _PLANE_CTL_TILED_SHIFT 10
+#define _PIPE_V_SRCSZ_SHIFT 0
+#define _PIPE_V_SRCSZ_MASK (0xfff << _PIPE_V_SRCSZ_SHIFT)
+#define _PIPE_H_SRCSZ_SHIFT 16
+#define _PIPE_H_SRCSZ_MASK (0x1fff << _PIPE_H_SRCSZ_SHIFT)
+
+#define _PRI_PLANE_FMT_SHIFT 26
+#define _PRI_PLANE_STRIDE_MASK (0x3ff << 6)
+#define _PRI_PLANE_X_OFF_SHIFT 0
+#define _PRI_PLANE_X_OFF_MASK (0x1fff << _PRI_PLANE_X_OFF_SHIFT)
+#define _PRI_PLANE_Y_OFF_SHIFT 16
+#define _PRI_PLANE_Y_OFF_MASK (0xfff << _PRI_PLANE_Y_OFF_SHIFT)
+
+#define _CURSOR_MODE 0x3f
+#define _CURSOR_ALPHA_FORCE_SHIFT 8
+#define _CURSOR_ALPHA_FORCE_MASK (0x3 << _CURSOR_ALPHA_FORCE_SHIFT)
+#define _CURSOR_ALPHA_PLANE_SHIFT 10
+#define _CURSOR_ALPHA_PLANE_MASK (0x3 << _CURSOR_ALPHA_PLANE_SHIFT)
+#define _CURSOR_POS_X_SHIFT 0
+#define _CURSOR_POS_X_MASK (0x1fff << _CURSOR_POS_X_SHIFT)
+#define _CURSOR_SIGN_X_SHIFT 15
+#define _CURSOR_SIGN_X_MASK (1 << _CURSOR_SIGN_X_SHIFT)
+#define _CURSOR_POS_Y_SHIFT 16
+#define _CURSOR_POS_Y_MASK (0xfff << _CURSOR_POS_Y_SHIFT)
+#define _CURSOR_SIGN_Y_SHIFT 31
+#define _CURSOR_SIGN_Y_MASK (1 << _CURSOR_SIGN_Y_SHIFT)
+
+#define _SPRITE_FMT_SHIFT 25
+#define _SPRITE_COLOR_ORDER_SHIFT 20
+#define _SPRITE_YUV_ORDER_SHIFT 16
+#define _SPRITE_STRIDE_SHIFT 6
+#define _SPRITE_STRIDE_MASK (0x1ff << _SPRITE_STRIDE_SHIFT)
+#define _SPRITE_SIZE_WIDTH_SHIFT 0
+#define _SPRITE_SIZE_HEIGHT_SHIFT 16
+#define _SPRITE_SIZE_WIDTH_MASK (0x1fff << _SPRITE_SIZE_WIDTH_SHIFT)
+#define _SPRITE_SIZE_HEIGHT_MASK (0xfff << _SPRITE_SIZE_HEIGHT_SHIFT)
+#define _SPRITE_POS_X_SHIFT 0
+#define _SPRITE_POS_Y_SHIFT 16
+#define _SPRITE_POS_X_MASK (0x1fff << _SPRITE_POS_X_SHIFT)
+#define _SPRITE_POS_Y_MASK (0xfff << _SPRITE_POS_Y_SHIFT)
+#define _SPRITE_OFFSET_START_X_SHIFT 0
+#define _SPRITE_OFFSET_START_Y_SHIFT 16
+#define _SPRITE_OFFSET_START_X_MASK (0x1fff << _SPRITE_OFFSET_START_X_SHIFT)
+#define _SPRITE_OFFSET_START_Y_MASK (0xfff << _SPRITE_OFFSET_START_Y_SHIFT)
+
+enum GVT_FB_EVENT {
+ FB_MODE_SET_START = 1,
+ FB_MODE_SET_END,
+ FB_DISPLAY_FLIP,
+};
+
+enum DDI_PORT {
+ DDI_PORT_NONE = 0,
+ DDI_PORT_B = 1,
+ DDI_PORT_C = 2,
+ DDI_PORT_D = 3,
+ DDI_PORT_E = 4
+};
+
+struct intel_gvt;
+
+/* color space conversion and gamma correction are not included */
+struct intel_vgpu_primary_plane_format {
+ u8 enabled; /* plane is enabled */
+ u8 tiled; /* X-tiled */
+ u8 bpp; /* bits per pixel */
+ u32 hw_format; /* format field in the PRI_CTL register */
+ u32 drm_format; /* format in DRM definition */
+ u32 base; /* framebuffer base in graphics memory */
+ u64 base_gpa;
+ u32 x_offset; /* in pixels */
+ u32 y_offset; /* in lines */
+ u32 width; /* in pixels */
+ u32 height; /* in lines */
+ u32 stride; /* in bytes */
+};
+
+struct intel_vgpu_sprite_plane_format {
+ u8 enabled; /* plane is enabled */
+ u8 tiled; /* X-tiled */
+ u8 bpp; /* bits per pixel */
+ u32 hw_format; /* format field in the SPR_CTL register */
+ u32 drm_format; /* format in DRM definition */
+ u32 base; /* sprite base in graphics memory */
+ u64 base_gpa;
+ u32 x_pos; /* in pixels */
+ u32 y_pos; /* in lines */
+ u32 x_offset; /* in pixels */
+ u32 y_offset; /* in lines */
+ u32 width; /* in pixels */
+ u32 height; /* in lines */
+ u32 stride; /* in bytes */
+};
+
+struct intel_vgpu_cursor_plane_format {
+ u8 enabled;
+ u8 mode; /* cursor mode select */
+ u8 bpp; /* bits per pixel */
+ u32 drm_format; /* format in DRM definition */
+ u32 base; /* cursor base in graphics memory */
+ u64 base_gpa;
+ u32 x_pos; /* in pixels */
+ u32 y_pos; /* in lines */
+ u8 x_sign; /* X Position Sign */
+ u8 y_sign; /* Y Position Sign */
+ u32 width; /* in pixels */
+ u32 height; /* in lines */
+ u32 x_hot; /* in pixels */
+ u32 y_hot; /* in pixels */
+};
+
+struct intel_vgpu_pipe_format {
+ struct intel_vgpu_primary_plane_format primary;
+ struct intel_vgpu_sprite_plane_format sprite;
+ struct intel_vgpu_cursor_plane_format cursor;
+ enum DDI_PORT ddi_port; /* the DDI port that pipe is connected to */
+};
+
+struct intel_vgpu_fb_format {
+ struct intel_vgpu_pipe_format pipes[I915_MAX_PIPES];
+};
+
+int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_primary_plane_format *plane);
+int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_cursor_plane_format *plane);
+int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_sprite_plane_format *plane);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index a26c1705430e..a73e1d418c22 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -66,20 +66,23 @@ static struct bin_attribute firmware_attr = {
.mmap = NULL,
};
-static int expose_firmware_sysfs(struct intel_gvt *gvt)
+static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
struct drm_i915_private *dev_priv = gvt->dev_priv;
+
+ *(u32 *)(data + offset) = I915_READ_NOTRACE(_MMIO(offset));
+ return 0;
+}
+
+static int expose_firmware_sysfs(struct intel_gvt *gvt)
+{
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
- struct intel_gvt_mmio_info *e;
- struct gvt_mmio_block *block = gvt->mmio.mmio_block;
- int num = gvt->mmio.num_mmio_block;
struct gvt_firmware_header *h;
void *firmware;
void *p;
unsigned long size, crc32_start;
- int i, j;
- int ret;
+ int i, ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
firmware = vzalloc(size);
@@ -104,15 +107,8 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
p = firmware + h->mmio_offset;
- hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
- *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
-
- for (i = 0; i < num; i++, block++) {
- for (j = 0; j < block->size; j += 4)
- *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
- I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
- block->offset) + j));
- }
+ /* Take a snapshot of hw mmio registers. */
+ intel_gvt_for_each_tracked_mmio(gvt, mmio_snapshot_handler, p);
memcpy(gvt->firmware.mmio, p, info->mmio_size);
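
The firmware snapshot above stops iterating the MMIO hash table and block list by hand and instead hands a per-offset callback to intel_gvt_for_each_tracked_mmio(). Here is a standalone sketch of that callback-walker shape; the function names and the toy offset list are invented stand-ins for the real tracked set.

#include <stdint.h>
#include <stdio.h>

typedef int (*mmio_handler_t)(uint32_t offset, void *data);

/* Stand-in for intel_gvt_for_each_tracked_mmio(): the caller no longer
 * knows how tracked offsets are stored, it just supplies a handler. */
static int for_each_tracked_mmio(mmio_handler_t handler, void *data)
{
	static const uint32_t tracked[] = { 0x2000, 0x2028, 0x12198 };
	for (size_t i = 0; i < sizeof(tracked) / sizeof(tracked[0]); i++) {
		int ret = handler(tracked[i], data);
		if (ret)
			return ret;
	}
	return 0;
}

/* Toy snapshot handler: the real one does an MMIO read per offset. */
static int snapshot_handler(uint32_t offset, void *data)
{
	*(uint32_t *)((char *)data + offset) = offset;
	return 0;
}

int main(void)
{
	static uint32_t snapshot[0x20000 / 4];   /* 4-byte aligned store */
	int ret = for_each_tracked_mmio(snapshot_handler, snapshot);
	printf("snapshot[0x2028] = 0x%x\n", (unsigned)snapshot[0x2028 / 4]);
	return ret;
}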
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 64d67ff9bf08..a529d2bd393c 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -94,12 +94,12 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
u64 h_addr;
int ret;
- ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
+ ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
&h_addr);
if (ret)
return ret;
- *h_index = h_addr >> GTT_PAGE_SHIFT;
+ *h_index = h_addr >> I915_GTT_PAGE_SHIFT;
return 0;
}
@@ -109,12 +109,12 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
u64 g_addr;
int ret;
- ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
+ ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
&g_addr);
if (ret)
return ret;
- *g_index = g_addr >> GTT_PAGE_SHIFT;
+ *g_index = g_addr >> I915_GTT_PAGE_SHIFT;
return 0;
}
@@ -156,13 +156,15 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
struct gtt_type_table_entry {
int entry_type;
+ int pt_type;
int next_pt_type;
int pse_entry_type;
};
-#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
+#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
[type] = { \
.entry_type = e_type, \
+ .pt_type = cpt_type, \
.next_pt_type = npt_type, \
.pse_entry_type = pse_type, \
}
@@ -170,55 +172,68 @@ struct gtt_type_table_entry {
static struct gtt_type_table_entry gtt_type_table[] = {
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+ GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_INVALID,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_INVALID,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
GTT_TYPE_GGTT_PTE,
GTT_TYPE_INVALID,
+ GTT_TYPE_INVALID,
GTT_TYPE_INVALID),
};
@@ -227,6 +242,11 @@ static inline int get_next_pt_type(int type)
return gtt_type_table[type].next_pt_type;
}
+static inline int get_pt_type(int type)
+{
+ return gtt_type_table[type].pt_type;
+}
+
static inline int get_entry_type(int type)
{
return gtt_type_table[type].entry_type;
@@ -351,7 +371,7 @@ static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
return false;
e->type = get_entry_type(e->type);
- if (!(e->val64 & (1 << 7)))
+ if (!(e->val64 & BIT(7)))
return false;
e->type = get_pse_type(e->type);
@@ -369,12 +389,17 @@ static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
return (e->val64 != 0);
else
- return (e->val64 & (1 << 0));
+ return (e->val64 & BIT(0));
}
static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
- e->val64 &= ~(1 << 0);
+ e->val64 &= ~BIT(0);
+}
+
+static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
+{
+ e->val64 |= BIT(0);
}
/*
@@ -382,7 +407,7 @@ static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
*/
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
- unsigned long x = (gma >> GTT_PAGE_SHIFT);
+ unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);
trace_gma_index(__func__, gma, x);
return x;
@@ -406,6 +431,7 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
.get_entry = gtt_get_entry64,
.set_entry = gtt_set_entry64,
.clear_present = gtt_entry_clear_present,
+ .set_present = gtt_entry_set_present,
.test_present = gen8_gtt_test_present,
.test_pse = gen8_gtt_test_pse,
.get_pfn = gen8_gtt_get_pfn,
@@ -494,7 +520,7 @@ static inline int ppgtt_spt_get_entry(
return -EINVAL;
ret = ops->get_entry(page_table, e, index, guest,
- spt->guest_page.gfn << GTT_PAGE_SHIFT,
+ spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
spt->vgpu);
if (ret)
return ret;
@@ -516,7 +542,7 @@ static inline int ppgtt_spt_set_entry(
return -EINVAL;
return ops->set_entry(page_table, e, index, guest,
- spt->guest_page.gfn << GTT_PAGE_SHIFT,
+ spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
spt->vgpu);
}
@@ -537,88 +563,103 @@ static inline int ppgtt_spt_set_entry(
spt->shadow_page.type, e, index, false)
/**
- * intel_vgpu_init_guest_page - init a guest page data structure
+ * intel_vgpu_init_page_track - init a page track data structure
* @vgpu: a vGPU
- * @p: a guest page data structure
+ * @t: a page track data structure
* @gfn: guest memory page frame number
- * @handler: function will be called when target guest memory page has
+ * @handler: the function that will be called when the target guest memory page has
* been modified.
*
- * This function is called when user wants to track a guest memory page.
+ * This function is called when a user wants to prepare a page track data
+ * structure to track a guest memory page.
*
* Returns:
* Zero on success, negative error code if failed.
*/
-int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *p,
+int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
+ struct intel_vgpu_page_track *t,
unsigned long gfn,
int (*handler)(void *, u64, void *, int),
void *data)
{
- INIT_HLIST_NODE(&p->node);
+ INIT_HLIST_NODE(&t->node);
- p->writeprotection = false;
- p->gfn = gfn;
- p->handler = handler;
- p->data = data;
- p->oos_page = NULL;
- p->write_cnt = 0;
+ t->tracked = false;
+ t->gfn = gfn;
+ t->handler = handler;
+ t->data = data;
- hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
+ hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn);
return 0;
}
-static int detach_oos_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_oos_page *oos_page);
-
/**
- * intel_vgpu_clean_guest_page - release the resource owned by guest page data
- * structure
+ * intel_vgpu_clean_page_track - release a page track data structure
* @vgpu: a vGPU
- * @p: a tracked guest page
+ * @t: a page track data structure
*
- * This function is called when user tries to stop tracking a guest memory
- * page.
+ * This function is called before a user frees a page track data structure.
*/
-void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *p)
+void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
+ struct intel_vgpu_page_track *t)
{
- if (!hlist_unhashed(&p->node))
- hash_del(&p->node);
-
- if (p->oos_page)
- detach_oos_page(vgpu, p->oos_page);
+ if (!hlist_unhashed(&t->node))
+ hash_del(&t->node);
- if (p->writeprotection)
- intel_gvt_hypervisor_unset_wp_page(vgpu, p);
+ if (t->tracked)
+ intel_gvt_hypervisor_disable_page_track(vgpu, t);
}
/**
- * intel_vgpu_find_guest_page - find a guest page data structure by GFN.
+ * intel_vgpu_find_tracked_page - find a tracked guest page
* @vgpu: a vGPU
* @gfn: guest memory page frame number
*
- * This function is called when emulation logic wants to know if a trapped GFN
- * is a tracked guest page.
+ * This function is called when the emulation layer wants to figure out if a
+ * trapped GFN is a tracked guest page.
*
* Returns:
- * Pointer to guest page data structure, NULL if failed.
+ * Pointer to page track data structure, NULL if not found.
*/
-struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
+struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
struct intel_vgpu *vgpu, unsigned long gfn)
{
- struct intel_vgpu_guest_page *p;
+ struct intel_vgpu_page_track *t;
- hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
- p, node, gfn) {
- if (p->gfn == gfn)
- return p;
+ hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table,
+ t, node, gfn) {
+ if (t->gfn == gfn)
+ return t;
}
return NULL;
}
+static int init_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p,
+ unsigned long gfn,
+ int (*handler)(void *, u64, void *, int),
+ void *data)
+{
+ p->oos_page = NULL;
+ p->write_cnt = 0;
+
+ return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data);
+}
+
+static int detach_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page);
+
+static void clean_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p)
+{
+ if (p->oos_page)
+ detach_oos_page(vgpu, p->oos_page);
+
+ intel_vgpu_clean_page_track(vgpu, &p->track);
+}
+
static inline int init_shadow_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_shadow_page *p, int type)
+ struct intel_vgpu_shadow_page *p, int type, bool hash)
{
struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
dma_addr_t daddr;
@@ -634,8 +675,9 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
INIT_HLIST_NODE(&p->node);
- p->mfn = daddr >> GTT_PAGE_SHIFT;
- hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
+ p->mfn = daddr >> I915_GTT_PAGE_SHIFT;
+ if (hash)
+ hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
return 0;
}
@@ -644,7 +686,7 @@ static inline void clean_shadow_page(struct intel_vgpu *vgpu,
{
struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
- dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
+ dma_unmap_page(kdev, p->mfn << I915_GTT_PAGE_SHIFT, 4096,
PCI_DMA_BIDIRECTIONAL);
if (!hlist_unhashed(&p->node))
@@ -664,6 +706,9 @@ static inline struct intel_vgpu_shadow_page *find_shadow_page(
return NULL;
}
+#define page_track_to_guest_page(ptr) \
+ container_of(ptr, struct intel_vgpu_guest_page, track)
+
#define guest_page_to_ppgtt_spt(ptr) \
container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
@@ -697,7 +742,7 @@ static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
clean_shadow_page(spt->vgpu, &spt->shadow_page);
- intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
+ clean_guest_page(spt->vgpu, &spt->guest_page);
list_del_init(&spt->post_shadow_list);
free_spt(spt);
@@ -713,22 +758,24 @@ static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
}
-static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+static int ppgtt_handle_guest_write_page_table_bytes(
+ struct intel_vgpu_guest_page *gpt,
u64 pa, void *p_data, int bytes);
-static int ppgtt_write_protection_handler(void *gp, u64 pa,
+static int ppgtt_write_protection_handler(void *data, u64 pa,
void *p_data, int bytes)
{
- struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
+ struct intel_vgpu_page_track *t = data;
+ struct intel_vgpu_guest_page *p = page_track_to_guest_page(t);
int ret;
if (bytes != 4 && bytes != 8)
return -EINVAL;
- if (!gpt->writeprotection)
+ if (!t->tracked)
return -EINVAL;
- ret = ppgtt_handle_guest_write_page_table_bytes(gp,
+ ret = ppgtt_handle_guest_write_page_table_bytes(p,
pa, p_data, bytes);
if (ret)
return ret;
@@ -762,13 +809,13 @@ retry:
* TODO: guest page type may be different with shadow page type,
* when we support PSE page in future.
*/
- ret = init_shadow_page(vgpu, &spt->shadow_page, type);
+ ret = init_shadow_page(vgpu, &spt->shadow_page, type, true);
if (ret) {
gvt_vgpu_err("fail to initialize shadow page for spt\n");
goto err;
}
- ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
+ ret = init_guest_page(vgpu, &spt->guest_page,
gfn, ppgtt_write_protection_handler, NULL);
if (ret) {
gvt_vgpu_err("fail to initialize guest page for spt\n");
@@ -798,7 +845,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
#define pt_entries(spt) \
- (GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
+ (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
#define for_each_present_guest_entry(spt, e, i) \
for (i = 0; i < pt_entries(spt); i++) \
@@ -856,7 +903,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
int v = atomic_read(&spt->refcount);
trace_spt_change(spt->vgpu->id, "die", spt,
- spt->guest_page.gfn, spt->shadow_page.type);
+ spt->guest_page.track.gfn, spt->shadow_page.type);
trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
@@ -878,7 +925,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
}
release:
trace_spt_change(spt->vgpu->id, "release", spt,
- spt->guest_page.gfn, spt->shadow_page.type);
+ spt->guest_page.track.gfn, spt->shadow_page.type);
ppgtt_free_shadow_page(spt);
return 0;
fail:
@@ -895,6 +942,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s = NULL;
struct intel_vgpu_guest_page *g;
+ struct intel_vgpu_page_track *t;
int ret;
if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
@@ -902,8 +950,9 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
goto fail;
}
- g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
- if (g) {
+ t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we));
+ if (t) {
+ g = page_track_to_guest_page(t);
s = guest_page_to_ppgtt_spt(g);
ppgtt_get_shadow_page(s);
} else {
@@ -915,7 +964,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
goto fail;
}
- ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
+ ret = intel_gvt_hypervisor_enable_page_track(vgpu,
+ &s->guest_page.track);
if (ret)
goto fail;
@@ -923,7 +973,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
if (ret)
goto fail;
- trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
+ trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn,
s->shadow_page.type);
}
return s;
@@ -953,7 +1003,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
int ret;
trace_spt_change(spt->vgpu->id, "born", spt,
- spt->guest_page.gfn, spt->shadow_page.type);
+ spt->guest_page.track.gfn, spt->shadow_page.type);
if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
for_each_present_guest_entry(spt, &ge, i) {
@@ -1078,11 +1128,11 @@ static int sync_oos_page(struct intel_vgpu *vgpu,
old.type = new.type = get_entry_type(spt->guest_page_type);
old.val64 = new.val64 = 0;
- for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
- index++) {
+ for (index = 0; index < (I915_GTT_PAGE_SIZE >>
+ info->gtt_entry_size_shift); index++) {
ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
ops->get_entry(NULL, &new, index, true,
- oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);
+ oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu);
if (old.val64 == new.val64
&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
@@ -1132,8 +1182,9 @@ static int attach_oos_page(struct intel_vgpu *vgpu,
struct intel_gvt *gvt = vgpu->gvt;
int ret;
- ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
- oos_page->mem, GTT_PAGE_SIZE);
+ ret = intel_gvt_hypervisor_read_gpa(vgpu,
+ gpt->track.gfn << I915_GTT_PAGE_SHIFT,
+ oos_page->mem, I915_GTT_PAGE_SIZE);
if (ret)
return ret;
@@ -1152,7 +1203,7 @@ static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
{
int ret;
- ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
+ ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track);
if (ret)
return ret;
@@ -1200,7 +1251,7 @@ static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
- return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
+ return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track);
}
/**
@@ -1335,10 +1386,10 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
return 0;
}
-static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+static int ppgtt_handle_guest_write_page_table_bytes(
+ struct intel_vgpu_guest_page *gpt,
u64 pa, void *p_data, int bytes)
{
- struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
@@ -1418,7 +1469,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
mm->shadow_page_table = mem + mm->page_table_entry_size;
} else if (mm->type == INTEL_GVT_MM_GGTT) {
mm->page_table_entry_cnt =
- (gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
+ (gvt_ggtt_gm_sz(gvt) >> I915_GTT_PAGE_SHIFT);
mm->page_table_entry_size = mm->page_table_entry_cnt *
info->gtt_entry_size;
mem = vzalloc(mm->page_table_entry_size);
@@ -1740,8 +1791,8 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
gma_ops->gma_to_ggtt_pte_index(gma));
if (ret)
goto err;
- gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
- + (gma & ~GTT_PAGE_MASK);
+ gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
+ + (gma & ~I915_GTT_PAGE_MASK);
trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
return gpa;
@@ -1793,8 +1844,8 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
}
}
- gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
- + (gma & ~GTT_PAGE_MASK);
+ gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
+ + (gma & ~I915_GTT_PAGE_MASK);
trace_gma_translate(vgpu->id, "ppgtt", 0,
mm->page_table_level, gma, gpa);
@@ -1862,7 +1913,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
if (bytes != 4 && bytes != 8)
return -EINVAL;
- gma = g_gtt_index << GTT_PAGE_SHIFT;
+ gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
/* the VM may configure the whole GM space when ballooning is used */
if (!vgpu_gmadr_is_valid(vgpu, gma))
@@ -1881,11 +1932,11 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
* update the entry in this situation p2m will fail
 * setting the shadow entry to point to a scratch page
*/
- ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
+ ops->set_pfn(&m, gvt->gtt.scratch_mfn);
}
} else {
m = e;
- ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
+ ops->set_pfn(&m, gvt->gtt.scratch_mfn);
}
ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
@@ -1920,12 +1971,45 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return ret;
}
+int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ret = 0;
+
+ if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+ struct intel_vgpu_page_track *t;
+
+ mutex_lock(&gvt->lock);
+
+ t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+ if (t) {
+ if (unlikely(vgpu->failsafe)) {
+ /* remove write protection to prevent future traps */
+ intel_vgpu_clean_page_track(vgpu, t);
+ } else {
+ ret = t->handler(t, pa, p_data, bytes);
+ if (ret) {
+ gvt_err("guest page write error %d, "
+ "gfn 0x%lx, pa 0x%llx, "
+ "var 0x%x, len %d\n",
+ ret, t->gfn, pa,
+ *(u32 *)p_data, bytes);
+ }
+ }
+ }
+ mutex_unlock(&gvt->lock);
+ }
+ return ret;
+}
+
+
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
intel_gvt_gtt_type_t type)
{
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- int page_entry_num = GTT_PAGE_SIZE >>
+ int page_entry_num = I915_GTT_PAGE_SIZE >>
vgpu->gvt->device_info.gtt_entry_size_shift;
void *scratch_pt;
int i;
@@ -1949,7 +2033,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
return -ENOMEM;
}
gtt->scratch_pt[type].page_mfn =
- (unsigned long)(daddr >> GTT_PAGE_SHIFT);
+ (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
vgpu->id, type, gtt->scratch_pt[type].page_mfn);
@@ -1992,7 +2076,7 @@ static int release_scratch_page_tree(struct intel_vgpu *vgpu)
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
if (vgpu->gtt.scratch_pt[i].page != NULL) {
daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
- GTT_PAGE_SHIFT);
+ I915_GTT_PAGE_SHIFT);
dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
__free_page(vgpu->gtt.scratch_pt[i].page);
vgpu->gtt.scratch_pt[i].page = NULL;
@@ -2035,7 +2119,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_vgpu_mm *ggtt_mm;
- hash_init(gtt->guest_page_hash_table);
+ hash_init(gtt->tracked_guest_page_hash_table);
hash_init(gtt->shadow_page_hash_table);
INIT_LIST_HEAD(&gtt->mm_list_head);
@@ -2196,7 +2280,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level)
{
- u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+ u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
struct intel_vgpu_mm *mm;
if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
@@ -2231,7 +2315,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level)
{
- u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+ u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
struct intel_vgpu_mm *mm;
if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
@@ -2288,15 +2372,16 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
__free_page(virt_to_page(page));
return -ENOMEM;
}
- gvt->gtt.scratch_ggtt_page = virt_to_page(page);
- gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);
+
+ gvt->gtt.scratch_page = virt_to_page(page);
+ gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
if (enable_out_of_sync) {
ret = setup_spt_oos(gvt);
if (ret) {
gvt_err("fail to initialize SPT oos\n");
dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
- __free_page(gvt->gtt.scratch_ggtt_page);
+ __free_page(gvt->gtt.scratch_page);
return ret;
}
}
@@ -2315,12 +2400,12 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
struct device *dev = &gvt->dev_priv->drm.pdev->dev;
- dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
- GTT_PAGE_SHIFT);
+ dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
+ I915_GTT_PAGE_SHIFT);
dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
- __free_page(gvt->gtt.scratch_ggtt_page);
+ __free_page(gvt->gtt.scratch_page);
if (enable_out_of_sync)
clean_spt_oos(gvt);
@@ -2346,7 +2431,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
e.type = GTT_TYPE_GGTT_PTE;
- ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
+ ops->set_pfn(&e, gvt->gtt.scratch_mfn);
e.val64 |= _PAGE_PRESENT;
index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
@@ -2372,8 +2457,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
*/
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
- int i;
-
ppgtt_free_all_shadow_page(vgpu);
/* Shadow pages are only created when there is no page
@@ -2383,11 +2466,4 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
intel_vgpu_reset_ggtt(vgpu);
-
- /* clear scratch page for security */
- for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
- if (vgpu->gtt.scratch_pt[i].page != NULL)
- memset(page_address(vgpu->gtt.scratch_pt[i].page),
- 0, PAGE_SIZE);
- }
}
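
Most of the mechanical churn in this file is the GTT_PAGE_* to I915_GTT_PAGE_* rename, and every call site leans on the same 4KiB page arithmetic: shift by 12 to convert between addresses and frame numbers, mask to split a graphics address into page base and in-page offset. A standalone sketch of that round trip follows; the address is made up and the macros simply restate the header's definitions.

#include <stdint.h>
#include <stdio.h>

#define I915_GTT_PAGE_SHIFT 12
#define I915_GTT_PAGE_SIZE  (1UL << I915_GTT_PAGE_SHIFT)
#define I915_GTT_PAGE_MASK  (~(I915_GTT_PAGE_SIZE - 1))

int main(void)
{
	uint64_t gma = 0x12345678;                    /* made-up address */
	uint64_t index = gma >> I915_GTT_PAGE_SHIFT;  /* GGTT PTE index */
	uint64_t page  = gma & I915_GTT_PAGE_MASK;    /* page base */
	uint64_t off   = gma & ~I915_GTT_PAGE_MASK;   /* offset in page */

	printf("index=%llu page=0x%llx off=0x%llx\n",
	       (unsigned long long)index,
	       (unsigned long long)page,
	       (unsigned long long)off);
	return (page + off == gma) ? 0 : 1;   /* round trip must hold */
}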
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 30a4c8d16026..4cc13b5934f1 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -34,9 +34,8 @@
#ifndef _GVT_GTT_H_
#define _GVT_GTT_H_
-#define GTT_PAGE_SHIFT 12
-#define GTT_PAGE_SIZE (1UL << GTT_PAGE_SHIFT)
-#define GTT_PAGE_MASK (~(GTT_PAGE_SIZE-1))
+#define I915_GTT_PAGE_SHIFT 12
+#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1))
struct intel_vgpu_mm;
@@ -63,6 +62,7 @@ struct intel_gvt_gtt_pte_ops {
struct intel_vgpu *vgpu);
bool (*test_present)(struct intel_gvt_gtt_entry *e);
void (*clear_present)(struct intel_gvt_gtt_entry *e);
+ void (*set_present)(struct intel_gvt_gtt_entry *e);
bool (*test_pse)(struct intel_gvt_gtt_entry *e);
void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
@@ -86,8 +86,8 @@ struct intel_gvt_gtt {
struct list_head oos_page_free_list_head;
struct list_head mm_lru_list_head;
- struct page *scratch_ggtt_page;
- unsigned long scratch_ggtt_mfn;
+ struct page *scratch_page;
+ unsigned long scratch_mfn;
};
enum {
@@ -193,18 +193,16 @@ struct intel_vgpu_scratch_pt {
unsigned long page_mfn;
};
-
struct intel_vgpu_gtt {
struct intel_vgpu_mm *ggtt_mm;
unsigned long active_ppgtt_mm_bitmap;
struct list_head mm_list_head;
DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
- DECLARE_HASHTABLE(guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
- atomic_t n_write_protected_guest_page;
+ DECLARE_HASHTABLE(tracked_guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
+ atomic_t n_tracked_guest_page;
struct list_head oos_page_list_head;
struct list_head post_shadow_list_head;
struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
-
};
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
@@ -228,12 +226,16 @@ struct intel_vgpu_shadow_page {
unsigned long mfn;
};
-struct intel_vgpu_guest_page {
+struct intel_vgpu_page_track {
struct hlist_node node;
- bool writeprotection;
+ bool tracked;
unsigned long gfn;
int (*handler)(void *, u64, void *, int);
void *data;
+};
+
+struct intel_vgpu_guest_page {
+ struct intel_vgpu_page_track track;
unsigned long write_cnt;
struct intel_vgpu_oos_page *oos_page;
};
@@ -243,7 +245,7 @@ struct intel_vgpu_oos_page {
struct list_head list;
struct list_head vm_list;
int id;
- unsigned char mem[GTT_PAGE_SIZE];
+ unsigned char mem[I915_GTT_PAGE_SIZE];
};
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
@@ -258,22 +260,16 @@ struct intel_vgpu_ppgtt_spt {
struct list_head post_shadow_list;
};
-int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *guest_page,
+int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
+ struct intel_vgpu_page_track *t,
unsigned long gfn,
int (*handler)(void *gp, u64, void *, int),
void *data);
-void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *guest_page);
-
-int intel_vgpu_set_guest_page_writeprotection(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *guest_page);
+void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
+ struct intel_vgpu_page_track *t);
-void intel_vgpu_clear_guest_page_writeprotection(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *guest_page);
-
-struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
+struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
struct intel_vgpu *vgpu, unsigned long gfn);
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
@@ -312,4 +308,7 @@ int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
+int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
+ void *p_data, unsigned int bytes);
+
#endif /* _GVT_GTT_H_ */
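
The header split above embeds struct intel_vgpu_page_track inside struct intel_vgpu_guest_page, and gtt.c recovers the outer struct from the inner one via the page_track_to_guest_page() container_of wrapper. A standalone sketch of that embed-and-recover pattern, with simplified fields in place of the real ones:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct page_track {            /* the embedded tracking state */
	unsigned long gfn;
	int tracked;
};

struct guest_page {            /* the outer, PPGTT-specific wrapper */
	struct page_track track;
	unsigned long write_cnt;
};

int main(void)
{
	struct guest_page gp = { .track = { .gfn = 42 } };
	struct page_track *t = &gp.track;

	/* A write-protect handler receives only *t; pointer math on the
	 * member offset recovers the containing guest page. */
	struct guest_page *back = container_of(t, struct guest_page, track);
	printf("gfn=%lu same=%d\n", back->track.gfn, back == &gp);
	return back == &gp ? 0 : 1;
}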
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index aaa347f8620c..fac54f32d33f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -36,6 +36,8 @@
#include "i915_drv.h"
#include "gvt.h"
+#include <linux/vfio.h>
+#include <linux/mdev.h>
struct intel_gvt_host intel_gvt_host;
@@ -44,6 +46,129 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
+static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
+ const char *name)
+{
+ int i;
+ struct intel_vgpu_type *t;
+ const char *driver_name = dev_driver_string(
+ &gvt->dev_priv->drm.pdev->dev);
+
+ for (i = 0; i < gvt->num_types; i++) {
+ t = &gvt->types[i];
+ if (!strncmp(t->name, name + strlen(driver_name) + 1,
+ sizeof(t->name)))
+ return t;
+ }
+
+ return NULL;
+}
+
+static ssize_t available_instances_show(struct kobject *kobj,
+ struct device *dev, char *buf)
+{
+ struct intel_vgpu_type *type;
+ unsigned int num = 0;
+ void *gvt = kdev_to_i915(dev)->gvt;
+
+ type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ if (!type)
+ num = 0;
+ else
+ num = type->avail_instance;
+
+ return sprintf(buf, "%u\n", num);
+}
+
+static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
+}
+
+static ssize_t description_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ struct intel_vgpu_type *type;
+ void *gvt = kdev_to_i915(dev)->gvt;
+
+ type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ if (!type)
+ return 0;
+
+ return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
+ "fence: %d\nresolution: %s\n"
+ "weight: %d\n",
+ BYTES_TO_MB(type->low_gm_size),
+ BYTES_TO_MB(type->high_gm_size),
+ type->fence, vgpu_edid_str(type->resolution),
+ type->weight);
+}
+
+static MDEV_TYPE_ATTR_RO(available_instances);
+static MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(description);
+
+static struct attribute *gvt_type_attrs[] = {
+ &mdev_type_attr_available_instances.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_description.attr,
+ NULL,
+};
+
+static struct attribute_group *gvt_vgpu_type_groups[] = {
+ [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
+};
+
+static bool intel_get_gvt_attrs(struct attribute ***type_attrs,
+ struct attribute_group ***intel_vgpu_type_groups)
+{
+ *type_attrs = gvt_type_attrs;
+ *intel_vgpu_type_groups = gvt_vgpu_type_groups;
+ return true;
+}
+
+static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i, j;
+ struct intel_vgpu_type *type;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ type = &gvt->types[i];
+
+ group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ if (WARN_ON(!group))
+ goto unwind;
+
+ group->name = type->name;
+ group->attrs = gvt_type_attrs;
+ gvt_vgpu_type_groups[i] = group;
+ }
+
+ return true;
+
+unwind:
+ for (j = 0; j < i; j++) {
+ group = gvt_vgpu_type_groups[j];
+ kfree(group);
+ }
+
+ return false;
+}
+
+static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ group = gvt_vgpu_type_groups[i];
+ gvt_vgpu_type_groups[i] = NULL;
+ kfree(group);
+ }
+}
+
static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
@@ -54,6 +179,11 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
+ .gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
+ .get_gvt_attrs = intel_get_gvt_attrs,
+ .vgpu_query_plane = intel_vgpu_query_plane,
+ .vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
+ .write_protect_handler = intel_vgpu_write_protect_handler,
};
/**
@@ -191,17 +321,18 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
if (WARN_ON(!gvt))
return;
+ intel_gvt_debugfs_clean(gvt);
clean_service_thread(gvt);
intel_gvt_clean_cmd_parser(gvt);
intel_gvt_clean_sched_policy(gvt);
intel_gvt_clean_workload_scheduler(gvt);
- intel_gvt_clean_opregion(gvt);
intel_gvt_clean_gtt(gvt);
intel_gvt_clean_irq(gvt);
intel_gvt_clean_mmio_info(gvt);
intel_gvt_free_firmware(gvt);
intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+ intel_gvt_cleanup_vgpu_type_groups(gvt);
intel_gvt_clean_vgpu_types(gvt);
idr_destroy(&gvt->vgpu_idr);
@@ -256,6 +387,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (ret)
goto out_clean_idr;
+ intel_gvt_init_engine_mmio_context(gvt);
+
ret = intel_gvt_load_firmware(gvt);
if (ret)
goto out_clean_mmio_info;
@@ -268,13 +401,9 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (ret)
goto out_clean_irq;
- ret = intel_gvt_init_opregion(gvt);
- if (ret)
- goto out_clean_gtt;
-
ret = intel_gvt_init_workload_scheduler(gvt);
if (ret)
- goto out_clean_opregion;
+ goto out_clean_gtt;
ret = intel_gvt_init_sched_policy(gvt);
if (ret)
@@ -292,6 +421,12 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (ret)
goto out_clean_thread;
+ ret = intel_gvt_init_vgpu_type_groups(gvt);
+ if (ret == false) {
+ gvt_err("failed to init vgpu type groups: %d\n", ret);
+ goto out_clean_types;
+ }
+
ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt,
&intel_gvt_ops);
if (ret) {
@@ -307,6 +442,10 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
}
gvt->idle_vgpu = vgpu;
+ ret = intel_gvt_debugfs_init(gvt);
+ if (ret)
+ gvt_err("debugfs registeration failed, go on.\n");
+
gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
return 0;
@@ -321,8 +460,6 @@ out_clean_sched_policy:
intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
intel_gvt_clean_workload_scheduler(gvt);
-out_clean_opregion:
- intel_gvt_clean_opregion(gvt);
out_clean_gtt:
intel_gvt_clean_gtt(gvt);
out_clean_irq:
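
Both the vgpu-type-group setup and the opregion removal above reshuffle the unwind labels in intel_gvt_init_device(); the pattern is the usual kernel goto ladder, where each label tears down only the steps that already succeeded, in reverse order, and removing a step only retargets a few gotos. A toy standalone version of that ladder (names invented, init_c forced to fail for the demo):

#include <stdio.h>

static int init_a(void) { return 0; }
static void clean_a(void) { puts("clean a"); }
static int init_b(void) { return 0; }
static void clean_b(void) { puts("clean b"); }
static int init_c(void) { return -1; }   /* forced failure for the demo */

static int device_init(void)
{
	int ret;

	ret = init_a();
	if (ret)
		goto out;
	ret = init_b();
	if (ret)
		goto out_clean_a;
	ret = init_c();
	if (ret)
		goto out_clean_b;
	return 0;

out_clean_b:                 /* labels run top to bottom: newest first */
	clean_b();
out_clean_a:
	clean_a();
out:
	return ret;
}

int main(void)
{
	return device_init() ? 1 : 0;
}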
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 9c2e7c0aa38f..7dc7a80213a8 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -44,8 +44,10 @@
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
-#include "render.h"
+#include "mmio_context.h"
#include "cmd_parser.h"
+#include "fb_decoder.h"
+#include "dmabuf.h"
#define GVT_MAX_VGPU 8
@@ -123,9 +125,10 @@ struct intel_vgpu_irq {
};
struct intel_vgpu_opregion {
+ bool mapped;
void *va;
+ void *va_gopregion;
u32 gfn[INTEL_GVT_OPREGION_PAGES];
- struct page *pages[INTEL_GVT_OPREGION_PAGES];
};
#define vgpu_opregion(vgpu) (&(vgpu->opregion))
@@ -142,6 +145,33 @@ struct vgpu_sched_ctl {
int weight;
};
+enum {
+ INTEL_VGPU_EXECLIST_SUBMISSION = 1,
+ INTEL_VGPU_GUC_SUBMISSION,
+};
+
+struct intel_vgpu_submission_ops {
+ const char *name;
+ int (*init)(struct intel_vgpu *vgpu);
+ void (*clean)(struct intel_vgpu *vgpu);
+ void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+};
+
+struct intel_vgpu_submission {
+ struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
+ struct list_head workload_q_head[I915_NUM_ENGINES];
+ struct kmem_cache *workloads;
+ atomic_t running_workload_num;
+ struct i915_gem_context *shadow_ctx;
+ DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
+ DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
+ void *ring_scan_buffer[I915_NUM_ENGINES];
+ int ring_scan_buffer_size[I915_NUM_ENGINES];
+ const struct intel_vgpu_submission_ops *ops;
+ int virtual_submission_interface;
+ bool active;
+};
+
struct intel_vgpu {
struct intel_gvt *gvt;
int id;
@@ -161,16 +191,10 @@ struct intel_vgpu {
struct intel_vgpu_gtt gtt;
struct intel_vgpu_opregion opregion;
struct intel_vgpu_display display;
- struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
- struct list_head workload_q_head[I915_NUM_ENGINES];
- struct kmem_cache *workloads;
- atomic_t running_workload_num;
- /* 1/2K for each reserve ring buffer */
- void *reserve_ring_buffer_va[I915_NUM_ENGINES];
- int reserve_ring_buffer_size[I915_NUM_ENGINES];
- DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
- struct i915_gem_context *shadow_ctx;
- DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
+ struct intel_vgpu_submission submission;
+ u32 hws_pga[I915_NUM_ENGINES];
+
+ struct dentry *debugfs;
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
@@ -186,10 +210,22 @@ struct intel_vgpu {
struct kvm *kvm;
struct work_struct release_work;
atomic_t released;
+ struct vfio_device *vfio_device;
} vdev;
#endif
+
+ struct list_head dmabuf_obj_list_head;
+ struct mutex dmabuf_lock;
+ struct idr object_idr;
+
+ struct completion vblank_done;
+
};
+/* validating GM healthy status */
+#define vgpu_is_vm_unhealthy(ret_val) \
+ (((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
+
struct intel_gvt_gm {
unsigned long vgpu_allocated_low_gm_size;
unsigned long vgpu_allocated_high_gm_size;
@@ -231,7 +267,7 @@ struct intel_gvt_mmio {
unsigned int num_mmio_block;
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
- unsigned int num_tracked_mmio;
+ unsigned long num_tracked_mmio;
};
struct intel_gvt_firmware {
@@ -240,11 +276,6 @@ struct intel_gvt_firmware {
bool firmware_loaded;
};
-struct intel_gvt_opregion {
- void *opregion_va;
- u32 opregion_pa;
-};
-
#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
char name[16];
@@ -268,7 +299,6 @@ struct intel_gvt {
struct intel_gvt_firmware firmware;
struct intel_gvt_irq irq;
struct intel_gvt_gtt gtt;
- struct intel_gvt_opregion opregion;
struct intel_gvt_workload_scheduler scheduler;
struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
@@ -279,6 +309,10 @@ struct intel_gvt {
struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
unsigned long service_request;
+
+ struct engine_mmio *engine_mmio_list;
+
+ struct dentry *debugfs_root;
};
static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
@@ -316,7 +350,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
-#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
+#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
@@ -378,23 +412,20 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value);
-/* Macros for easily accessing vGPU virtual/shadow register */
-#define vgpu_vreg(vgpu, reg) \
- (*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg8(vgpu, reg) \
- (*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg16(vgpu, reg) \
- (*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg64(vgpu, reg) \
- (*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg(vgpu, reg) \
- (*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg8(vgpu, reg) \
- (*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg16(vgpu, reg) \
- (*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg64(vgpu, reg) \
- (*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+/* Macros for easily accessing vGPU virtual/shadow registers.
+   Explicitly separate use for typed MMIO reg or real offset. */
+#define vgpu_vreg_t(vgpu, reg) \
+ (*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
+#define vgpu_vreg(vgpu, offset) \
+ (*(u32 *)(vgpu->mmio.vreg + (offset)))
+#define vgpu_vreg64_t(vgpu, reg) \
+ (*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
+#define vgpu_vreg64(vgpu, offset) \
+ (*(u64 *)(vgpu->mmio.vreg + (offset)))
+#define vgpu_sreg_t(vgpu, reg) \
+ (*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
+#define vgpu_sreg(vgpu, offset) \
+ (*(u32 *)(vgpu->mmio.sreg + (offset)))
#define for_each_active_vgpu(gvt, vgpu, id) \
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
@@ -484,16 +515,15 @@ static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
PCI_BASE_ADDRESS_MEM_MASK;
}
-void intel_gvt_clean_opregion(struct intel_gvt *gvt);
-int intel_gvt_init_opregion(struct intel_gvt *gvt);
-
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
-int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
+int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
struct intel_gvt_ops {
int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
@@ -510,12 +540,21 @@ struct intel_gvt_ops {
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
+ struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
+ const char *name);
+ bool (*get_gvt_attrs)(struct attribute ***type_attrs,
+ struct attribute_group ***intel_vgpu_type_groups);
+ int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
+ int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
+ int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
+ unsigned int);
};
enum {
GVT_FAILSAFE_UNSUPPORTED_GUEST,
GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
+ GVT_FAILSAFE_GUEST_ERR,
};
static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
@@ -591,6 +630,12 @@ static inline bool intel_gvt_mmio_has_mode_mask(
return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
+int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
+int intel_gvt_debugfs_init(struct intel_gvt *gvt);
+void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
+
+
#include "trace.h"
#include "mpt.h"
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 1f840f6b81bb..92d6468daeee 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -166,7 +166,7 @@ int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
-static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
+void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
{
switch (reason) {
case GVT_FAILSAFE_UNSUPPORTED_GUEST:
@@ -174,6 +174,10 @@ static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
break;
case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
pr_err("Graphics resource is not enough for the guest\n");
+ break;
+ case GVT_FAILSAFE_GUEST_ERR:
+ pr_err("GVT Internal error for the guest\n");
+ break;
default:
break;
}
@@ -339,13 +343,13 @@ static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
- vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_ON;
- vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
- vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
- vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
+ vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON;
+ vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
+ vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
+ vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
} else
- vgpu_vreg(vgpu, PCH_PP_STATUS) &=
+ vgpu_vreg_t(vgpu, PCH_PP_STATUS) &=
~(PP_ON | PP_SEQUENCE_POWER_DOWN
| PP_CYCLE_DELAY_ACTIVE);
return 0;
@@ -499,7 +503,7 @@ static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
} else {
vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
- vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E))
+ vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E))
&= ~DP_TP_STATUS_AUTOTRAIN_DONE;
}
return 0;
@@ -517,9 +521,9 @@ static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
static int fdi_auto_training_started(struct intel_vgpu *vgpu)
{
- u32 ddi_buf_ctl = vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_E));
+ u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E));
u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
- u32 tx_ctl = vgpu_vreg(vgpu, DP_TP_CTL(PORT_E));
+ u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E));
if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
(rx_ctl & FDI_RX_ENABLE) &&
@@ -560,12 +564,12 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;
/* If imr bit has been masked */
- if (vgpu_vreg(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
+ if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
return 0;
- if (((vgpu_vreg(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
+ if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
== fdi_tx_check_bits)
- && ((vgpu_vreg(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
+ && ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
== fdi_rx_check_bits))
return 1;
else
@@ -622,17 +626,17 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
if (ret < 0)
return ret;
if (ret)
- vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
+ vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
if (ret < 0)
return ret;
if (ret)
- vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
+ vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
if (offset == _FDI_RXA_CTL)
if (fdi_auto_training_started(vgpu))
- vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E)) |=
+ vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |=
DP_TP_STATUS_AUTOTRAIN_DONE;
return 0;
}
@@ -653,7 +657,7 @@ static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
if (data == 0x2) {
status_reg = DP_TP_STATUS(index);
- vgpu_vreg(vgpu, status_reg) |= (1 << 25);
+ vgpu_vreg_t(vgpu, status_reg) |= (1 << 25);
}
return 0;
}
@@ -717,7 +721,7 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
};
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
@@ -738,7 +742,7 @@ static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
};
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
@@ -1060,9 +1064,9 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+ if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
- unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+ unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
sbi_offset);
@@ -1087,13 +1091,13 @@ static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, offset) = data;
- if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+ if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
- unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+ unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
write_virtual_sbi_register(vgpu, sbi_offset,
- vgpu_vreg(vgpu, SBI_DATA));
+ vgpu_vreg_t(vgpu, SBI_DATA));
}
return 0;
}
@@ -1339,7 +1343,7 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
{
u32 value = *(u32 *)p_data;
u32 cmd = value & 0xff;
- u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
+ u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA);
switch (cmd) {
case GEN9_PCODE_READ_MEM_LATENCY:
@@ -1378,6 +1382,34 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}
+static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 value = *(u32 *)p_data;
+ int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+
+ if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
+ gvt_vgpu_err("VM(%d) write invalid HWSP address, reg:0x%x, value:0x%x\n",
+ vgpu->id, offset, value);
+ return -EINVAL;
+ }
+ /*
+ * Need to emulate all the HWSP register writes to ensure the host can
+ * update the VM CSB status correctly. The registers listed here cover
+ * BDW, SKL and other platforms with the same HWSP registers.
+ */
+ if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
+ gvt_vgpu_err("VM(%d) access unknown hardware status page register:0x%x\n",
+ vgpu->id, offset);
+ return -EINVAL;
+ }
+ vgpu->hws_pga[ring_id] = value;
+ gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
+ vgpu->id, value, offset);
+
+ return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
+}
+
static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
@@ -1441,12 +1473,12 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
u32 data = *(u32 *)p_data;
int ret = 0;
- if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
+ if (WARN_ON(ring_id < 0 || ring_id >= I915_NUM_ENGINES))
return -EINVAL;
- execlist = &vgpu->execlist[ring_id];
+ execlist = &vgpu->submission.execlist[ring_id];
- execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
+ execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
if (execlist->elsp_dwords.index == 3) {
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
if(ret)
@@ -1462,9 +1494,11 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
+ struct intel_vgpu_submission *s = &vgpu->submission;
u32 data = *(u32 *)p_data;
int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
bool enable_execlist;
+ int ret;
write_vreg(vgpu, offset, p_data, bytes);
@@ -1486,8 +1520,18 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
(enable_execlist ? "enabling" : "disabling"),
ring_id);
- if (enable_execlist)
- intel_vgpu_start_schedule(vgpu);
+ if (!enable_execlist)
+ return 0;
+
+ if (s->active)
+ return 0;
+
+ ret = intel_vgpu_select_submission_ops(vgpu,
+ INTEL_VGPU_EXECLIST_SUBMISSION);
+ if (ret)
+ return ret;
+
+ intel_vgpu_start_schedule(vgpu);
}
return 0;
}
@@ -1519,7 +1563,7 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
default:
return -EINVAL;
}
- set_bit(id, (void *)vgpu->tlb_handle_pending);
+ set_bit(id, (void *)vgpu->submission.tlb_handle_pending);
return 0;
}
@@ -1542,7 +1586,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
}
#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
- ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \
+ ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
f, s, am, rm, d, r, w); \
if (ret) \
return ret; \
@@ -1610,22 +1654,22 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
-#define RING_REG(base) (base + 0x28)
+#define RING_REG(base) _MMIO((base) + 0x28)
MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG
-#define RING_REG(base) (base + 0x134)
+#define RING_REG(base) _MMIO((base) + 0x134)
MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG
-#define RING_REG(base) (base + 0x6c)
+#define RING_REG(base) _MMIO((base) + 0x6c)
MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
#undef RING_REG
MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
- MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
- MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);
MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -1635,7 +1679,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
/* RING MODE */
-#define RING_REG(base) (base + 0x29c)
+#define RING_REG(base) _MMIO((base) + 0x29c)
MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
ring_mode_mmio_write);
#undef RING_REG
@@ -1654,37 +1698,37 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
NULL, NULL);
MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2124), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20e4), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x9030), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20a0), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2420), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2430), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2434), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2438), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x243c), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x7018), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* display */
- MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_D(0x602a0, D_ALL);
+ MMIO_F(_MMIO(0x60220), 0x20, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_D(_MMIO(0x602a0), D_ALL);
- MMIO_D(0x65050, D_ALL);
- MMIO_D(0x650b4, D_ALL);
+ MMIO_D(_MMIO(0x65050), D_ALL);
+ MMIO_D(_MMIO(0x650b4), D_ALL);
- MMIO_D(0xc4040, D_ALL);
+ MMIO_D(_MMIO(0xc4040), D_ALL);
MMIO_D(DERRMR, D_ALL);
MMIO_D(PIPEDSL(PIPE_A), D_ALL);
@@ -1724,14 +1768,14 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(CURBASE(PIPE_B), D_ALL);
MMIO_D(CURBASE(PIPE_C), D_ALL);
- MMIO_D(0x700ac, D_ALL);
- MMIO_D(0x710ac, D_ALL);
- MMIO_D(0x720ac, D_ALL);
+ MMIO_D(_MMIO(0x700ac), D_ALL);
+ MMIO_D(_MMIO(0x710ac), D_ALL);
+ MMIO_D(_MMIO(0x720ac), D_ALL);
- MMIO_D(0x70090, D_ALL);
- MMIO_D(0x70094, D_ALL);
- MMIO_D(0x70098, D_ALL);
- MMIO_D(0x7009c, D_ALL);
+ MMIO_D(_MMIO(0x70090), D_ALL);
+ MMIO_D(_MMIO(0x70094), D_ALL);
+ MMIO_D(_MMIO(0x70098), D_ALL);
+ MMIO_D(_MMIO(0x7009c), D_ALL);
MMIO_D(DSPCNTR(PIPE_A), D_ALL);
MMIO_D(DSPADDR(PIPE_A), D_ALL);
@@ -1907,24 +1951,24 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);
- MMIO_D(0x48268, D_ALL);
+ MMIO_D(_MMIO(0x48268), D_ALL);
MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
gmbus_mmio_write);
MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0xe4f00, 0x28, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_PCH_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_PCH_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);
- MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
- MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
+ MMIO_DH(_MMIO(_PCH_TRANSACONF), D_ALL, NULL, transconf_mmio_write);
+ MMIO_DH(_MMIO(_PCH_TRANSBCONF), D_ALL, NULL, transconf_mmio_write);
MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
@@ -1936,30 +1980,30 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
- MMIO_D(_PCH_TRANS_HTOTAL_A, D_ALL);
- MMIO_D(_PCH_TRANS_HBLANK_A, D_ALL);
- MMIO_D(_PCH_TRANS_HSYNC_A, D_ALL);
- MMIO_D(_PCH_TRANS_VTOTAL_A, D_ALL);
- MMIO_D(_PCH_TRANS_VBLANK_A, D_ALL);
- MMIO_D(_PCH_TRANS_VSYNC_A, D_ALL);
- MMIO_D(_PCH_TRANS_VSYNCSHIFT_A, D_ALL);
-
- MMIO_D(_PCH_TRANS_HTOTAL_B, D_ALL);
- MMIO_D(_PCH_TRANS_HBLANK_B, D_ALL);
- MMIO_D(_PCH_TRANS_HSYNC_B, D_ALL);
- MMIO_D(_PCH_TRANS_VTOTAL_B, D_ALL);
- MMIO_D(_PCH_TRANS_VBLANK_B, D_ALL);
- MMIO_D(_PCH_TRANS_VSYNC_B, D_ALL);
- MMIO_D(_PCH_TRANS_VSYNCSHIFT_B, D_ALL);
-
- MMIO_D(_PCH_TRANSA_DATA_M1, D_ALL);
- MMIO_D(_PCH_TRANSA_DATA_N1, D_ALL);
- MMIO_D(_PCH_TRANSA_DATA_M2, D_ALL);
- MMIO_D(_PCH_TRANSA_DATA_N2, D_ALL);
- MMIO_D(_PCH_TRANSA_LINK_M1, D_ALL);
- MMIO_D(_PCH_TRANSA_LINK_N1, D_ALL);
- MMIO_D(_PCH_TRANSA_LINK_M2, D_ALL);
- MMIO_D(_PCH_TRANSA_LINK_N2, D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A), D_ALL);
+
+ MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B), D_ALL);
+
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2), D_ALL);
MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
@@ -1977,38 +2021,38 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);
- MMIO_D(_FDI_RXA_MISC, D_ALL);
- MMIO_D(_FDI_RXB_MISC, D_ALL);
- MMIO_D(_FDI_RXA_TUSIZE1, D_ALL);
- MMIO_D(_FDI_RXA_TUSIZE2, D_ALL);
- MMIO_D(_FDI_RXB_TUSIZE1, D_ALL);
- MMIO_D(_FDI_RXB_TUSIZE2, D_ALL);
+ MMIO_D(_MMIO(_FDI_RXA_MISC), D_ALL);
+ MMIO_D(_MMIO(_FDI_RXB_MISC), D_ALL);
+ MMIO_D(_MMIO(_FDI_RXA_TUSIZE1), D_ALL);
+ MMIO_D(_MMIO(_FDI_RXA_TUSIZE2), D_ALL);
+ MMIO_D(_MMIO(_FDI_RXB_TUSIZE1), D_ALL);
+ MMIO_D(_MMIO(_FDI_RXB_TUSIZE2), D_ALL);
MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
MMIO_D(PCH_PP_DIVISOR, D_ALL);
MMIO_D(PCH_PP_STATUS, D_ALL);
MMIO_D(PCH_LVDS, D_ALL);
- MMIO_D(_PCH_DPLL_A, D_ALL);
- MMIO_D(_PCH_DPLL_B, D_ALL);
- MMIO_D(_PCH_FPA0, D_ALL);
- MMIO_D(_PCH_FPA1, D_ALL);
- MMIO_D(_PCH_FPB0, D_ALL);
- MMIO_D(_PCH_FPB1, D_ALL);
+ MMIO_D(_MMIO(_PCH_DPLL_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_DPLL_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_FPA0), D_ALL);
+ MMIO_D(_MMIO(_PCH_FPA1), D_ALL);
+ MMIO_D(_MMIO(_PCH_FPB0), D_ALL);
+ MMIO_D(_MMIO(_PCH_FPB1), D_ALL);
MMIO_D(PCH_DREF_CONTROL, D_ALL);
MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
MMIO_D(PCH_DPLL_SEL, D_ALL);
- MMIO_D(0x61208, D_ALL);
- MMIO_D(0x6120c, D_ALL);
+ MMIO_D(_MMIO(0x61208), D_ALL);
+ MMIO_D(_MMIO(0x6120c), D_ALL);
MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);
- MMIO_DH(0xe651c, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe681c), D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe6c04), D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe6e1c), D_ALL, dpy_reg_mmio_read, NULL);
MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
PORTA_HOTPLUG_STATUS_MASK
@@ -2030,11 +2074,11 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SOUTH_CHICKEN1, D_ALL);
MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
- MMIO_D(_TRANSA_CHICKEN1, D_ALL);
- MMIO_D(_TRANSB_CHICKEN1, D_ALL);
+ MMIO_D(_MMIO(_TRANSA_CHICKEN1), D_ALL);
+ MMIO_D(_MMIO(_TRANSB_CHICKEN1), D_ALL);
MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
- MMIO_D(_TRANSA_CHICKEN2, D_ALL);
- MMIO_D(_TRANSB_CHICKEN2, D_ALL);
+ MMIO_D(_MMIO(_TRANSA_CHICKEN2), D_ALL);
+ MMIO_D(_MMIO(_TRANSB_CHICKEN2), D_ALL);
MMIO_D(ILK_DPFC_CB_BASE, D_ALL);
MMIO_D(ILK_DPFC_CONTROL, D_ALL);
@@ -2100,24 +2144,24 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_D(0x60110, D_ALL);
- MMIO_D(0x61110, D_ALL);
- MMIO_F(0x70400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x71400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x72400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x70440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(0x71440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(0x72440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(0x7044c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(0x7144c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(0x7244c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_D(_MMIO(0x60110), D_ALL);
+ MMIO_D(_MMIO(0x61110), D_ALL);
+ MMIO_F(_MMIO(0x70400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x71400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x72400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x70440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(_MMIO(0x71440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(_MMIO(0x72440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(_MMIO(0x7044c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
MMIO_D(SPLL_CTL, D_ALL);
- MMIO_D(_WRPLL_CTL1, D_ALL);
- MMIO_D(_WRPLL_CTL2, D_ALL);
+ MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL);
+ MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
@@ -2128,15 +2172,15 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);
MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
- MMIO_D(0x46508, D_ALL);
+ MMIO_D(_MMIO(0x46508), D_ALL);
- MMIO_D(0x49080, D_ALL);
- MMIO_D(0x49180, D_ALL);
- MMIO_D(0x49280, D_ALL);
+ MMIO_D(_MMIO(0x49080), D_ALL);
+ MMIO_D(_MMIO(0x49180), D_ALL);
+ MMIO_D(_MMIO(0x49280), D_ALL);
- MMIO_F(0x49090, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x49190, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x49290, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x49090), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x49190), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x49290), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
@@ -2156,7 +2200,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
MMIO_D(PIXCLK_GATE, D_ALL);
- MMIO_F(_DPA_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_ALL, NULL,
+ MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL,
dp_aux_ch_ctl_mmio_write);
MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
@@ -2177,24 +2221,24 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
- MMIO_F(_DDI_BUF_TRANS_A, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x64e60, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x64eC0, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x64f20, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x64f80, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x64e60), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x64eC0), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x64f20), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x64f80), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
- MMIO_DH(_TRANS_DDI_FUNC_CTL_A, D_ALL, NULL, NULL);
- MMIO_DH(_TRANS_DDI_FUNC_CTL_B, D_ALL, NULL, NULL);
- MMIO_DH(_TRANS_DDI_FUNC_CTL_C, D_ALL, NULL, NULL);
- MMIO_DH(_TRANS_DDI_FUNC_CTL_EDP, D_ALL, NULL, NULL);
+ MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
+ MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
+ MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL);
+ MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL);
- MMIO_D(_TRANSA_MSA_MISC, D_ALL);
- MMIO_D(_TRANSB_MSA_MISC, D_ALL);
- MMIO_D(_TRANSC_MSA_MISC, D_ALL);
- MMIO_D(_TRANS_EDP_MSA_MISC, D_ALL);
+ MMIO_D(_MMIO(_TRANSA_MSA_MISC), D_ALL);
+ MMIO_D(_MMIO(_TRANSB_MSA_MISC), D_ALL);
+ MMIO_D(_MMIO(_TRANSC_MSA_MISC), D_ALL);
+ MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC), D_ALL);
MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
MMIO_D(FORCEWAKE_ACK, D_ALL);
@@ -2260,101 +2304,101 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN6_UCGCTL1, D_ALL);
MMIO_D(GEN6_UCGCTL2, D_ALL);
- MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x4f000), 0x90, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(GEN6_PCODE_DATA, D_ALL);
- MMIO_D(0x13812c, D_ALL);
+ MMIO_D(_MMIO(0x13812c), D_ALL);
MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
MMIO_D(HSW_EDRAM_CAP, D_ALL);
MMIO_D(HSW_IDICR, D_ALL);
MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
- MMIO_D(0x3c, D_ALL);
- MMIO_D(0x860, D_ALL);
+ MMIO_D(_MMIO(0x3c), D_ALL);
+ MMIO_D(_MMIO(0x860), D_ALL);
MMIO_D(ECOSKPD, D_ALL);
- MMIO_D(0x121d0, D_ALL);
+ MMIO_D(_MMIO(0x121d0), D_ALL);
MMIO_D(GEN6_BLITTER_ECOSKPD, D_ALL);
- MMIO_D(0x41d0, D_ALL);
+ MMIO_D(_MMIO(0x41d0), D_ALL);
MMIO_D(GAC_ECO_BITS, D_ALL);
- MMIO_D(0x6200, D_ALL);
- MMIO_D(0x6204, D_ALL);
- MMIO_D(0x6208, D_ALL);
- MMIO_D(0x7118, D_ALL);
- MMIO_D(0x7180, D_ALL);
- MMIO_D(0x7408, D_ALL);
- MMIO_D(0x7c00, D_ALL);
+ MMIO_D(_MMIO(0x6200), D_ALL);
+ MMIO_D(_MMIO(0x6204), D_ALL);
+ MMIO_D(_MMIO(0x6208), D_ALL);
+ MMIO_D(_MMIO(0x7118), D_ALL);
+ MMIO_D(_MMIO(0x7180), D_ALL);
+ MMIO_D(_MMIO(0x7408), D_ALL);
+ MMIO_D(_MMIO(0x7c00), D_ALL);
MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
- MMIO_D(0x911c, D_ALL);
- MMIO_D(0x9120, D_ALL);
+ MMIO_D(_MMIO(0x911c), D_ALL);
+ MMIO_D(_MMIO(0x9120), D_ALL);
MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GAB_CTL, D_ALL);
- MMIO_D(0x48800, D_ALL);
- MMIO_D(0xce044, D_ALL);
- MMIO_D(0xe6500, D_ALL);
- MMIO_D(0xe6504, D_ALL);
- MMIO_D(0xe6600, D_ALL);
- MMIO_D(0xe6604, D_ALL);
- MMIO_D(0xe6700, D_ALL);
- MMIO_D(0xe6704, D_ALL);
- MMIO_D(0xe6800, D_ALL);
- MMIO_D(0xe6804, D_ALL);
+ MMIO_D(_MMIO(0x48800), D_ALL);
+ MMIO_D(_MMIO(0xce044), D_ALL);
+ MMIO_D(_MMIO(0xe6500), D_ALL);
+ MMIO_D(_MMIO(0xe6504), D_ALL);
+ MMIO_D(_MMIO(0xe6600), D_ALL);
+ MMIO_D(_MMIO(0xe6604), D_ALL);
+ MMIO_D(_MMIO(0xe6700), D_ALL);
+ MMIO_D(_MMIO(0xe6704), D_ALL);
+ MMIO_D(_MMIO(0xe6800), D_ALL);
+ MMIO_D(_MMIO(0xe6804), D_ALL);
MMIO_D(PCH_GMBUS4, D_ALL);
MMIO_D(PCH_GMBUS5, D_ALL);
- MMIO_D(0x902c, D_ALL);
- MMIO_D(0xec008, D_ALL);
- MMIO_D(0xec00c, D_ALL);
- MMIO_D(0xec008 + 0x18, D_ALL);
- MMIO_D(0xec00c + 0x18, D_ALL);
- MMIO_D(0xec008 + 0x18 * 2, D_ALL);
- MMIO_D(0xec00c + 0x18 * 2, D_ALL);
- MMIO_D(0xec008 + 0x18 * 3, D_ALL);
- MMIO_D(0xec00c + 0x18 * 3, D_ALL);
- MMIO_D(0xec408, D_ALL);
- MMIO_D(0xec40c, D_ALL);
- MMIO_D(0xec408 + 0x18, D_ALL);
- MMIO_D(0xec40c + 0x18, D_ALL);
- MMIO_D(0xec408 + 0x18 * 2, D_ALL);
- MMIO_D(0xec40c + 0x18 * 2, D_ALL);
- MMIO_D(0xec408 + 0x18 * 3, D_ALL);
- MMIO_D(0xec40c + 0x18 * 3, D_ALL);
- MMIO_D(0xfc810, D_ALL);
- MMIO_D(0xfc81c, D_ALL);
- MMIO_D(0xfc828, D_ALL);
- MMIO_D(0xfc834, D_ALL);
- MMIO_D(0xfcc00, D_ALL);
- MMIO_D(0xfcc0c, D_ALL);
- MMIO_D(0xfcc18, D_ALL);
- MMIO_D(0xfcc24, D_ALL);
- MMIO_D(0xfd000, D_ALL);
- MMIO_D(0xfd00c, D_ALL);
- MMIO_D(0xfd018, D_ALL);
- MMIO_D(0xfd024, D_ALL);
- MMIO_D(0xfd034, D_ALL);
+ MMIO_D(_MMIO(0x902c), D_ALL);
+ MMIO_D(_MMIO(0xec008), D_ALL);
+ MMIO_D(_MMIO(0xec00c), D_ALL);
+ MMIO_D(_MMIO(0xec008 + 0x18), D_ALL);
+ MMIO_D(_MMIO(0xec00c + 0x18), D_ALL);
+ MMIO_D(_MMIO(0xec008 + 0x18 * 2), D_ALL);
+ MMIO_D(_MMIO(0xec00c + 0x18 * 2), D_ALL);
+ MMIO_D(_MMIO(0xec008 + 0x18 * 3), D_ALL);
+ MMIO_D(_MMIO(0xec00c + 0x18 * 3), D_ALL);
+ MMIO_D(_MMIO(0xec408), D_ALL);
+ MMIO_D(_MMIO(0xec40c), D_ALL);
+ MMIO_D(_MMIO(0xec408 + 0x18), D_ALL);
+ MMIO_D(_MMIO(0xec40c + 0x18), D_ALL);
+ MMIO_D(_MMIO(0xec408 + 0x18 * 2), D_ALL);
+ MMIO_D(_MMIO(0xec40c + 0x18 * 2), D_ALL);
+ MMIO_D(_MMIO(0xec408 + 0x18 * 3), D_ALL);
+ MMIO_D(_MMIO(0xec40c + 0x18 * 3), D_ALL);
+ MMIO_D(_MMIO(0xfc810), D_ALL);
+ MMIO_D(_MMIO(0xfc81c), D_ALL);
+ MMIO_D(_MMIO(0xfc828), D_ALL);
+ MMIO_D(_MMIO(0xfc834), D_ALL);
+ MMIO_D(_MMIO(0xfcc00), D_ALL);
+ MMIO_D(_MMIO(0xfcc0c), D_ALL);
+ MMIO_D(_MMIO(0xfcc18), D_ALL);
+ MMIO_D(_MMIO(0xfcc24), D_ALL);
+ MMIO_D(_MMIO(0xfd000), D_ALL);
+ MMIO_D(_MMIO(0xfd00c), D_ALL);
+ MMIO_D(_MMIO(0xfd018), D_ALL);
+ MMIO_D(_MMIO(0xfd024), D_ALL);
+ MMIO_D(_MMIO(0xfd034), D_ALL);
MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
- MMIO_D(0x2054, D_ALL);
- MMIO_D(0x12054, D_ALL);
- MMIO_D(0x22054, D_ALL);
- MMIO_D(0x1a054, D_ALL);
-
- MMIO_D(0x44070, D_ALL);
- MMIO_DFH(0x215c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
-
- MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
- MMIO_D(0x2b00, D_BDW_PLUS);
- MMIO_D(0x2360, D_BDW_PLUS);
- MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x5280, 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
-
- MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(_MMIO(0x2054), D_ALL);
+ MMIO_D(_MMIO(0x12054), D_ALL);
+ MMIO_D(_MMIO(0x22054), D_ALL);
+ MMIO_D(_MMIO(0x1a054), D_ALL);
+
+ MMIO_D(_MMIO(0x44070), D_ALL);
+ MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x12178), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
+ MMIO_D(_MMIO(0x2b00), D_BDW_PLUS);
+ MMIO_D(_MMIO(0x2360), D_BDW_PLUS);
+ MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_DFH(_MMIO(0x1c17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x1c178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
@@ -2368,24 +2412,24 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
- MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
- MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
- MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
- MMIO_DH(0x426c, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
- MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
- MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DH(_MMIO(0x4260), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(_MMIO(0x4264), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(_MMIO(0x4268), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(_MMIO(0x426c), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(_MMIO(0x4270), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
- MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x22178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}
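
The long runs of one-line changes above are mechanical: every raw u32 offset handed to the MMIO_D/MMIO_F/MMIO_DH/MMIO_DFH macros is wrapped in _MMIO(), so the GVT tracking tables carry the driver's typed register handle instead of a bare integer. A minimal sketch of the pattern, roughly as i915_reg.h spells it (treat the exact definitions here as an assumption):

	/* Sketch of the typed-register pattern this series converts to;
	 * lifted in spirit from i915_reg.h, exact details assumed. */
	typedef struct {
		u32 reg;
	} i915_reg_t;

	#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

	static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
	{
		return reg.reg;	/* unwrap only at the point of actual I/O */
	}

The payoff is type safety: a bare offset can no longer be passed where a register is expected, which is also why find_mmio_block() further down swaps the open-coded INTEL_GVT_MMIO_OFFSET() for i915_mmio_reg_offset().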
@@ -2459,40 +2503,40 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS,
mmio_read_from_hw, NULL);
-#define RING_REG(base) (base + 0xd0)
+#define RING_REG(base) _MMIO((base) + 0xd0)
MMIO_RING_F(RING_REG, 4, F_RO, 0,
~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
ring_reset_ctl_write);
#undef RING_REG
-#define RING_REG(base) (base + 0x230)
+#define RING_REG(base) _MMIO((base) + 0x230)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
#undef RING_REG
-#define RING_REG(base) (base + 0x234)
+#define RING_REG(base) _MMIO((base) + 0x234)
MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
NULL, NULL);
#undef RING_REG
-#define RING_REG(base) (base + 0x244)
+#define RING_REG(base) _MMIO((base) + 0x244)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG
-#define RING_REG(base) (base + 0x370)
+#define RING_REG(base) _MMIO((base) + 0x370)
MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
-#define RING_REG(base) (base + 0x3a0)
+#define RING_REG(base) _MMIO((base) + 0x3a0)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
#undef RING_REG
MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
- MMIO_D(0x1c1d0, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x1c1d0), D_BDW_PLUS);
MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
- MMIO_D(0x1c054, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x1c054), D_BDW_PLUS);
MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
@@ -2501,11 +2545,11 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(GAMTARBMODE, D_BDW_PLUS);
-#define RING_REG(base) (base + 0x270)
+#define RING_REG(base) _MMIO((base) + 0x270)
MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
- MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+ MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2514,10 +2558,10 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
MMIO_D(WM_MISC, D_BDW);
- MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
+ MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW);
- MMIO_D(0x66c00, D_BDW_PLUS);
- MMIO_D(0x66c04, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
+ MMIO_D(_MMIO(0x66c04), D_BDW_PLUS);
MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);
@@ -2525,54 +2569,54 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
- MMIO_D(0xfdc, D_BDW_PLUS);
+ MMIO_D(_MMIO(0xfdc), D_BDW_PLUS);
MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xb1f0), D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xb1c0), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0xb110, D_BDW);
+ MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(_MMIO(0xb110), D_BDW);
- MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
+ MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
NULL, force_nonpriv_write);
- MMIO_D(0x44484, D_BDW_PLUS);
- MMIO_D(0x4448c, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x44484), D_BDW_PLUS);
+ MMIO_D(_MMIO(0x4448c), D_BDW_PLUS);
- MMIO_DFH(0x83a4, D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
- MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0x110000, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x110000), D_BDW_PLUS);
- MMIO_D(0x48400, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x48400), D_BDW_PLUS);
- MMIO_D(0x6e570, D_BDW_PLUS);
- MMIO_D(0x65f10, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x6e570), D_BDW_PLUS);
+ MMIO_D(_MMIO(0x65f10), D_BDW_PLUS);
- MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-
- MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL);
-
- MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2580), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(_MMIO(0x2248), D_BDW, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(_MMIO(0xe220), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe230), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe240), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe260), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe270), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe280), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}
@@ -2588,11 +2632,11 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
- MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(_MMIO(_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(_MMIO(_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
/*
@@ -2603,26 +2647,26 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
skl_power_well_ctl_write);
- MMIO_D(0xa210, D_SKL_PLUS);
+ MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL);
- MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL);
- MMIO_D(0x45504, D_SKL_PLUS);
- MMIO_D(0x45520, D_SKL_PLUS);
- MMIO_D(0x46000, D_SKL_PLUS);
- MMIO_DH(0x46010, D_SKL | D_KBL, NULL, skl_lcpll_write);
- MMIO_DH(0x46014, D_SKL | D_KBL, NULL, skl_lcpll_write);
- MMIO_D(0x6C040, D_SKL | D_KBL);
- MMIO_D(0x6C048, D_SKL | D_KBL);
- MMIO_D(0x6C050, D_SKL | D_KBL);
- MMIO_D(0x6C044, D_SKL | D_KBL);
- MMIO_D(0x6C04C, D_SKL | D_KBL);
- MMIO_D(0x6C054, D_SKL | D_KBL);
- MMIO_D(0x6c058, D_SKL | D_KBL);
- MMIO_D(0x6c05c, D_SKL | D_KBL);
- MMIO_DH(0X6c060, D_SKL | D_KBL, dpll_status_read, NULL);
+ MMIO_DH(_MMIO(0x4ddc), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(0x42080), D_SKL_PLUS, NULL, NULL);
+ MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
+ MMIO_DH(_MMIO(0x46010), D_SKL | D_KBL, NULL, skl_lcpll_write);
+ MMIO_DH(_MMIO(0x46014), D_SKL | D_KBL, NULL, skl_lcpll_write);
+ MMIO_D(_MMIO(0x6C040), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6C048), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6C050), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6C044), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6C04C), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6C054), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6c058), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6c05c), D_SKL | D_KBL);
+ MMIO_DH(_MMIO(0x6c060), D_SKL | D_KBL, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2711,105 +2755,105 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_D(0x70380, D_SKL_PLUS);
- MMIO_D(0x71380, D_SKL_PLUS);
- MMIO_D(0x72380, D_SKL_PLUS);
- MMIO_D(0x7039c, D_SKL_PLUS);
+ MMIO_D(_MMIO(0x70380), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x71380), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
- MMIO_D(0x8f074, D_SKL | D_KBL);
- MMIO_D(0x8f004, D_SKL | D_KBL);
- MMIO_D(0x8f034, D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x8f004), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x8f034), D_SKL | D_KBL);
- MMIO_D(0xb11c, D_SKL | D_KBL);
+ MMIO_D(_MMIO(0xb11c), D_SKL | D_KBL);
- MMIO_D(0x51000, D_SKL | D_KBL);
- MMIO_D(0x6c00c, D_SKL_PLUS);
+ MMIO_D(_MMIO(0x51000), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
- MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
- MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+ MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+ MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
- MMIO_D(0xd08, D_SKL_PLUS);
- MMIO_DFH(0x20e0, D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0x20ec, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
+ MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* TRTT */
- MMIO_DFH(0x4de0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4de4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4de8, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4dec, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4df0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4df4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
- MMIO_DH(0x4dfc, D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
+ MMIO_DFH(_MMIO(0x4de0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4de4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4de8), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4dec), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4df0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4df4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
+ MMIO_DH(_MMIO(0x4dfc), D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
- MMIO_D(0x45008, D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x45008), D_SKL | D_KBL);
- MMIO_D(0x46430, D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x46430), D_SKL | D_KBL);
- MMIO_D(0x46520, D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x46520), D_SKL | D_KBL);
- MMIO_D(0xc403c, D_SKL | D_KBL);
- MMIO_D(0xb004, D_SKL_PLUS);
+ MMIO_D(_MMIO(0xc403c), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
- MMIO_D(0x65900, D_SKL_PLUS);
- MMIO_D(0x1082c0, D_SKL | D_KBL);
- MMIO_D(0x4068, D_SKL | D_KBL);
- MMIO_D(0x67054, D_SKL | D_KBL);
- MMIO_D(0x6e560, D_SKL | D_KBL);
- MMIO_D(0x6e554, D_SKL | D_KBL);
- MMIO_D(0x2b20, D_SKL | D_KBL);
- MMIO_D(0x65f00, D_SKL | D_KBL);
- MMIO_D(0x65f08, D_SKL | D_KBL);
- MMIO_D(0x320f0, D_SKL | D_KBL);
-
- MMIO_D(0x70034, D_SKL_PLUS);
- MMIO_D(0x71034, D_SKL_PLUS);
- MMIO_D(0x72034, D_SKL_PLUS);
-
- MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL_PLUS);
- MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL_PLUS);
- MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL_PLUS);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL_PLUS);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL_PLUS);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL_PLUS);
-
- MMIO_D(0x44500, D_SKL_PLUS);
+ MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x1082c0), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x4068), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x67054), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6e560), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6e554), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x2b20), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x65f00), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x65f08), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x320f0), D_SKL | D_KBL);
+
+ MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x72034), D_SKL_PLUS);
+
+ MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
+
+ MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_D(0x4ab8, D_KBL);
- MMIO_D(0x2248, D_SKL_PLUS | D_KBL);
+ MMIO_D(_MMIO(0x4ab8), D_KBL);
+ MMIO_D(_MMIO(0x2248), D_SKL_PLUS | D_KBL);
return 0;
}
@@ -2825,8 +2869,8 @@ static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
for (i = 0; i < num; i++, block++) {
if (!(device & block->device))
continue;
- if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
- offset < INTEL_GVT_MMIO_OFFSET(block->offset) + block->size)
+ if (offset >= i915_mmio_reg_offset(block->offset) &&
+ offset < i915_mmio_reg_offset(block->offset) + block->size)
return block;
}
return NULL;
@@ -2906,14 +2950,46 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
gvt->mmio.mmio_block = mmio_blocks;
gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
- gvt_dbg_mmio("traced %u virtual mmio registers\n",
- gvt->mmio.num_tracked_mmio);
return 0;
err:
intel_gvt_clean_mmio_info(gvt);
return ret;
}
+/**
+ * intel_gvt_for_each_tracked_mmio - iterate over each tracked MMIO
+ * @gvt: a GVT device
+ * @handler: callback invoked once per tracked MMIO offset
+ * @data: private data passed through to @handler
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
+ int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
+ void *data)
+{
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ struct intel_gvt_mmio_info *e;
+ int i, j, ret;
+
+ hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
+ ret = handler(gvt, e->offset, data);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
+ for (j = 0; j < block->size; j += 4) {
+ ret = handler(gvt,
+ i915_mmio_reg_offset(block->offset) + j,
+ data);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
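
The new iterator visits both the hash table of individually tracked registers and the flat MMIO blocks, stepping through the latter four bytes at a time. A hypothetical handler (the names below are illustrative, not from the patch) might simply tally what gets visited:

	/* Illustrative handler: count every tracked MMIO offset. */
	static int count_tracked(struct intel_gvt *gvt, u32 offset, void *data)
	{
		(*(u32 *)data)++;
		return 0;	/* a non-zero return aborts the walk */
	}

	/* Usage: u32 num = 0;
	 *        intel_gvt_for_each_tracked_mmio(gvt, count_tracked, &num);
	 */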
/**
* intel_vgpu_default_mmio_read - default MMIO read handler
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index df7f33abd393..a1bd82feb827 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -55,6 +55,9 @@ struct intel_gvt_mpt {
unsigned long mfn, unsigned int nr, bool map);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
+ int (*set_opregion)(void *vgpu);
+ int (*get_vfio_device)(void *vgpu);
+ void (*put_vfio_device)(void *vgpu);
};
extern struct intel_gvt_mpt xengt_mpt;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 0a7d084da1a2..554d1db1f3c8 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -53,11 +53,23 @@ static const struct intel_gvt_ops *intel_gvt_ops;
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+
+struct vfio_region;
+struct intel_vgpu_regops {
+ size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+ void (*release)(struct intel_vgpu *vgpu,
+ struct vfio_region *region);
+};
+
struct vfio_region {
u32 type;
u32 subtype;
size_t size;
u32 flags;
+ const struct intel_vgpu_regops *ops;
+ void *data;
};
struct kvmgt_pgfn {
@@ -248,120 +260,6 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
}
}
-static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
- const char *name)
-{
- int i;
- struct intel_vgpu_type *t;
- const char *driver_name = dev_driver_string(
- &gvt->dev_priv->drm.pdev->dev);
-
- for (i = 0; i < gvt->num_types; i++) {
- t = &gvt->types[i];
- if (!strncmp(t->name, name + strlen(driver_name) + 1,
- sizeof(t->name)))
- return t;
- }
-
- return NULL;
-}
-
-static ssize_t available_instances_show(struct kobject *kobj,
- struct device *dev, char *buf)
-{
- struct intel_vgpu_type *type;
- unsigned int num = 0;
- void *gvt = kdev_to_i915(dev)->gvt;
-
- type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
- if (!type)
- num = 0;
- else
- num = type->avail_instance;
-
- return sprintf(buf, "%u\n", num);
-}
-
-static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
- char *buf)
-{
- return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
-}
-
-static ssize_t description_show(struct kobject *kobj, struct device *dev,
- char *buf)
-{
- struct intel_vgpu_type *type;
- void *gvt = kdev_to_i915(dev)->gvt;
-
- type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
- if (!type)
- return 0;
-
- return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
- "fence: %d\nresolution: %s\n"
- "weight: %d\n",
- BYTES_TO_MB(type->low_gm_size),
- BYTES_TO_MB(type->high_gm_size),
- type->fence, vgpu_edid_str(type->resolution),
- type->weight);
-}
-
-static MDEV_TYPE_ATTR_RO(available_instances);
-static MDEV_TYPE_ATTR_RO(device_api);
-static MDEV_TYPE_ATTR_RO(description);
-
-static struct attribute *type_attrs[] = {
- &mdev_type_attr_available_instances.attr,
- &mdev_type_attr_device_api.attr,
- &mdev_type_attr_description.attr,
- NULL,
-};
-
-static struct attribute_group *intel_vgpu_type_groups[] = {
- [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
-};
-
-static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i, j;
- struct intel_vgpu_type *type;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- type = &gvt->types[i];
-
- group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
- if (WARN_ON(!group))
- goto unwind;
-
- group->name = type->name;
- group->attrs = type_attrs;
- intel_vgpu_type_groups[i] = group;
- }
-
- return true;
-
-unwind:
- for (j = 0; j < i; j++) {
- group = intel_vgpu_type_groups[j];
- kfree(group);
- }
-
- return false;
-}
-
-static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- group = intel_vgpu_type_groups[i];
- kfree(group);
- }
-}
-
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
hash_init(info->ptable);
@@ -430,6 +328,108 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
}
}
+static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
+ VFIO_PCI_NUM_REGIONS;
+ void *base = vgpu->vdev.region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+ if (pos >= vgpu->vdev.region[i].size || iswrite) {
+ gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
+ return -EINVAL;
+ }
+ count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
+ memcpy(buf, base + pos, count);
+
+ return count;
+}
+
+static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
+ struct vfio_region *region)
+{
+}
+
+static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
+ .rw = intel_vgpu_reg_rw_opregion,
+ .release = intel_vgpu_reg_release_opregion,
+};
+
+static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
+ unsigned int type, unsigned int subtype,
+ const struct intel_vgpu_regops *ops,
+ size_t size, u32 flags, void *data)
+{
+ struct vfio_region *region;
+
+ region = krealloc(vgpu->vdev.region,
+ (vgpu->vdev.num_regions + 1) * sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ vgpu->vdev.region = region;
+ vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
+ vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
+ vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
+ vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
+ vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
+ vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
+ vgpu->vdev.num_regions++;
+ return 0;
+}
+
+static int kvmgt_get_vfio_device(void *p_vgpu)
+{
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+ vgpu->vdev.vfio_device = vfio_device_get_from_dev(
+ mdev_dev(vgpu->vdev.mdev));
+ if (!vgpu->vdev.vfio_device) {
+ gvt_vgpu_err("failed to get vfio device\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int kvmgt_set_opregion(void *p_vgpu)
+{
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ void *base;
+ int ret;
+
+ /* Each vGPU has its own opregion, although VFIO would create another
+ * one later. This one is used to expose the opregion to VFIO; the one
+ * VFIO creates later is what the guest actually uses.
+ */
+ base = vgpu_opregion(vgpu)->va;
+ if (!base)
+ return -ENOMEM;
+
+ if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+ memunmap(base);
+ return -EINVAL;
+ }
+
+ ret = intel_vgpu_register_reg(vgpu,
+ PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
+ &intel_vgpu_regops_opregion, OPREGION_SIZE,
+ VFIO_REGION_INFO_FLAG_READ, base);
+
+ return ret;
+}
+
+static void kvmgt_put_vfio_device(void *vgpu)
+{
+ if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
+ return;
+
+ vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
+}
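
Regions registered through intel_vgpu_register_reg() sit past the fixed VFIO PCI regions, so the i-th one answers to index VFIO_PCI_NUM_REGIONS + i. A hedged sketch of the offset arithmetic, using the macros defined near the top of kvmgt.c:

	/* Hedged sketch: file offset of the i-th device-specific region,
	 * e.g. the OpRegion registered by kvmgt_set_opregion(). */
	static loff_t vgpu_region_offset(unsigned int i)
	{
		return VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_NUM_REGIONS + i);
	}

intel_vgpu_rw() reverses the mapping: it recovers the index from *ppos, subtracts VFIO_PCI_NUM_REGIONS, and dispatches to region[i].ops->rw, which is how the read-only OpRegion handler above is reached.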
+
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = NULL;
@@ -441,7 +441,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
pdev = mdev_parent_dev(mdev);
gvt = kdev_to_i915(pdev)->gvt;
- type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
if (!type) {
gvt_vgpu_err("failed to find type %s to create\n",
kobject_name(kobj));
@@ -660,7 +660,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
int ret = -EINVAL;
- if (index >= VFIO_PCI_NUM_REGIONS) {
+ if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
gvt_vgpu_err("invalid index: %u\n", index);
return -EINVAL;
}
@@ -688,8 +688,14 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
case VFIO_PCI_BAR5_REGION_INDEX:
case VFIO_PCI_VGA_REGION_INDEX:
case VFIO_PCI_ROM_REGION_INDEX:
+ break;
default:
- gvt_vgpu_err("unsupported region: %u\n", index);
+ if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
+ return -EINVAL;
+
+ index -= VFIO_PCI_NUM_REGIONS;
+ return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
+ ppos, is_write);
}
return ret == 0 ? count : ret;
@@ -952,7 +958,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
info.flags = VFIO_DEVICE_FLAGS_PCI;
info.flags |= VFIO_DEVICE_FLAGS_RESET;
- info.num_regions = VFIO_PCI_NUM_REGIONS;
+ info.num_regions = VFIO_PCI_NUM_REGIONS +
+ vgpu->vdev.num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -1024,13 +1031,17 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = 0;
-
info.flags = 0;
+
gvt_dbg_core("get region info bar:%d\n", info.index);
break;
case VFIO_PCI_ROM_REGION_INDEX:
case VFIO_PCI_VGA_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0;
+ info.flags = 0;
+
gvt_dbg_core("get region info index:%d\n", info.index);
break;
default:
@@ -1078,6 +1089,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
}
if (caps.size) {
+ info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
if (info.argsz < sizeof(info) + caps.size) {
info.argsz = sizeof(info) + caps.size;
info.cap_offset = 0;
@@ -1164,6 +1176,33 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
} else if (cmd == VFIO_DEVICE_RESET) {
intel_gvt_ops->vgpu_reset(vgpu);
return 0;
+ } else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
+ struct vfio_device_gfx_plane_info dmabuf;
+ int ret = 0;
+
+ minsz = offsetofend(struct vfio_device_gfx_plane_info,
+ dmabuf_id);
+ if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
+ return -EFAULT;
+ if (dmabuf.argsz < minsz)
+ return -EINVAL;
+
+ ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
+ if (ret != 0)
+ return ret;
+
+ return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
+ -EFAULT : 0;
+ } else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
+ __u32 dmabuf_id;
+ __s32 dmabuf_fd;
+
+ if (get_user(dmabuf_id, (__u32 __user *)arg))
+ return -EFAULT;
+
+ dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
+ return dmabuf_fd;
+
}
return 0;
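
Together the two new ioctls let userspace export the guest framebuffer as a dma-buf. A hedged userspace-side sketch (the plane-type constant and struct usage are assumptions; include/uapi/linux/vfio.h is authoritative):

	#include <sys/ioctl.h>
	#include <linux/vfio.h>

	/* Hedged sketch: query the guest's primary plane, then trade its
	 * dmabuf_id for a dma-buf fd. Error handling kept minimal. */
	static int guest_fb_dmabuf(int vgpu_fd)
	{
		struct vfio_device_gfx_plane_info info = {
			.argsz = sizeof(info),
			.flags = VFIO_GFX_PLANE_TYPE_DMABUF,
			.drm_plane_type = 1, /* DRM_PLANE_TYPE_PRIMARY (assumed) */
		};

		if (ioctl(vgpu_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &info) < 0)
			return -1;

		/* The kernel side reads a bare __u32 id, per the code above. */
		return ioctl(vgpu_fd, VFIO_DEVICE_GET_GFX_DMABUF, &info.dmabuf_id);
	}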
@@ -1193,7 +1232,7 @@ hw_id_show(struct device *dev, struct device_attribute *attr,
struct intel_vgpu *vgpu = (struct intel_vgpu *)
mdev_get_drvdata(mdev);
return sprintf(buf, "%u\n",
- vgpu->shadow_ctx->hw_id);
+ vgpu->submission.shadow_ctx->hw_id);
}
return sprintf(buf, "\n");
}
@@ -1217,8 +1256,7 @@ static const struct attribute_group *intel_vgpu_groups[] = {
NULL,
};
-static const struct mdev_parent_ops intel_vgpu_ops = {
- .supported_type_groups = intel_vgpu_type_groups,
+static struct mdev_parent_ops intel_vgpu_ops = {
.mdev_attr_groups = intel_vgpu_groups,
.create = intel_vgpu_create,
.remove = intel_vgpu_remove,
@@ -1234,17 +1272,20 @@ static const struct mdev_parent_ops intel_vgpu_ops = {
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
- if (!intel_gvt_init_vgpu_type_groups(gvt))
- return -EFAULT;
+ struct attribute **kvm_type_attrs;
+ struct attribute_group **kvm_vgpu_type_groups;
intel_gvt_ops = ops;
+ if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
+ &kvm_vgpu_type_groups))
+ return -EFAULT;
+ intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
return mdev_register_device(dev, &intel_vgpu_ops);
}
static void kvmgt_host_exit(struct device *dev, void *gvt)
{
- intel_gvt_cleanup_vgpu_type_groups(gvt);
mdev_unregister_device(dev);
}
@@ -1324,8 +1365,8 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
struct kvmgt_guest_info, track_node);
if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
- intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
- (void *)val, len);
+ intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
+ (void *)val, len);
}
static void kvmgt_page_track_flush_slot(struct kvm *kvm,
@@ -1403,6 +1444,9 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
kvmgt_protect_table_init(info);
gvt_cache_init(vgpu);
+ mutex_init(&vgpu->dmabuf_lock);
+ init_completion(&vgpu->vblank_done);
+
info->track_node.track_write = kvmgt_page_track_write;
info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_page_track_register_notifier(kvm, &info->track_node);
@@ -1543,6 +1587,9 @@ struct intel_gvt_mpt kvmgt_mpt = {
.read_gpa = kvmgt_read_gpa,
.write_gpa = kvmgt_write_gpa,
.gfn_to_mfn = kvmgt_gfn_to_pfn,
+ .set_opregion = kvmgt_set_opregion,
+ .get_vfio_device = kvmgt_get_vfio_device,
+ .put_vfio_device = kvmgt_put_vfio_device,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 1e1310f50289..562b5ad857a4 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -117,25 +117,6 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
else
memcpy(pt, p_data, bytes);
- } else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
- struct intel_vgpu_guest_page *gp;
-
- /* Since we enter the failsafe mode early during guest boot,
- * guest may not have chance to set up its ppgtt table, so
- * there should not be any wp pages for guest. Keep the wp
- * related code here in case we need to handle it in the future.
- */
- gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
- if (gp) {
- /* remove write protection to prevent future traps */
- intel_vgpu_clean_guest_page(vgpu, gp);
- if (read)
- intel_gvt_hypervisor_read_gpa(vgpu, pa,
- p_data, bytes);
- else
- intel_gvt_hypervisor_write_gpa(vgpu, pa,
- p_data, bytes);
- }
}
mutex_unlock(&gvt->lock);
}
@@ -157,7 +138,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
unsigned int offset = 0;
int ret = -EINVAL;
-
if (vgpu->failsafe) {
failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
return 0;
@@ -166,26 +146,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
if (vgpu_gpa_is_aperture(vgpu, pa)) {
ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
- mutex_unlock(&gvt->lock);
- return ret;
- }
-
- if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
- struct intel_vgpu_guest_page *gp;
-
- gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
- if (gp) {
- ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
- p_data, bytes);
- if (ret) {
- gvt_vgpu_err("guest page read error %d, "
- "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
- ret, gp->gfn, pa, *(u32 *)p_data,
- bytes);
- }
- mutex_unlock(&gvt->lock);
- return ret;
- }
+ goto out;
}
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
@@ -205,14 +166,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
p_data, bytes);
if (ret)
goto err;
- mutex_unlock(&gvt->lock);
- return ret;
+ goto out;
}
if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
- mutex_unlock(&gvt->lock);
- return ret;
+ goto out;
}
if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
@@ -228,11 +187,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
goto err;
intel_gvt_mmio_set_accessed(gvt, offset);
- mutex_unlock(&gvt->lock);
- return 0;
+ ret = 0;
+ goto out;
+
err:
gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
offset, bytes);
+out:
mutex_unlock(&gvt->lock);
return ret;
}
@@ -263,26 +224,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
if (vgpu_gpa_is_aperture(vgpu, pa)) {
ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
- mutex_unlock(&gvt->lock);
- return ret;
- }
-
- if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
- struct intel_vgpu_guest_page *gp;
-
- gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
- if (gp) {
- ret = gp->handler(gp, pa, p_data, bytes);
- if (ret) {
- gvt_err("guest page write error %d, "
- "gfn 0x%lx, pa 0x%llx, "
- "var 0x%x, len %d\n",
- ret, gp->gfn, pa,
- *(u32 *)p_data, bytes);
- }
- mutex_unlock(&gvt->lock);
- return ret;
- }
+ goto out;
}
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
@@ -302,14 +244,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
p_data, bytes);
if (ret)
goto err;
- mutex_unlock(&gvt->lock);
- return ret;
+ goto out;
}
if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
- mutex_unlock(&gvt->lock);
- return ret;
+ goto out;
}
ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
@@ -317,11 +257,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
goto err;
intel_gvt_mmio_set_accessed(gvt, offset);
- mutex_unlock(&gvt->lock);
- return 0;
+ ret = 0;
+ goto out;
err:
gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
bytes);
+out:
mutex_unlock(&gvt->lock);
return ret;
}
@@ -342,10 +283,10 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
- vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+ vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
/* set the bit 0:2(Core C-State ) to C0 */
- vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+ vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
vgpu->mmio.disable_warn_untrack = false;
} else {
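
vgpu_vreg() indexes the virtual register file by raw offset, so registers that are now typed i915_reg_t values go through a typed variant instead. A sketch of what vgpu_vreg_t() presumably expands to (an assumption inferred from the conversion pattern; the authoritative macro lives in gvt.h):

	/* Assumed shape of the typed accessor used above. */
	#define vgpu_vreg_t(vgpu, reg) \
		(*(u32 *)((vgpu)->mmio.vreg + i915_mmio_reg_offset(reg)))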
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index dbc04ad2c7a1..71b620875943 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -72,12 +72,9 @@ bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
-
-#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
- typeof(reg) __reg = reg; \
- u32 *offset = (u32 *)&__reg; \
- *offset; \
-})
+int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
+ int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
+ void *data);
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
new file mode 100644
index 000000000000..74834395dd89
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "trace.h"
+
+/**
+ * Defined in Intel Open Source PRM.
+ * Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms
+ */
+#define TRVATTL3PTRDW(i) _MMIO(0x4de0 + (i)*4)
+#define TRNULLDETCT _MMIO(0x4de8)
+#define TRINVTILEDETCT _MMIO(0x4dec)
+#define TRVADR _MMIO(0x4df0)
+#define TRTTE _MMIO(0x4df4)
+#define RING_EXCC(base) _MMIO((base) + 0x28)
+#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
+#define VF_GUARDBAND _MMIO(0x83a4)
+
+/* Raw offset is appended to each line for convenience. */
+static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
+ {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+ {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
+ {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+ {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+ {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+ {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+ {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+ {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+ {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+ {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+ {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+ {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
+ { /* Terminated */ }
+};
+
+static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
+ {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+ {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
+ {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+ {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+ {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+ {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+ {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+ {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+ {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+ {RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
+ {RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
+ {RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
+ {RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
+ {RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
+ {RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
+ {RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
+ {RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
+ {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
+ {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
+ {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
+ {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
+ {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
+ {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
+ {RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
+ {RCS, TRVADR, 0, false}, /* 0x4df0 */
+ {RCS, TRTTE, 0, false}, /* 0x4df4 */
+
+ {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+ {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+ {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
+
+ {VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
+
+ {VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
+
+ {RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
+ {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
+ {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
+
+ {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
+ {RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */
+
+ {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
+ {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
+ {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
+ { /* Terminated */ }
+};
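
Both tables rely on the empty { /* Terminated */ } sentinel instead of an explicit length, so a consumer stops when the register offset reads back as zero (see the loop in switch_mmio() below). A minimal walker sketch under that assumption:

static void walk_engine_mmio(const struct engine_mmio *list)
{
	const struct engine_mmio *mmio;

	/* the terminator entry has a zero register offset */
	for (mmio = list; i915_mmio_reg_offset(mmio->reg); mmio++)
		gvt_dbg_core("ring %d reg %#x in_context=%d\n",
			     mmio->ring_id,
			     i915_mmio_reg_offset(mmio->reg),
			     mmio->in_context);
}
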
+
+static struct {
+ bool initialized;
+ u32 control_table[I915_NUM_ENGINES][64];
+ u32 l3cc_table[32];
+} gen9_render_mocs;
+
+static void load_render_mocs(struct drm_i915_private *dev_priv)
+{
+ i915_reg_t offset;
+ u32 regs[] = {
+ [RCS] = 0xc800,
+ [VCS] = 0xc900,
+ [VCS2] = 0xca00,
+ [BCS] = 0xcc00,
+ [VECS] = 0xcb00,
+ };
+ int ring_id, i;
+
+ for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+ offset.reg = regs[ring_id];
+ for (i = 0; i < 64; i++) {
+ gen9_render_mocs.control_table[ring_id][i] =
+ I915_READ_FW(offset);
+ offset.reg += 4;
+ }
+ }
+
+ offset.reg = 0xb020;
+ for (i = 0; i < 32; i++) {
+ gen9_render_mocs.l3cc_table[i] =
+ I915_READ_FW(offset);
+ offset.reg += 4;
+ }
+ gen9_render_mocs.initialized = true;
+}
+
+static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ enum forcewake_domains fw;
+ i915_reg_t reg;
+ u32 regs[] = {
+ [RCS] = 0x4260,
+ [VCS] = 0x4264,
+ [VCS2] = 0x4268,
+ [BCS] = 0x426c,
+ [VECS] = 0x4270,
+ };
+
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
+ return;
+
+ reg = _MMIO(regs[ring_id]);
+
+ /* WaForceWakeRenderDuringMmioTLBInvalidate:skl
+ * We need to hold a forcewake while invalidating the RCS TLB
+ * caches; otherwise the device can enter RC6 and interrupt the
+ * invalidation process.
+ */
+ fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ FW_REG_READ | FW_REG_WRITE);
+ if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
+ fw |= FORCEWAKE_RENDER;
+
+ intel_uncore_forcewake_get(dev_priv, fw);
+
+ I915_WRITE_FW(reg, 0x1);
+
+ if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
+ gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ else
+ vgpu_vreg_t(vgpu, reg) = 0;
+
+ intel_uncore_forcewake_put(dev_priv, fw);
+
+ gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
+}
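
The bit consumed by test_and_clear_bit() above is set on the producer side when a vGPU writes the ring's TLB-invalidate register; that handler lives in handlers.c, outside this diff. A one-line sketch of the producer half, assuming the same bitmap:

	/* producer sketch: flag ring_id for a deferred TLB flush */
	set_bit(ring_id, (void *)vgpu->submission.tlb_handle_pending);
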
+
+static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
+ int ring_id)
+{
+ struct drm_i915_private *dev_priv;
+ i915_reg_t offset, l3_offset;
+ u32 old_v, new_v;
+
+ u32 regs[] = {
+ [RCS] = 0xc800,
+ [VCS] = 0xc900,
+ [VCS2] = 0xca00,
+ [BCS] = 0xcc00,
+ [VECS] = 0xcb00,
+ };
+ int i;
+
+ dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!pre && !gen9_render_mocs.initialized)
+ load_render_mocs(dev_priv);
+
+ offset.reg = regs[ring_id];
+ for (i = 0; i < 64; i++) {
+ if (pre)
+ old_v = vgpu_vreg_t(pre, offset);
+ else
+ old_v = gen9_render_mocs.control_table[ring_id][i];
+ if (next)
+ new_v = vgpu_vreg_t(next, offset);
+ else
+ new_v = gen9_render_mocs.control_table[ring_id][i];
+
+ if (old_v != new_v)
+ I915_WRITE_FW(offset, new_v);
+
+ offset.reg += 4;
+ }
+
+ if (ring_id == RCS) {
+ l3_offset.reg = 0xb020;
+ for (i = 0; i < 32; i++) {
+ if (pre)
+ old_v = vgpu_vreg_t(pre, l3_offset);
+ else
+ old_v = gen9_render_mocs.l3cc_table[i];
+ if (next)
+ new_v = vgpu_vreg_t(next, l3_offset);
+ else
+ new_v = gen9_render_mocs.l3cc_table[i];
+
+ if (old_v != new_v)
+ I915_WRITE_FW(l3_offset, new_v);
+
+ l3_offset.reg += 4;
+ }
+ }
+}
+
+#define CTX_CONTEXT_CONTROL_VAL 0x03
+
+/* Switch ring mmio values (context). */
+static void switch_mmio(struct intel_vgpu *pre,
+ struct intel_vgpu *next,
+ int ring_id)
+{
+ struct drm_i915_private *dev_priv;
+ struct intel_vgpu_submission *s;
+ u32 *reg_state, ctx_ctrl;
+ u32 inhibit_mask =
+ _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ struct engine_mmio *mmio;
+ u32 old_v, new_v;
+
+ dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ switch_mocs(pre, next, ring_id);
+
+ mmio = dev_priv->gvt->engine_mmio_list;
+ for (; i915_mmio_reg_offset(mmio->reg); mmio++) {
+ if (mmio->ring_id != ring_id)
+ continue;
+ /* save */
+ if (pre) {
+ vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
+ if (mmio->mask)
+ vgpu_vreg_t(pre, mmio->reg) &=
+ ~(mmio->mask << 16);
+ old_v = vgpu_vreg_t(pre, mmio->reg);
+ } else
+ old_v = mmio->value = I915_READ_FW(mmio->reg);
+
+ /* restore */
+ if (next) {
+ s = &next->submission;
+ reg_state =
+ s->shadow_ctx->engine[ring_id].lrc_reg_state;
+ ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
+ /*
+ * if it is an inhibit context, load in_context mmio
+ * into HW by mmio write. If it is not, skip this mmio
+ * write.
+ */
+ if (mmio->in_context &&
+ (ctx_ctrl & inhibit_mask) != inhibit_mask)
+ continue;
+
+ if (mmio->mask)
+ new_v = vgpu_vreg_t(next, mmio->reg) |
+ (mmio->mask << 16);
+ else
+ new_v = vgpu_vreg_t(next, mmio->reg);
+ } else {
+ if (mmio->in_context)
+ continue;
+ if (mmio->mask)
+ new_v = mmio->value | (mmio->mask << 16);
+ else
+ new_v = mmio->value;
+ }
+
+ I915_WRITE_FW(mmio->reg, new_v);
+
+ trace_render_mmio(pre ? pre->id : 0,
+ next ? next->id : 0,
+ "switch",
+ i915_mmio_reg_offset(mmio->reg),
+ old_v, new_v);
+ }
+
+ if (next)
+ handle_tlb_pending_event(next, ring_id);
+}
+
+/**
+ * intel_gvt_switch_mmio - switch mmio context of specific engine
+ * @pre: the last vGPU that owns the engine
+ * @next: the vGPU to switch to
+ * @ring_id: specify the engine
+ *
+ * If pre is null, the host owns the engine. If next is null, we are
+ * switching to a host workload.
+ */
+void intel_gvt_switch_mmio(struct intel_vgpu *pre,
+ struct intel_vgpu *next, int ring_id)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (WARN_ON(!pre && !next))
+ return;
+
+ gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
+ pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
+
+ dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+
+ /*
+ * We are using the raw mmio access wrapper to improve
+ * performance for batch mmio read/write, so we need to
+ * handle forcewake manually.
+ */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ switch_mmio(pre, next, ring_id);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+/**
+ * intel_gvt_init_engine_mmio_context - Initialize the engine mmio list
+ * @gvt: GVT device
+ *
+ */
+void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
+{
+ if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+ gvt->engine_mmio_list = gen9_engine_mmio_list;
+ else
+ gvt->engine_mmio_list = gen8_engine_mmio_list;
+}
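
The list is selected once per device rather than on every switch, which is the point of this new file. A hypothetical call site during device bring-up (the real one is added elsewhere in this series):

	/* must run before the first intel_gvt_switch_mmio() call */
	intel_gvt_init_engine_mmio_context(gvt);
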
diff --git a/drivers/gpu/drm/i915/gvt/render.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 91db1d39d28f..ca2c6a745673 100644
--- a/drivers/gpu/drm/i915/gvt/render.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -36,8 +36,17 @@
#ifndef __GVT_RENDER_H__
#define __GVT_RENDER_H__
+struct engine_mmio {
+ int ring_id;
+ i915_reg_t reg;
+ u32 mask;
+ bool in_context;
+ u32 value;
+};
+
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next, int ring_id);
+void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index f0e5487e6688..ca8005a6d5fa 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -154,51 +154,53 @@ static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
}
/**
- * intel_gvt_hypervisor_set_wp_page - set a guest page to write-protected
+ * intel_gvt_hypervisor_enable_page_track - set a guest page to write-protected
* @vgpu: a vGPU
- * @p: intel_vgpu_guest_page
+ * @t: page track data structure
*
* Returns:
* Zero on success, negative error code if failed.
*/
-static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *p)
+static inline int intel_gvt_hypervisor_enable_page_track(
+ struct intel_vgpu *vgpu,
+ struct intel_vgpu_page_track *t)
{
int ret;
- if (p->writeprotection)
+ if (t->tracked)
return 0;
- ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn);
+ ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, t->gfn);
if (ret)
return ret;
- p->writeprotection = true;
- atomic_inc(&vgpu->gtt.n_write_protected_guest_page);
+ t->tracked = true;
+ atomic_inc(&vgpu->gtt.n_tracked_guest_page);
return 0;
}
/**
- * intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a
+ * intel_gvt_hypervisor_disable_page_track - remove the write-protection of a
* guest page
* @vgpu: a vGPU
- * @p: intel_vgpu_guest_page
+ * @t: page track data structure
*
* Returns:
* Zero on success, negative error code if failed.
*/
-static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *p)
+static inline int intel_gvt_hypervisor_disable_page_track(
+ struct intel_vgpu *vgpu,
+ struct intel_vgpu_page_track *t)
{
int ret;
- if (!p->writeprotection)
+ if (!t->tracked)
return 0;
- ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn);
+ ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, t->gfn);
if (ret)
return ret;
- p->writeprotection = false;
- atomic_dec(&vgpu->gtt.n_write_protected_guest_page);
+ t->tracked = false;
+ atomic_dec(&vgpu->gtt.n_tracked_guest_page);
return 0;
}
@@ -292,4 +294,49 @@ static inline int intel_gvt_hypervisor_set_trap_area(
return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
}
+/**
+ * intel_gvt_hypervisor_set_opregion - Set opregion for guest
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
+{
+ if (!intel_gvt_host.mpt->set_opregion)
+ return 0;
+
+ return intel_gvt_host.mpt->set_opregion(vgpu);
+}
+
+/**
+ * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
+{
+ if (!intel_gvt_host.mpt->get_vfio_device)
+ return 0;
+
+ return intel_gvt_host.mpt->get_vfio_device(vgpu);
+}
+
+/**
+ * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
+ * @vgpu: a vGPU
+ *
+ */
+static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
+{
+ if (!intel_gvt_host.mpt->put_vfio_device)
+ return;
+
+ intel_gvt_host.mpt->put_vfio_device(vgpu);
+}
+
#endif /* _GVT_MPT_H_ */
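
Each wrapper added above treats its MPT hook as optional: a NULL implementation degrades to a no-op instead of an error. Any future optional hook would follow the same shape; a sketch with a deliberately fictitious frob callback:

static inline int intel_gvt_hypervisor_frob(struct intel_vgpu *vgpu)
{
	/* 'frob' is hypothetical: an absent hook means nothing to do */
	if (!intel_gvt_host.mpt->frob)
		return 0;

	return intel_gvt_host.mpt->frob(vgpu);
}
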
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 311799136d7f..8420d1fc3ddb 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -25,36 +25,237 @@
#include "i915_drv.h"
#include "gvt.h"
-static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
+/*
+ * Note: Only for GVT-g virtual VBT generation, other usage must
+ * not do like this.
+ */
+#define _INTEL_BIOS_PRIVATE
+#include "intel_vbt_defs.h"
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_VBT (1<<3)
+
+/* device handle */
+#define DEVICE_TYPE_CRT 0x01
+#define DEVICE_TYPE_EFP1 0x04
+#define DEVICE_TYPE_EFP2 0x40
+#define DEVICE_TYPE_EFP3 0x20
+#define DEVICE_TYPE_EFP4 0x10
+
+#define DEV_SIZE 38
+
+struct opregion_header {
+ u8 signature[16];
+ u32 size;
+ u32 opregion_ver;
+ u8 bios_ver[32];
+ u8 vbios_ver[16];
+ u8 driver_ver[16];
+ u32 mboxes;
+ u32 driver_model;
+ u32 pcon;
+ u8 dver[32];
+ u8 rsvd[124];
+} __packed;
+
+struct bdb_data_header {
+ u8 id;
+ u16 size; /* data size */
+} __packed;
+
+struct efp_child_device_config {
+ u16 handle;
+ u16 device_type;
+ u16 device_class;
+ u8 i2c_speed;
+ u8 dp_onboard_redriver; /* 158 */
+ u8 dp_ondock_redriver; /* 158 */
+ u8 hdmi_level_shifter_value:4; /* 169 */
+ u8 hdmi_max_data_rate:4; /* 204 */
+ u16 dtd_buf_ptr; /* 161 */
+ u8 edidless_efp:1; /* 161 */
+ u8 compression_enable:1; /* 198 */
+ u8 compression_method:1; /* 198 */
+ u8 ganged_edp:1; /* 202 */
+ u8 skip0:4;
+ u8 compression_structure_index:4; /* 198 */
+ u8 skip1:4;
+ u8 slave_port; /* 202 */
+ u8 skip2;
+ u8 dvo_port;
+ u8 i2c_pin; /* for add-in card */
+ u8 slave_addr; /* for add-in card */
+ u8 ddc_pin;
+ u16 edid_ptr;
+ u8 dvo_config;
+ u8 efp_docked_port:1; /* 158 */
+ u8 lane_reversal:1; /* 184 */
+ u8 onboard_lspcon:1; /* 192 */
+ u8 iboost_enable:1; /* 196 */
+ u8 hpd_invert:1; /* BXT 196 */
+ u8 slip3:3;
+ u8 hdmi_compat:1;
+ u8 dp_compat:1;
+ u8 tmds_compat:1;
+ u8 skip4:5;
+ u8 aux_channel;
+ u8 dongle_detect;
+ u8 pipe_cap:2;
+ u8 sdvo_stall:1; /* 158 */
+ u8 hpd_status:2;
+ u8 integrated_encoder:1;
+ u8 skip5:2;
+ u8 dvo_wiring;
+ u8 mipi_bridge_type; /* 171 */
+ u16 device_class_ext;
+ u8 dvo_function;
+ u8 dp_usb_type_c:1; /* 195 */
+ u8 skip6:7;
+ u8 dp_usb_type_c_2x_gpio_index; /* 195 */
+ u16 dp_usb_type_c_2x_gpio_pin; /* 195 */
+ u8 iboost_dp:4; /* 196 */
+ u8 iboost_hdmi:4; /* 196 */
+} __packed;
+
+struct vbt {
+ /* header->bdb_offset point to bdb_header offset */
+ struct vbt_header header;
+ struct bdb_header bdb_header;
+
+ struct bdb_data_header general_features_header;
+ struct bdb_general_features general_features;
+
+ struct bdb_data_header general_definitions_header;
+ struct bdb_general_definitions general_definitions;
+
+ struct efp_child_device_config child0;
+ struct efp_child_device_config child1;
+ struct efp_child_device_config child2;
+ struct efp_child_device_config child3;
+
+ struct bdb_data_header driver_features_header;
+ struct bdb_driver_features driver_features;
+};
+
+static void virt_vbt_generation(struct vbt *v)
{
- u8 *buf;
- int i;
+ int num_child;
+
+ memset(v, 0, sizeof(struct vbt));
+
+ v->header.signature[0] = '$';
+ v->header.signature[1] = 'V';
+ v->header.signature[2] = 'B';
+ v->header.signature[3] = 'T';
+
+ /* some features depend on the version! */
+ v->header.version = 155;
+ v->header.header_size = sizeof(v->header);
+ v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header);
+ v->header.bdb_offset = offsetof(struct vbt, bdb_header);
+
+ strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
+ v->bdb_header.version = 186; /* child_dev_size = 38 */
+ v->bdb_header.header_size = sizeof(v->bdb_header);
+
+ v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
+ - sizeof(struct bdb_header);
+
+ /* general features */
+ v->general_features_header.id = BDB_GENERAL_FEATURES;
+ v->general_features_header.size = sizeof(struct bdb_general_features);
+ v->general_features.int_crt_support = 0;
+ v->general_features.int_tv_support = 0;
+
+ /* child device */
+ num_child = 4; /* each port has one child */
+ v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
+ /* size will include child devices */
+ v->general_definitions_header.size =
+ sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE;
+ v->general_definitions.child_dev_size = DEV_SIZE;
+
+ /* portA */
+ v->child0.handle = DEVICE_TYPE_EFP1;
+ v->child0.device_type = DEVICE_TYPE_DP;
+ v->child0.dvo_port = DVO_PORT_DPA;
+ v->child0.aux_channel = DP_AUX_A;
+ v->child0.dp_compat = true;
+ v->child0.integrated_encoder = true;
+
+ /* portB */
+ v->child1.handle = DEVICE_TYPE_EFP2;
+ v->child1.device_type = DEVICE_TYPE_DP;
+ v->child1.dvo_port = DVO_PORT_DPB;
+ v->child1.aux_channel = DP_AUX_B;
+ v->child1.dp_compat = true;
+ v->child1.integrated_encoder = true;
+
+ /* portC */
+ v->child2.handle = DEVICE_TYPE_EFP3;
+ v->child2.device_type = DEVICE_TYPE_DP;
+ v->child2.dvo_port = DVO_PORT_DPC;
+ v->child2.aux_channel = DP_AUX_C;
+ v->child2.dp_compat = true;
+ v->child2.integrated_encoder = true;
+
+ /* portD */
+ v->child3.handle = DEVICE_TYPE_EFP4;
+ v->child3.device_type = DEVICE_TYPE_DP;
+ v->child3.dvo_port = DVO_PORT_DPD;
+ v->child3.aux_channel = DP_AUX_D;
+ v->child3.dp_compat = true;
+ v->child3.integrated_encoder = true;
+
+ /* driver features */
+ v->driver_features_header.id = BDB_DRIVER_FEATURES;
+ v->driver_features_header.size = sizeof(struct bdb_driver_features);
+ v->driver_features.lvds_config = BDB_DRIVER_FEATURE_NO_LVDS;
+}
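
A consumer finds the generated tables by following the headers rather than hard-coded offsets. A minimal lookup sketch, assuming only the layout built by virt_vbt_generation() above:

/* sketch: locate the BDB inside the virtual VBT built above */
static struct bdb_header *virt_vbt_bdb(struct vbt *v)
{
	if (memcmp(v->header.signature, "$VBT", 4))
		return NULL;	/* signature mismatch: not a VBT */

	/* bdb_offset was set to offsetof(struct vbt, bdb_header) */
	return (struct bdb_header *)((u8 *)v + v->header.bdb_offset);
}
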
- if (WARN((vgpu_opregion(vgpu)->va),
- "vgpu%d: opregion has been initialized already.\n",
- vgpu->id))
- return -EINVAL;
+/**
+ * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
+{
+ u8 *buf;
+ struct opregion_header *header;
+ struct vbt v;
+ const char opregion_signature[16] = OPREGION_SIGNATURE;
+ gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
__GFP_ZERO,
get_order(INTEL_GVT_OPREGION_SIZE));
-
- if (!vgpu_opregion(vgpu)->va)
+ if (!vgpu_opregion(vgpu)->va) {
+ gvt_err("fail to get memory for vgpu virt opregion\n");
return -ENOMEM;
+ }
- memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va,
- INTEL_GVT_OPREGION_SIZE);
-
- for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
- vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
+ /* emulated opregion with VBT mailbox only */
+ buf = (u8 *)vgpu_opregion(vgpu)->va;
+ header = (struct opregion_header *)buf;
+ memcpy(header->signature, opregion_signature,
+ sizeof(opregion_signature));
+ header->size = 0x8;
+ header->opregion_ver = 0x02000000;
+ header->mboxes = MBOX_VBT;
/* for unknown reason, the value in LID field is incorrect
* which block the windows guest, so workaround it by force
* setting it to "OPEN"
*/
- buf = (u8 *)vgpu_opregion(vgpu)->va;
buf[INTEL_GVT_OPREGION_CLID] = 0x3;
+ /* emulated vbt from virt vbt generation */
+ virt_vbt_generation(&v);
+ memcpy(buf + INTEL_GVT_OPREGION_VBT_OFFSET, &v, sizeof(struct vbt));
+
return 0;
}
@@ -79,93 +280,91 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
return ret;
}
}
- return 0;
-}
-/**
- * intel_vgpu_clean_opregion - clean the stuff used to emulate opregion
- * @vgpu: a vGPU
- *
- */
-void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
-{
- gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
-
- if (!vgpu_opregion(vgpu)->va)
- return;
-
- if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
- map_vgpu_opregion(vgpu, false);
- free_pages((unsigned long)vgpu_opregion(vgpu)->va,
- get_order(INTEL_GVT_OPREGION_SIZE));
+ vgpu_opregion(vgpu)->mapped = map;
- vgpu_opregion(vgpu)->va = NULL;
- }
+ return 0;
}
/**
- * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
+ * intel_vgpu_opregion_base_write_handler - Opregion base register write handler
+ *
* @vgpu: a vGPU
* @gpa: guest physical address of opregion
*
* Returns:
* Zero on success, negative error code if failed.
*/
-int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
+int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
{
- int ret;
- gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);
+ int i, ret = 0;
+ unsigned long pfn;
- if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
- gvt_dbg_core("emulate opregion from kernel\n");
+ gvt_dbg_core("emulate opregion from kernel\n");
- ret = init_vgpu_opregion(vgpu, gpa);
- if (ret)
- return ret;
+ switch (intel_gvt_host.hypervisor_type) {
+ case INTEL_GVT_HYPERVISOR_KVM:
+ pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gpa >> PAGE_SHIFT);
+ vgpu_opregion(vgpu)->va_gopregion = memremap(pfn << PAGE_SHIFT,
+ INTEL_GVT_OPREGION_SIZE,
+ MEMREMAP_WB);
+ if (!vgpu_opregion(vgpu)->va_gopregion) {
+ gvt_vgpu_err("failed to map guest opregion\n");
+ ret = -EFAULT;
+ }
+ vgpu_opregion(vgpu)->mapped = true;
+ break;
+ case INTEL_GVT_HYPERVISOR_XEN:
+ /*
+ * A Windows guest on XenGT will write this register twice:
+ * once by the Xen hvmloader and once by the Windows graphics
+ * driver.
+ */
+ if (vgpu_opregion(vgpu)->mapped)
+ map_vgpu_opregion(vgpu, false);
+
+ for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+ vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
ret = map_vgpu_opregion(vgpu, true);
- if (ret)
- return ret;
+ break;
+ default:
+ ret = -EINVAL;
+ gvt_vgpu_err("not supported hypervisor\n");
}
- return 0;
-}
-
-/**
- * intel_gvt_clean_opregion - clean host opergion related stuffs
- * @gvt: a GVT device
- *
- */
-void intel_gvt_clean_opregion(struct intel_gvt *gvt)
-{
- memunmap(gvt->opregion.opregion_va);
- gvt->opregion.opregion_va = NULL;
+ return ret;
}
/**
- * intel_gvt_init_opregion - initialize host opergion related stuffs
- * @gvt: a GVT device
+ * intel_vgpu_clean_opregion - clean the stuff used to emulate opregion
+ * @vgpu: a vGPU
*
- * Returns:
- * Zero on success, negative error code if failed.
*/
-int intel_gvt_init_opregion(struct intel_gvt *gvt)
+void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
{
- gvt_dbg_core("init host opregion\n");
+ gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
- pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
- &gvt->opregion.opregion_pa);
+ if (!vgpu_opregion(vgpu)->va)
+ return;
- gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
- INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
- if (!gvt->opregion.opregion_va) {
- gvt_err("fail to map host opregion\n");
- return -EFAULT;
+ if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
+ if (vgpu_opregion(vgpu)->mapped)
+ map_vgpu_opregion(vgpu, false);
+ } else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
+ if (vgpu_opregion(vgpu)->mapped) {
+ memunmap(vgpu_opregion(vgpu)->va_gopregion);
+ vgpu_opregion(vgpu)->va_gopregion = NULL;
+ }
}
- return 0;
+ free_pages((unsigned long)vgpu_opregion(vgpu)->va,
+ get_order(INTEL_GVT_OPREGION_SIZE));
+
+ vgpu_opregion(vgpu)->va = NULL;
+
}
+
#define GVT_OPREGION_FUNC(scic) \
({ \
u32 __ret; \
@@ -284,8 +483,21 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
u32 *scic, *parm;
u32 func, subfunc;
- scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
- parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+ switch (intel_gvt_host.hypervisor_type) {
+ case INTEL_GVT_HYPERVISOR_XEN:
+ scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
+ parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+ break;
+ case INTEL_GVT_HYPERVISOR_KVM:
+ scic = vgpu_opregion(vgpu)->va_gopregion +
+ INTEL_GVT_OPREGION_SCIC;
+ parm = vgpu_opregion(vgpu)->va_gopregion +
+ INTEL_GVT_OPREGION_PARM;
+ break;
+ default:
+ gvt_vgpu_err("not supported hypervisor\n");
+ return -EINVAL;
+ }
if (!(swsci & SWSCI_SCI_SELECT)) {
gvt_vgpu_err("requesting SMI service\n");
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 7d01c77a0f7a..d4f7ce6dc1d7 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -51,6 +51,9 @@
#define INTEL_GVT_OPREGION_PAGES 2
#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
+#define INTEL_GVT_OPREGION_VBT_OFFSET 0x400
+#define INTEL_GVT_OPREGION_VBT_SIZE \
+ (INTEL_GVT_OPREGION_SIZE - INTEL_GVT_OPREGION_VBT_OFFSET)
#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
@@ -71,6 +74,7 @@
#define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2))
#define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3))
#define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12))
-#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + GTT_PAGE_SIZE)
+#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + \
+ I915_GTT_PAGE_SIZE)
#endif
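
Everything virt_vbt_generation() emits has to fit in the window carved out by these two defines (0x400 up to the end of the 8KB opregion). A compile-time guard would make that explicit; a hedged sketch, not part of the patch (it would sit next to the struct vbt definition in opregion.c):

/* fail the build if the emulated VBT outgrows its opregion window */
static inline void check_virt_vbt_size(void)
{
	BUILD_BUG_ON(sizeof(struct vbt) > INTEL_GVT_OPREGION_VBT_SIZE);
}
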
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
deleted file mode 100644
index 6d066cf35478..000000000000
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Authors:
- * Eddie Dong <eddie.dong@intel.com>
- * Kevin Tian <kevin.tian@intel.com>
- *
- * Contributors:
- * Zhi Wang <zhi.a.wang@intel.com>
- * Changbin Du <changbin.du@intel.com>
- * Zhenyu Wang <zhenyuw@linux.intel.com>
- * Tina Zhang <tina.zhang@intel.com>
- * Bing Niu <bing.niu@intel.com>
- *
- */
-
-#include "i915_drv.h"
-#include "gvt.h"
-#include "trace.h"
-
-struct render_mmio {
- int ring_id;
- i915_reg_t reg;
- u32 mask;
- bool in_context;
- u32 value;
-};
-
-static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = {
- {RCS, _MMIO(0x229c), 0xffff, false},
- {RCS, _MMIO(0x2248), 0x0, false},
- {RCS, _MMIO(0x2098), 0x0, false},
- {RCS, _MMIO(0x20c0), 0xffff, true},
- {RCS, _MMIO(0x24d0), 0, false},
- {RCS, _MMIO(0x24d4), 0, false},
- {RCS, _MMIO(0x24d8), 0, false},
- {RCS, _MMIO(0x24dc), 0, false},
- {RCS, _MMIO(0x24e0), 0, false},
- {RCS, _MMIO(0x24e4), 0, false},
- {RCS, _MMIO(0x24e8), 0, false},
- {RCS, _MMIO(0x24ec), 0, false},
- {RCS, _MMIO(0x24f0), 0, false},
- {RCS, _MMIO(0x24f4), 0, false},
- {RCS, _MMIO(0x24f8), 0, false},
- {RCS, _MMIO(0x24fc), 0, false},
- {RCS, _MMIO(0x7004), 0xffff, true},
- {RCS, _MMIO(0x7008), 0xffff, true},
- {RCS, _MMIO(0x7000), 0xffff, true},
- {RCS, _MMIO(0x7010), 0xffff, true},
- {RCS, _MMIO(0x7300), 0xffff, true},
- {RCS, _MMIO(0x83a4), 0xffff, true},
-
- {BCS, _MMIO(0x2229c), 0xffff, false},
- {BCS, _MMIO(0x2209c), 0xffff, false},
- {BCS, _MMIO(0x220c0), 0xffff, false},
- {BCS, _MMIO(0x22098), 0x0, false},
- {BCS, _MMIO(0x22028), 0x0, false},
-};
-
-static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = {
- {RCS, _MMIO(0x229c), 0xffff, false},
- {RCS, _MMIO(0x2248), 0x0, false},
- {RCS, _MMIO(0x2098), 0x0, false},
- {RCS, _MMIO(0x20c0), 0xffff, true},
- {RCS, _MMIO(0x24d0), 0, false},
- {RCS, _MMIO(0x24d4), 0, false},
- {RCS, _MMIO(0x24d8), 0, false},
- {RCS, _MMIO(0x24dc), 0, false},
- {RCS, _MMIO(0x24e0), 0, false},
- {RCS, _MMIO(0x24e4), 0, false},
- {RCS, _MMIO(0x24e8), 0, false},
- {RCS, _MMIO(0x24ec), 0, false},
- {RCS, _MMIO(0x24f0), 0, false},
- {RCS, _MMIO(0x24f4), 0, false},
- {RCS, _MMIO(0x24f8), 0, false},
- {RCS, _MMIO(0x24fc), 0, false},
- {RCS, _MMIO(0x7004), 0xffff, true},
- {RCS, _MMIO(0x7008), 0xffff, true},
- {RCS, _MMIO(0x7000), 0xffff, true},
- {RCS, _MMIO(0x7010), 0xffff, true},
- {RCS, _MMIO(0x7300), 0xffff, true},
- {RCS, _MMIO(0x83a4), 0xffff, true},
-
- {RCS, _MMIO(0x40e0), 0, false},
- {RCS, _MMIO(0x40e4), 0, false},
- {RCS, _MMIO(0x2580), 0xffff, true},
- {RCS, _MMIO(0x7014), 0xffff, true},
- {RCS, _MMIO(0x20ec), 0xffff, false},
- {RCS, _MMIO(0xb118), 0, false},
- {RCS, _MMIO(0xe100), 0xffff, true},
- {RCS, _MMIO(0xe180), 0xffff, true},
- {RCS, _MMIO(0xe184), 0xffff, true},
- {RCS, _MMIO(0xe188), 0xffff, true},
- {RCS, _MMIO(0xe194), 0xffff, true},
- {RCS, _MMIO(0x4de0), 0, false},
- {RCS, _MMIO(0x4de4), 0, false},
- {RCS, _MMIO(0x4de8), 0, false},
- {RCS, _MMIO(0x4dec), 0, false},
- {RCS, _MMIO(0x4df0), 0, false},
- {RCS, _MMIO(0x4df4), 0, false},
-
- {BCS, _MMIO(0x2229c), 0xffff, false},
- {BCS, _MMIO(0x2209c), 0xffff, false},
- {BCS, _MMIO(0x220c0), 0xffff, false},
- {BCS, _MMIO(0x22098), 0x0, false},
- {BCS, _MMIO(0x22028), 0x0, false},
-
- {VCS2, _MMIO(0x1c028), 0xffff, false},
-
- {VECS, _MMIO(0x1a028), 0xffff, false},
-
- {RCS, _MMIO(0x7304), 0xffff, true},
- {RCS, _MMIO(0x2248), 0x0, false},
- {RCS, _MMIO(0x940c), 0x0, false},
- {RCS, _MMIO(0x4ab8), 0x0, false},
-
- {RCS, _MMIO(0x4ab0), 0x0, false},
- {RCS, _MMIO(0x20d4), 0x0, false},
-
- {RCS, _MMIO(0xb004), 0x0, false},
- {RCS, _MMIO(0x20a0), 0x0, false},
- {RCS, _MMIO(0x20e4), 0xffff, false},
-};
-
-static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
-static u32 gen9_render_mocs_L3[32];
-
-static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- enum forcewake_domains fw;
- i915_reg_t reg;
- u32 regs[] = {
- [RCS] = 0x4260,
- [VCS] = 0x4264,
- [VCS2] = 0x4268,
- [BCS] = 0x426c,
- [VECS] = 0x4270,
- };
-
- if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
- return;
-
- if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
- return;
-
- reg = _MMIO(regs[ring_id]);
-
- /* WaForceWakeRenderDuringMmioTLBInvalidate:skl
- * we need to put a forcewake when invalidating RCS TLB caches,
- * otherwise device can go to RC6 state and interrupt invalidation
- * process
- */
- fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
- FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
- fw |= FORCEWAKE_RENDER;
-
- intel_uncore_forcewake_get(dev_priv, fw);
-
- I915_WRITE_FW(reg, 0x1);
-
- if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
- gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
- else
- vgpu_vreg(vgpu, regs[ring_id]) = 0;
-
- intel_uncore_forcewake_put(dev_priv, fw);
-
- gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
-}
-
-static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- i915_reg_t offset, l3_offset;
- u32 regs[] = {
- [RCS] = 0xc800,
- [VCS] = 0xc900,
- [VCS2] = 0xca00,
- [BCS] = 0xcc00,
- [VECS] = 0xcb00,
- };
- int i;
-
- if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
- return;
-
- offset.reg = regs[ring_id];
- for (i = 0; i < 64; i++) {
- gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
- I915_WRITE(offset, vgpu_vreg(vgpu, offset));
- offset.reg += 4;
- }
-
- if (ring_id == RCS) {
- l3_offset.reg = 0xb020;
- for (i = 0; i < 32; i++) {
- gen9_render_mocs_L3[i] = I915_READ_FW(l3_offset);
- I915_WRITE_FW(l3_offset, vgpu_vreg(vgpu, l3_offset));
- l3_offset.reg += 4;
- }
- }
-}
-
-static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- i915_reg_t offset, l3_offset;
- u32 regs[] = {
- [RCS] = 0xc800,
- [VCS] = 0xc900,
- [VCS2] = 0xca00,
- [BCS] = 0xcc00,
- [VECS] = 0xcb00,
- };
- int i;
-
- if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
- return;
-
- offset.reg = regs[ring_id];
- for (i = 0; i < 64; i++) {
- vgpu_vreg(vgpu, offset) = I915_READ_FW(offset);
- I915_WRITE_FW(offset, gen9_render_mocs[ring_id][i]);
- offset.reg += 4;
- }
-
- if (ring_id == RCS) {
- l3_offset.reg = 0xb020;
- for (i = 0; i < 32; i++) {
- vgpu_vreg(vgpu, l3_offset) = I915_READ_FW(l3_offset);
- I915_WRITE_FW(l3_offset, gen9_render_mocs_L3[i]);
- l3_offset.reg += 4;
- }
- }
-}
-
-#define CTX_CONTEXT_CONTROL_VAL 0x03
-
-/* Switch ring mmio values (context) from host to a vgpu. */
-static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct render_mmio *mmio;
- u32 v;
- int i, array_size;
- u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
- u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
- u32 inhibit_mask =
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- i915_reg_t last_reg = _MMIO(0);
-
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
- mmio = gen9_render_mmio_list;
- array_size = ARRAY_SIZE(gen9_render_mmio_list);
- load_mocs(vgpu, ring_id);
- } else {
- mmio = gen8_render_mmio_list;
- array_size = ARRAY_SIZE(gen8_render_mmio_list);
- }
-
- for (i = 0; i < array_size; i++, mmio++) {
- if (mmio->ring_id != ring_id)
- continue;
-
- mmio->value = I915_READ_FW(mmio->reg);
-
- /*
- * if it is an inhibit context, load in_context mmio
- * into HW by mmio write. If it is not, skip this mmio
- * write.
- */
- if (mmio->in_context &&
- ((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
- i915_modparams.enable_execlists)
- continue;
-
- if (mmio->mask)
- v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
- else
- v = vgpu_vreg(vgpu, mmio->reg);
-
- I915_WRITE_FW(mmio->reg, v);
- last_reg = mmio->reg;
-
- trace_render_mmio(vgpu->id, "load",
- i915_mmio_reg_offset(mmio->reg),
- mmio->value, v);
- }
-
- /* Make sure the swiched MMIOs has taken effect. */
- if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
- I915_READ_FW(last_reg);
-
- handle_tlb_pending_event(vgpu, ring_id);
-}
-
-/* Switch ring mmio values (context) from vgpu to host. */
-static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct render_mmio *mmio;
- i915_reg_t last_reg = _MMIO(0);
- u32 v;
- int i, array_size;
-
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- mmio = gen9_render_mmio_list;
- array_size = ARRAY_SIZE(gen9_render_mmio_list);
- restore_mocs(vgpu, ring_id);
- } else {
- mmio = gen8_render_mmio_list;
- array_size = ARRAY_SIZE(gen8_render_mmio_list);
- }
-
- for (i = 0; i < array_size; i++, mmio++) {
- if (mmio->ring_id != ring_id)
- continue;
-
- vgpu_vreg(vgpu, mmio->reg) = I915_READ_FW(mmio->reg);
-
- if (mmio->mask) {
- vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
- v = mmio->value | (mmio->mask << 16);
- } else
- v = mmio->value;
-
- if (mmio->in_context)
- continue;
-
- I915_WRITE_FW(mmio->reg, v);
- last_reg = mmio->reg;
-
- trace_render_mmio(vgpu->id, "restore",
- i915_mmio_reg_offset(mmio->reg),
- mmio->value, v);
- }
-
- /* Make sure the swiched MMIOs has taken effect. */
- if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
- I915_READ_FW(last_reg);
-}
-
-/**
- * intel_gvt_switch_render_mmio - switch mmio context of specific engine
- * @pre: the last vGPU that own the engine
- * @next: the vGPU to switch to
- * @ring_id: specify the engine
- *
- * If pre is null indicates that host own the engine. If next is null
- * indicates that we are switching to host workload.
- */
-void intel_gvt_switch_mmio(struct intel_vgpu *pre,
- struct intel_vgpu *next, int ring_id)
-{
- struct drm_i915_private *dev_priv;
-
- if (WARN_ON(!pre && !next))
- return;
-
- gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
- pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
-
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-
- /**
- * We are using raw mmio access wrapper to improve the
- * performace for batch mmio read/write, so we need
- * handle forcewake mannually.
- */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-
- /**
- * TODO: Optimize for vGPU to vGPU switch by merging
- * switch_mmio_to_host() and switch_mmio_to_vgpu().
- */
- if (pre)
- switch_mmio_to_host(pre, ring_id);
-
- if (next)
- switch_mmio_to_vgpu(next, ring_id);
-
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 03532dfc0cd5..eea1a2f92099 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -372,6 +372,11 @@ void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}
+void intel_gvt_kick_schedule(struct intel_gvt *gvt)
+{
+ intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+}
+
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
struct intel_gvt_workload_scheduler *scheduler =
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
index ba00a5f7455f..7b59e3e88b8b 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.h
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -57,4 +57,6 @@ void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);
+void intel_gvt_kick_schedule(struct intel_gvt *gvt);
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 69f8f0d155b9..0056638b0c16 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -57,7 +57,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
@@ -81,16 +81,16 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
while (i < context_page_num) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
- GTT_PAGE_SHIFT));
+ I915_GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("Invalid guest context descriptor\n");
- return -EINVAL;
+ return -EFAULT;
}
page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
dst = kmap(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
- GTT_PAGE_SIZE);
+ I915_GTT_PAGE_SIZE);
kunmap(page);
i++;
}
@@ -120,7 +120,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
sizeof(*shadow_ring_context),
- GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+ I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
kunmap(page);
return 0;
@@ -248,7 +248,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
return 0;
}
-void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
if (!wa_ctx->indirect_ctx.obj)
return;
@@ -267,11 +267,12 @@ void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
*/
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
- struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct intel_vgpu *vgpu = workload->vgpu;
struct intel_ring *ring;
int ret;
@@ -284,7 +285,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
+ if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
shadow_context_descriptor_update(shadow_ctx,
dev_priv->engine[ring_id]);
@@ -327,14 +328,15 @@ err_scan:
return ret;
}
-int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
{
int ring_id = workload->ring_id;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
- struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
int ret;
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
@@ -358,11 +360,203 @@ err_unpin:
return ret;
}
+static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
+
+static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
+ struct intel_gvt *gvt = workload->vgpu->gvt;
+ const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
+ struct intel_vgpu_shadow_bb *bb;
+ int ret;
+
+ list_for_each_entry(bb, &workload->shadow_bb, list) {
+ bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0);
+ if (IS_ERR(bb->vma)) {
+ ret = PTR_ERR(bb->vma);
+ goto err;
+ }
+
+ /* relocate shadow batch buffer */
+ bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
+ if (gmadr_bytes == 8)
+ bb->bb_start_cmd_va[2] = 0;
+
+ /* No one is going to touch shadow bb from now on. */
+ if (bb->clflush & CLFLUSH_AFTER) {
+ drm_clflush_virt_range(bb->va, bb->obj->base.size);
+ bb->clflush &= ~CLFLUSH_AFTER;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(bb->obj, false);
+ if (ret)
+ goto err;
+
+ i915_gem_obj_finish_shmem_access(bb->obj);
+ bb->accessing = false;
+
+ i915_vma_move_to_active(bb->vma, workload->req, 0);
+ }
+ return 0;
+err:
+ release_shadow_batch_buffer(workload);
+ return ret;
+}
+
+static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
+ int ring_id = workload->ring_id;
+ struct intel_vgpu_submission *s = &workload->vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap_atomic(page);
+
+ shadow_ring_context->bb_per_ctx_ptr.val =
+ (shadow_ring_context->bb_per_ctx_ptr.val &
+ (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
+ shadow_ring_context->rcs_indirect_ctx.val =
+ (shadow_ring_context->rcs_indirect_ctx.val &
+ (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
+
+ kunmap_atomic(shadow_ring_context);
+ return 0;
+}
+
+static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ struct i915_vma *vma;
+ unsigned char *per_ctx_va =
+ (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
+ wa_ctx->indirect_ctx.size;
+
+ if (wa_ctx->indirect_ctx.size == 0)
+ return 0;
+
+ vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
+ 0, CACHELINE_BYTES, 0);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ /* FIXME: we are not tracking our pinned VMA leaving it
+ * up to the core to fix up the stray pin_count upon
+ * free.
+ */
+
+ wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
+
+ wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
+ memset(per_ctx_va, 0, CACHELINE_BYTES);
+
+ update_wa_ctx_2_shadow_ctx(wa_ctx);
+ return 0;
+}
+
+static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_vgpu_shadow_bb *bb, *pos;
+
+ if (list_empty(&workload->shadow_bb))
+ return;
+
+ bb = list_first_entry(&workload->shadow_bb,
+ struct intel_vgpu_shadow_bb, list);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
+ if (bb->obj) {
+ if (bb->accessing)
+ i915_gem_obj_finish_shmem_access(bb->obj);
+
+ if (bb->va && !IS_ERR(bb->va))
+ i915_gem_object_unpin_map(bb->obj);
+
+ if (bb->vma && !IS_ERR(bb->vma)) {
+ i915_vma_unpin(bb->vma);
+ i915_vma_close(bb->vma);
+ }
+ __i915_gem_object_release_unless_active(bb->obj);
+ }
+ list_del(&bb->list);
+ kfree(bb);
+ }
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+static int prepare_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ int ret = 0;
+
+ ret = intel_vgpu_pin_mm(workload->shadow_mm);
+ if (ret) {
+ gvt_vgpu_err("fail to vgpu pin mm\n");
+ return ret;
+ }
+
+ ret = intel_vgpu_sync_oos_pages(workload->vgpu);
+ if (ret) {
+ gvt_vgpu_err("fail to vgpu sync oos pages\n");
+ goto err_unpin_mm;
+ }
+
+ ret = intel_vgpu_flush_post_shadow(workload->vgpu);
+ if (ret) {
+ gvt_vgpu_err("fail to flush post shadow\n");
+ goto err_unpin_mm;
+ }
+
+ ret = intel_gvt_generate_request(workload);
+ if (ret) {
+ gvt_vgpu_err("fail to generate request\n");
+ goto err_unpin_mm;
+ }
+
+ ret = prepare_shadow_batch_buffer(workload);
+ if (ret) {
+ gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
+ goto err_unpin_mm;
+ }
+
+ ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
+ if (ret) {
+ gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
+ goto err_shadow_batch;
+ }
+
+ if (workload->prepare) {
+ ret = workload->prepare(workload);
+ if (ret)
+ goto err_shadow_wa_ctx;
+ }
+
+ return 0;
+err_shadow_wa_ctx:
+ release_shadow_wa_ctx(&workload->wa_ctx);
+err_shadow_batch:
+ release_shadow_batch_buffer(workload);
+err_unpin_mm:
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+ return ret;
+}
+
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
- struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
int ret = 0;
@@ -375,12 +569,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
if (ret)
goto out;
- if (workload->prepare) {
- ret = workload->prepare(workload);
- if (ret) {
- engine->context_unpin(engine, shadow_ctx);
- goto out;
- }
+ ret = prepare_workload(workload);
+ if (ret) {
+ engine->context_unpin(engine, shadow_ctx);
+ goto out;
}
out:
@@ -448,7 +640,7 @@ static struct intel_vgpu_workload *pick_next_workload(
gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
- atomic_inc(&workload->vgpu->running_workload_num);
+ atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
mutex_unlock(&gvt->lock);
return workload;
@@ -458,8 +650,9 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
@@ -483,7 +676,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
while (i < context_page_num) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
- GTT_PAGE_SHIFT));
+ I915_GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("invalid guest context descriptor\n");
return;
@@ -492,7 +685,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
src = kmap(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
- GTT_PAGE_SIZE);
+ I915_GTT_PAGE_SIZE);
kunmap(page);
i++;
}
@@ -517,23 +710,41 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
sizeof(*shadow_ring_context),
- GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+ I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
kunmap(page);
}
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+{
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine;
+ struct intel_vgpu_workload *pos, *n;
+ unsigned int tmp;
+
+ /* free the unsubmitted workloads in the queues. */
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ list_for_each_entry_safe(pos, n,
+ &s->workload_q_head[engine->id], list) {
+ list_del_init(&pos->list);
+ intel_vgpu_destroy_workload(pos);
+ }
+ clear_bit(engine->id, s->shadow_ctx_desc_updated);
+ }
+}
+
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- struct intel_vgpu_workload *workload;
- struct intel_vgpu *vgpu;
+ struct intel_vgpu_workload *workload =
+ scheduler->current_workload[ring_id];
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
int event;
mutex_lock(&gvt->lock);
- workload = scheduler->current_workload[ring_id];
- vgpu = workload->vgpu;
-
/* For the workload w/ request, needs to wait for the context
* switch to make sure request is completed.
* For the workload w/o request, directly complete the workload.
@@ -570,7 +781,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
}
mutex_lock(&dev_priv->drm.struct_mutex);
/* unpin shadow ctx as the shadow_ctx update is done */
- engine->context_unpin(engine, workload->vgpu->shadow_ctx);
+ engine->context_unpin(engine, s->shadow_ctx);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
@@ -580,9 +791,32 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
scheduler->current_workload[ring_id] = NULL;
list_del_init(&workload->list);
+
+ if (!workload->status) {
+ release_shadow_batch_buffer(workload);
+ release_shadow_wa_ctx(&workload->wa_ctx);
+ }
+
+ if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+ /* If workload->status is not successful, the HW GPU hit a
+ * hang or something went wrong with i915/GVT, and GVT won't
+ * inject a context switch interrupt to the guest. So to the
+ * guest this error is effectively a vGPU hang, and we should
+ * emulate one. If there are pending workloads already
+ * submitted by the guest, clean them up the way the HW GPU
+ * would.
+ *
+ * If we are in the middle of an engine reset, the pending
+ * workloads won't be submitted to the HW GPU and will be
+ * cleaned up later during the reset, so doing the cleanup
+ * here has no impact.
+ */
+ clean_workloads(vgpu, ENGINE_MASK(ring_id));
+ }
+
workload->complete(workload);
- atomic_dec(&vgpu->running_workload_num);
+ atomic_dec(&s->running_workload_num);
wake_up(&scheduler->workload_complete_wq);
if (gvt->scheduler.need_reschedule)
@@ -665,20 +899,23 @@ complete:
FORCEWAKE_ALL);
intel_runtime_pm_put(gvt->dev_priv);
+ if (ret && (vgpu_is_vm_unhealthy(ret)))
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
return 0;
}
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
+ struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- if (atomic_read(&vgpu->running_workload_num)) {
+ if (atomic_read(&s->running_workload_num)) {
gvt_dbg_sched("wait vgpu idle\n");
wait_event(scheduler->workload_complete_wq,
- !atomic_read(&vgpu->running_workload_num));
+ !atomic_read(&s->running_workload_num));
}
}
@@ -743,26 +980,369 @@ err:
return ret;
}
-void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
+/**
+ * intel_vgpu_clean_submission - free submission-related resources for vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is called when a vGPU is being destroyed.
+ *
+ */
+void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
- i915_gem_context_put(vgpu->shadow_ctx);
+ struct intel_vgpu_submission *s = &vgpu->submission;
+
+ intel_vgpu_select_submission_ops(vgpu, 0);
+ i915_gem_context_put(s->shadow_ctx);
+ kmem_cache_destroy(s->workloads);
}
-int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
+
+/**
+ * intel_vgpu_reset_submission - reset submission-related resources for vGPU
+ * @vgpu: a vGPU
+ * @engine_mask: engines expected to be reset
+ *
+ * This function is called when a vGPU is being reset.
+ *
+ */
+void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
+ unsigned long engine_mask)
{
- atomic_set(&vgpu->running_workload_num, 0);
+ struct intel_vgpu_submission *s = &vgpu->submission;
- vgpu->shadow_ctx = i915_gem_context_create_gvt(
+ if (!s->active)
+ return;
+
+ clean_workloads(vgpu, engine_mask);
+ s->ops->reset(vgpu, engine_mask);
+}
+
+/**
+ * intel_vgpu_setup_submission - set up submission-related resources for vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is called when a vGPU is being created.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+ int ret;
+
+ s->shadow_ctx = i915_gem_context_create_gvt(
&vgpu->gvt->dev_priv->drm);
- if (IS_ERR(vgpu->shadow_ctx))
- return PTR_ERR(vgpu->shadow_ctx);
+ if (IS_ERR(s->shadow_ctx))
+ return PTR_ERR(s->shadow_ctx);
+
+ if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
+ s->shadow_ctx->priority = INT_MAX;
+
+ bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
- if (INTEL_INFO(vgpu->gvt->dev_priv)->has_logical_ring_preemption)
- vgpu->shadow_ctx->priority = INT_MAX;
+ s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
+ sizeof(struct intel_vgpu_workload), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
- vgpu->shadow_ctx->engine[RCS].initialised = true;
+ if (!s->workloads) {
+ ret = -ENOMEM;
+ goto out_shadow_ctx;
+ }
+
+ for_each_engine(engine, vgpu->gvt->dev_priv, i)
+ INIT_LIST_HEAD(&s->workload_q_head[i]);
- bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
+ atomic_set(&s->running_workload_num, 0);
+ bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
return 0;
+
+out_shadow_ctx:
+ i915_gem_context_put(s->shadow_ctx);
+ return ret;
+}
+
+/**
+ * intel_vgpu_select_submission_ops - select virtual submission interface
+ * @vgpu: a vGPU
+ * @interface: expected vGPU virtual submission interface
+ *
+ * This function is called when guest configures submission interface.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+ unsigned int interface)
+{
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ const struct intel_vgpu_submission_ops *ops[] = {
+ [INTEL_VGPU_EXECLIST_SUBMISSION] =
+ &intel_vgpu_execlist_submission_ops,
+ };
+ int ret;
+
+ if (WARN_ON(interface >= ARRAY_SIZE(ops)))
+ return -EINVAL;
+
+ if (s->active) {
+ s->ops->clean(vgpu);
+ s->active = false;
+ gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n",
+ vgpu->id, s->ops->name);
+ }
+
+ if (interface == 0) {
+ s->ops = NULL;
+ s->virtual_submission_interface = 0;
+ gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id);
+ return 0;
+ }
+
+ ret = ops[interface]->init(vgpu);
+ if (ret)
+ return ret;
+
+ s->ops = ops[interface];
+ s->virtual_submission_interface = interface;
+ s->active = true;
+
+ gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
+ vgpu->id, s->ops->name);
+
+ return 0;
+}
+
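/*
 * A minimal userspace sketch of the ops-table dispatch implemented by
 * intel_vgpu_select_submission_ops() above: interface 0 means "no
 * submission ops", any other index is bounds-checked against a table of
 * named backends, and the currently active backend is cleaned up before
 * the new one is initialised. The execlist backend here is a stub and
 * all names are illustrative, not the kernel's.
 */
#include <stdio.h>

struct submission_ops {
	const char *name;
	int (*init)(void);
	void (*clean)(void);
};

static int execlist_init(void) { puts("execlist init"); return 0; }
static void execlist_clean(void) { puts("execlist clean"); }

static const struct submission_ops execlist_ops = {
	.name = "execlist", .init = execlist_init, .clean = execlist_clean,
};

static const struct submission_ops *ops_table[] = { NULL, &execlist_ops };
static const struct submission_ops *active;

static int select_ops(unsigned int interface)
{
	if (interface >= sizeof(ops_table) / sizeof(ops_table[0]))
		return -1;			/* out of range */
	if (active) {
		active->clean();		/* de-select current backend */
		active = NULL;
	}
	if (interface == 0)
		return 0;			/* no submission ops */
	if (ops_table[interface]->init())
		return -1;
	active = ops_table[interface];
	printf("activated [ %s ]\n", active->name);
	return 0;
}

int main(void)
{
	select_ops(1);
	select_ops(0);
	return 0;
}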
+/**
+ * intel_vgpu_destroy_workload - destroy a vGPU workload
+ * @workload: the vGPU workload to destroy
+ *
+ * This function is called when destroying a vGPU workload.
+ *
+ */
+void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu_submission *s = &workload->vgpu->submission;
+
+ if (workload->shadow_mm)
+ intel_gvt_mm_unreference(workload->shadow_mm);
+
+ kmem_cache_free(s->workloads, workload);
+}
+
+static struct intel_vgpu_workload *
+alloc_workload(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_vgpu_workload *workload;
+
+ workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
+ if (!workload)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&workload->list);
+ INIT_LIST_HEAD(&workload->shadow_bb);
+
+ init_waitqueue_head(&workload->shadow_ctx_status_wq);
+ atomic_set(&workload->shadow_ctx_active, 0);
+
+ workload->status = -EINPROGRESS;
+ workload->shadowed = false;
+ workload->vgpu = vgpu;
+
+ return workload;
+}
+
+#define RING_CTX_OFF(x) \
+ offsetof(struct execlist_ring_context, x)
+
+static void read_guest_pdps(struct intel_vgpu *vgpu,
+ u64 ring_context_gpa, u32 pdp[8])
+{
+ u64 gpa;
+ int i;
+
+ gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
+
+ for (i = 0; i < 8; i++)
+ intel_gvt_hypervisor_read_gpa(vgpu,
+ gpa + i * 8, &pdp[7 - i], 4);
+}
+
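/*
 * A sketch of the dword reversal in read_guest_pdps() above: the guest
 * ring context stores PDP3..PDP0 from low offset to high, so the eight
 * dwords are read in order and stored back-to-front, yielding pdp[0..7]
 * in ascending PDP order. memcpy() stands in for the hypervisor GPA read
 * and the fake values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* fake guest layout: pdp3_UDW, pdp3_LDW, ..., pdp0_UDW, pdp0_LDW */
	uint32_t guest[8] = { 0x30, 0x31, 0x20, 0x21, 0x10, 0x11, 0x00, 0x01 };
	uint32_t pdp[8];

	for (int i = 0; i < 8; i++)
		memcpy(&pdp[7 - i], &guest[i], sizeof(uint32_t));

	for (int i = 0; i < 8; i++)
		printf("pdp[%d] = %#x\n", i, pdp[i]);
	return 0;
}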
+static int prepare_mm(struct intel_vgpu_workload *workload)
+{
+ struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
+ struct intel_vgpu_mm *mm;
+ struct intel_vgpu *vgpu = workload->vgpu;
+ int page_table_level;
+ u32 pdp[8];
+
+ if (desc->addressing_mode == 1) { /* legacy 32-bit */
+ page_table_level = 3;
+ } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
+ page_table_level = 4;
+ } else {
+ gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
+ return -EINVAL;
+ }
+
+ read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
+
+ mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
+ if (mm) {
+ intel_gvt_mm_reference(mm);
+ } else {
+ mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
+ pdp, page_table_level, 0);
+ if (IS_ERR(mm)) {
+ gvt_vgpu_err("fail to create mm object.\n");
+ return PTR_ERR(mm);
+ }
+ }
+ workload->shadow_mm = mm;
+ return 0;
+}
+
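/*
 * A sketch of the context-descriptor decode at the top of prepare_mm()
 * above: legacy 32-bit contexts use a 3-level page table, legacy 64-bit
 * contexts use 4 levels, and the advanced (SVM) mode is rejected. The
 * mode values 1 and 3 mirror the execlist descriptor format shown above.
 */
#include <stdio.h>

static int page_table_levels(int addressing_mode)
{
	switch (addressing_mode) {
	case 1: return 3;	/* legacy 32-bit */
	case 3: return 4;	/* legacy 64-bit */
	default: return -1;	/* advanced/SVM: unsupported */
	}
}

int main(void)
{
	for (int mode = 0; mode < 4; mode++)
		printf("mode %d -> %d levels\n", mode, page_table_levels(mode));
	return 0;
}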
+#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
+ ((a)->lrca == (b)->lrca))
+
+#define get_last_workload(q) \
+ (list_empty(q) ? NULL : container_of(q->prev, \
+ struct intel_vgpu_workload, list))
+/**
+ * intel_vgpu_create_workload - create a vGPU workload
+ * @vgpu: a vGPU
+ * @ring_id: ring ID of the workload
+ * @desc: a guest context descriptor
+ *
+ * This function is called when creating a vGPU workload.
+ *
+ * Returns:
+ * struct intel_vgpu_workload * on success, or an ERR_PTR-encoded
+ * negative error code on failure.
+ *
+ */
+struct intel_vgpu_workload *
+intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+ struct execlist_ctx_descriptor_format *desc)
+{
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct list_head *q = workload_q_head(vgpu, ring_id);
+ struct intel_vgpu_workload *last_workload = get_last_workload(q);
+ struct intel_vgpu_workload *workload = NULL;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u64 ring_context_gpa;
+ u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+ int ret;
+
+ ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
+ if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
+ return ERR_PTR(-EINVAL);
+ }
+
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ring_header.val), &head, 4);
+
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ring_tail.val), &tail, 4);
+
+ head &= RB_HEAD_OFF_MASK;
+ tail &= RB_TAIL_OFF_MASK;
+
+ if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
+ gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
+ gvt_dbg_el("ctx head %x real head %lx\n", head,
+ last_workload->rb_tail);
+ /*
+ * cannot use guest context head pointer here,
+ * as it might not be updated at this time
+ */
+ head = last_workload->rb_tail;
+ }
+
+ gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
+
+ /* record some ring buffer register values for scan and shadow */
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rb_start.val), &start, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
+
+ workload = alloc_workload(vgpu);
+ if (IS_ERR(workload))
+ return workload;
+
+ workload->ring_id = ring_id;
+ workload->ctx_desc = *desc;
+ workload->ring_context_gpa = ring_context_gpa;
+ workload->rb_head = head;
+ workload->rb_tail = tail;
+ workload->rb_start = start;
+ workload->rb_ctl = ctl;
+
+ if (ring_id == RCS) {
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
+
+ workload->wa_ctx.indirect_ctx.guest_gma =
+ indirect_ctx & INDIRECT_CTX_ADDR_MASK;
+ workload->wa_ctx.indirect_ctx.size =
+ (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
+ CACHELINE_BYTES;
+ workload->wa_ctx.per_ctx.guest_gma =
+ per_ctx & PER_CTX_ADDR_MASK;
+ workload->wa_ctx.per_ctx.valid = per_ctx & 1;
+ }
+
+ gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
+ workload, ring_id, head, tail, start, ctl);
+
+ ret = prepare_mm(workload);
+ if (ret) {
+ kmem_cache_free(s->workloads, workload);
+ return ERR_PTR(ret);
+ }
+
+ /* Only scan and shadow the first workload in the queue
+ * as there is only one pre-allocated buffer object for shadowing.
+ */
+ if (list_empty(workload_q_head(vgpu, ring_id))) {
+ intel_runtime_pm_get(dev_priv);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ ret = intel_gvt_scan_and_shadow_workload(workload);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(dev_priv);
+ }
+
+ if (ret && (vgpu_is_vm_unhealthy(ret))) {
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+ intel_vgpu_destroy_workload(workload);
+ return ERR_PTR(ret);
+ }
+
+ return workload;
+}
+
+/**
+ * intel_vgpu_queue_workload - queue a vGPU workload
+ * @workload: the workload to queue
+ */
+void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
+{
+ list_add_tail(&workload->list,
+ workload_q_head(workload->vgpu, workload->ring_id));
+ intel_gvt_kick_schedule(workload->vgpu->gvt);
+ wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index b9f872204d7e..3de77dfa7c59 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -112,24 +112,20 @@ struct intel_vgpu_workload {
struct intel_shadow_wa_ctx wa_ctx;
};
-/* Intel shadow batch buffer is a i915 gem object */
-struct intel_shadow_bb_entry {
+struct intel_vgpu_shadow_bb {
struct list_head list;
struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
void *va;
- unsigned long len;
u32 *bb_start_cmd_va;
+ unsigned int clflush;
+ bool accessing;
};
#define workload_q_head(vgpu, ring_id) \
- (&(vgpu->workload_q_head[ring_id]))
+ (&(vgpu->submission.workload_q_head[ring_id]))
-#define queue_workload(workload) do { \
- list_add_tail(&workload->list, \
- workload_q_head(workload->vgpu, workload->ring_id)); \
- wake_up(&workload->vgpu->gvt-> \
- scheduler.waitq[workload->ring_id]); \
-} while (0)
+void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
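/*
 * A side note on the conversion above: the old queue_workload() macro was
 * wrapped in do { ... } while (0) so that its multiple statements behave
 * as a single statement under an un-braced if/else; replacing it with the
 * intel_vgpu_queue_workload() function avoids the issue entirely and adds
 * type checking. A minimal userspace illustration (names are made up):
 */
#include <stdio.h>

#define ENQUEUE_BAD(x)	puts("link " #x); puts("wake waiters")
#define ENQUEUE_GOOD(x)	do { puts("link " #x); puts("wake waiters"); } while (0)

int main(void)
{
	int ready = 1;

	if (ready)
		ENQUEUE_GOOD(a);	/* expands to one statement: safe */
	else
		puts("not ready");

	/*
	 * With ENQUEUE_BAD(a) in the if-body instead, the second puts()
	 * would escape the conditional, and the else would not compile.
	 */
	return 0;
}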
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
@@ -137,12 +133,23 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
-int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
+int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
-void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
+ unsigned long engine_mask);
-void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
-int intel_gvt_generate_request(struct intel_vgpu_workload *workload);
+int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+ unsigned int interface);
+
+extern const struct intel_vgpu_submission_ops
+intel_vgpu_execlist_submission_ops;
+
+struct intel_vgpu_workload *
+intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+ struct execlist_ctx_descriptor_format *desc);
+
+void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 8c150381d9a4..7a2511538f34 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -330,13 +330,14 @@ TRACE_EVENT(inject_msi,
);
TRACE_EVENT(render_mmio,
- TP_PROTO(int id, char *action, unsigned int reg,
+ TP_PROTO(int old_id, int new_id, char *action, unsigned int reg,
unsigned int old_val, unsigned int new_val),
- TP_ARGS(id, action, reg, new_val, old_val),
+ TP_ARGS(old_id, new_id, action, reg, new_val, old_val),
TP_STRUCT__entry(
- __field(int, id)
+ __field(int, old_id)
+ __field(int, new_id)
__array(char, buf, GVT_TEMP_STR_LEN)
__field(unsigned int, reg)
__field(unsigned int, old_val)
@@ -344,15 +345,17 @@ TRACE_EVENT(render_mmio,
),
TP_fast_assign(
- __entry->id = id;
+ __entry->old_id = old_id;
+ __entry->new_id = new_id;
snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", action);
__entry->reg = reg;
__entry->old_val = old_val;
__entry->new_val = new_val;
),
- TP_printk("VM%u %s reg %x, old %08x new %08x\n",
- __entry->id, __entry->buf, __entry->reg,
+ TP_printk("VM%u -> VM%u %s reg %x, old %08x new %08x\n",
+ __entry->old_id, __entry->new_id,
+ __entry->buf, __entry->reg,
__entry->old_val, __entry->new_val)
);
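/*
 * A userspace sketch of the extended render_mmio trace point above: the
 * event now carries both the outgoing and the incoming vGPU id, so a
 * register save/restore can be attributed to a specific context switch
 * ("VM%u -> VM%u"). printf() stands in for the kernel's TP_printk() and
 * the register/value arguments are made up.
 */
#include <stdio.h>

static void trace_render_mmio(unsigned int old_id, unsigned int new_id,
			      const char *action, unsigned int reg,
			      unsigned int old_val, unsigned int new_val)
{
	printf("VM%u -> VM%u %s reg %x, old %08x new %08x\n",
	       old_id, new_id, action, reg, old_val, new_val);
}

int main(void)
{
	trace_render_mmio(1, 2, "restore", 0x2098, 0xdeadbeef, 0xcafef00d);
	return 0;
}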
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 02c61a1ad56a..4688619f6a1c 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -38,22 +38,25 @@
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
/* setup the ballooning information */
- vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
- vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
- vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
- vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
- vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
- vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
- vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
+ vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
+ vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
+ vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
+ vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
+
+ vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
vgpu_aperture_gmadr_base(vgpu);
- vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
+ vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
vgpu_aperture_sz(vgpu);
- vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
+ vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
vgpu_hidden_gmadr_base(vgpu);
- vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
+ vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
vgpu_hidden_sz(vgpu);
- vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
+ vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
@@ -226,13 +229,14 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
vgpu->active = false;
- if (atomic_read(&vgpu->running_workload_num)) {
+ if (atomic_read(&vgpu->submission.running_workload_num)) {
mutex_unlock(&gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&gvt->lock);
}
intel_vgpu_stop_schedule(vgpu);
+ intel_vgpu_dmabuf_cleanup(vgpu);
mutex_unlock(&gvt->lock);
}
@@ -252,16 +256,17 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
WARN(vgpu->active, "vGPU is still active!\n");
+ intel_gvt_debugfs_remove_vgpu(vgpu);
idr_remove(&gvt->vgpu_idr, vgpu->id);
intel_vgpu_clean_sched_policy(vgpu);
- intel_vgpu_clean_gvt_context(vgpu);
- intel_vgpu_clean_execlist(vgpu);
+ intel_vgpu_clean_submission(vgpu);
intel_vgpu_clean_display(vgpu);
intel_vgpu_clean_opregion(vgpu);
intel_vgpu_clean_gtt(vgpu);
intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_free_resource(vgpu);
intel_vgpu_clean_mmio(vgpu);
+ intel_vgpu_dmabuf_cleanup(vgpu);
vfree(vgpu);
intel_gvt_update_vgpu_types(gvt);
@@ -293,7 +298,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
vgpu->gvt = gvt;
for (i = 0; i < I915_NUM_ENGINES; i++)
- INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+ INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);
ret = intel_vgpu_init_sched_policy(vgpu);
if (ret)
@@ -346,8 +351,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->handle = param->handle;
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
- bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
-
+ INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
+ idr_init(&vgpu->object_idr);
intel_vgpu_init_cfg_space(vgpu, param->primary);
ret = intel_vgpu_init_mmio(vgpu);
@@ -368,32 +373,42 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_detach_hypervisor_vgpu;
- ret = intel_vgpu_init_display(vgpu, param->resolution);
+ ret = intel_vgpu_init_opregion(vgpu);
if (ret)
goto out_clean_gtt;
- ret = intel_vgpu_init_execlist(vgpu);
+ ret = intel_vgpu_init_display(vgpu, param->resolution);
if (ret)
- goto out_clean_display;
+ goto out_clean_opregion;
- ret = intel_vgpu_init_gvt_context(vgpu);
+ ret = intel_vgpu_setup_submission(vgpu);
if (ret)
- goto out_clean_execlist;
+ goto out_clean_display;
ret = intel_vgpu_init_sched_policy(vgpu);
if (ret)
- goto out_clean_shadow_ctx;
+ goto out_clean_submission;
+
+ ret = intel_gvt_debugfs_add_vgpu(vgpu);
+ if (ret)
+ goto out_clean_sched_policy;
+
+ ret = intel_gvt_hypervisor_set_opregion(vgpu);
+ if (ret)
+ goto out_clean_sched_policy;
mutex_unlock(&gvt->lock);
return vgpu;
-out_clean_shadow_ctx:
- intel_vgpu_clean_gvt_context(vgpu);
-out_clean_execlist:
- intel_vgpu_clean_execlist(vgpu);
+out_clean_sched_policy:
+ intel_vgpu_clean_sched_policy(vgpu);
+out_clean_submission:
+ intel_vgpu_clean_submission(vgpu);
out_clean_display:
intel_vgpu_clean_display(vgpu);
+out_clean_opregion:
+ intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
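/*
 * A minimal sketch of the goto-unwind convention used by
 * __intel_gvt_create_vgpu() above: each successfully initialised resource
 * gets a label, and a failure jumps to the label that tears down
 * everything acquired so far, in reverse order. All names here are
 * illustrative stand-ins for the init/clean pairs in the real function.
 */
#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return -1; }	/* pretend b fails */
static void clean_a(void) { puts("clean a"); }

static int create(void)
{
	int ret;

	ret = init_a();
	if (ret)
		goto out;
	ret = init_b();
	if (ret)
		goto out_clean_a;	/* unwind in reverse order */
	return 0;

out_clean_a:
	clean_a();
out:
	return ret;
}

int main(void)
{
	return create() ? 1 : 0;
}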
@@ -500,10 +515,10 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
mutex_lock(&gvt->lock);
}
- intel_vgpu_reset_execlist(vgpu, resetting_eng);
-
+ intel_vgpu_reset_submission(vgpu, resetting_eng);
/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
+ intel_vgpu_select_submission_ops(vgpu, 0);
/*fence will not be reset during virtual reset */
if (dmlr) {
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 8ba932b22f7c..ccb5ba043b63 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -26,6 +26,7 @@
*/
#include "i915_drv.h"
+#include "intel_ringbuffer.h"
/**
* DOC: batch buffer command parser
@@ -798,22 +799,15 @@ struct cmd_node {
*/
static inline u32 cmd_header_key(u32 x)
{
- u32 shift;
-
switch (x >> INSTR_CLIENT_SHIFT) {
default:
case INSTR_MI_CLIENT:
- shift = STD_MI_OPCODE_SHIFT;
- break;
+ return x >> STD_MI_OPCODE_SHIFT;
case INSTR_RC_CLIENT:
- shift = STD_3D_OPCODE_SHIFT;
- break;
+ return x >> STD_3D_OPCODE_SHIFT;
case INSTR_BC_CLIENT:
- shift = STD_2D_OPCODE_SHIFT;
- break;
+ return x >> STD_2D_OPCODE_SHIFT;
}
-
- return x >> shift;
}
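/*
 * A sketch of the key-extraction idea in cmd_header_key() above: the top
 * bits of a command header select the client, which decides how many low
 * operand bits must be shifted away before the header can serve as a hash
 * key. The shift positions and client values here are illustrative, not
 * the real i915 INSTR_*/STD_*_OPCODE_SHIFT constants.
 */
#include <stdint.h>
#include <stdio.h>

#define CLIENT_SHIFT 29u	/* assumed position of the client field */

static uint32_t cmd_key(uint32_t header)
{
	switch (header >> CLIENT_SHIFT) {
	case 3:	return header >> 16;	/* "3D"-like client */
	case 2:	return header >> 22;	/* "2D"-like client */
	default: return header >> 23;	/* "MI"-like client (common case) */
	}
}

int main(void)
{
	printf("key=%#x\n", cmd_key(0x7a000004u));
	return 0;
}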
static int init_hash_table(struct intel_engine_cs *engine,
@@ -947,7 +941,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
return;
}
- engine->needs_cmd_parser = true;
+ engine->flags |= I915_ENGINE_NEEDS_CMD_PARSER;
}
/**
@@ -959,7 +953,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
*/
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
{
- if (!engine->needs_cmd_parser)
+ if (!intel_engine_needs_cmd_parser(engine))
return;
fini_hash_table(engine);
@@ -1357,7 +1351,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
/* If the command parser is not enabled, report 0 - unsupported */
for_each_engine(engine, dev_priv, id) {
- if (engine->needs_cmd_parser) {
+ if (intel_engine_needs_cmd_parser(engine)) {
active = true;
break;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index c65e381b85f3..e968aeae1d84 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,47 +30,28 @@
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include "intel_drv.h"
-#include "i915_guc_submission.h"
+#include "intel_guc_submission.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
return to_i915(node->minor->dev);
}
-static __always_inline void seq_print_param(struct seq_file *m,
- const char *name,
- const char *type,
- const void *x)
-{
- if (!__builtin_strcmp(type, "bool"))
- seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
- else if (!__builtin_strcmp(type, "int"))
- seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
- else if (!__builtin_strcmp(type, "unsigned int"))
- seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
- else if (!__builtin_strcmp(type, "char *"))
- seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
- else
- BUILD_BUG();
-}
-
static int i915_capabilities(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_device_info *info = INTEL_INFO(dev_priv);
+ struct drm_printer p = drm_seq_file_printer(m);
seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
-#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
-#undef PRINT_FLAG
+ intel_device_info_dump_flags(info, &p);
+ intel_device_info_dump_runtime(info, &p);
kernel_param_lock(THIS_MODULE);
-#define PRINT_PARAM(T, x, ...) seq_print_param(m, #x, #T, &i915_modparams.x);
- I915_PARAMS_FOR_EACH(PRINT_PARAM);
-#undef PRINT_PARAM
+ i915_params_dump(&i915_modparams, &p);
kernel_param_unlock(THIS_MODULE);
return 0;
@@ -111,8 +92,8 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
u64 size = 0;
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
+ for_each_ggtt_vma(vma, obj) {
+ if (drm_mm_node_allocated(&vma->node))
size += vma->node.size;
}
@@ -522,8 +503,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
dpy_count, dpy_size);
- seq_printf(m, "%llu [%llu] gtt total\n",
- ggtt->base.total, ggtt->mappable_end);
+ seq_printf(m, "%llu [%pa] gtt total\n",
+ ggtt->base.total, &ggtt->mappable_end);
seq_printf(m, "Supported page sizes: %s\n",
stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
buf, sizeof(buf)));
@@ -664,38 +645,6 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
return 0;
}
-static void i915_ring_seqno_info(struct seq_file *m,
- struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct rb_node *rb;
-
- seq_printf(m, "Current sequence (%s): %x\n",
- engine->name, intel_engine_get_seqno(engine));
-
- spin_lock_irq(&b->rb_lock);
- for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
- struct intel_wait *w = rb_entry(rb, typeof(*w), node);
-
- seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
- engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
- }
- spin_unlock_irq(&b->rb_lock);
-}
-
-static int i915_gem_seqno_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id)
- i915_ring_seqno_info(m, engine);
-
- return 0;
-}
-
-
static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -896,13 +845,12 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
- for_each_engine(engine, dev_priv, id) {
- if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
+ for_each_engine(engine, dev_priv, id) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
engine->name, I915_READ_IMR(engine));
}
- i915_ring_seqno_info(m, engine);
}
intel_runtime_pm_put(dev_priv);
@@ -1151,13 +1099,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
- if (INTEL_GEN(dev_priv) >= 9)
- cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
- else
- cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
- cagf = intel_gpu_freq(dev_priv, cagf);
+ cagf = intel_gpu_freq(dev_priv,
+ intel_get_cagf(dev_priv, rpstat));
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -1639,20 +1582,23 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_fbc *fbc = &dev_priv->fbc;
- if (!HAS_FBC(dev_priv)) {
- seq_puts(m, "FBC unsupported on this chipset\n");
- return 0;
- }
+ if (!HAS_FBC(dev_priv))
+ return -ENODEV;
intel_runtime_pm_get(dev_priv);
- mutex_lock(&dev_priv->fbc.lock);
+ mutex_lock(&fbc->lock);
if (intel_fbc_is_active(dev_priv))
seq_puts(m, "FBC enabled\n");
else
- seq_printf(m, "FBC disabled: %s\n",
- dev_priv->fbc.no_fbc_reason);
+ seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
+
+ if (fbc->work.scheduled)
+ seq_printf(m, "FBC worker scheduled on vblank %u, now %llu\n",
+ fbc->work.scheduled_vblank,
+ drm_crtc_vblank_count(&fbc->crtc->base));
if (intel_fbc_is_active(dev_priv)) {
u32 mask;
@@ -1672,7 +1618,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
seq_printf(m, "Compressing: %s\n", yesno(mask));
}
- mutex_unlock(&dev_priv->fbc.lock);
+ mutex_unlock(&fbc->lock);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -1719,10 +1665,8 @@ static int i915_ips_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- if (!HAS_IPS(dev_priv)) {
- seq_puts(m, "not supported\n");
- return 0;
- }
+ if (!HAS_IPS(dev_priv))
+ return -ENODEV;
intel_runtime_pm_get(dev_priv);
@@ -1808,10 +1752,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
int gpu_freq, ia_freq;
unsigned int max_gpu_freq, min_gpu_freq;
- if (!HAS_LLC(dev_priv)) {
- seq_puts(m, "unsupported on this chipset\n");
- return 0;
- }
+ if (!HAS_LLC(dev_priv))
+ return -ENODEV;
intel_runtime_pm_get(dev_priv);
@@ -1974,7 +1916,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct intel_context *ce = &ctx->engine[engine->id];
seq_printf(m, "%s: ", engine->name);
- seq_putc(m, ce->initialised ? 'I' : 'i');
if (ce->state)
describe_obj(m, ce->state->obj);
if (ce->ring)
@@ -1990,75 +1931,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
return 0;
}
-static void i915_dump_lrc_obj(struct seq_file *m,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- struct i915_vma *vma = ctx->engine[engine->id].state;
- struct page *page;
- int j;
-
- seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
-
- if (!vma) {
- seq_puts(m, "\tFake context\n");
- return;
- }
-
- if (vma->flags & I915_VMA_GLOBAL_BIND)
- seq_printf(m, "\tBound in GGTT at 0x%08x\n",
- i915_ggtt_offset(vma));
-
- if (i915_gem_object_pin_pages(vma->obj)) {
- seq_puts(m, "\tFailed to get pages for context object\n\n");
- return;
- }
-
- page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
- if (page) {
- u32 *reg_state = kmap_atomic(page);
-
- for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
- seq_printf(m,
- "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- j * 4,
- reg_state[j], reg_state[j + 1],
- reg_state[j + 2], reg_state[j + 3]);
- }
- kunmap_atomic(reg_state);
- }
-
- i915_gem_object_unpin_pages(vma->obj);
- seq_putc(m, '\n');
-}
-
-static int i915_dump_lrc(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- enum intel_engine_id id;
- int ret;
-
- if (!i915_modparams.enable_execlists) {
- seq_printf(m, "Logical Ring Contexts are disabled\n");
- return 0;
- }
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- list_for_each_entry(ctx, &dev_priv->contexts.list, link)
- for_each_engine(engine, dev_priv, id)
- i915_dump_lrc_obj(m, ctx, engine);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static const char *swizzle_string(unsigned swizzle)
{
switch (swizzle) {
@@ -2361,8 +2233,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_printer p;
- if (!HAS_HUC_UCODE(dev_priv))
- return 0;
+ if (!HAS_HUC(dev_priv))
+ return -ENODEV;
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->huc.fw, &p);
@@ -2380,8 +2252,8 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
struct drm_printer p;
u32 tmp, i;
- if (!HAS_GUC_UCODE(dev_priv))
- return 0;
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->guc.fw, &p);
@@ -2434,7 +2306,7 @@ static void i915_guc_log_info(struct seq_file *m,
static void i915_guc_client_info(struct seq_file *m,
struct drm_i915_private *dev_priv,
- struct i915_guc_client *client)
+ struct intel_guc_client *client)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -2454,29 +2326,16 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tTotal: %llu\n", tot);
}
-static bool check_guc_submission(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->guc;
-
- if (!guc->execbuf_client) {
- seq_printf(m, "GuC submission %s\n",
- HAS_GUC_SCHED(dev_priv) ?
- "disabled" :
- "not supported");
- return false;
- }
-
- return true;
-}
-
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_guc *guc = &dev_priv->guc;
- if (!check_guc_submission(m))
- return 0;
+ if (!USES_GUC_SUBMISSION(dev_priv))
+ return -ENODEV;
+
+ GEM_BUG_ON(!guc->execbuf_client);
+ GEM_BUG_ON(!guc->preempt_client);
seq_printf(m, "Doorbell map:\n");
seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
@@ -2484,6 +2343,8 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
i915_guc_client_info(m, dev_priv, guc->execbuf_client);
+ seq_printf(m, "\nGuC preempt client @ %p:\n", guc->preempt_client);
+ i915_guc_client_info(m, dev_priv, guc->preempt_client);
i915_guc_log_info(m, dev_priv);
@@ -2497,12 +2358,12 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_guc *guc = &dev_priv->guc;
struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
- struct i915_guc_client *client = guc->execbuf_client;
+ struct intel_guc_client *client = guc->execbuf_client;
unsigned int tmp;
int index;
- if (!check_guc_submission(m))
- return 0;
+ if (!USES_GUC_SUBMISSION(dev_priv))
+ return -ENODEV;
for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
struct intel_engine_cs *engine;
@@ -2555,6 +2416,9 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
u32 *log;
int i = 0;
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
+
if (dump_load_err)
obj = dev_priv->guc.load_err_log;
else if (dev_priv->guc.log.vma)
@@ -2586,6 +2450,9 @@ static int i915_guc_log_control_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
+
if (!dev_priv->guc.log.vma)
return -EINVAL;
@@ -2599,6 +2466,9 @@ static int i915_guc_log_control_set(void *data, u64 val)
struct drm_i915_private *dev_priv = data;
int ret;
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
+
if (!dev_priv->guc.log.vma)
return -EINVAL;
@@ -2649,10 +2519,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
enum pipe pipe;
bool enabled = false;
- if (!HAS_PSR(dev_priv)) {
- seq_puts(m, "PSR not supported\n");
- return 0;
- }
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
intel_runtime_pm_get(dev_priv);
@@ -2734,39 +2602,76 @@ static int i915_sink_crc(struct seq_file *m, void *data)
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
struct intel_dp *intel_dp = NULL;
+ struct drm_modeset_acquire_ctx ctx;
int ret;
u8 crc[6];
- drm_modeset_lock_all(dev);
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
drm_connector_list_iter_begin(dev, &conn_iter);
+
for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_crtc *crtc;
+ struct drm_connector_state *state;
+ struct intel_crtc_state *crtc_state;
- if (!connector->base.state->best_encoder)
+ if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
continue;
- crtc = connector->base.state->crtc;
- if (!crtc->state->active)
+retry:
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
+ if (ret)
+ goto err;
+
+ state = connector->base.state;
+ if (!state->best_encoder)
continue;
- if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
+ crtc = state->crtc;
+ ret = drm_modeset_lock(&crtc->mutex, &ctx);
+ if (ret)
+ goto err;
+
+ crtc_state = to_intel_crtc_state(crtc->state);
+ if (!crtc_state->base.active)
continue;
- intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);
+ /*
+ * We need to wait for all crtc updates to complete, to make
+ * sure any pending modesets and plane updates are completed.
+ */
+ if (crtc_state->base.commit) {
+ ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);
+
+ if (ret)
+ goto err;
+ }
- ret = intel_dp_sink_crc(intel_dp, crc);
+ intel_dp = enc_to_intel_dp(state->best_encoder);
+
+ ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
if (ret)
- goto out;
+ goto err;
seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
crc[0], crc[1], crc[2],
crc[3], crc[4], crc[5]);
goto out;
+
+err:
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
+ goto out;
}
ret = -ENODEV;
out:
drm_connector_list_iter_end(&conn_iter);
- drm_modeset_unlock_all(dev);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
return ret;
}
@@ -2854,10 +2759,8 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_csr *csr;
- if (!HAS_CSR(dev_priv)) {
- seq_puts(m, "not supported\n");
- return 0;
- }
+ if (!HAS_CSR(dev_priv))
+ return -ENODEV;
csr = &dev_priv->csr;
@@ -3049,7 +2952,7 @@ static void intel_connector_info(struct seq_file *m,
break;
case DRM_MODE_CONNECTOR_HDMIA:
if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
- intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
+ intel_encoder->type == INTEL_OUTPUT_DDI)
intel_hdmi_info(m, intel_connector);
break;
default:
@@ -3244,10 +3147,12 @@ static int i915_engine_info(struct seq_file *m, void *unused)
yesno(dev_priv->gt.awake));
seq_printf(m, "Global active requests: %d\n",
dev_priv->gt.active_requests);
+ seq_printf(m, "CS timestamp frequency: %u kHz\n",
+ dev_priv->info.cs_timestamp_frequency_khz);
p = drm_seq_file_printer(m);
for_each_engine(engine, dev_priv, id)
- intel_engine_dump(engine, &p);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
intel_runtime_pm_put(dev_priv);
@@ -3264,69 +3169,6 @@ static int i915_shrinker_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_semaphore_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
- int num_rings = INTEL_INFO(dev_priv)->num_rings;
- enum intel_engine_id id;
- int j, ret;
-
- if (!i915_modparams.semaphores) {
- seq_puts(m, "Semaphores are disabled\n");
- return 0;
- }
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- if (IS_BROADWELL(dev_priv)) {
- struct page *page;
- uint64_t *seqno;
-
- page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
-
- seqno = (uint64_t *)kmap_atomic(page);
- for_each_engine(engine, dev_priv, id) {
- uint64_t offset;
-
- seq_printf(m, "%s\n", engine->name);
-
- seq_puts(m, " Last signal:");
- for (j = 0; j < num_rings; j++) {
- offset = id * I915_NUM_ENGINES + j;
- seq_printf(m, "0x%08llx (0x%02llx) ",
- seqno[offset], offset * 8);
- }
- seq_putc(m, '\n');
-
- seq_puts(m, " Last wait: ");
- for (j = 0; j < num_rings; j++) {
- offset = id + (j * I915_NUM_ENGINES);
- seq_printf(m, "0x%08llx (0x%02llx) ",
- seqno[offset], offset * 8);
- }
- seq_putc(m, '\n');
-
- }
- kunmap_atomic(seqno);
- } else {
- seq_puts(m, " Last signal:");
- for_each_engine(engine, dev_priv, id)
- for (j = 0; j < num_rings; j++)
- seq_printf(m, "0x%08x\n",
- I915_READ(engine->semaphore.mbox.signal[j]));
- seq_putc(m, '\n');
- }
-
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -3454,7 +3296,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
int plane;
if (INTEL_GEN(dev_priv) < 9)
- return 0;
+ return -ENODEV;
drm_modeset_lock_all(dev);
@@ -3601,7 +3443,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
continue;
seq_printf(m, "MST Source Port %c\n",
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
}
drm_connector_list_iter_end(&conn_iter);
@@ -4448,6 +4290,61 @@ static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
}
}
+static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
+ struct sseu_dev_info *sseu)
+{
+ const struct intel_device_info *info = INTEL_INFO(dev_priv);
+ int s_max = 6, ss_max = 4;
+ int s, ss;
+ u32 s_reg[s_max], eu_reg[2 * s_max], eu_mask[2];
+
+ for (s = 0; s < s_max; s++) {
+ /*
+ * FIXME: the valid SS mask follows the spec and reads
+ * only the valid bits of these registers, excluding the
+ * reserved ones, although this seems wrong because it
+ * would leave many subslices without an ACK.
+ */
+ s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
+ GEN10_PGCTL_VALID_SS_MASK(s);
+ eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
+ eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
+ }
+
+ eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+ GEN9_PGCTL_SSA_EU19_ACK |
+ GEN9_PGCTL_SSA_EU210_ACK |
+ GEN9_PGCTL_SSA_EU311_ACK;
+ eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+ GEN9_PGCTL_SSB_EU19_ACK |
+ GEN9_PGCTL_SSB_EU210_ACK |
+ GEN9_PGCTL_SSB_EU311_ACK;
+
+ for (s = 0; s < s_max; s++) {
+ if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+ /* skip disabled slice */
+ continue;
+
+ sseu->slice_mask |= BIT(s);
+ sseu->subslice_mask = info->sseu.subslice_mask;
+
+ for (ss = 0; ss < ss_max; ss++) {
+ unsigned int eu_cnt;
+
+ if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+ /* skip disabled subslice */
+ continue;
+
+ eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
+ eu_mask[ss % 2]);
+ sseu->eu_total += eu_cnt;
+ sseu->eu_per_subslice = max_t(unsigned int,
+ sseu->eu_per_subslice,
+ eu_cnt);
+ }
+ }
+}
+
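/*
 * A sketch of the EU-accounting arithmetic in gen10_sseu_device_status()
 * above: each enabled subslice contributes 2 * popcount(ack & mask) EUs,
 * and eu_per_subslice tracks the running maximum. __builtin_popcount()
 * stands in for the kernel's hweight32(); register values and masks are
 * made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t eu_ack[2]  = { 0x0000000f, 0x00000300 };	/* fake ACK regs */
	uint32_t eu_mask[2] = { 0x000000ff, 0x0000ff00 };	/* fake masks */
	unsigned int eu_total = 0, eu_per_subslice = 0;

	for (int ss = 0; ss < 2; ss++) {
		unsigned int eu_cnt =
			2 * __builtin_popcount(eu_ack[ss] & eu_mask[ss]);

		eu_total += eu_cnt;
		if (eu_cnt > eu_per_subslice)
			eu_per_subslice = eu_cnt;
	}
	printf("eu_total=%u eu_per_subslice=%u\n", eu_total, eu_per_subslice);
	return 0;
}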
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_info *sseu)
{
@@ -4483,7 +4380,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
sseu->slice_mask |= BIT(s);
- if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
+ if (IS_GEN9_BC(dev_priv))
sseu->subslice_mask =
INTEL_INFO(dev_priv)->sseu.subslice_mask;
@@ -4589,8 +4486,10 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
cherryview_sseu_device_status(dev_priv, &sseu);
} else if (IS_BROADWELL(dev_priv)) {
broadwell_sseu_device_status(dev_priv, &sseu);
- } else if (INTEL_GEN(dev_priv) >= 9) {
+ } else if (IS_GEN9(dev_priv)) {
gen9_sseu_device_status(dev_priv, &sseu);
+ } else if (INTEL_GEN(dev_priv) >= 10) {
+ gen10_sseu_device_status(dev_priv, &sseu);
}
intel_runtime_pm_put(dev_priv);
@@ -4712,7 +4611,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_stolen", i915_gem_stolen_list_info },
- {"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
@@ -4736,7 +4634,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_vbt", i915_vbt, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
- {"i915_dump_lrc", i915_dump_lrc, 0},
{"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
@@ -4750,7 +4647,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_display_info", i915_display_info, 0},
{"i915_engine_info", i915_engine_info, 0},
{"i915_shrinker_info", i915_shrinker_info, 0},
- {"i915_semaphore_status", i915_semaphore_status, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2cf10d17acfb..6c8da9d20c33 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -48,6 +48,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
+#include "i915_pmu.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"
@@ -321,7 +322,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = USES_PPGTT(dev_priv);
break;
case I915_PARAM_HAS_SEMAPHORES:
- value = i915_modparams.semaphores;
+ value = HAS_LEGACY_SEMAPHORES(dev_priv);
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
@@ -371,10 +372,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) {
value |= I915_SCHEDULER_CAP_ENABLED;
value |= I915_SCHEDULER_CAP_PRIORITY;
-
- if (INTEL_INFO(dev_priv)->has_logical_ring_preemption &&
- i915_modparams.enable_execlists &&
- !i915_modparams.enable_guc_submission)
+ if (HAS_LOGICAL_RING_PREEMPTION(dev_priv))
value |= I915_SCHEDULER_CAP_PREEMPTION;
}
break;
@@ -407,6 +405,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
*/
value = 1;
break;
+ case I915_PARAM_HAS_CONTEXT_ISOLATION:
+ value = intel_engines_has_context_isolation(dev_priv);
+ break;
case I915_PARAM_SLICE_MASK:
value = INTEL_INFO(dev_priv)->sseu.slice_mask;
if (!value)
@@ -417,6 +418,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
if (!value)
return -ENODEV;
break;
+ case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
+ value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
@@ -613,10 +617,12 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(dev_priv);
+ intel_uc_fini(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_contexts_fini(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_uc_fini_wq(dev_priv);
i915_gem_cleanup_userptr(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
@@ -677,7 +683,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_uc;
- intel_modeset_gem_init(dev);
+ intel_setup_overlay(dev_priv);
if (INTEL_INFO(dev_priv)->num_pipes == 0)
return 0;
@@ -689,8 +695,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(dev_priv);
- drm_kms_helper_poll_init(dev);
-
return 0;
cleanup_gem:
@@ -724,7 +728,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
if (!ap)
return -ENOMEM;
- ap->ranges[0].base = ggtt->mappable_base;
+ ap->ranges[0].base = ggtt->gmadr.start;
ap->ranges[0].size = ggtt->mappable_end;
primary =
@@ -838,6 +842,11 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
* We don't keep the workarounds for pre-production hardware, so we expect our
* driver to fail on these machines in one way or another. A little warning on
* dmesg may help both the user and the bug triagers.
+ *
+ * Our policy for removing pre-production workarounds is to keep the
+ * current gen workarounds as a guide to the bring-up of the next gen
+ * (workarounds have a habit of persisting!). Anything older than that
+ * should be removed along with the complications they introduce.
*/
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
@@ -892,7 +901,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
mutex_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
- spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
@@ -923,12 +931,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_display_crc_init(dev_priv);
- intel_device_info_dump(dev_priv);
-
intel_detect_preproduction_hw(dev_priv);
- i915_perf_init(dev_priv);
-
return 0;
err_irq:
@@ -945,7 +949,6 @@ err_engines:
*/
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
- i915_perf_fini(dev_priv);
i915_gem_load_cleanup(dev_priv);
intel_irq_fini(dev_priv);
i915_workqueues_cleanup(dev_priv);
@@ -1048,10 +1051,6 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
- i915_modparams.enable_execlists =
- intel_sanitize_enable_execlists(dev_priv,
- i915_modparams.enable_execlists);
-
/*
* i915.enable_ppgtt is read-only, so do an early pass to validate the
* user's requested state against the hardware/driver capabilities. We
@@ -1063,11 +1062,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
i915_modparams.enable_ppgtt);
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
- i915_modparams.semaphores =
- intel_sanitize_semaphores(dev_priv, i915_modparams.semaphores);
- DRM_DEBUG_DRIVER("use GPU semaphores? %s\n",
- yesno(i915_modparams.semaphores));
-
intel_uc_sanitize_options(dev_priv);
intel_gvt_sanitize_options(dev_priv);
@@ -1088,10 +1082,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (i915_inject_load_failure())
return -ENODEV;
- intel_device_info_runtime_init(dev_priv);
+ intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
intel_sanitize_options(dev_priv);
+ i915_perf_init(dev_priv);
+
ret = i915_ggtt_probe_hw(dev_priv);
if (ret)
return ret;
@@ -1197,6 +1193,8 @@ static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
+ i915_perf_fini(dev_priv);
+
if (pdev->msi_enabled)
pci_disable_msi(pdev);
@@ -1215,7 +1213,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
- i915_gem_shrinker_init(dev_priv);
+ i915_gem_shrinker_register(dev_priv);
+ i915_pmu_register(dev_priv);
/*
* Notify a valid surface after modesetting,
@@ -1254,6 +1253,13 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
* cannot run before the connectors are registered.
*/
intel_fbdev_initial_config_async(dev);
+
+ /*
+ * We need to coordinate the hotplugs with the asynchronous fbdev
+ * configuration, for which we use the fbdev->async_cookie.
+ */
+ if (INTEL_INFO(dev_priv)->num_pipes)
+ drm_kms_helper_poll_init(dev);
}
/**
@@ -1265,17 +1271,40 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_fbdev_unregister(dev_priv);
intel_audio_deinit(dev_priv);
+ /*
+ * After flushing the fbdev (incl. a late async config which will
+ * have delayed queuing of a hotplug event), then flush the hotplug
+ * events.
+ */
+ drm_kms_helper_poll_fini(&dev_priv->drm);
+
intel_gpu_ips_teardown();
acpi_video_unregister();
intel_opregion_unregister(dev_priv);
i915_perf_unregister(dev_priv);
+ i915_pmu_unregister(dev_priv);
i915_teardown_sysfs(dev_priv);
i915_guc_log_unregister(dev_priv);
drm_dev_unregister(&dev_priv->drm);
- i915_gem_shrinker_cleanup(dev_priv);
+ i915_gem_shrinker_unregister(dev_priv);
+}
+
+static void i915_welcome_messages(struct drm_i915_private *dev_priv)
+{
+ if (drm_debug & DRM_UT_DRIVER) {
+ struct drm_printer p = drm_debug_printer("i915 device info:");
+
+ intel_device_info_dump(&dev_priv->info, &p);
+ intel_device_info_dump_runtime(&dev_priv->info, &p);
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ DRM_INFO("DRM_I915_DEBUG enabled\n");
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
}
/**
@@ -1363,13 +1392,10 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
intel_init_ipc(dev_priv);
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
- DRM_INFO("DRM_I915_DEBUG enabled\n");
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
-
intel_runtime_pm_put(dev_priv);
+ i915_welcome_messages(dev_priv);
+
return 0;
out_cleanup_hw:
@@ -1682,8 +1708,6 @@ static int i915_drm_resume(struct drm_device *dev)
intel_csr_ucode_resume(dev_priv);
- i915_gem_resume(dev_priv);
-
i915_restore_state(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
intel_opregion_setup(dev_priv);
@@ -1704,14 +1728,7 @@ static int i915_drm_resume(struct drm_device *dev)
drm_mode_config_reset(dev);
- mutex_lock(&dev->struct_mutex);
- if (i915_gem_init_hw(dev_priv)) {
- DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
- i915_gem_set_wedged(dev_priv);
- }
- mutex_unlock(&dev->struct_mutex);
-
- intel_guc_resume(dev_priv);
+ i915_gem_resume(dev_priv);
intel_modeset_init_hw(dev);
intel_init_clock_gating(dev_priv);
@@ -1745,8 +1762,6 @@ static int i915_drm_resume(struct drm_device *dev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
- intel_autoenable_gt_powersave(dev_priv);
-
enable_rpm_wakeref_asserts(dev_priv);
return 0;
@@ -1874,7 +1889,9 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
{
struct i915_gpu_error *error = &i915->gpu_error;
int ret;
+ int i;
+ might_sleep();
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
@@ -1892,22 +1909,30 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
disable_irq(i915->drm.irq);
ret = i915_gem_reset_prepare(i915);
if (ret) {
- DRM_ERROR("GPU recovery failed\n");
+ dev_err(i915->drm.dev, "GPU recovery failed\n");
intel_gpu_reset(i915, ALL_ENGINES);
- goto error;
+ goto taint;
}
- ret = intel_gpu_reset(i915, ALL_ENGINES);
- if (ret) {
- if (ret != -ENODEV)
- DRM_ERROR("Failed to reset chip: %i\n", ret);
+ if (!intel_has_gpu_reset(i915)) {
+ if (i915_modparams.reset)
+ dev_err(i915->drm.dev, "GPU reset not supported\n");
else
DRM_DEBUG_DRIVER("GPU reset disabled\n");
goto error;
}
- i915_gem_reset(i915);
- intel_overlay_reset(i915);
+ for (i = 0; i < 3; i++) {
+ ret = intel_gpu_reset(i915, ALL_ENGINES);
+ if (ret == 0)
+ break;
+
+ msleep(100);
+ }
+ if (ret) {
+ dev_err(i915->drm.dev, "Failed to reset chip\n");
+ goto taint;
+ }
/* Ok, now get things going again... */
@@ -1921,6 +1946,9 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
goto error;
}
+ i915_gem_reset(i915);
+ intel_overlay_reset(i915);
+
/*
* Next we need to restore the context, but we don't use those
* yet either...
@@ -1946,12 +1974,32 @@ wakeup:
wake_up_bit(&error->flags, I915_RESET_HANDOFF);
return;
+taint:
+ /*
+ * History tells us that if we cannot reset the GPU now, we
+ * never will. This then impacts everything that is run
+ * subsequently. On failing the reset, we mark the driver
+ * as wedged, preventing further execution on the GPU.
+ * We also want to go one step further and add a taint to the
+ * kernel so that any subsequent faults can be traced back to
+ * this failure. This is important for CI, where if the
+ * GPU/driver fails we would like to reboot and restart testing
+ * rather than continue on into oblivion. For everyone else,
+ * the system should still plod along, but they have been warned!
+ */
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
error:
i915_gem_set_wedged(i915);
i915_gem_retire_requests(i915);
goto finish;
}
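/*
 * A sketch of the bounded retry loop added to i915_reset() above: attempt
 * the reset a few times with a fixed delay, then give up and taint the
 * kernel. try_reset() and the 100ms usleep() are stand-ins for the
 * kernel's intel_gpu_reset() and msleep(100).
 */
#include <stdio.h>
#include <unistd.h>

static int try_reset(int attempt)
{
	return attempt < 2 ? -1 : 0;	/* pretend the third try works */
}

int main(void)
{
	int ret = -1;

	for (int i = 0; i < 3; i++) {
		ret = try_reset(i);
		if (ret == 0)
			break;
		usleep(100 * 1000);	/* ~msleep(100) */
	}
	printf(ret ? "reset failed, taint kernel\n" : "reset ok\n");
	return ret;
}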
+static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
+{
+ return intel_gpu_reset(dev_priv, intel_engine_flag(engine));
+}
+
/**
* i915_reset_engine - reset GPU engine to recover from a hang
* @engine: engine to reset
@@ -1973,23 +2021,27 @@ int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+ active_request = i915_gem_reset_prepare_engine(engine);
+ if (IS_ERR_OR_NULL(active_request)) {
+ /* Either the previous reset failed, or we pardon the reset. */
+ ret = PTR_ERR(active_request);
+ goto out;
+ }
+
if (!(flags & I915_RESET_QUIET)) {
dev_notice(engine->i915->drm.dev,
"Resetting %s after gpu hang\n", engine->name);
}
error->reset_engine_count[engine->id]++;
- active_request = i915_gem_reset_prepare_engine(engine);
- if (IS_ERR(active_request)) {
- DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n");
- ret = PTR_ERR(active_request);
- goto out;
- }
-
- ret = intel_gpu_reset(engine->i915, intel_engine_flag(engine));
+ if (!engine->i915->guc.execbuf_client)
+ ret = intel_gt_reset_engine(engine->i915, engine);
+ else
+ ret = intel_guc_reset_engine(&engine->i915->guc, engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
- DRM_DEBUG_DRIVER("Failed to reset %s, ret=%d\n",
+ DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
+ engine->i915->guc.execbuf_client ? "GuC " : "",
engine->name, ret);
goto out;
}
@@ -2504,7 +2556,7 @@ static int intel_runtime_suspend(struct device *kdev)
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
- if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && intel_rc6_enabled())))
+ if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
return -ENODEV;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
@@ -2524,6 +2576,8 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_disable_interrupts(dev_priv);
+ intel_uncore_suspend(dev_priv);
+
ret = 0;
if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_uninit(dev_priv);
@@ -2536,6 +2590,8 @@ static int intel_runtime_suspend(struct device *kdev)
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
+ intel_uncore_runtime_resume(dev_priv);
+
intel_runtime_pm_enable_interrupts(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
@@ -2543,8 +2599,6 @@ static int intel_runtime_suspend(struct device *kdev)
return ret;
}
- intel_uncore_suspend(dev_priv);
-
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e143004e66d5..caebd5825279 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -40,6 +40,7 @@
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
+#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
@@ -55,19 +56,21 @@
#include "i915_reg.h"
#include "i915_utils.h"
-#include "intel_uncore.h"
#include "intel_bios.h"
+#include "intel_device_info.h"
+#include "intel_display.h"
#include "intel_dpll_mgr.h"
-#include "intel_uc.h"
#include "intel_lrc.h"
+#include "intel_opregion.h"
#include "intel_ringbuffer.h"
+#include "intel_uncore.h"
+#include "intel_uc.h"
#include "i915_gem.h"
#include "i915_gem_context.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
-#include "i915_gem_render_state.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
@@ -80,8 +83,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20171023"
-#define DRIVER_TIMESTAMP 1508748913
+#define DRIVER_DATE "20171222"
+#define DRIVER_TIMESTAMP 1513971710
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -243,173 +246,6 @@ static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
return clamp_u64_to_fixed16(interm_sum);
}
-static inline const char *yesno(bool v)
-{
- return v ? "yes" : "no";
-}
-
-static inline const char *onoff(bool v)
-{
- return v ? "on" : "off";
-}
-
-static inline const char *enableddisabled(bool v)
-{
- return v ? "enabled" : "disabled";
-}
-
-enum pipe {
- INVALID_PIPE = -1,
- PIPE_A = 0,
- PIPE_B,
- PIPE_C,
- _PIPE_EDP,
- I915_MAX_PIPES = _PIPE_EDP
-};
-#define pipe_name(p) ((p) + 'A')
-
-enum transcoder {
- TRANSCODER_A = 0,
- TRANSCODER_B,
- TRANSCODER_C,
- TRANSCODER_EDP,
- TRANSCODER_DSI_A,
- TRANSCODER_DSI_C,
- I915_MAX_TRANSCODERS
-};
-
-static inline const char *transcoder_name(enum transcoder transcoder)
-{
- switch (transcoder) {
- case TRANSCODER_A:
- return "A";
- case TRANSCODER_B:
- return "B";
- case TRANSCODER_C:
- return "C";
- case TRANSCODER_EDP:
- return "EDP";
- case TRANSCODER_DSI_A:
- return "DSI A";
- case TRANSCODER_DSI_C:
- return "DSI C";
- default:
- return "<invalid>";
- }
-}
-
-static inline bool transcoder_is_dsi(enum transcoder transcoder)
-{
- return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
-}
-
-/*
- * Global legacy plane identifier. Valid only for primary/sprite
- * planes on pre-g4x, and only for primary planes on g4x+.
- */
-enum plane {
- PLANE_A,
- PLANE_B,
- PLANE_C,
-};
-#define plane_name(p) ((p) + 'A')
-
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
-
-/*
- * Per-pipe plane identifier.
- * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
- * number of planes per CRTC. Not all platforms really have this many planes,
- * which means some arrays of size I915_MAX_PLANES may have unused entries
- * between the topmost sprite plane and the cursor plane.
- *
- * This is expected to be passed to various register macros
- * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
- */
-enum plane_id {
- PLANE_PRIMARY,
- PLANE_SPRITE0,
- PLANE_SPRITE1,
- PLANE_SPRITE2,
- PLANE_CURSOR,
- I915_MAX_PLANES,
-};
-
-#define for_each_plane_id_on_crtc(__crtc, __p) \
- for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
- for_each_if ((__crtc)->plane_ids_mask & BIT(__p))
-
-enum port {
- PORT_NONE = -1,
- PORT_A = 0,
- PORT_B,
- PORT_C,
- PORT_D,
- PORT_E,
- I915_MAX_PORTS
-};
-#define port_name(p) ((p) + 'A')
-
-#define I915_NUM_PHYS_VLV 2
-
-enum dpio_channel {
- DPIO_CH0,
- DPIO_CH1
-};
-
-enum dpio_phy {
- DPIO_PHY0,
- DPIO_PHY1,
- DPIO_PHY2,
-};
-
-enum intel_display_power_domain {
- POWER_DOMAIN_PIPE_A,
- POWER_DOMAIN_PIPE_B,
- POWER_DOMAIN_PIPE_C,
- POWER_DOMAIN_PIPE_A_PANEL_FITTER,
- POWER_DOMAIN_PIPE_B_PANEL_FITTER,
- POWER_DOMAIN_PIPE_C_PANEL_FITTER,
- POWER_DOMAIN_TRANSCODER_A,
- POWER_DOMAIN_TRANSCODER_B,
- POWER_DOMAIN_TRANSCODER_C,
- POWER_DOMAIN_TRANSCODER_EDP,
- POWER_DOMAIN_TRANSCODER_DSI_A,
- POWER_DOMAIN_TRANSCODER_DSI_C,
- POWER_DOMAIN_PORT_DDI_A_LANES,
- POWER_DOMAIN_PORT_DDI_B_LANES,
- POWER_DOMAIN_PORT_DDI_C_LANES,
- POWER_DOMAIN_PORT_DDI_D_LANES,
- POWER_DOMAIN_PORT_DDI_E_LANES,
- POWER_DOMAIN_PORT_DDI_A_IO,
- POWER_DOMAIN_PORT_DDI_B_IO,
- POWER_DOMAIN_PORT_DDI_C_IO,
- POWER_DOMAIN_PORT_DDI_D_IO,
- POWER_DOMAIN_PORT_DDI_E_IO,
- POWER_DOMAIN_PORT_DSI,
- POWER_DOMAIN_PORT_CRT,
- POWER_DOMAIN_PORT_OTHER,
- POWER_DOMAIN_VGA,
- POWER_DOMAIN_AUDIO,
- POWER_DOMAIN_PLLS,
- POWER_DOMAIN_AUX_A,
- POWER_DOMAIN_AUX_B,
- POWER_DOMAIN_AUX_C,
- POWER_DOMAIN_AUX_D,
- POWER_DOMAIN_GMBUS,
- POWER_DOMAIN_MODESET,
- POWER_DOMAIN_INIT,
-
- POWER_DOMAIN_NUM,
-};
-
-#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
-#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
- ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
-#define POWER_DOMAIN_TRANSCODER(tran) \
- ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
- (tran) + POWER_DOMAIN_TRANSCODER_A)
-
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@@ -471,122 +307,6 @@ struct i915_hotplug {
I915_GEM_DOMAIN_INSTRUCTION | \
I915_GEM_DOMAIN_VERTEX)
-#define for_each_pipe(__dev_priv, __p) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
-#define for_each_pipe_masked(__dev_priv, __p, __mask) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
- for_each_if ((__mask) & (1 << (__p)))
-#define for_each_universal_plane(__dev_priv, __pipe, __p) \
- for ((__p) = 0; \
- (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
- (__p)++)
-#define for_each_sprite(__dev_priv, __p, __s) \
- for ((__s) = 0; \
- (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
- (__s)++)
-
-#define for_each_port_masked(__port, __ports_mask) \
- for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
- for_each_if ((__ports_mask) & (1 << (__port)))
-
-#define for_each_crtc(dev, crtc) \
- list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
-
-#define for_each_intel_plane(dev, intel_plane) \
- list_for_each_entry(intel_plane, \
- &(dev)->mode_config.plane_list, \
- base.head)
-
-#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
- list_for_each_entry(intel_plane, \
- &(dev)->mode_config.plane_list, \
- base.head) \
- for_each_if ((plane_mask) & \
- (1 << drm_plane_index(&intel_plane->base)))
-
-#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
- list_for_each_entry(intel_plane, \
- &(dev)->mode_config.plane_list, \
- base.head) \
- for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
-
-#define for_each_intel_crtc(dev, intel_crtc) \
- list_for_each_entry(intel_crtc, \
- &(dev)->mode_config.crtc_list, \
- base.head)
-
-#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
- list_for_each_entry(intel_crtc, \
- &(dev)->mode_config.crtc_list, \
- base.head) \
- for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
-
-#define for_each_intel_encoder(dev, intel_encoder) \
- list_for_each_entry(intel_encoder, \
- &(dev)->mode_config.encoder_list, \
- base.head)
-
-#define for_each_intel_connector_iter(intel_connector, iter) \
- while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
-
-#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
- list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
- for_each_if ((intel_encoder)->base.crtc == (__crtc))
-
-#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
- list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
- for_each_if ((intel_connector)->base.encoder == (__encoder))
-
-#define for_each_power_domain(domain, mask) \
- for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
- for_each_if (BIT_ULL(domain) & (mask))
-
-#define for_each_power_well(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
- (__power_well) - (__dev_priv)->power_domains.power_wells < \
- (__dev_priv)->power_domains.power_well_count; \
- (__power_well)++)
-
-#define for_each_power_well_rev(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
- (__dev_priv)->power_domains.power_well_count - 1; \
- (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
- (__power_well)--)
-
-#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well(__dev_priv, __power_well) \
- for_each_if ((__power_well)->domains & (__domain_mask))
-
-#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well_rev(__dev_priv, __power_well) \
- for_each_if ((__power_well)->domains & (__domain_mask))
-
-#define for_each_intel_plane_in_state(__state, plane, plane_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->base.dev->mode_config.num_total_plane && \
- ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
- (plane_state) = to_intel_plane_state((__state)->base.planes[__i].state), 1); \
- (__i)++) \
- for_each_if (plane_state)
-
-#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->base.dev->mode_config.num_crtc && \
- ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
- (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
- (__i)++) \
- for_each_if (crtc)
-
-
-#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->base.dev->mode_config.num_total_plane && \
- ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
- (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
- (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
- (__i)++) \
- for_each_if (plane)
-
struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;
@@ -623,20 +343,6 @@ struct drm_i915_file_private {
atomic_t context_bans;
};
-/* Used by dp and fdi links */
-struct intel_link_m_n {
- uint32_t tu;
- uint32_t gmch_m;
- uint32_t gmch_n;
- uint32_t link_m;
- uint32_t link_n;
-};
-
-void intel_link_compute_m_n(int bpp, int nlanes,
- int pixel_clock, int link_clock,
- struct intel_link_m_n *m_n,
- bool reduce_m_n);
-
/* Interface history:
*
* 1.1: Original.
@@ -651,27 +357,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 0
-struct opregion_header;
-struct opregion_acpi;
-struct opregion_swsci;
-struct opregion_asle;
-
-struct intel_opregion {
- struct opregion_header *header;
- struct opregion_acpi *acpi;
- struct opregion_swsci *swsci;
- u32 swsci_gbda_sub_functions;
- u32 swsci_sbcb_sub_functions;
- struct opregion_asle *asle;
- void *rvda;
- void *vbt_firmware;
- const void *vbt;
- u32 vbt_size;
- u32 *lid_state;
- struct work_struct asle_work;
-};
-#define OPREGION_SIZE (8*1024)
-
struct intel_overlay;
struct intel_overlay_error_state;
@@ -699,7 +384,8 @@ struct drm_i915_display_funcs {
struct intel_cdclk_state *cdclk_state);
void (*set_cdclk)(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state);
- int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane);
+ int (*get_fifo_size)(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane);
int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
int (*compute_intermediate_wm)(struct drm_device *dev,
struct intel_crtc *intel_crtc,
@@ -726,10 +412,12 @@ struct drm_i915_display_funcs {
void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
struct drm_atomic_state *old_state);
void (*update_crtcs)(struct drm_atomic_state *state);
- void (*audio_codec_enable)(struct drm_connector *connector,
- struct intel_encoder *encoder,
- const struct drm_display_mode *adjusted_mode);
- void (*audio_codec_disable)(struct intel_encoder *encoder);
+ void (*audio_codec_enable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+ void (*audio_codec_disable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
void (*fdi_link_train)(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void (*init_clock_gating)(struct drm_i915_private *dev_priv);
@@ -761,135 +449,6 @@ struct intel_csr {
uint32_t allowed_dc_mask;
};
-#define DEV_INFO_FOR_EACH_FLAG(func) \
- func(is_mobile); \
- func(is_lp); \
- func(is_alpha_support); \
- /* Keep has_* in alphabetical order */ \
- func(has_64bit_reloc); \
- func(has_aliasing_ppgtt); \
- func(has_csr); \
- func(has_ddi); \
- func(has_dp_mst); \
- func(has_reset_engine); \
- func(has_fbc); \
- func(has_fpga_dbg); \
- func(has_full_ppgtt); \
- func(has_full_48bit_ppgtt); \
- func(has_gmch_display); \
- func(has_guc); \
- func(has_guc_ct); \
- func(has_hotplug); \
- func(has_l3_dpf); \
- func(has_llc); \
- func(has_logical_ring_contexts); \
- func(has_logical_ring_preemption); \
- func(has_overlay); \
- func(has_pooled_eu); \
- func(has_psr); \
- func(has_rc6); \
- func(has_rc6p); \
- func(has_resource_streamer); \
- func(has_runtime_pm); \
- func(has_snoop); \
- func(unfenced_needs_alignment); \
- func(cursor_needs_physical); \
- func(hws_needs_physical); \
- func(overlay_needs_physical); \
- func(supports_tv); \
- func(has_ipc);
-
-struct sseu_dev_info {
- u8 slice_mask;
- u8 subslice_mask;
- u8 eu_total;
- u8 eu_per_subslice;
- u8 min_eu_in_pool;
- /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
- u8 subslice_7eu[3];
- u8 has_slice_pg:1;
- u8 has_subslice_pg:1;
- u8 has_eu_pg:1;
-};
-
-static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
-{
- return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
-}
-
-/* Keep in gen based order, and chronological order within a gen */
-enum intel_platform {
- INTEL_PLATFORM_UNINITIALIZED = 0,
- INTEL_I830,
- INTEL_I845G,
- INTEL_I85X,
- INTEL_I865G,
- INTEL_I915G,
- INTEL_I915GM,
- INTEL_I945G,
- INTEL_I945GM,
- INTEL_G33,
- INTEL_PINEVIEW,
- INTEL_I965G,
- INTEL_I965GM,
- INTEL_G45,
- INTEL_GM45,
- INTEL_IRONLAKE,
- INTEL_SANDYBRIDGE,
- INTEL_IVYBRIDGE,
- INTEL_VALLEYVIEW,
- INTEL_HASWELL,
- INTEL_BROADWELL,
- INTEL_CHERRYVIEW,
- INTEL_SKYLAKE,
- INTEL_BROXTON,
- INTEL_KABYLAKE,
- INTEL_GEMINILAKE,
- INTEL_COFFEELAKE,
- INTEL_CANNONLAKE,
- INTEL_MAX_PLATFORMS
-};
-
-struct intel_device_info {
- u16 device_id;
- u16 gen_mask;
-
- u8 gen;
- u8 gt; /* GT number, 0 if undefined */
- u8 num_rings;
- u8 ring_mask; /* Rings supported by the HW */
-
- enum intel_platform platform;
- u32 platform_mask;
-
- u32 display_mmio_offset;
-
- u8 num_pipes;
- u8 num_sprites[I915_MAX_PIPES];
- u8 num_scalers[I915_MAX_PIPES];
-
- unsigned int page_sizes; /* page sizes supported by the HW */
-
-#define DEFINE_FLAG(name) u8 name:1
- DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
-#undef DEFINE_FLAG
- u16 ddb_size; /* in blocks */
-
- /* Register offsets for the various display pipes and transcoders */
- int pipe_offsets[I915_MAX_TRANSCODERS];
- int trans_offsets[I915_MAX_TRANSCODERS];
- int palette_offsets[I915_MAX_PIPES];
- int cursor_offsets[I915_MAX_PIPES];
-
- /* Slice/subslice/EU info */
- struct sseu_dev_info sseu;
-
- struct color_luts {
- u16 degamma_lut_size;
- u16 gamma_lut_size;
- } color;
-};
-
struct intel_display_error_state;
struct i915_gpu_state {
@@ -911,6 +470,12 @@ struct i915_gpu_state {
struct intel_device_info device_info;
struct i915_params params;
+ struct i915_error_uc {
+ struct intel_uc_fw guc_fw;
+ struct intel_uc_fw huc_fw;
+ struct drm_i915_error_object *guc_log;
+ } uc;
+
/* Generic register state */
u32 eir;
u32 pgtbl_er;
@@ -933,12 +498,11 @@ struct i915_gpu_state {
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
- struct drm_i915_error_object *semaphore;
- struct drm_i915_error_object *guc_log;
struct drm_i915_error_engine {
int engine_id;
/* Software tracked state */
+ bool idle;
bool waiting;
int num_waiters;
unsigned long hangcheck_timestamp;
@@ -1001,6 +565,7 @@ struct i915_gpu_state {
long user_bo_count;
struct drm_i915_error_object *wa_ctx;
+ struct drm_i915_error_object *default_state;
struct drm_i915_error_request {
long jiffies;
@@ -1137,7 +702,7 @@ struct intel_fbc {
struct {
enum pipe pipe;
- enum plane plane;
+ enum i9xx_plane_id i9xx_plane;
unsigned int fence_y_offset;
} crtc;
@@ -1386,7 +951,6 @@ struct intel_gen6_power_mgmt {
struct intel_rps rps;
struct intel_rc6 rc6;
struct intel_llc_pstate llc_pstate;
- struct delayed_work autoenable_work;
};
/* defined intel_pm.c */
@@ -1529,9 +1093,6 @@ struct i915_gem_mm {
*/
struct pagevec wc_stash;
- /** Usable portion of the GTT for GEM */
- dma_addr_t stolen_base; /* limited to low memory (32-bit) */
-
/**
* tmpfs instance used for shmem backed objects
*/
@@ -1580,6 +1141,8 @@ struct drm_i915_error_state_buf {
loff_t pos;
};
+#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
+
#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
@@ -1698,6 +1261,8 @@ enum modeset_restore {
#define DDC_PIN_D 0x06
struct ddi_vbt_port_info {
+ int max_tmds_clock;
+
/*
* This is an index in the HDMI/DVI DDI buffer translation table.
* The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
@@ -2228,6 +1793,7 @@ struct i915_oa_ops {
struct intel_cdclk_state {
unsigned int cdclk, vco, ref;
+ u8 voltage_level;
};
struct drm_i915_private {
@@ -2242,6 +1808,30 @@ struct drm_i915_private {
const struct intel_device_info info;
+ /**
+ * Data Stolen Memory - aka "i915 stolen memory" - gives us the start
+ * and end of stolen memory, which we can optionally use to create GEM
+ * objects backed by it. Note that stolen_usable_size tells us exactly
+ * how much of this we are actually allowed to use, given that some
+ * portion of it is in fact reserved for use by hardware functions.
+ */
+ struct resource dsm;
+ /**
+ * Reserved portion of Data Stolen Memory
+ */
+ struct resource dsm_reserved;
+
+ /*
+ * Stolen memory is segmented in hardware, with different portions
+ * off-limits to certain functions.
+ *
+ * The drm_mm is initialised to the total accessible range, as found
+ * from the PCI config. On Broadwell+, this is further restricted to
+ * avoid the first page! The upper end of stolen memory is reserved for
+ * hardware functions and similarly removed from the accessible range.
+ */
+ resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
+
void __iomem *regs;
struct intel_uncore uncore;
@@ -2281,7 +1871,8 @@ struct drm_i915_private {
struct i915_gem_context *kernel_context;
/* Context only to be used for injecting preemption commands */
struct i915_gem_context *preempt_context;
- struct i915_vma *semaphore;
+ struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
+ [MAX_ENGINE_INSTANCE + 1];
struct drm_dma_handle *status_page_dmah;
struct resource mch_res;
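Since stolen memory is now described by the struct resource fields introduced in the hunk above, size arithmetic can lean on the standard resource helpers; a hypothetical illustration:

#include <linux/ioport.h>

static resource_size_t dsm_unreserved_size(const struct drm_i915_private *i915)
{
        /* total stolen minus the portion reserved for HW functions */
        return resource_size(&i915->dsm) - resource_size(&i915->dsm_reserved);
}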
@@ -2339,6 +1930,7 @@ struct drm_i915_private {
unsigned int max_dotclk_freq;
unsigned int rawclk_freq;
unsigned int hpll_freq;
+ unsigned int fdi_pll_freq;
unsigned int czclk_freq;
struct {
@@ -2418,6 +2010,8 @@ struct drm_i915_private {
unsigned int active_crtcs;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
+ /* minimum acceptable voltage level for each pipe */
+ u8 min_voltage_level[I915_MAX_PIPES];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
@@ -2609,7 +2203,6 @@ struct drm_i915_private {
bool periodic;
int period_exponent;
- int timestamp_frequency;
struct i915_oa_config test_config;
@@ -2754,6 +2347,8 @@ struct drm_i915_private {
int irq;
} lpe_audio;
+ struct i915_pmu pmu;
+
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
@@ -3049,6 +2644,8 @@ intel_info(const struct drm_i915_private *dev_priv)
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
(dev_priv)->info.gt == 2)
+#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
+ (dev_priv)->info.gt == 3)
#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
@@ -3130,6 +2727,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
+#define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv)
+
#define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc)
#define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop)
#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
@@ -3140,6 +2739,11 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
((dev_priv)->info.has_logical_ring_contexts)
+#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
+ ((dev_priv)->info.has_logical_ring_preemption)
+
+#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
+
#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt)
#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2)
#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3)
@@ -3191,8 +2795,10 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
#define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr)
+
#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6)
#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p)
+#define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
#define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr)
@@ -3210,8 +2816,16 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_GUC_CT(dev_priv) ((dev_priv)->info.has_guc_ct)
#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
+
+/* For now, anything with a GuC also has a HuC */
+#define HAS_HUC(dev_priv) (HAS_GUC(dev_priv))
#define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
+/* Having a GuC is not the same as using a GuC */
+#define USES_GUC(dev_priv) intel_uc_is_using_guc()
+#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission()
+#define USES_HUC(dev_priv) intel_uc_is_using_huc()
+
#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
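The comments above draw the key distinction: the HAS_* macros describe hardware capability, while the new USES_* macros reflect what the driver was actually configured to do. A hedged sketch of the intended split; the setup helper is hypothetical:

static void enable_guc_submission_paths(struct drm_i915_private *i915); /* hypothetical */

static void uc_policy_sketch(struct drm_i915_private *dev_priv)
{
        /* Capability: bail if the platform has no GuC at all. */
        if (!HAS_GUC(dev_priv))
                return;

        /* Policy: only wire up submission if configured to use it. */
        if (USES_GUC_SUBMISSION(dev_priv))
                enable_guc_submission_paths(dev_priv);
}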
@@ -3288,8 +2902,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt);
-bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value);
-
/* i915_drv.c */
void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@ -3318,7 +2930,9 @@ extern int i915_reset_engine(struct intel_engine_cs *engine,
unsigned int flags);
extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
-extern int intel_guc_reset(struct drm_i915_private *dev_priv);
+extern int intel_reset_guc(struct drm_i915_private *dev_priv);
+extern int intel_guc_reset_engine(struct intel_guc *guc,
+ struct intel_engine_cs *engine);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -3855,6 +3469,8 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm);
+void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);
+
/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{
@@ -3876,12 +3492,13 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size);
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
- u32 stolen_offset,
- u32 gtt_offset,
- u32 size);
+ resource_size_t stolen_offset,
+ resource_size_t gtt_offset,
+ resource_size_t size);
/* i915_gem_internal.c */
struct drm_i915_gem_object *
@@ -3889,7 +3506,7 @@ i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
phys_addr_t size);
/* i915_gem_shrinker.c */
-unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+unsigned long i915_gem_shrink(struct drm_i915_private *i915,
unsigned long target,
unsigned long *nr_scanned,
unsigned flags);
@@ -3898,9 +3515,9 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
#define I915_SHRINK_BOUND 0x4
#define I915_SHRINK_ACTIVE 0x8
#define I915_SHRINK_VMAPS 0x10
-unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
-void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
-void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
+unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
+void i915_gem_shrinker_register(struct drm_i915_private *i915);
+void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
/* i915_gem_tiling.c */
@@ -4052,41 +3669,6 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
enum port port);
-
-/* intel_opregion.c */
-#ifdef CONFIG_ACPI
-extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
-extern void intel_opregion_register(struct drm_i915_private *dev_priv);
-extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
-extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
-extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
- bool enable);
-extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
- pci_power_t state);
-extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
-#else
-static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
-static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
-static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
-static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
-{
-}
-static inline int
-intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
-{
- return 0;
-}
-static inline int
-intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
-{
- return 0;
-}
-static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
-{
- return -ENODEV;
-}
-#endif
-
/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
@@ -4103,14 +3685,9 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
return (struct intel_device_info *)&dev_priv->info;
}
-const char *intel_platform_name(enum intel_platform platform);
-void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
-void intel_device_info_dump(struct drm_i915_private *dev_priv);
-
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
-extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
@@ -4177,8 +3754,7 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
-uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
- uint8_t lane_count);
+uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count);
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
uint8_t lane_lat_optim_mask);
uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
@@ -4187,24 +3763,39 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
bool reset);
-void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
-void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void chv_phy_release_cl2_override(struct intel_encoder *encoder);
-void chv_phy_post_pll_disable(struct intel_encoder *encoder);
+void chv_phy_post_pll_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state);
void vlv_set_phy_signal_level(struct intel_encoder *encoder,
u32 demph_reg_value, u32 preemph_reg_value,
u32 uniqtranscale_reg_value, u32 tx3_demph);
-void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
-void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
-void vlv_phy_reset_lanes(struct intel_encoder *encoder);
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void vlv_phy_reset_lanes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state);
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
const i915_reg_t reg);
+u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
+
+static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+ const i915_reg_t reg)
+{
+ return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
+}
+
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
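The new intel_rc6_residency_us() wrapper above is just a rounded-up microsecond view of the nanosecond counter; a short usage sketch, assuming GEN6_GT_GFX_RC6 as the residency register:

static void log_rc6_residency(struct drm_i915_private *dev_priv)
{
        u64 ns = intel_rc6_residency_ns(dev_priv, GEN6_GT_GFX_RC6);
        u64 us = intel_rc6_residency_us(dev_priv, GEN6_GT_GFX_RC6);

        /* us == DIV_ROUND_UP_ULL(ns, 1000) by construction */
        DRM_DEBUG_DRIVER("RC6 residency: %llu ns (~%llu us)\n", ns, us);
}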
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5cfba89ed586..8bc3283484be 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -531,7 +531,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
* @obj: i915 gem object
* @flags: how to wait (under a lock, for all rendering or just for writes etc)
* @timeout: how long to wait
- * @rps: client (user process) to charge for any waitboosting
+ * @rps_client: client (user process) to charge for any waitboosting
*/
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
@@ -666,17 +666,13 @@ fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}
-static void
-flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
+void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
- if (!(obj->base.write_domain & flush_domains))
- return;
-
- /* No actual flushing is required for the GTT write domain. Writes
- * to it "immediately" go to main memory as far as we know, so there's
- * no chipset flush. It also doesn't land in render cache.
+ /*
+ * No actual flushing is required for the GTT write domain when reading
+ * from the GTT domain. Writes to it "immediately" go to main memory
+ * as far as we know, so there's no chipset flush. It also doesn't
+ * land in the GPU render cache.
*
* However, we do have to enforce the order so that all writes through
* the GTT land before any writes to the device, such as updates to
@@ -687,22 +683,43 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
* timing. This issue has only been observed when switching quickly
* between GTT writes and CPU reads from inside the kernel on recent hw,
* and it appears to only affect discrete GTT blocks (i.e. on LLC
- * system agents we cannot reproduce this behaviour).
+ * system agents we cannot reproduce this behaviour, until Cannonlake
+ * that was!).
*/
+
wmb();
+ intel_runtime_pm_get(dev_priv);
+ spin_lock_irq(&dev_priv->uncore.lock);
+
+ POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
+
+ spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_runtime_pm_put(dev_priv);
+}
+
+static void
+flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_vma *vma;
+
+ if (!(obj->base.write_domain & flush_domains))
+ return;
+
switch (obj->base.write_domain) {
case I915_GEM_DOMAIN_GTT:
- if (!HAS_LLC(dev_priv)) {
- intel_runtime_pm_get(dev_priv);
- spin_lock_irq(&dev_priv->uncore.lock);
- POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base));
- spin_unlock_irq(&dev_priv->uncore.lock);
- intel_runtime_pm_put(dev_priv);
- }
+ i915_gem_flush_ggtt_writes(dev_priv);
intel_fb_obj_flush(obj,
fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
+
+ for_each_ggtt_vma(vma, obj) {
+ if (vma->iomap)
+ continue;
+
+ i915_vma_unset_ggtt_write(vma);
+ }
break;
case I915_GEM_DOMAIN_CPU:
@@ -1099,7 +1116,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
page_base += offset & PAGE_MASK;
}
- if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
+ if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
ret = -EFAULT;
break;
@@ -1307,7 +1324,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
* If the object is non-shmem backed, we retry again with the
* path that handles page fault.
*/
- if (ggtt_write(&ggtt->mappable, page_base, page_offset,
+ if (ggtt_write(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
ret = -EFAULT;
break;
@@ -1549,10 +1566,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!i915_vma_is_ggtt(vma))
- break;
-
+ for_each_ggtt_vma(vma, obj) {
if (i915_vma_is_active(vma))
continue;
@@ -1612,7 +1626,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (err)
goto out;
- /* Flush and acquire obj->pages so that we are coherent through
+ /*
+ * Proxy objects do not control access to the backing storage, ergo
+ * they cannot be used as a means to manipulate the cache domain
+ * tracking for that backing storage. The proxy object is always
+ * considered to be outside of any cache domain.
+ */
+ if (i915_gem_object_is_proxy(obj)) {
+ err = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * Flush and acquire obj->pages so that we are coherent through
* direct access in memory with previous cached writes through
* shmemfs and that our cache domain tracking remains valid.
* For example, if the obj->filp was moved to swap without us
@@ -1668,6 +1694,11 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
+ /*
+ * Proxy objects are barred from CPU access, so there is no
+ * need to ban sw_finish as it is a nop.
+ */
+
/* Pinned buffers may be scanout, so flush the cache */
i915_gem_object_flush_if_display(obj);
i915_gem_object_put(obj);
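The proxy-object checks introduced in the hunks above and below all share one shape: operations that would touch the backing storage or its cache-domain tracking are refused with -ENXIO. A condensed sketch of that guard; the helper name is hypothetical:

static int check_not_proxy(struct drm_i915_gem_object *obj)
{
        /*
         * Proxy objects do not own their backing storage, so CPU
         * mmaps and cache-domain manipulation are rejected.
         */
        if (i915_gem_object_is_proxy(obj))
                return -ENXIO;

        return 0;
}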
@@ -1718,7 +1749,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
*/
if (!obj->base.filp) {
i915_gem_object_put(obj);
- return -EINVAL;
+ return -ENXIO;
}
addr = vm_mmap(obj->base.filp, 0, args->size,
@@ -1936,9 +1967,9 @@ int i915_gem_fault(struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
+ (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
- &ggtt->mappable);
+ &ggtt->iomap);
if (ret)
goto err_fence;
@@ -1948,6 +1979,8 @@ int i915_gem_fault(struct vm_fault *vmf)
list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
GEM_BUG_ON(!obj->userfault_count);
+ i915_vma_set_ggtt_write(vma);
+
err_fence:
i915_vma_unpin_fence(vma);
err_unpin:
@@ -2012,12 +2045,8 @@ static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
drm_vma_node_unmap(&obj->base.vma_node,
obj->base.dev->anon_inode->i_mapping);
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!i915_vma_is_ggtt(vma))
- break;
-
+ for_each_ggtt_vma(vma, obj)
i915_vma_unset_userfault(vma);
- }
}
/**
@@ -2567,7 +2596,7 @@ static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
}
err = obj->ops->get_pages(obj);
- GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages));
+ GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
return err;
}
@@ -2662,7 +2691,8 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
void *ptr;
int ret;
- GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
+ if (unlikely(!i915_gem_object_has_struct_page(obj)))
+ return ERR_PTR(-ENXIO);
ret = mutex_lock_interruptible(&obj->mm.lock);
if (ret)
@@ -2908,13 +2938,23 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
* Prevent request submission to the hardware until we have
* completed the reset in i915_gem_reset_finish(). If a request
* is completed by one engine, it may then queue a request
- * to a second via its engine->irq_tasklet *just* as we are
+ * to a second via its execlists->tasklet *just* as we are
* calling engine->init_hw() and also writing the ELSP.
- * Turning off the engine->irq_tasklet until the reset is over
+ * Turning off the execlists->tasklet until the reset is over
* prevents the race.
*/
- tasklet_kill(&engine->execlists.irq_tasklet);
- tasklet_disable(&engine->execlists.irq_tasklet);
+ tasklet_kill(&engine->execlists.tasklet);
+ tasklet_disable(&engine->execlists.tasklet);
+
+ /*
+ * We're using a worker to queue preemption requests from the tasklet
+ * in GuC submission mode.
+ * Even though the tasklet was disabled, we may still have a worker
+ * queued. Make sure that all workers scheduled before disabling the
+ * tasklet have completed before continuing with the reset.
+ */
+ if (engine->i915->guc.preempt_wq)
+ flush_workqueue(engine->i915->guc.preempt_wq);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
@@ -3049,7 +3089,12 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
void i915_gem_reset_engine(struct intel_engine_cs *engine,
struct drm_i915_gem_request *request)
{
- engine->irq_posted = 0;
+ /*
+ * Make sure this write is visible before we re-enable the interrupt
+ * handlers on another CPU, as tasklet_enable() resolves to just
+ * a compiler barrier which is insufficient for our purpose here.
+ */
+ smp_store_mb(engine->irq_posted, 0);
if (request)
request = i915_gem_reset_request(engine, request);
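smp_store_mb() above is a WRITE_ONCE() followed by a full memory barrier; the plain store it replaces would only get a compiler barrier from the subsequent tasklet_enable(), so it could remain invisible to another CPU. A small sketch of the pattern:

static void clear_irq_posted(struct intel_engine_cs *engine)
{
        /*
         * Store zero and issue a full barrier so the write is
         * visible before interrupt handlers run on another CPU;
         * a plain "engine->irq_posted = 0" gives no such ordering.
         */
        smp_store_mb(engine->irq_posted, 0);
}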
@@ -3079,6 +3124,25 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
ctx = fetch_and_zero(&engine->last_retired_context);
if (ctx)
engine->context_unpin(engine, ctx);
+
+ /*
+ * Ostensibly, we always want a context loaded for powersaving,
+ * so if the engine is idle after the reset, send a request
+ * to load our scratch kernel_context.
+ *
+ * More mysteriously, if we leave the engine idle after a reset,
+ * the next userspace batch may hang, with what appears to be
+ * an incoherent read by the CS (presumably stale TLB). An
+ * empty request appears sufficient to paper over the glitch.
+ */
+ if (list_empty(&engine->timeline->requests)) {
+ struct drm_i915_gem_request *rq;
+
+ rq = i915_gem_request_alloc(engine,
+ dev_priv->kernel_context);
+ if (!IS_ERR(rq))
+ __i915_add_request(rq, false);
+ }
}
i915_gem_restore_fences(dev_priv);
@@ -3093,7 +3157,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
- tasklet_enable(&engine->execlists.irq_tasklet);
+ tasklet_enable(&engine->execlists.tasklet);
kthread_unpark(engine->breadcrumbs.signaler);
intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
@@ -3271,13 +3335,20 @@ i915_gem_retire_work_handler(struct work_struct *work)
}
}
+static inline bool
+new_requests_since_last_retire(const struct drm_i915_private *i915)
+{
+ return (READ_ONCE(i915->gt.active_requests) ||
+ work_pending(&i915->gt.idle_work.work));
+}
+
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), gt.idle_work.work);
- struct drm_device *dev = &dev_priv->drm;
bool rearm_hangcheck;
+ ktime_t end;
if (!READ_ONCE(dev_priv->gt.awake))
return;
@@ -3286,14 +3357,21 @@ i915_gem_idle_work_handler(struct work_struct *work)
* Wait for last execlists context complete, but bail out in case a
* new request is submitted.
*/
- wait_for(intel_engines_are_idle(dev_priv), 10);
- if (READ_ONCE(dev_priv->gt.active_requests))
- return;
+ end = ktime_add_ms(ktime_get(), I915_IDLE_ENGINES_TIMEOUT);
+ do {
+ if (new_requests_since_last_retire(dev_priv))
+ return;
+
+ if (intel_engines_are_idle(dev_priv))
+ break;
+
+ usleep_range(100, 500);
+ } while (ktime_before(ktime_get(), end));
rearm_hangcheck =
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
- if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
/* Currently busy, come back later */
mod_delayed_work(dev_priv->wq,
&dev_priv->gt.idle_work,
@@ -3305,17 +3383,26 @@ i915_gem_idle_work_handler(struct work_struct *work)
* New request retired after this work handler started, extend active
* period until next instance of the work.
*/
- if (work_pending(work))
+ if (new_requests_since_last_retire(dev_priv))
goto out_unlock;
- if (dev_priv->gt.active_requests)
- goto out_unlock;
+ /*
+ * Be paranoid and flush a concurrent interrupt to make sure
+ * we don't reactivate any irq tasklets after parking.
+ *
+ * FIXME: Note that even though we have waited for execlists to be idle,
+ * an interrupt may still be in flight after the CSB has drained.
+ * synchronize_irq() makes sure that a residual interrupt
+ * is completed before we continue, but it doesn't prevent the HW from
+ * raising a spurious interrupt later. To complete the shield we should
+ * coordinate disabling the CS irq with flushing the interrupts.
+ */
+ synchronize_irq(dev_priv->drm.irq);
- if (wait_for(intel_engines_are_idle(dev_priv), 10))
- DRM_ERROR("Timeout waiting for engines to idle\n");
+ intel_engines_park(dev_priv);
+ i915_gem_timelines_park(dev_priv);
- intel_engines_mark_idle(dev_priv);
- i915_gem_timelines_mark_idle(dev_priv);
+ i915_pmu_gt_parked(dev_priv);
GEM_BUG_ON(!dev_priv->gt.awake);
dev_priv->gt.awake = false;
@@ -3323,9 +3410,12 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (INTEL_GEN(dev_priv) >= 6)
gen6_rps_idle(dev_priv);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
+
intel_runtime_pm_put(dev_priv);
out_unlock:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
out_rearm:
if (rearm_hangcheck) {
@@ -3467,8 +3557,19 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
static int wait_for_engines(struct drm_i915_private *i915)
{
- if (wait_for(intel_engines_are_idle(i915), 50)) {
- DRM_ERROR("Failed to idle engines, declaring wedged!\n");
+ if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
+ dev_err(i915->drm.dev,
+ "Failed to idle engines, declaring wedged!\n");
+ if (drm_debug & DRM_UT_DRIVER) {
+ struct drm_printer p = drm_debug_printer(__func__);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ intel_engine_dump(engine, &p,
+ "%s", engine->name);
+ }
+
i915_gem_set_wedged(i915);
return -EIO;
}
@@ -3494,9 +3595,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
if (ret)
return ret;
}
-
i915_gem_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests);
ret = wait_for_engines(i915);
} else {
@@ -3695,7 +3794,8 @@ restart:
return -EBUSY;
}
- if (i915_gem_valid_gtt_space(vma, cache_level))
+ if (!i915_vma_is_closed(vma) &&
+ i915_gem_valid_gtt_space(vma, cache_level))
continue;
ret = i915_vma_unbind(vma);
@@ -3748,7 +3848,7 @@ restart:
* dropped the fence as all snoopable access is
* supposed to be linear.
*/
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ for_each_ggtt_vma(vma, obj) {
ret = i915_vma_put_fence(vma);
if (ret)
return ret;
@@ -3850,6 +3950,15 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
+ /*
+ * The caching mode of a proxy object is handled by its generator, and
+ * not allowed to be changed by userspace.
+ */
+ if (i915_gem_object_is_proxy(obj)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
if (obj->cache_level == level)
goto out;
@@ -4655,14 +4764,16 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
i915_gem_object_put(obj);
}
-static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
+static void assert_kernel_context_is_current(struct drm_i915_private *i915)
{
+ struct i915_gem_context *kernel_context = i915->kernel_context;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id)
- GEM_BUG_ON(engine->last_retired_context &&
- !i915_gem_context_is_kernel(engine->last_retired_context));
+ for_each_engine(engine, i915, id) {
+ GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline->last_request));
+ GEM_BUG_ON(engine->last_retired_context != kernel_context);
+ }
}
void i915_gem_sanitize(struct drm_i915_private *i915)
@@ -4768,23 +4879,43 @@ err_unlock:
return ret;
}
-void i915_gem_resume(struct drm_i915_private *dev_priv)
+void i915_gem_resume(struct drm_i915_private *i915)
{
- struct drm_device *dev = &dev_priv->drm;
+ WARN_ON(i915->gt.awake);
- WARN_ON(dev_priv->gt.awake);
+ mutex_lock(&i915->drm.struct_mutex);
+ intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
- mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev_priv);
- i915_gem_restore_fences(dev_priv);
+ i915_gem_restore_gtt_mappings(i915);
+ i915_gem_restore_fences(i915);
- /* As we didn't flush the kernel context before suspend, we cannot
+ /*
+ * As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
- dev_priv->gt.resume(dev_priv);
+ i915->gt.resume(i915);
- mutex_unlock(&dev->struct_mutex);
+ if (i915_gem_init_hw(i915))
+ goto err_wedged;
+
+ intel_guc_resume(i915);
+
+ /* Always reload a context for powersaving. */
+ if (i915_gem_switch_to_kernel_context(i915))
+ goto err_wedged;
+
+out_unlock:
+ intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return;
+
+err_wedged:
+ if (!i915_terminally_wedged(&i915->gpu_error)) {
+ DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+ i915_gem_set_wedged(i915);
+ }
+ goto out_unlock;
}
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
@@ -4901,40 +5032,132 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
goto out;
}
- /* Need to do basic initialisation of all rings first: */
- ret = __i915_gem_restart_engines(dev_priv);
- if (ret)
- goto out;
-
- intel_mocs_init_l3cc_table(dev_priv);
-
/* We can't enable contexts until all firmware is loaded */
ret = intel_uc_init_hw(dev_priv);
if (ret)
goto out;
+ intel_mocs_init_l3cc_table(dev_priv);
+
+ /* Only when the HW is re-initialised, can we replay the requests */
+ ret = __i915_gem_restart_engines(dev_priv);
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
-bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
+static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
- if (INTEL_INFO(dev_priv)->gen < 6)
- return false;
+ struct i915_gem_context *ctx;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err;
- /* TODO: make semaphores and Execlists play nicely together */
- if (i915_modparams.enable_execlists)
- return false;
+ /*
+ * As we reset the GPU during very early sanitisation, the current
+ * register state on the GPU should reflect its default values.
+ * We load a context onto the hw (with restore-inhibit), then switch
+ * over to a second context to save that default register state. We
+ * can then prime every new context with that state so they all start
+ * from the same default HW values.
+ */
- if (value >= 0)
- return value;
+ ctx = i915_gem_context_create_kernel(i915, 0);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
- /* Enable semaphores on SNB when IO remapping is off */
- if (IS_GEN6(dev_priv) && intel_vtd_active())
- return false;
+ for_each_engine(engine, i915, id) {
+ struct drm_i915_gem_request *rq;
- return true;
+ rq = i915_gem_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ctx;
+ }
+
+ err = 0;
+ if (engine->init_context)
+ err = engine->init_context(rq);
+
+ __i915_add_request(rq, true);
+ if (err)
+ goto err_active;
+ }
+
+ err = i915_gem_switch_to_kernel_context(i915);
+ if (err)
+ goto err_active;
+
+ err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+ if (err)
+ goto err_active;
+
+ assert_kernel_context_is_current(i915);
+
+ for_each_engine(engine, i915, id) {
+ struct i915_vma *state;
+
+ state = ctx->engine[id].state;
+ if (!state)
+ continue;
+
+ /*
+ * As we will hold a reference to the logical state, it will
+ * not be torn down with the context, and importantly the
+ * object will hold onto its vma (making it possible for a
+ * stray GTT write to corrupt our defaults). Unmap the vma
+ * from the GTT to prevent such accidents and reclaim the
+ * space.
+ */
+ err = i915_vma_unbind(state);
+ if (err)
+ goto err_active;
+
+ err = i915_gem_object_set_to_cpu_domain(state->obj, false);
+ if (err)
+ goto err_active;
+
+ engine->default_state = i915_gem_object_get(state->obj);
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
+ unsigned int found = intel_engines_has_context_isolation(i915);
+
+ /*
+ * Make sure that classes with multiple engine instances all
+ * share the same basic configuration.
+ */
+ for_each_engine(engine, i915, id) {
+ unsigned int bit = BIT(engine->uabi_class);
+ unsigned int expected = engine->default_state ? bit : 0;
+
+ if ((found & bit) != expected) {
+ DRM_ERROR("mismatching default context state for class %d on engine %s\n",
+ engine->uabi_class, engine->name);
+ }
+ }
+ }
+
+out_ctx:
+ i915_gem_context_set_closed(ctx);
+ i915_gem_context_put(ctx);
+ return err;
+
+err_active:
+ /*
+ * If we have to abandon now, we expect the engines to be idle
+ * and ready to be torn down. First try to flush any remaining
+ * request, ensure we are pointing at the kernel context and
+ * then remove it.
+ */
+ if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
+ goto out_ctx;
+
+ if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED)))
+ goto out_ctx;
+
+ i915_gem_contexts_lost(i915);
+ goto out_ctx;
}
int i915_gem_init(struct drm_i915_private *dev_priv)
@@ -4952,18 +5175,22 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
- if (!i915_modparams.enable_execlists) {
- dev_priv->gt.resume = intel_legacy_submission_resume;
- dev_priv->gt.cleanup_engine = intel_engine_cleanup;
- } else {
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
dev_priv->gt.resume = intel_lr_context_resume;
dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
+ } else {
+ dev_priv->gt.resume = intel_legacy_submission_resume;
+ dev_priv->gt.cleanup_engine = intel_engine_cleanup;
}
ret = i915_gem_init_userptr(dev_priv);
if (ret)
return ret;
+ ret = intel_uc_init_wq(dev_priv);
+ if (ret)
+ return ret;
+
/* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
* used by the CS may be stale, despite us poking the TLB reset. If
@@ -4974,20 +5201,94 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
ret = i915_gem_init_ggtt(dev_priv);
- if (ret)
- goto out_unlock;
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_unlock;
+ }
ret = i915_gem_contexts_init(dev_priv);
- if (ret)
- goto out_unlock;
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_ggtt;
+ }
ret = intel_engines_init(dev_priv);
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_context;
+ }
+
+ intel_init_gt_powersave(dev_priv);
+
+ ret = intel_uc_init(dev_priv);
if (ret)
- goto out_unlock;
+ goto err_pm;
ret = i915_gem_init_hw(dev_priv);
+ if (ret)
+ goto err_uc_init;
+
+ /*
+ * Despite its name, intel_init_clock_gating applies display clock
+ * gating workarounds, GT mmio workarounds and the occasional GT
+ * power context workaround. Worse, sometimes it includes a context
+ * register workaround which we need to apply before we record the
+ * default HW state for all contexts.
+ *
+ * FIXME: break up the workarounds and apply them at the right time!
+ */
+ intel_init_clock_gating(dev_priv);
+
+ ret = __intel_engines_record_defaults(dev_priv);
+ if (ret)
+ goto err_init_hw;
+
+ if (i915_inject_load_failure()) {
+ ret = -ENODEV;
+ goto err_init_hw;
+ }
+
+ if (i915_inject_load_failure()) {
+ ret = -EIO;
+ goto err_init_hw;
+ }
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ return 0;
+
+ /*
+ * Unwinding is complicated by the fact that we want -EIO to mean
+ * "disable GPU submission but keep KMS alive". We want to mark the
+ * HW as irreversibly wedged, but keep enough state around that the
+ * driver doesn't explode during runtime.
+ */
+err_init_hw:
+ i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
+ i915_gem_contexts_lost(dev_priv);
+ intel_uc_fini_hw(dev_priv);
+err_uc_init:
+ intel_uc_fini(dev_priv);
+err_pm:
+ if (ret != -EIO) {
+ intel_cleanup_gt_powersave(dev_priv);
+ i915_gem_cleanup_engines(dev_priv);
+ }
+err_context:
+ if (ret != -EIO)
+ i915_gem_contexts_fini(dev_priv);
+err_ggtt:
+err_unlock:
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ if (ret != -EIO)
+ i915_gem_cleanup_userptr(dev_priv);
+
if (ret == -EIO) {
- /* Allow engine initialisation to fail by marking the GPU as
+ /*
+ * Allow engine initialisation to fail by marking the GPU as
* wedged. But we only want to do this where the GPU is angry,
* for all other failure, such as an allocation failure, bail.
*/
@@ -4998,10 +5299,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
ret = 0;
}
-out_unlock:
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
+ i915_gem_drain_freed_objects(dev_priv);
return ret;
}
@@ -5052,6 +5350,22 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
i915_gem_detect_bit_6_swizzle(dev_priv);
}
+static void i915_gem_init__mm(struct drm_i915_private *i915)
+{
+ spin_lock_init(&i915->mm.object_stat_lock);
+ spin_lock_init(&i915->mm.obj_lock);
+ spin_lock_init(&i915->mm.free_lock);
+
+ init_llist_head(&i915->mm.free_list);
+
+ INIT_LIST_HEAD(&i915->mm.unbound_list);
+ INIT_LIST_HEAD(&i915->mm.bound_list);
+ INIT_LIST_HEAD(&i915->mm.fence_list);
+ INIT_LIST_HEAD(&i915->mm.userfault_list);
+
+ INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
+}
+
int
i915_gem_load_init(struct drm_i915_private *dev_priv)
{
@@ -5093,15 +5407,7 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
if (err)
goto err_priorities;
- INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
-
- spin_lock_init(&dev_priv->mm.obj_lock);
- spin_lock_init(&dev_priv->mm.free_lock);
- init_llist_head(&dev_priv->mm.free_list);
- INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
- INIT_LIST_HEAD(&dev_priv->mm.bound_list);
- INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
+ i915_gem_init__mm(dev_priv);
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index ee54597465b6..e920dab7f1b8 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -28,7 +28,11 @@
#include <linux/bug.h>
#ifdef CONFIG_DRM_I915_DEBUG_GEM
-#define GEM_BUG_ON(expr) BUG_ON(expr)
+#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
+ printk(KERN_ERR "GEM_BUG_ON(%s)\n", __stringify(condition)); \
+ BUG(); \
+ } \
+ } while (0)
#define GEM_WARN_ON(expr) WARN_ON(expr)
#define GEM_DEBUG_DECL(var) var
@@ -44,6 +48,12 @@
#define GEM_DEBUG_BUG_ON(expr)
#endif
+#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
+#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
+#else
+#define GEM_TRACE(...) do { } while (0)
+#endif
+
#define I915_NUM_ENGINES 5
#endif /* __I915_GEM_H__ */
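A note on the GEM_BUG_ON change above: in CONFIG_DRM_I915_DEBUG_GEM builds the macro now prints the literal expression that fired before calling BUG(), so the failing assertion is visible in dmesg even without decoding the backtrace (GEM_TRACE likewise compiles away entirely unless CONFIG_DRM_I915_TRACE_GEM is enabled). A minimal userspace analogue of the same stringify-then-abort pattern, with fprintf()/abort() standing in for printk()/BUG() — all names here are illustrative, not kernel API:

#include <stdio.h>
#include <stdlib.h>

#define my_stringify(x) #x

#define MY_BUG_ON(condition) do { \
	if (condition) { \
		/* log the literal expression that fired */ \
		fprintf(stderr, "MY_BUG_ON(%s)\n", my_stringify(condition)); \
		abort(); \
	} \
} while (0)

int main(void)
{
	int refcount = 0;

	MY_BUG_ON(refcount < 1); /* prints "MY_BUG_ON(refcount < 1)", then aborts */
	return 0;
}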
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index f663cd919795..b9b53ac14176 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -167,7 +167,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
i915_sw_fence_await_reservation(&clflush->wait,
obj->resv, NULL,
true, I915_FENCE_TIMEOUT,
- GFP_KERNEL);
+ I915_FENCE_GFP);
reservation_object_lock(obj->resv, NULL);
reservation_object_add_excl_fence(obj->resv, &clflush->dma);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f782cf2069c1..648e7536ff51 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -316,7 +316,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
* present or not in use we still need a small bias as ring wraparound
* at offset 0 sometimes hangs. No idea why.
*/
- if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading)
+ if (USES_GUC(dev_priv))
ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
else
ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
@@ -409,7 +409,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
i915_gem_context_set_closed(ctx); /* not user accessible */
i915_gem_context_clear_bannable(ctx);
i915_gem_context_set_force_single_submission(ctx);
- if (!i915_modparams.enable_guc_submission)
+ if (!USES_GUC_SUBMISSION(to_i915(dev)))
ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
@@ -418,8 +418,8 @@ out:
return ctx;
}
-static struct i915_gem_context *
-create_kernel_context(struct drm_i915_private *i915, int prio)
+struct i915_gem_context *
+i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
struct i915_gem_context *ctx;
@@ -460,20 +460,12 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
init_llist_head(&dev_priv->contexts.free_list);
- if (intel_vgpu_active(dev_priv) &&
- HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- if (!i915_modparams.enable_execlists) {
- DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
- return -EINVAL;
- }
- }
-
/* Using the simple ida interface, the max is limited by sizeof(int) */
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
ida_init(&dev_priv->contexts.hw_ida);
/* lowest priority; idle task */
- ctx = create_kernel_context(dev_priv, I915_PRIORITY_MIN);
+ ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context\n");
err = PTR_ERR(ctx);
@@ -487,7 +479,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
dev_priv->kernel_context = ctx;
/* highest priority; preempting task */
- ctx = create_kernel_context(dev_priv, INT_MAX);
+ ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default preempt context\n");
err = PTR_ERR(ctx);
@@ -515,6 +507,7 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id) {
engine->legacy_active_context = NULL;
+ engine->legacy_active_ppgtt = NULL;
if (!engine->last_retired_context)
continue;
@@ -522,28 +515,6 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
engine->context_unpin(engine, engine->last_retired_context);
engine->last_retired_context = NULL;
}
-
- /* Force the GPU state to be restored on enabling */
- if (!i915_modparams.enable_execlists) {
- struct i915_gem_context *ctx;
-
- list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
- if (!i915_gem_context_is_default(ctx))
- continue;
-
- for_each_engine(engine, dev_priv, id)
- ctx->engine[engine->id].initialised = false;
-
- ctx->remap_slice = ALL_L3_SLICES(dev_priv);
- }
-
- for_each_engine(engine, dev_priv, id) {
- struct intel_context *kce =
- &dev_priv->kernel_context->engine[engine->id];
-
- kce->initialised = true;
- }
- }
}
void i915_gem_contexts_fini(struct drm_i915_private *i915)
@@ -596,310 +567,7 @@ void i915_gem_context_close(struct drm_file *file)
idr_destroy(&file_priv->context_idr);
}
-static inline int
-mi_set_context(struct drm_i915_gem_request *req, u32 flags)
-{
- struct drm_i915_private *dev_priv = req->i915;
- struct intel_engine_cs *engine = req->engine;
- enum intel_engine_id id;
- const int num_rings =
- /* Use an extended w/a on gen7 if signalling from other rings */
- (i915_modparams.semaphores && INTEL_GEN(dev_priv) == 7) ?
- INTEL_INFO(dev_priv)->num_rings - 1 :
- 0;
- int len;
- u32 *cs;
-
- flags |= MI_MM_SPACE_GTT;
- if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
- /* These flags are for resource streamer on HSW+ */
- flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
- else
- flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
-
- len = 4;
- if (INTEL_GEN(dev_priv) >= 7)
- len += 2 + (num_rings ? 4*num_rings + 6 : 0);
-
- cs = intel_ring_begin(req, len);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (INTEL_GEN(dev_priv) >= 7) {
- *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
- if (num_rings) {
- struct intel_engine_cs *signaller;
-
- *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
- for_each_engine(signaller, dev_priv, id) {
- if (signaller == engine)
- continue;
-
- *cs++ = i915_mmio_reg_offset(
- RING_PSMI_CTL(signaller->mmio_base));
- *cs++ = _MASKED_BIT_ENABLE(
- GEN6_PSMI_SLEEP_MSG_DISABLE);
- }
- }
- }
-
- *cs++ = MI_NOOP;
- *cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
- /*
- * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
- * WaMiSetContext_Hang:snb,ivb,vlv
- */
- *cs++ = MI_NOOP;
-
- if (INTEL_GEN(dev_priv) >= 7) {
- if (num_rings) {
- struct intel_engine_cs *signaller;
- i915_reg_t last_reg = {}; /* keep gcc quiet */
-
- *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
- for_each_engine(signaller, dev_priv, id) {
- if (signaller == engine)
- continue;
-
- last_reg = RING_PSMI_CTL(signaller->mmio_base);
- *cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = _MASKED_BIT_DISABLE(
- GEN6_PSMI_SLEEP_MSG_DISABLE);
- }
-
- /* Insert a delay before the next switch! */
- *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- *cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = i915_ggtt_offset(engine->scratch);
- *cs++ = MI_NOOP;
- }
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- }
-
- intel_ring_advance(req, cs);
-
- return 0;
-}
-
-static int remap_l3(struct drm_i915_gem_request *req, int slice)
-{
- u32 *cs, *remap_info = req->i915->l3_parity.remap_info[slice];
- int i;
-
- if (!remap_info)
- return 0;
-
- cs = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /*
- * Note: We do not worry about the concurrent register cacheline hang
- * here because no other code should access these registers other than
- * at initialization time.
- */
- *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
- for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
- *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
- *cs++ = remap_info[i];
- }
- *cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
-
- return 0;
-}
-
-static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *engine,
- struct i915_gem_context *to)
-{
- if (to->remap_slice)
- return false;
-
- if (!to->engine[RCS].initialised)
- return false;
-
- if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
- return false;
-
- return to == engine->legacy_active_context;
-}
-
-static bool
-needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine)
-{
- struct i915_gem_context *from = engine->legacy_active_context;
-
- if (!ppgtt)
- return false;
-
- /* Always load the ppgtt on first use */
- if (!from)
- return true;
-
- /* Same context without new entries, skip */
- if ((!from->ppgtt || from->ppgtt == ppgtt) &&
- !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
- return false;
-
- if (engine->id != RCS)
- return true;
-
- if (INTEL_GEN(engine->i915) < 8)
- return true;
-
- return false;
-}
-
-static bool
-needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
- struct i915_gem_context *to,
- u32 hw_flags)
-{
- if (!ppgtt)
- return false;
-
- if (!IS_GEN8(to->i915))
- return false;
-
- if (hw_flags & MI_RESTORE_INHIBIT)
- return true;
-
- return false;
-}
-
-static int do_rcs_switch(struct drm_i915_gem_request *req)
-{
- struct i915_gem_context *to = req->ctx;
- struct intel_engine_cs *engine = req->engine;
- struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
- struct i915_gem_context *from = engine->legacy_active_context;
- u32 hw_flags;
- int ret, i;
-
- GEM_BUG_ON(engine->id != RCS);
-
- if (skip_rcs_switch(ppgtt, engine, to))
- return 0;
-
- if (needs_pd_load_pre(ppgtt, engine)) {
- /* Older GENs and non render rings still want the load first,
- * "PP_DCLV followed by PP_DIR_BASE register through Load
- * Register Immediate commands in Ring Buffer before submitting
- * a context."*/
- trace_switch_mm(engine, to);
- ret = ppgtt->switch_mm(ppgtt, req);
- if (ret)
- return ret;
- }
-
- if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
- /* NB: If we inhibit the restore, the context is not allowed to
- * die because future work may end up depending on valid address
- * space. This means we must enforce that a page table load
- * occur when this occurs. */
- hw_flags = MI_RESTORE_INHIBIT;
- else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
- hw_flags = MI_FORCE_RESTORE;
- else
- hw_flags = 0;
-
- if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
- ret = mi_set_context(req, hw_flags);
- if (ret)
- return ret;
-
- engine->legacy_active_context = to;
- }
-
- /* GEN8 does *not* require an explicit reload if the PDPs have been
- * setup, and we do not wish to move them.
- */
- if (needs_pd_load_post(ppgtt, to, hw_flags)) {
- trace_switch_mm(engine, to);
- ret = ppgtt->switch_mm(ppgtt, req);
- /* The hardware context switch is emitted, but we haven't
- * actually changed the state - so it's probably safe to bail
- * here. Still, let the user know something dangerous has
- * happened.
- */
- if (ret)
- return ret;
- }
-
- if (ppgtt)
- ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
-
- for (i = 0; i < MAX_L3_SLICES; i++) {
- if (!(to->remap_slice & (1<<i)))
- continue;
-
- ret = remap_l3(req, i);
- if (ret)
- return ret;
-
- to->remap_slice &= ~(1<<i);
- }
-
- if (!to->engine[RCS].initialised) {
- if (engine->init_context) {
- ret = engine->init_context(req);
- if (ret)
- return ret;
- }
- to->engine[RCS].initialised = true;
- }
-
- return 0;
-}
-
-/**
- * i915_switch_context() - perform a GPU context switch.
- * @req: request for which we'll execute the context switch
- *
- * The context life cycle is simple. The context refcount is incremented and
- * decremented by 1 and create and destroy. If the context is in use by the GPU,
- * it will have a refcount > 1. This allows us to destroy the context abstract
- * object while letting the normal object tracking destroy the backing BO.
- *
- * This function should not be used in execlists mode. Instead the context is
- * switched by writing to the ELSP and requests keep a reference to their
- * context.
- */
-int i915_switch_context(struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
-
- lockdep_assert_held(&req->i915->drm.struct_mutex);
- if (i915_modparams.enable_execlists)
- return 0;
-
- if (!req->ctx->engine[engine->id].state) {
- struct i915_gem_context *to = req->ctx;
- struct i915_hw_ppgtt *ppgtt =
- to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
-
- if (needs_pd_load_pre(ppgtt, engine)) {
- int ret;
-
- trace_switch_mm(engine, to);
- ret = ppgtt->switch_mm(ppgtt, req);
- if (ret)
- return ret;
-
- ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
- }
-
- engine->legacy_active_context = to;
- return 0;
- }
-
- return do_rcs_switch(req);
-}
-
-static bool engine_has_kernel_context(struct intel_engine_cs *engine)
+static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
{
struct i915_gem_timeline *timeline;
@@ -915,8 +583,7 @@ static bool engine_has_kernel_context(struct intel_engine_cs *engine)
return false;
}
- return (!engine->last_retired_context ||
- i915_gem_context_is_kernel(engine->last_retired_context));
+ return intel_engine_has_kernel_context(engine);
}
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
@@ -931,9 +598,8 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *req;
- int ret;
- if (engine_has_kernel_context(engine))
+ if (engine_has_idle_kernel_context(engine))
continue;
req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
@@ -951,13 +617,17 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
if (prev)
i915_sw_fence_await_sw_fence_gfp(&req->submit,
&prev->submit,
- GFP_KERNEL);
+ I915_FENCE_GFP);
}
- ret = i915_switch_context(req);
- i915_add_request(req);
- if (ret)
- return ret;
+ /*
+ * Force a flush after the switch to ensure that all rendering
+ * and operations prior to switching to the kernel context hit
+ * memory. This should be guaranteed by the previous request,
+ * but an extra layer of paranoia before we declare the system
+ * idle (on suspend, etc.) is advisable!
+ */
+ __i915_add_request(req, true);
}
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 44688e22a5c2..4bfb72f8e1cb 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -157,7 +157,6 @@ struct i915_gem_context {
u32 *lrc_reg_state;
u64 lrc_desc;
int pin_count;
- bool initialised;
} engine[I915_NUM_ENGINES];
/** ring_size: size for allocating the per-engine ring buffer */
@@ -292,6 +291,9 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+struct i915_gem_context *
+i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);
+
static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 8daa8a78cdc0..60ca4f05ae94 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -46,7 +46,7 @@ static bool ggtt_is_idle(struct drm_i915_private *i915)
return false;
for_each_engine(engine, i915, id) {
- if (engine->last_retired_context != i915->kernel_context)
+ if (!intel_engine_has_kernel_context(engine))
return false;
}
@@ -73,6 +73,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
if (err)
return err;
+ GEM_BUG_ON(!ggtt_is_idle(i915));
return 0;
}
@@ -216,6 +217,7 @@ search_again:
if (ret)
return ret;
+ cond_resched();
goto search_again;
}
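The cond_resched() added above keeps the eviction retry loop from hogging the CPU when it has to rescan repeatedly under memory pressure. Roughly the same shape in userspace, where sched_yield() plays the role of cond_resched() (the try_once callback and the -EAGAIN convention here are hypothetical):

#include <errno.h>
#include <sched.h>

static int evict_until_done(int (*try_once)(void))
{
	int err;

	/* yield between passes so a long retry loop cannot starve others */
	while ((err = try_once()) == -EAGAIN)
		sched_yield();
	return err;
}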
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 435ed95df144..4401068ff468 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -271,7 +271,7 @@ static inline u64 gen8_noncanonical_addr(u64 address)
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
- return eb->engine->needs_cmd_parser && eb->batch_len;
+ return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
}
static int eb_create(struct i915_execbuffer *eb)
@@ -1012,7 +1012,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset += page << PAGE_SHIFT;
}
- vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->mappable,
+ vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
offset);
cache->page = page;
cache->vaddr = (unsigned long)vaddr;
@@ -1111,14 +1111,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_request;
- err = eb->engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- goto err_request;
-
- err = i915_switch_context(rq);
- if (err)
- goto err_request;
-
err = eb->engine->emit_bb_start(rq,
batch->node.start, PAGE_SIZE,
cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
@@ -1818,8 +1810,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
/* Unconditionally flush any chipset caches (for streaming writes). */
i915_gem_chipset_flush(eb->i915);
- /* Unconditionally invalidate GPU caches and TLBs. */
- return eb->engine->emit_flush(eb->request, EMIT_INVALIDATE);
+ return 0;
}
static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
@@ -1965,10 +1956,6 @@ static int eb_submit(struct i915_execbuffer *eb)
if (err)
return err;
- err = i915_switch_context(eb->request);
- if (err)
- return err;
-
if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
err = i915_reset_gen7_sol_offsets(eb->request);
if (err)
@@ -2074,23 +2061,27 @@ static struct drm_syncobj **
get_fence_array(struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file)
{
- const unsigned int nfences = args->num_cliprects;
+ const unsigned long nfences = args->num_cliprects;
struct drm_i915_gem_exec_fence __user *user;
struct drm_syncobj **fences;
- unsigned int n;
+ unsigned long n;
int err;
if (!(args->flags & I915_EXEC_FENCE_ARRAY))
return NULL;
- if (nfences > SIZE_MAX / sizeof(*fences))
+ /* Check multiplication overflow for access_ok() and kvmalloc_array() */
+ BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
+ if (nfences > min_t(unsigned long,
+ ULONG_MAX / sizeof(*user),
+ SIZE_MAX / sizeof(*fences)))
return ERR_PTR(-EINVAL);
user = u64_to_user_ptr(args->cliprects_ptr);
- if (!access_ok(VERIFY_READ, user, nfences * 2 * sizeof(u32)))
+ if (!access_ok(VERIFY_READ, user, nfences * sizeof(*user)))
return ERR_PTR(-EFAULT);
- fences = kvmalloc_array(args->num_cliprects, sizeof(*fences),
+ fences = kvmalloc_array(nfences, sizeof(*fences),
__GFP_NOWARN | GFP_KERNEL);
if (!fences)
return ERR_PTR(-ENOMEM);
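The hardening above bounds nfences by both ULONG_MAX / sizeof(*user) (so the access_ok() range computation cannot wrap) and SIZE_MAX / sizeof(*fences) (so the array allocation cannot wrap) before either product is formed. A self-contained sketch of the division-based guard, as a hypothetical helper with plain calloc() standing in for kvmalloc_array():

#include <stdint.h>
#include <stdlib.h>

static void *alloc_array_checked(size_t n, size_t elem_size)
{
	/* dividing first avoids forming the overflowing product n * elem_size */
	if (elem_size != 0 && n > SIZE_MAX / elem_size)
		return NULL;
	return calloc(n, elem_size);
}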
@@ -2447,6 +2438,26 @@ err_in_fence:
return err;
}
+static size_t eb_element_size(void)
+{
+ return (sizeof(struct drm_i915_gem_exec_object2) +
+ sizeof(struct i915_vma *) +
+ sizeof(unsigned int));
+}
+
+static bool check_buffer_count(size_t count)
+{
+ const size_t sz = eb_element_size();
+
+ /*
+ * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
+ * array size (see eb_create()). Otherwise, we can accept an array as
+ * large as can be addressed (though use large arrays at your peril)!
+ */
+
+ return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
+}
+
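check_buffer_count() above folds three constraints into one predicate: at least one buffer, no more than INT_MAX (the lookup-array limit imposed by eb_create()), and small enough that the later kvmalloc_array(count + 1, sz, ...) cannot overflow, where the "- 1" reserves room for the extra command-parser slot. In positive form (illustration only, with sz = eb_element_size()):

/*
 *   1 <= count && count <= INT_MAX && (count + 1) * sz <= SIZE_MAX
 *
 * The last condition is tested without overflow as
 *   count <= SIZE_MAX / sz - 1
 * since count + 1 <= SIZE_MAX / sz implies (count + 1) * sz <= SIZE_MAX.
 */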
/*
* Legacy execbuffer just creates an exec2 list from the original exec object
* list array and passes it to the real function.
@@ -2455,18 +2466,16 @@ int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
- const size_t sz = (sizeof(struct drm_i915_gem_exec_object2) +
- sizeof(struct i915_vma *) +
- sizeof(unsigned int));
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ const size_t count = args->buffer_count;
unsigned int i;
int err;
- if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
- DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
+ if (!check_buffer_count(count)) {
+ DRM_DEBUG("execbuf2 with %zd buffers\n", count);
return -EINVAL;
}
@@ -2485,9 +2494,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
/* Copy in the exec list from userland */
- exec_list = kvmalloc_array(args->buffer_count, sizeof(*exec_list),
+ exec_list = kvmalloc_array(count, sizeof(*exec_list),
__GFP_NOWARN | GFP_KERNEL);
- exec2_list = kvmalloc_array(args->buffer_count + 1, sz,
+ exec2_list = kvmalloc_array(count + 1, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec_list == NULL || exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
@@ -2498,7 +2507,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
}
err = copy_from_user(exec_list,
u64_to_user_ptr(args->buffers_ptr),
- sizeof(*exec_list) * args->buffer_count);
+ sizeof(*exec_list) * count);
if (err) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, err);
@@ -2548,16 +2557,14 @@ int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
- const size_t sz = (sizeof(struct drm_i915_gem_exec_object2) +
- sizeof(struct i915_vma *) +
- sizeof(unsigned int));
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list;
struct drm_syncobj **fences = NULL;
+ const size_t count = args->buffer_count;
int err;
- if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
- DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
+ if (!check_buffer_count(count)) {
+ DRM_DEBUG("execbuf2 with %zd buffers\n", count);
return -EINVAL;
}
@@ -2565,17 +2572,17 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EINVAL;
/* Allocate an extra slot for use by the command parser */
- exec2_list = kvmalloc_array(args->buffer_count + 1, sz,
+ exec2_list = kvmalloc_array(count + 1, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec2_list == NULL) {
- DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
+ DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
+ count);
return -ENOMEM;
}
if (copy_from_user(exec2_list,
u64_to_user_ptr(args->buffers_ptr),
- sizeof(*exec2_list) * args->buffer_count)) {
- DRM_DEBUG("copy %d exec entries failed\n", args->buffer_count);
+ sizeof(*exec2_list) * count)) {
+ DRM_DEBUG("copy %zd exec entries failed\n", count);
kvfree(exec2_list);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2af65ecf2df8..c5f393870532 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -178,7 +178,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
return 0;
}
- if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) {
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
if (has_full_48bit_ppgtt)
return 3;
@@ -454,6 +454,14 @@ static void vm_free_pages_release(struct i915_address_space *vm,
static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
+ /*
+ * On !llc, we need to change the pages back to WB. We only do so
+ * in bulk, so we rarely need to change the page attributes here,
+ * but doing so requires a stop_machine() from deep inside arch/x86/mm.
+ * To make detection of the possible sleep more likely, use an
+ * unconditional might_sleep() for everybody.
+ */
+ might_sleep();
if (!pagevec_add(&vm->free_pages, page))
vm_free_pages_release(vm, false);
}
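The unconditional might_sleep() above is a detection aid: the path that actually sleeps (changing page attributes back to WB via stop_machine()) is only taken in bulk on !llc machines, so without it a caller in atomic context could go unnoticed for a long time. A userspace analogue of asserting sleepability on every call even though the blocking branch is rare (all names hypothetical):

#include <assert.h>
#include <stdbool.h>

static bool in_atomic_section; /* stand-in for the kernel's "may not sleep" state */
static int fast_slots_left = 16;

static bool try_fast_free(void *item)
{
	(void)item;
	return fast_slots_left-- > 0; /* common, non-blocking path */
}

static void slow_free_all(void)
{
	fast_slots_left = 16; /* rare path that, in the kernel, may sleep */
}

static void pool_free(void *item)
{
	assert(!in_atomic_section); /* unconditional, like might_sleep() */
	if (!try_fast_free(item))
		slow_free_all();
}

int main(void)
{
	int x;

	for (int i = 0; i < 40; i++)
		pool_free(&x); /* exercises both the fast and the slow path */
	return 0;
}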
@@ -2154,7 +2162,7 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
* need to do anything here. */
- if (i915_modparams.enable_execlists)
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
return 0;
if (!USES_PPGTT(dev_priv))
@@ -2248,35 +2256,62 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
}
-void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
+static void gen6_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
-
- if (INTEL_INFO(dev_priv)->gen < 6)
- return;
+ u32 fault;
for_each_engine(engine, dev_priv, id) {
- u32 fault_reg;
- fault_reg = I915_READ(RING_FAULT_REG(engine));
- if (fault_reg & RING_FAULT_VALID) {
+ fault = I915_READ(RING_FAULT_REG(engine));
+ if (fault & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
"\tAddr: 0x%08lx\n"
"\tAddress space: %s\n"
"\tSource ID: %d\n"
"\tType: %d\n",
- fault_reg & PAGE_MASK,
- fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault_reg),
- RING_FAULT_FAULT_TYPE(fault_reg));
+ fault & PAGE_MASK,
+ fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
I915_WRITE(RING_FAULT_REG(engine),
- fault_reg & ~RING_FAULT_VALID);
+ fault & ~RING_FAULT_VALID);
}
}
- /* Engine specific init may not have been done till this point. */
- if (dev_priv->engine[RCS])
- POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
+ POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
+}
+
+static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv)
+{
+ u32 fault = I915_READ(GEN8_RING_FAULT_REG);
+
+ if (fault & RING_FAULT_VALID) {
+ DRM_DEBUG_DRIVER("Unexpected fault\n"
+ "\tAddr: 0x%08lx\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault & PAGE_MASK,
+ GEN8_RING_FAULT_ENGINE_ID(fault),
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
+ I915_WRITE(GEN8_RING_FAULT_REG,
+ fault & ~RING_FAULT_VALID);
+ }
+
+ POSTING_READ(GEN8_RING_FAULT_REG);
+}
+
+void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
+{
+ /* From GEN8 onwards we only have one 'All Engine Fault Register' */
+ if (INTEL_GEN(dev_priv) >= 8)
+ gen8_check_and_clear_faults(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 6)
+ gen6_check_and_clear_faults(dev_priv);
+ else
+ return;
}
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
@@ -2877,7 +2912,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->drm.struct_mutex);
arch_phys_wc_del(ggtt->mtrr);
- io_mapping_fini(&ggtt->mappable);
+ io_mapping_fini(&ggtt->iomap);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2914,50 +2949,6 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
return 0;
}
-static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
-{
- snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
- snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
- return (size_t)snb_gmch_ctl << 25; /* 32 MB units */
-}
-
-static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
-{
- bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
- bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
- return (size_t)bdw_gmch_ctl << 25; /* 32 MB units */
-}
-
-static size_t chv_get_stolen_size(u16 gmch_ctrl)
-{
- gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
- gmch_ctrl &= SNB_GMCH_GMS_MASK;
-
- /*
- * 0x0 to 0x10: 32MB increments starting at 0MB
- * 0x11 to 0x16: 4MB increments starting at 8MB
- * 0x17 to 0x1d: 4MB increments start at 36MB
- */
- if (gmch_ctrl < 0x11)
- return (size_t)gmch_ctrl << 25;
- else if (gmch_ctrl < 0x17)
- return (size_t)(gmch_ctrl - 0x11 + 2) << 22;
- else
- return (size_t)(gmch_ctrl - 0x17 + 9) << 22;
-}
-
-static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
-{
- gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
- gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
-
- if (gen9_gmch_ctl < 0xf0)
- return (size_t)gen9_gmch_ctl << 25; /* 32 MB units */
- else
- /* 4MB increments starting at 0xf0 for 4MB */
- return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22;
-}
-
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
struct drm_i915_private *dev_priv = ggtt->base.i915;
@@ -3041,7 +3032,7 @@ const struct intel_ppat_entry *
intel_ppat_get(struct drm_i915_private *i915, u8 value)
{
struct intel_ppat *ppat = &i915->ppat;
- struct intel_ppat_entry *entry;
+ struct intel_ppat_entry *entry = NULL;
unsigned int scanned, best_score;
int i;
@@ -3064,7 +3055,7 @@ intel_ppat_get(struct drm_i915_private *i915, u8 value)
}
if (scanned == ppat->max_entries) {
- if (!best_score)
+ if (!entry)
return ERR_PTR(-ENOSPC);
kref_get(&entry->ref);
@@ -3171,12 +3162,6 @@ static void cnl_setup_private_ppat(struct intel_ppat *ppat)
ppat->match = bdw_private_pat_match;
ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
- /* XXX: spec is unclear if this is still needed for CNL+ */
- if (!USES_PPGTT(ppat->i915)) {
- __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
- return;
- }
-
__alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
__alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
__alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
@@ -3303,8 +3288,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
int err;
/* TODO: We're not aware of mappable constraints on gen8 yet */
- ggtt->mappable_base = pci_resource_start(pdev, 2);
- ggtt->mappable_end = pci_resource_len(pdev, 2);
+ ggtt->gmadr =
+ (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
if (!err)
@@ -3315,13 +3302,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
if (INTEL_GEN(dev_priv) >= 9) {
- ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
size = gen8_get_total_gtt_size(snb_gmch_ctl);
} else if (IS_CHERRYVIEW(dev_priv)) {
- ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
size = chv_get_total_gtt_size(snb_gmch_ctl);
} else {
- ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
size = gen8_get_total_gtt_size(snb_gmch_ctl);
}
@@ -3361,14 +3345,16 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
u16 snb_gmch_ctl;
int err;
- ggtt->mappable_base = pci_resource_start(pdev, 2);
- ggtt->mappable_end = pci_resource_len(pdev, 2);
+ ggtt->gmadr =
+ (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
/* 64/512MB is the current min/max we actually know of, but this is just
* a coarse sanity check.
*/
if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
- DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
+ DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
return -ENXIO;
}
@@ -3379,8 +3365,6 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
-
size = gen6_get_total_gtt_size(snb_gmch_ctl);
ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
@@ -3417,6 +3401,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *dev_priv = ggtt->base.i915;
+ phys_addr_t gmadr_base;
int ret;
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
@@ -3426,10 +3411,13 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
}
intel_gtt_get(&ggtt->base.total,
- &ggtt->stolen_size,
- &ggtt->mappable_base,
+ &gmadr_base,
&ggtt->mappable_end);
+ ggtt->gmadr =
+ (struct resource) DEFINE_RES_MEM(gmadr_base,
+ ggtt->mappable_end);
+
ggtt->do_idle_maps = needs_idle_maps(dev_priv);
ggtt->base.insert_page = i915_ggtt_insert_page;
ggtt->base.insert_entries = i915_ggtt_insert_entries;
@@ -3474,9 +3462,9 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
* currently don't have any bits spare to pass in this upper
* restriction!
*/
- if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) {
+ if (USES_GUC(dev_priv)) {
ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
- ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
+ ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
}
if ((ggtt->base.total - 1) >> 32) {
@@ -3484,21 +3472,21 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
" of address space! Found %lldM!\n",
ggtt->base.total >> 20);
ggtt->base.total = 1ULL << 32;
- ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
+ ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
}
if (ggtt->mappable_end > ggtt->base.total) {
DRM_ERROR("mappable aperture extends past end of GGTT,"
- " aperture=%llx, total=%llx\n",
- ggtt->mappable_end, ggtt->base.total);
+ " aperture=%pa, total=%llx\n",
+ &ggtt->mappable_end, ggtt->base.total);
ggtt->mappable_end = ggtt->base.total;
}
/* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_INFO("Memory usable by graphics device = %lluM\n",
- ggtt->base.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
- DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
+ DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->base.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("DSM size = %lluM\n",
+ (u64)resource_size(&intel_graphics_stolen_res) >> 20);
if (intel_vtd_active())
DRM_INFO("VT-d active for gfx access\n");
@@ -3527,14 +3515,14 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
mutex_unlock(&dev_priv->drm.struct_mutex);
- if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
- dev_priv->ggtt.mappable_base,
+ if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
+ dev_priv->ggtt.gmadr.start,
dev_priv->ggtt.mappable_end)) {
ret = -EIO;
goto out_gtt_cleanup;
}
- ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
+ ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
/*
* Initialise stolen early so that we may reserve preallocated
@@ -3564,6 +3552,8 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915)
GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
i915->ggtt.invalidate = guc_ggtt_invalidate;
+
+ i915_ggtt_invalidate(i915);
}
void i915_ggtt_disable_guc(struct drm_i915_private *i915)
@@ -3572,6 +3562,8 @@ void i915_ggtt_disable_guc(struct drm_i915_private *i915)
GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
i915->ggtt.invalidate = gen6_ggtt_invalidate;
+
+ i915_ggtt_invalidate(i915);
}
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
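Note the pattern in the two hunks above: after swapping the GGTT invalidate callback between the GuC and gen6 variants, the driver now invalidates once immediately, so nothing cached under the old scheme survives the transition. As a generic sketch (hypothetical types, not the driver's own):

struct dev { void (*invalidate)(struct dev *); };

static void set_invalidate(struct dev *d, void (*fn)(struct dev *))
{
	d->invalidate = fn;
	d->invalidate(d); /* flush once under the new scheme */
}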
@@ -3591,10 +3583,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
bool ggtt_bound = false;
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (vma->vm != &ggtt->base)
- continue;
-
+ for_each_ggtt_vma(vma, obj) {
if (!i915_vma_unbind(vma))
continue;
@@ -3708,9 +3697,6 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
rot_info->plane[i].stride, st, sg);
}
- DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
- obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
-
kvfree(page_addr_list);
return st;
@@ -3720,8 +3706,8 @@ err_sg_alloc:
err_st_alloc:
kvfree(page_addr_list);
- DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
+ DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 93211a96fdad..a42890d9af38 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -368,23 +368,10 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
*/
struct i915_ggtt {
struct i915_address_space base;
- struct io_mapping mappable; /* Mapping to our CPU mappable region */
- phys_addr_t mappable_base; /* PA of our GMADR */
- u64 mappable_end; /* End offset that we can CPU map */
-
- /* Stolen memory is segmented in hardware with different portions
- * offlimits to certain functions.
- *
- * The drm_mm is initialised to the total accessible range, as found
- * from the PCI config. On Broadwell+, this is further restricted to
- * avoid the first page! The upper end of stolen memory is reserved for
- * hardware functions and similarly removed from the accessible range.
- */
- u32 stolen_size; /* Total size of stolen memory */
- u32 stolen_usable_size; /* Total size minus reserved ranges */
- u32 stolen_reserved_base;
- u32 stolen_reserved_size;
+ struct io_mapping iomap; /* Mapping to our CPU mappable region */
+ struct resource gmadr; /* GMADR resource */
+ resource_size_t mappable_end; /* End offset that we can CPU map */
/** "Graphics Stolen Memory" holds the global PTEs */
void __iomem *gsm;
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index ee83ec838ee7..a1d6956734f7 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
+#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)
/* convert swiotlb segment size into sensible units (pages)! */
#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
@@ -95,7 +96,8 @@ create_st:
struct page *page;
do {
- page = alloc_pages(gfp | (order ? QUIET : 0), order);
+ page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
+ order);
if (page)
break;
if (!order--)
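The MAYFAIL change above only affects the final, order-0 attempt: higher-order allocations stay QUIET (fail fast, no warning), while the last resort may apply reclaim pressure but must still fail rather than trigger the OOM killer. A userspace sketch of the descending-order fallback, with malloc() standing in for alloc_pages() (names hypothetical):

#include <stdlib.h>

static void *alloc_best_effort(unsigned int max_order, size_t *out_size)
{
	unsigned int order = max_order;

	for (;;) {
		/* try the largest chunk first, halving on failure */
		void *p = malloc((size_t)1 << order);
		if (p) {
			*out_size = (size_t)1 << order;
			return p;
		}
		if (order == 0)
			return NULL; /* even the smallest chunk failed */
		order--;
	}
}

int main(void)
{
	size_t got;
	void *p = alloc_best_effort(20, &got); /* anything up to 1 MiB */

	free(p);
	return 0;
}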
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 63ce38c1cce9..05e89e1c0a08 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -53,8 +53,9 @@ struct i915_lut_handle {
struct drm_i915_gem_object_ops {
unsigned int flags;
-#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
-#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
+#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
+#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
+#define I915_GEM_OBJECT_IS_PROXY BIT(2)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@@ -260,6 +261,8 @@ struct drm_i915_gem_object {
} userptr;
unsigned long scratch;
+
+ void *gvt_info;
};
/** for phys allocated objects */
@@ -362,6 +365,12 @@ i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
+}
+
+static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
return obj->active_count;
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 3703dc91eeda..f7fc0df251ac 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -26,10 +26,12 @@
*/
#include "i915_drv.h"
+#include "i915_gem_render_state.h"
#include "intel_renderstate.h"
struct intel_render_state {
const struct intel_renderstate_rodata *rodata;
+ struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 batch_offset;
u32 batch_size;
@@ -40,6 +42,9 @@ struct intel_render_state {
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct intel_engine_cs *engine)
{
+ if (engine->id != RCS)
+ return NULL;
+
switch (INTEL_GEN(engine->i915)) {
case 6:
return &gen6_null_state;
@@ -74,17 +79,16 @@ static int render_state_setup(struct intel_render_state *so,
struct drm_i915_private *i915)
{
const struct intel_renderstate_rodata *rodata = so->rodata;
- struct drm_i915_gem_object *obj = so->vma->obj;
unsigned int i = 0, reloc_index = 0;
unsigned int needs_clflush;
u32 *d;
int ret;
- ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
+ ret = i915_gem_obj_prepare_shmem_write(so->obj, &needs_clflush);
if (ret)
return ret;
- d = kmap_atomic(i915_gem_object_get_dirty_page(obj, 0));
+ d = kmap_atomic(i915_gem_object_get_dirty_page(so->obj, 0));
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
@@ -112,7 +116,7 @@ static int render_state_setup(struct intel_render_state *so,
goto err;
}
- so->batch_offset = so->vma->node.start;
+ so->batch_offset = i915_ggtt_offset(so->vma);
so->batch_size = rodata->batch_items * sizeof(u32);
while (i % CACHELINE_DWORDS)
@@ -160,9 +164,9 @@ static int render_state_setup(struct intel_render_state *so,
drm_clflush_virt_range(d, i * sizeof(u32));
kunmap_atomic(d);
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
out:
- i915_gem_obj_finish_shmem_access(obj);
+ i915_gem_obj_finish_shmem_access(so->obj);
return ret;
err:
@@ -173,112 +177,57 @@ err:
#undef OUT_BATCH
-int i915_gem_render_state_init(struct intel_engine_cs *engine)
+int i915_gem_render_state_emit(struct drm_i915_gem_request *rq)
{
- struct intel_render_state *so;
- const struct intel_renderstate_rodata *rodata;
- struct drm_i915_gem_object *obj;
- int ret;
+ struct intel_engine_cs *engine = rq->engine;
+ struct intel_render_state so = {}; /* keep the compiler happy */
+ int err;
- if (engine->id != RCS)
+ so.rodata = render_state_get_rodata(engine);
+ if (!so.rodata)
return 0;
- rodata = render_state_get_rodata(engine);
- if (!rodata)
- return 0;
-
- if (rodata->batch_items * 4 > PAGE_SIZE)
+ if (so.rodata->batch_items * 4 > PAGE_SIZE)
return -EINVAL;
- so = kmalloc(sizeof(*so), GFP_KERNEL);
- if (!so)
- return -ENOMEM;
-
- obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- ret = PTR_ERR(obj);
- goto err_free;
- }
+ so.obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
+ if (IS_ERR(so.obj))
+ return PTR_ERR(so.obj);
- so->vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
- if (IS_ERR(so->vma)) {
- ret = PTR_ERR(so->vma);
+ so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.base, NULL);
+ if (IS_ERR(so.vma)) {
+ err = PTR_ERR(so.vma);
goto err_obj;
}
- so->rodata = rodata;
- engine->render_state = so;
- return 0;
-
-err_obj:
- i915_gem_object_put(obj);
-err_free:
- kfree(so);
- return ret;
-}
-
-int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
-{
- struct intel_render_state *so;
- int ret;
-
- lockdep_assert_held(&req->i915->drm.struct_mutex);
-
- so = req->engine->render_state;
- if (!so)
- return 0;
-
- /* Recreate the page after shrinking */
- if (!i915_gem_object_has_pages(so->vma->obj))
- so->batch_offset = -1;
-
- ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (ret)
- return ret;
+ err = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ goto err_vma;
- if (so->vma->node.start != so->batch_offset) {
- ret = render_state_setup(so, req->i915);
- if (ret)
- goto err_unpin;
- }
-
- ret = req->engine->emit_flush(req, EMIT_INVALIDATE);
- if (ret)
+ err = render_state_setup(&so, rq->i915);
+ if (err)
goto err_unpin;
- ret = req->engine->emit_bb_start(req,
- so->batch_offset, so->batch_size,
- I915_DISPATCH_SECURE);
- if (ret)
+ err = engine->emit_bb_start(rq,
+ so.batch_offset, so.batch_size,
+ I915_DISPATCH_SECURE);
+ if (err)
goto err_unpin;
- if (so->aux_size > 8) {
- ret = req->engine->emit_bb_start(req,
- so->aux_offset, so->aux_size,
- I915_DISPATCH_SECURE);
- if (ret)
+ if (so.aux_size > 8) {
+ err = engine->emit_bb_start(rq,
+ so.aux_offset, so.aux_size,
+ I915_DISPATCH_SECURE);
+ if (err)
goto err_unpin;
}
- i915_vma_move_to_active(so->vma, req, 0);
+ i915_vma_move_to_active(so.vma, rq, 0);
err_unpin:
- i915_vma_unpin(so->vma);
- return ret;
-}
-
-void i915_gem_render_state_fini(struct intel_engine_cs *engine)
-{
- struct intel_render_state *so;
- struct drm_i915_gem_object *obj;
-
- so = fetch_and_zero(&engine->render_state);
- if (!so)
- return;
-
- obj = so->vma->obj;
-
- i915_vma_close(so->vma);
- __i915_gem_object_release_unless_active(obj);
-
- kfree(so);
+ i915_vma_unpin(so.vma);
+err_vma:
+ i915_vma_close(so.vma);
+err_obj:
+ __i915_gem_object_release_unless_active(so.obj);
+ return err;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index 87481845799d..86369520482e 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -26,8 +26,6 @@
struct drm_i915_gem_request;
-int i915_gem_render_state_init(struct intel_engine_cs *engine);
-int i915_gem_render_state_emit(struct drm_i915_gem_request *req);
-void i915_gem_render_state_fini(struct intel_engine_cs *engine);
+int i915_gem_render_state_emit(struct drm_i915_gem_request *rq);
#endif /* _I915_GEM_RENDER_STATE_H_ */
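The shrunken header above mirrors the refactor in i915_gem_render_state.c: the per-engine render-state object with separate init/emit/fini entry points is gone, replaced by a single emit call that builds the state on its own stack, uses it once, and releases it before returning. The shape of that collapse as a generic sketch (all names hypothetical):

struct state { int built; };

static int state_build(struct state *s) { s->built = 1; return 0; }
static int state_use(const struct state *s) { return s->built ? 0 : -1; }
static void state_release(struct state *s) { s->built = 0; }

/* one entry point replaces init()/emit()/fini(): build, use once, release */
int emit_once(void)
{
	struct state so = { 0 };
	int err = state_build(&so);

	if (err)
		return err;
	err = state_use(&so);
	state_release(&so);
	return err;
}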
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index d140fcf5c6a3..d575109f7a7f 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -252,12 +252,29 @@ static void mark_busy(struct drm_i915_private *i915)
GEM_BUG_ON(!i915->gt.active_requests);
intel_runtime_pm_get_noresume(i915);
+
+ /*
+ * It seems that the DMC likes to transition between the DC states a lot
+ * when there are no connected displays (no active power domains) during
+ * command submission.
+ *
+ * This activity has a negative impact on the performance of the chip,
+ * with huge latencies observed in the interrupt handler and elsewhere.
+ *
+ * Work around it by grabbing a GT IRQ power domain whilst there is any
+ * GT activity, preventing any DC state transitions.
+ */
+ intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+
i915->gt.awake = true;
intel_enable_gt_powersave(i915);
i915_update_gfx_val(i915);
if (INTEL_GEN(i915) >= 6)
gen6_rps_busy(i915);
+ i915_pmu_gt_unparked(i915);
+
+ intel_engines_unpark(i915);
queue_delayed_work(i915->wq,
&i915->gt.retire_work,
@@ -462,6 +479,7 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
/* Transfer from per-context onto the global per-engine timeline */
timeline = engine->timeline;
GEM_BUG_ON(timeline == request->timeline);
+ GEM_BUG_ON(request->global_seqno);
seqno = timeline_get_seqno(timeline);
GEM_BUG_ON(!seqno);
@@ -508,6 +526,7 @@ void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
/* Only unwind in reverse order, required so that the per-context list
* is kept in seqno/ring order.
*/
+ GEM_BUG_ON(!request->global_seqno);
GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
engine->timeline->seqno--;
@@ -622,6 +641,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
if (ret)
goto err_unpin;
+ ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
+ if (ret)
+ goto err_unreserve;
+
/* Move the oldest request to the slab-cache (if not in use!) */
req = list_first_entry_or_null(&engine->timeline->requests,
typeof(*req), link);
@@ -656,10 +679,21 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
*
* Do not use kmem_cache_zalloc() here!
*/
- req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
- if (!req) {
- ret = -ENOMEM;
- goto err_unreserve;
+ req = kmem_cache_alloc(dev_priv->requests,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (unlikely(!req)) {
+ /* Ratelimit ourselves to prevent oom from malicious clients */
+ ret = i915_gem_wait_for_idle(dev_priv,
+ I915_WAIT_LOCKED |
+ I915_WAIT_INTERRUPTIBLE);
+ if (ret)
+ goto err_unreserve;
+
+ req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto err_unreserve;
+ }
}
req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
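The retry logic above is a two-step allocation policy: the first kmem_cache_alloc() uses __GFP_RETRY_MAYFAIL | __GFP_NOWARN so one greedy client cannot drive the whole system into OOM, and only after the driver has waited for its own pipeline to idle (retiring requests frees their slabs back to the cache) does it retry with plain GFP_KERNEL. A userspace analogue (names hypothetical; malloc() cannot really express the GFP distinction):

#include <stdlib.h>

static void *alloc_with_reclaim(size_t sz, int (*wait_for_idle)(void))
{
	void *p = malloc(sz); /* stands in for the MAYFAIL/NOWARN attempt */

	if (p)
		return p;
	if (wait_for_idle()) /* flush and retire our own work; may fail */
		return NULL;
	return malloc(sz); /* stands in for the plain GFP_KERNEL retry */
}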
@@ -701,22 +735,30 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
- ret = engine->request_alloc(req);
- if (ret)
- goto err_ctx;
-
- /* Record the position of the start of the request so that
+ /*
+ * Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the
* position of the head.
*/
req->head = req->ring->emit;
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = engine->emit_flush(req, EMIT_INVALIDATE);
+ if (ret)
+ goto err_unwind;
+
+ ret = engine->request_alloc(req);
+ if (ret)
+ goto err_unwind;
+
/* Check that we didn't interrupt ourselves with a new request */
GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
return req;
-err_ctx:
+err_unwind:
+ req->ring->emit = req->head;
+
/* Make sure we didn't add ourselves to external state before freeing */
GEM_BUG_ON(!list_empty(&req->active_list));
GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
@@ -753,7 +795,7 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
if (to->engine == from->engine) {
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
- GFP_KERNEL);
+ I915_FENCE_GFP);
return ret < 0 ? ret : 0;
}
@@ -781,7 +823,7 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
await_dma_fence:
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
- GFP_KERNEL);
+ I915_FENCE_GFP);
return ret < 0 ? ret : 0;
}
@@ -832,7 +874,7 @@ i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
else
ret = i915_sw_fence_await_dma_fence(&req->submit, fence,
I915_FENCE_TIMEOUT,
- GFP_KERNEL);
+ I915_FENCE_GFP);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 26249f39de67..0d6d39f19506 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -204,6 +204,8 @@ struct drm_i915_gem_request {
struct list_head client_link;
};
+#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
+
extern const struct dma_fence_ops i915_fence_ops;
static inline bool dma_fence_is_i915(const struct dma_fence *fence)
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 3770e3323fc8..9029ed04879c 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -35,9 +35,9 @@
#include "i915_drv.h"
#include "i915_trace.h"
-static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
+static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock)
{
- switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
+ switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) {
case MUTEX_TRYLOCK_RECURSIVE:
*unlock = false;
return true;
@@ -47,7 +47,7 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
preempt_disable();
do {
cpu_relax();
- if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
+ if (mutex_trylock(&i915->drm.struct_mutex)) {
*unlock = true;
break;
}
@@ -63,12 +63,12 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
BUG();
}
-static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
+static void shrinker_unlock(struct drm_i915_private *i915, bool unlock)
{
if (!unlock)
return;
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&i915->drm.struct_mutex);
}
static bool swap_available(void)
@@ -118,7 +118,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
/**
* i915_gem_shrink - Shrink buffer object caches
- * @dev_priv: i915 device
+ * @i915: i915 device
* @target: amount of memory to make available, in pages
* @nr_scanned: optional output for number of pages scanned (incremental)
* @flags: control flags for selecting cache types
@@ -142,7 +142,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
* The number of pages of backing storage actually released.
*/
unsigned long
-i915_gem_shrink(struct drm_i915_private *dev_priv,
+i915_gem_shrink(struct drm_i915_private *i915,
unsigned long target,
unsigned long *nr_scanned,
unsigned flags)
@@ -151,15 +151,15 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
struct list_head *list;
unsigned int bit;
} phases[] = {
- { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
- { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+ { &i915->mm.unbound_list, I915_SHRINK_UNBOUND },
+ { &i915->mm.bound_list, I915_SHRINK_BOUND },
{ NULL, 0 },
}, *phase;
unsigned long count = 0;
unsigned long scanned = 0;
bool unlock;
- if (!shrinker_lock(dev_priv, &unlock))
+ if (!shrinker_lock(i915, &unlock))
return 0;
/*
@@ -172,10 +172,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
* we will free as much as we can and hope to get a second chance.
*/
if (flags & I915_SHRINK_ACTIVE)
- i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
+ i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
- trace_i915_gem_shrink(dev_priv, target, flags);
- i915_gem_retire_requests(dev_priv);
+ trace_i915_gem_shrink(i915, target, flags);
+ i915_gem_retire_requests(i915);
/*
* Unbinding of objects will require HW access; Let us not wake the
@@ -183,7 +183,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
* we will force the wake during oom-notifier.
*/
if ((flags & I915_SHRINK_BOUND) &&
- !intel_runtime_pm_get_if_in_use(dev_priv))
+ !intel_runtime_pm_get_if_in_use(i915))
flags &= ~I915_SHRINK_BOUND;
/*
@@ -221,7 +221,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
* to be able to shrink their pages, so they remain on
* the unbound/bound list until actually freed.
*/
- spin_lock(&dev_priv->mm.obj_lock);
+ spin_lock(&i915->mm.obj_lock);
while (count < target &&
(obj = list_first_entry_or_null(phase->list,
typeof(*obj),
@@ -244,7 +244,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if (!can_release_pages(obj))
continue;
- spin_unlock(&dev_priv->mm.obj_lock);
+ spin_unlock(&i915->mm.obj_lock);
if (unsafe_drop_pages(obj)) {
/* May arrive from get_pages on another bo */
@@ -258,18 +258,18 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
}
scanned += obj->base.size >> PAGE_SHIFT;
- spin_lock(&dev_priv->mm.obj_lock);
+ spin_lock(&i915->mm.obj_lock);
}
list_splice_tail(&still_in_list, phase->list);
- spin_unlock(&dev_priv->mm.obj_lock);
+ spin_unlock(&i915->mm.obj_lock);
}
if (flags & I915_SHRINK_BOUND)
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(i915);
- i915_gem_retire_requests(dev_priv);
+ i915_gem_retire_requests(i915);
- shrinker_unlock(dev_priv, unlock);
+ shrinker_unlock(i915, unlock);
if (nr_scanned)
*nr_scanned += scanned;
@@ -278,7 +278,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
/**
* i915_gem_shrink_all - Shrink buffer object caches completely
- * @dev_priv: i915 device
+ * @i915: i915 device
*
* This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
* caches completely. It also first waits for and retires all outstanding
@@ -290,16 +290,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
* Returns:
* The number of pages of backing storage actually released.
*/
-unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
unsigned long freed;
- intel_runtime_pm_get(dev_priv);
- freed = i915_gem_shrink(dev_priv, -1UL, NULL,
+ intel_runtime_pm_get(i915);
+ freed = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(i915);
return freed;
}
@@ -347,53 +347,53 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
- struct drm_i915_private *dev_priv =
+ struct drm_i915_private *i915 =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
unsigned long freed;
bool unlock;
sc->nr_scanned = 0;
- if (!shrinker_lock(dev_priv, &unlock))
+ if (!shrinker_lock(i915, &unlock))
return SHRINK_STOP;
- freed = i915_gem_shrink(dev_priv,
+ freed = i915_gem_shrink(i915,
sc->nr_to_scan,
&sc->nr_scanned,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_PURGEABLE);
if (freed < sc->nr_to_scan)
- freed += i915_gem_shrink(dev_priv,
+ freed += i915_gem_shrink(i915,
sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
if (freed < sc->nr_to_scan && current_is_kswapd()) {
- intel_runtime_pm_get(dev_priv);
- freed += i915_gem_shrink(dev_priv,
+ intel_runtime_pm_get(i915);
+ freed += i915_gem_shrink(i915,
sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned,
I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(i915);
}
- shrinker_unlock(dev_priv, unlock);
+ shrinker_unlock(i915, unlock);
return sc->nr_scanned ? freed : SHRINK_STOP;
}
static bool
-shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
+shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
int timeout_ms)
{
unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
do {
- if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
- shrinker_lock(dev_priv, unlock))
+ if (i915_gem_wait_for_idle(i915, 0) == 0 &&
+ shrinker_lock(i915, unlock))
break;
schedule_timeout_killable(1);
@@ -412,32 +412,32 @@ shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
- struct drm_i915_private *dev_priv =
+ struct drm_i915_private *i915 =
container_of(nb, struct drm_i915_private, mm.oom_notifier);
struct drm_i915_gem_object *obj;
unsigned long unevictable, bound, unbound, freed_pages;
- freed_pages = i915_gem_shrink_all(dev_priv);
+ freed_pages = i915_gem_shrink_all(i915);
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
* being pointed to by hardware.
*/
unbound = bound = unevictable = 0;
- spin_lock(&dev_priv->mm.obj_lock);
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
+ spin_lock(&i915->mm.obj_lock);
+ list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) {
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
unbound += obj->base.size >> PAGE_SHIFT;
}
- list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
+ list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
bound += obj->base.size >> PAGE_SHIFT;
}
- spin_unlock(&dev_priv->mm.obj_lock);
+ spin_unlock(&i915->mm.obj_lock);
if (freed_pages || unbound || bound)
pr_info("Purging GPU memory, %lu pages freed, "
@@ -455,74 +455,74 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
- struct drm_i915_private *dev_priv =
+ struct drm_i915_private *i915 =
container_of(nb, struct drm_i915_private, mm.vmap_notifier);
struct i915_vma *vma, *next;
unsigned long freed_pages = 0;
bool unlock;
int ret;
- if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
+ if (!shrinker_lock_uninterruptible(i915, &unlock, 5000))
return NOTIFY_DONE;
/* Force everything onto the inactive lists */
- ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
+ ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
if (ret)
goto out;
- intel_runtime_pm_get(dev_priv);
- freed_pages += i915_gem_shrink(dev_priv, -1UL, NULL,
+ intel_runtime_pm_get(i915);
+ freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE |
I915_SHRINK_VMAPS);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(i915);
/* We also want to clear any cached iomaps as they wrap vmap */
list_for_each_entry_safe(vma, next,
- &dev_priv->ggtt.base.inactive_list, vm_link) {
+ &i915->ggtt.base.inactive_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT;
if (vma->iomap && i915_vma_unbind(vma) == 0)
freed_pages += count;
}
out:
- shrinker_unlock(dev_priv, unlock);
+ shrinker_unlock(i915, unlock);
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
}
/**
- * i915_gem_shrinker_init - Initialize i915 shrinker
- * @dev_priv: i915 device
+ * i915_gem_shrinker_register - Register the i915 shrinker
+ * @i915: i915 device
*
* This function registers and sets up the i915 shrinker and OOM handler.
*/
-void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
+void i915_gem_shrinker_register(struct drm_i915_private *i915)
{
- dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
- dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
- dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
- dev_priv->mm.shrinker.batch = 4096;
- WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
+ i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
+ i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
+ i915->mm.shrinker.seeks = DEFAULT_SEEKS;
+ i915->mm.shrinker.batch = 4096;
+ WARN_ON(register_shrinker(&i915->mm.shrinker));
- dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
- WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));
+ i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
+ WARN_ON(register_oom_notifier(&i915->mm.oom_notifier));
- dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
- WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
+ i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
+ WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}
/**
- * i915_gem_shrinker_cleanup - Clean up i915 shrinker
- * @dev_priv: i915 device
+ * i915_gem_shrinker_unregister - Unregisters the i915 shrinker
+ * @i915: i915 device
*
* This function unregisters the i915 shrinker and OOM handler.
*/
-void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
+void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
{
- WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
- WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
- unregister_shrinker(&dev_priv->mm.shrinker);
+ WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
+ WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
+ unregister_shrinker(&i915->mm.shrinker);
}
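
The init/cleanup -> register/unregister rename matches what these functions actually do: they only hook pre-filled callbacks into the core shrinker and notifier lists. A stripped-down sketch of the same registration pattern for a generic driver (my_count/my_scan are placeholder callbacks, not driver code):

#include <linux/shrinker.h>

static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
{
	return 0;		/* count of freeable objects */
}

static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
{
	return SHRINK_STOP;	/* pages freed, or SHRINK_STOP if stuck */
}

static struct shrinker my_shrinker = {
	.count_objects = my_count,
	.scan_objects = my_scan,
	.seeks = DEFAULT_SEEKS,
};

/* register: WARN_ON(register_shrinker(&my_shrinker)); */
/* unregister: unregister_shrinker(&my_shrinker); */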
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 03e7abc7e043..d3f222fa6356 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -30,9 +30,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#define KB(x) ((x) * 1024)
-#define MB(x) (KB(x) * 1024)
-
/*
* The BIOS typically reserves some of the system's memory for the exclusive
* use of the integrated graphics. This memory is no longer available for
@@ -79,129 +76,26 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->mm.stolen_lock);
}
-static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
+static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
+ struct resource *dsm)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct resource *r;
- dma_addr_t base;
-
- /* Almost universally we can find the Graphics Base of Stolen Memory
- * at register BSM (0x5c) in the igfx configuration space. On a few
- * (desktop) machines this is also mirrored in the bridge device at
- * different locations, or in the MCHBAR.
- *
- * On 865 we just check the TOUD register.
- *
- * On 830/845/85x the stolen memory base isn't available in any
- * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
- *
- */
- base = 0;
- if (INTEL_GEN(dev_priv) >= 3) {
- u32 bsm;
-
- pci_read_config_dword(pdev, INTEL_BSM, &bsm);
-
- base = bsm & INTEL_BSM_MASK;
- } else if (IS_I865G(dev_priv)) {
- u32 tseg_size = 0;
- u16 toud = 0;
- u8 tmp;
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I845_ESMRAMC, &tmp);
-
- if (tmp & TSEG_ENABLE) {
- switch (tmp & I845_TSEG_SIZE_MASK) {
- case I845_TSEG_SIZE_512K:
- tseg_size = KB(512);
- break;
- case I845_TSEG_SIZE_1M:
- tseg_size = MB(1);
- break;
- }
- }
-
- pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
- I865_TOUD, &toud);
-
- base = (toud << 16) + tseg_size;
- } else if (IS_I85X(dev_priv)) {
- u32 tseg_size = 0;
- u32 tom;
- u8 tmp;
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I85X_ESMRAMC, &tmp);
-
- if (tmp & TSEG_ENABLE)
- tseg_size = MB(1);
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
- I85X_DRB3, &tmp);
- tom = tmp * MB(32);
-
- base = tom - tseg_size - ggtt->stolen_size;
- } else if (IS_I845G(dev_priv)) {
- u32 tseg_size = 0;
- u32 tom;
- u8 tmp;
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I845_ESMRAMC, &tmp);
-
- if (tmp & TSEG_ENABLE) {
- switch (tmp & I845_TSEG_SIZE_MASK) {
- case I845_TSEG_SIZE_512K:
- tseg_size = KB(512);
- break;
- case I845_TSEG_SIZE_1M:
- tseg_size = MB(1);
- break;
- }
- }
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I830_DRB3, &tmp);
- tom = tmp * MB(32);
-
- base = tom - tseg_size - ggtt->stolen_size;
- } else if (IS_I830(dev_priv)) {
- u32 tseg_size = 0;
- u32 tom;
- u8 tmp;
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I830_ESMRAMC, &tmp);
+ if (dsm->start == 0 || dsm->end <= dsm->start)
+ return -EINVAL;
- if (tmp & TSEG_ENABLE) {
- if (tmp & I830_TSEG_SIZE_1M)
- tseg_size = MB(1);
- else
- tseg_size = KB(512);
- }
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I830_DRB3, &tmp);
- tom = tmp * MB(32);
-
- base = tom - tseg_size - ggtt->stolen_size;
- }
-
- if (base == 0 || add_overflows(base, ggtt->stolen_size))
- return 0;
+ /*
+ * TODO: We have yet to encounter the case where the GTT wasn't at the
+ * end of stolen. With that assumption we could simplify this.
+ */
- /* make sure we don't clobber the GTT if it's within stolen memory */
+ /* Make sure we don't clobber the GTT if it's within stolen memory */
if (INTEL_GEN(dev_priv) <= 4 &&
!IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
- struct {
- dma_addr_t start, end;
- } stolen[2] = {
- { .start = base, .end = base + ggtt->stolen_size, },
- { .start = base, .end = base + ggtt->stolen_size, },
- };
- u64 ggtt_start, ggtt_end;
+ struct resource stolen[2] = {*dsm, *dsm};
+ struct resource ggtt_res;
+ resource_size_t ggtt_start;
ggtt_start = I915_READ(PGTBL_CTL);
if (IS_GEN4(dev_priv))
@@ -209,70 +103,64 @@ static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
ggtt_start &= PGTBL_ADDRESS_LO_MASK;
- ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;
-
- if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
- stolen[0].end = ggtt_start;
- if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
- stolen[1].start = ggtt_end;
-
- /* pick the larger of the two chunks */
- if (stolen[0].end - stolen[0].start >
- stolen[1].end - stolen[1].start) {
- base = stolen[0].start;
- ggtt->stolen_size = stolen[0].end - stolen[0].start;
- } else {
- base = stolen[1].start;
- ggtt->stolen_size = stolen[1].end - stolen[1].start;
- }
+
+ ggtt_res =
+ (struct resource) DEFINE_RES_MEM(ggtt_start,
+ ggtt_total_entries(ggtt) * 4);
+
+ if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
+ stolen[0].end = ggtt_res.start;
+ if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
+ stolen[1].start = ggtt_res.end;
+
+ /* Pick the larger of the two chunks */
+ if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
+ *dsm = stolen[0];
+ else
+ *dsm = stolen[1];
if (stolen[0].start != stolen[1].start ||
stolen[0].end != stolen[1].end) {
- dma_addr_t end = base + ggtt->stolen_size - 1;
-
- DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
- (unsigned long long)ggtt_start,
- (unsigned long long)ggtt_end - 1);
- DRM_DEBUG_KMS("Stolen memory adjusted to %pad - %pad\n",
- &base, &end);
+ DRM_DEBUG_KMS("GTT within stolen memory at %pR\n", &ggtt_res);
+ DRM_DEBUG_KMS("Stolen memory adjusted to %pR\n", dsm);
}
}
-
- /* Verify that nothing else uses this physical address. Stolen
+ /*
+ * Verify that nothing else uses this physical address. Stolen
* memory should be reserved by the BIOS and hidden from the
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
- r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
+ r = devm_request_mem_region(dev_priv->drm.dev, dsm->start,
+ resource_size(dsm),
"Graphics Stolen Memory");
if (r == NULL) {
/*
* One more attempt but this time requesting region from
- * base + 1, as we have seen that this resolves the region
+ * start + 1, as we have seen that this resolves the region
* conflict with the PCI Bus.
* This is a BIOS w/a: Some BIOS wrap stolen in the root
* PCI bus, but have an off-by-one error. Hence retry the
* reservation starting from 1 instead of 0.
 * There are also BIOSes with an off-by-one on the other end.
*/
- r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
- ggtt->stolen_size - 2,
+ r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1,
+ resource_size(dsm) - 2,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
* range. Apparently this works.
*/
if (r == NULL && !IS_GEN3(dev_priv)) {
- dma_addr_t end = base + ggtt->stolen_size;
+ DRM_ERROR("conflict detected with stolen region: %pR\n",
+ dsm);
- DRM_ERROR("conflict detected with stolen region: [%pad - %pad]\n",
- &base, &end);
- base = 0;
+ return -EBUSY;
}
}
- return base;
+ return 0;
}
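
The struct resource conversion is what buys the simplification above: DEFINE_RES_MEM() describes a memory range with an inclusive end, resource_size() is end - start + 1, resource_contains() replaces the open-coded bounds checks, and %pR pretty-prints the range. A self-contained sketch with made-up addresses:

#include <linux/ioport.h>
#include <linux/sizes.h>

static void resource_demo(void)
{
	struct resource dsm = DEFINE_RES_MEM(0x80000000, SZ_64M);
	struct resource rsvd = DEFINE_RES_MEM(0x83f00000, SZ_1M);
	resource_size_t size = resource_size(&dsm);	/* end is inclusive */

	pr_info("stolen %pR (%pa bytes)\n", &dsm, &size);

	if (!resource_contains(&dsm, &rsvd))
		pr_err("reserved %pR escapes stolen %pR\n", &rsvd, &dsm);
}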
void i915_gem_cleanup_stolen(struct drm_device *dev)
@@ -286,13 +174,24 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
}
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
- dma_addr_t *base, u32 *size)
+ resource_size_t *base, resource_size_t *size)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
- dma_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
+ resource_size_t stolen_top = dev_priv->dsm.end + 1;
+
+ if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) {
+ *base = 0;
+ *size = 0;
+ return;
+ }
+
+ /*
+ * Whether ILK really reuses the ELK register for this is unclear.
+ * Let's see if we catch anyone with this supposedly enabled on ILK.
+ */
+ WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val);
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
@@ -309,10 +208,16 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
- dma_addr_t *base, u32 *size)
+ resource_size_t *base, resource_size_t *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
+ *base = 0;
+ *size = 0;
+ return;
+ }
+
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
@@ -335,10 +240,16 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
- dma_addr_t *base, u32 *size)
+ resource_size_t *base, resource_size_t *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
+ *base = 0;
+ *size = 0;
+ return;
+ }
+
*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
@@ -355,10 +266,16 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
- dma_addr_t *base, u32 *size)
+ resource_size_t *base, resource_size_t *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
+ *base = 0;
+ *size = 0;
+ return;
+ }
+
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
@@ -381,13 +298,18 @@ static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
- dma_addr_t *base, u32 *size)
+ resource_size_t *base, resource_size_t *size)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
- dma_addr_t stolen_top;
+ resource_size_t stolen_top;
- stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
+ if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
+ *base = 0;
+ *size = 0;
+ return;
+ }
+
+ stolen_top = dev_priv->dsm.end + 1;
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
@@ -403,10 +325,9 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- dma_addr_t reserved_base, stolen_top;
- u32 reserved_total, reserved_size;
- u32 stolen_usable_start;
+ resource_size_t reserved_base, stolen_top;
+ resource_size_t reserved_total, reserved_size;
+ resource_size_t stolen_usable_start;
mutex_init(&dev_priv->mm.stolen_lock);
@@ -420,14 +341,18 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
return 0;
}
- if (ggtt->stolen_size == 0)
+ if (resource_size(&intel_graphics_stolen_res) == 0)
return 0;
- dev_priv->mm.stolen_base = i915_stolen_to_dma(dev_priv);
- if (dev_priv->mm.stolen_base == 0)
+ dev_priv->dsm = intel_graphics_stolen_res;
+
+ if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
return 0;
- stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
+ GEM_BUG_ON(dev_priv->dsm.start == 0);
+ GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);
+
+ stolen_top = dev_priv->dsm.end + 1;
reserved_base = 0;
reserved_size = 0;
@@ -436,14 +361,12 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
case 3:
break;
case 4:
- if (IS_G4X(dev_priv))
- g4x_get_stolen_reserved(dev_priv,
- &reserved_base, &reserved_size);
- break;
+ if (!IS_G4X(dev_priv))
+ break;
+ /* fall through */
case 5:
- /* Assume the gen6 maximum for the older platforms. */
- reserved_size = 1024 * 1024;
- reserved_base = stolen_top - reserved_size;
+ g4x_get_stolen_reserved(dev_priv,
+ &reserved_base, &reserved_size);
break;
case 6:
gen6_get_stolen_reserved(dev_priv,
@@ -470,50 +393,47 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
reserved_base = stolen_top;
}
- if (reserved_base < dev_priv->mm.stolen_base ||
- reserved_base + reserved_size > stolen_top) {
- dma_addr_t reserved_top = reserved_base + reserved_size;
- DRM_DEBUG_KMS("Stolen reserved area [%pad - %pad] outside stolen memory [%pad - %pad]\n",
- &reserved_base, &reserved_top,
- &dev_priv->mm.stolen_base, &stolen_top);
+ dev_priv->dsm_reserved =
+ (struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);
+
+ if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
+ DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
+ &dev_priv->dsm_reserved, &dev_priv->dsm);
return 0;
}
- ggtt->stolen_reserved_base = reserved_base;
- ggtt->stolen_reserved_size = reserved_size;
-
/* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
- DRM_DEBUG_KMS("Memory reserved for graphics device: %uK, usable: %uK\n",
- ggtt->stolen_size >> 10,
- (ggtt->stolen_size - reserved_total) >> 10);
+ DRM_DEBUG_KMS("Memory reserved for graphics device: %lluK, usable: %lluK\n",
+ (u64)resource_size(&dev_priv->dsm) >> 10,
+ ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);
stolen_usable_start = 0;
/* WaSkipStolenMemoryFirstPage:bdw+ */
if (INTEL_GEN(dev_priv) >= 8)
stolen_usable_start = 4096;
- ggtt->stolen_usable_size =
- ggtt->stolen_size - reserved_total - stolen_usable_start;
+ dev_priv->stolen_usable_size =
+ resource_size(&dev_priv->dsm) - reserved_total - stolen_usable_start;
/* Basic memrange allocator for stolen space. */
drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
- ggtt->stolen_usable_size);
+ dev_priv->stolen_usable_size);
return 0;
}
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
- u32 offset, u32 size)
+ resource_size_t offset, resource_size_t size)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct sg_table *st;
struct scatterlist *sg;
- GEM_BUG_ON(range_overflows(offset, size, dev_priv->ggtt.stolen_size));
+ GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -533,7 +453,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
sg->offset = 0;
sg->length = size;
- sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
+ sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
sg_dma_len(sg) = size;
return st;
@@ -611,7 +531,8 @@ cleanup:
}
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size)
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size)
{
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
@@ -644,9 +565,9 @@ i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size)
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
- u32 stolen_offset,
- u32 gtt_offset,
- u32 size)
+ resource_size_t stolen_offset,
+ resource_size_t gtt_offset,
+ resource_size_t size)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj;
@@ -659,8 +580,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
- stolen_offset, gtt_offset, size);
+ DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
+ &stolen_offset, &gtt_offset, &size);
/* KISS and expect everything to be page-aligned */
if (WARN_ON(size == 0) ||
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 1294cf695df0..d9dc9df523b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -205,10 +205,7 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
if (tiling_mode == I915_TILING_NONE)
return 0;
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!i915_vma_is_ggtt(vma))
- break;
-
+ for_each_ggtt_vma(vma, obj) {
if (i915_vma_fence_prepare(vma, tiling_mode, stride))
continue;
@@ -285,10 +282,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
}
mutex_unlock(&obj->mm.lock);
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!i915_vma_is_ggtt(vma))
- break;
-
+ for_each_ggtt_vma(vma, obj) {
vma->fence_size =
i915_gem_fence_size(i915, vma->size, tiling, stride);
vma->fence_alignment =
@@ -345,6 +339,15 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
+ /*
+ * The tiling mode of a proxy object is handled by its generator, and
+ * is not allowed to be changed by userspace.
+ */
+ if (i915_gem_object_is_proxy(obj)) {
+ err = -ENXIO;
+ goto err;
+ }
+
if (!i915_tiling_ok(obj, args->tiling_mode, args->stride)) {
err = -EINVAL;
goto err;
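
The for_each_ggtt_vma() conversion leans on the invariant that GGTT vmas sit at the front of obj->vma_list, which is why the old open-coded loop could break at the first non-GGTT vma. A simplified sketch of how such a helper can be expressed (the real macro lives in i915_vma.h; this form is assumed):

/* Run the loop body unless cond is true, then stop iterating. */
#define for_each_until(cond) if (cond) break; else

/* Walk only the GGTT vmas, which are kept first on obj->vma_list. */
#define for_each_ggtt_vma(V, OBJ) \
	list_for_each_entry(V, &(OBJ)->vma_list, obj_link) \
		for_each_until(!i915_vma_is_ggtt(V))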
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
index c597ce277a04..e9fd87604067 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
@@ -33,11 +33,8 @@ static void __intel_timeline_init(struct intel_timeline *tl,
{
tl->fence_context = context;
tl->common = parent;
-#ifdef CONFIG_DEBUG_SPINLOCK
- __raw_spin_lock_init(&tl->lock.rlock, lockname, lockclass);
-#else
spin_lock_init(&tl->lock);
-#endif
+ lockdep_set_class_and_name(&tl->lock, lockclass, lockname);
init_request_active(&tl->last_request, NULL);
INIT_LIST_HEAD(&tl->requests);
i915_syncmap_init(&tl->sync);
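
Replacing the CONFIG_DEBUG_SPINLOCK #ifdef with lockdep_set_class_and_name() works because that call compiles to nothing when lockdep is disabled, so no conditional compilation is needed. The general pattern, sketched for an arbitrary structure:

#include <linux/spinlock.h>

struct my_timeline {
	spinlock_t lock;
};

static struct lock_class_key my_timeline_lock_class;

static void my_timeline_init(struct my_timeline *tl, const char *name)
{
	spin_lock_init(&tl->lock);
	/* Give the lock its own lockdep class and name; no-op without lockdep. */
	lockdep_set_class_and_name(&tl->lock, &my_timeline_lock_class, name);
}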
@@ -107,8 +104,8 @@ int i915_gem_timeline_init__global(struct drm_i915_private *i915)
}
/**
- * i915_gem_timelines_mark_idle -- called when the driver idles
- * @i915 - the drm_i915_private device
+ * i915_gem_timelines_park - called when the driver idles
+ * @i915: the drm_i915_private device
*
* When the driver is completely idle, we know that all of our sync points
* have been signaled and our tracking is then entirely redundant. Any request
@@ -116,7 +113,7 @@ int i915_gem_timeline_init__global(struct drm_i915_private *i915)
* the fence is signaled and therefore we will not even look them up in the
* sync point map.
*/
-void i915_gem_timelines_mark_idle(struct drm_i915_private *i915)
+void i915_gem_timelines_park(struct drm_i915_private *i915)
{
struct i915_gem_timeline *timeline;
int i;
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
index bfb5eb94c64d..b5a22400a01f 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -93,7 +93,7 @@ int i915_gem_timeline_init(struct drm_i915_private *i915,
struct i915_gem_timeline *tl,
const char *name);
int i915_gem_timeline_init__global(struct drm_i915_private *i915);
-void i915_gem_timelines_mark_idle(struct drm_i915_private *i915);
+void i915_gem_timelines_park(struct drm_i915_private *i915);
void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
static inline int __intel_timeline_sync_set(struct intel_timeline *tl,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 653fb69e7ecb..944059322daa 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -30,6 +30,8 @@
#include <generated/utsrelease.h>
#include <linux/stop_machine.h>
#include <linux/zlib.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
static const char *engine_str(int engine)
@@ -175,6 +177,21 @@ static void i915_error_puts(struct drm_i915_error_state_buf *e,
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
+static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
+{
+ i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
+}
+
+static inline struct drm_printer
+i915_error_printer(struct drm_i915_error_state_buf *e)
+{
+ struct drm_printer p = {
+ .printfn = __i915_printfn_error,
+ .arg = e,
+ };
+ return p;
+}
+
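
struct drm_printer decouples format strings from the output sink: drm_printf() packs its varargs into a struct va_format and hands it to the printer's printfn, so the same dump helpers can target dmesg, a seq_file or, as here, the error-state buffer. A sketch of a trivial custom printer (the prefix handling is an assumption for illustration, not part of the driver):

#include <drm/drm_print.h>

static void my_printfn(struct drm_printer *p, struct va_format *vaf)
{
	printk("%s %pV", (const char *)p->arg, vaf);
}

static struct drm_printer my_printer(const char *prefix)
{
	struct drm_printer p = {
		.printfn = my_printfn,
		.arg = (void *)prefix,
	};
	return p;
}

/* struct drm_printer p = my_printer("i915:"); drm_printf(&p, "x=%d\n", x); */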
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
struct compress {
@@ -399,6 +416,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
int n;
err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
+ err_printf(m, " IDLE?: %s\n", yesno(ee->idle));
err_printf(m, " START: 0x%08x\n", ee->start);
err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
@@ -547,34 +565,17 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
const struct intel_device_info *info)
{
-#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x))
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
-#undef PRINT_FLAG
-}
+ struct drm_printer p = i915_error_printer(m);
-static __always_inline void err_print_param(struct drm_i915_error_state_buf *m,
- const char *name,
- const char *type,
- const void *x)
-{
- if (!__builtin_strcmp(type, "bool"))
- err_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
- else if (!__builtin_strcmp(type, "int"))
- err_printf(m, "i915.%s=%d\n", name, *(const int *)x);
- else if (!__builtin_strcmp(type, "unsigned int"))
- err_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
- else if (!__builtin_strcmp(type, "char *"))
- err_printf(m, "i915.%s=%s\n", name, *(const char **)x);
- else
- BUILD_BUG();
+ intel_device_info_dump_flags(info, &p);
}
static void err_print_params(struct drm_i915_error_state_buf *m,
- const struct i915_params *p)
+ const struct i915_params *params)
{
-#define PRINT(T, x, ...) err_print_param(m, #x, #T, &p->x);
- I915_PARAMS_FOR_EACH(PRINT);
-#undef PRINT
+ struct drm_printer p = i915_error_printer(m);
+
+ i915_params_dump(params, &p);
}
static void err_print_pciid(struct drm_i915_error_state_buf *m,
@@ -589,6 +590,21 @@ static void err_print_pciid(struct drm_i915_error_state_buf *m,
pdev->subsystem_device);
}
+static void err_print_uc(struct drm_i915_error_state_buf *m,
+ const struct i915_error_uc *error_uc)
+{
+ struct drm_printer p = i915_error_printer(m);
+ const struct i915_gpu_state *error =
+ container_of(error_uc, typeof(*error), uc);
+
+ if (!error->device_info.has_guc)
+ return;
+
+ intel_uc_fw_dump(&error_uc->guc_fw, &p);
+ intel_uc_fw_dump(&error_uc->huc_fw, &p);
+ print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
+}
+
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_gpu_state *error)
{
@@ -759,11 +775,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
print_error_obj(m, dev_priv->engine[i],
"WA batchbuffer", ee->wa_batchbuffer);
- }
-
- print_error_obj(m, NULL, "Semaphores", error->semaphore);
- print_error_obj(m, NULL, "GuC log buffer", error->guc_log);
+ print_error_obj(m, dev_priv->engine[i],
+ "NULL context", ee->default_state);
+ }
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
@@ -773,6 +788,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_print_capabilities(m, &error->device_info);
err_print_params(m, &error->params);
+ err_print_uc(m, &error->uc);
if (m->bytes == 0 && m->err)
return m->err;
@@ -831,6 +847,22 @@ static __always_inline void free_param(const char *type, void *x)
kfree(*(void **)x);
}
+static void cleanup_params(struct i915_gpu_state *error)
+{
+#define FREE(T, x, ...) free_param(#T, &error->params.x);
+ I915_PARAMS_FOR_EACH(FREE);
+#undef FREE
+}
+
+static void cleanup_uc_state(struct i915_gpu_state *error)
+{
+ struct i915_error_uc *error_uc = &error->uc;
+
+ kfree(error_uc->guc_fw.path);
+ kfree(error_uc->huc_fw.path);
+ i915_error_object_free(error_uc->guc_log);
+}
+
void __i915_gpu_state_free(struct kref *error_ref)
{
struct i915_gpu_state *error =
@@ -856,9 +888,6 @@ void __i915_gpu_state_free(struct kref *error_ref)
kfree(ee->waiters);
}
- i915_error_object_free(error->semaphore);
- i915_error_object_free(error->guc_log);
-
for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
kfree(error->active_bo[i]);
kfree(error->pinned_bo);
@@ -866,9 +895,8 @@ void __i915_gpu_state_free(struct kref *error_ref)
kfree(error->overlay);
kfree(error->display);
-#define FREE(T, x, ...) free_param(#T, &error->params.x);
- I915_PARAMS_FOR_EACH(FREE);
-#undef FREE
+ cleanup_params(error);
+ cleanup_uc_state(error);
kfree(error);
}
@@ -912,7 +940,7 @@ i915_error_object_create(struct drm_i915_private *i915,
ggtt->base.insert_page(&ggtt->base, dma, slot,
I915_CACHE_NONE, 0);
- s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
+ s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
ret = compress_page(&compress, (void __force *)s, dst);
io_mapping_unmap_atomic(s);
@@ -1071,34 +1099,6 @@ gen8_engine_sync_index(struct intel_engine_cs *engine,
return idx;
}
-static void gen8_record_semaphore_state(struct i915_gpu_state *error,
- struct intel_engine_cs *engine,
- struct drm_i915_error_engine *ee)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_engine_cs *to;
- enum intel_engine_id id;
-
- if (!error->semaphore)
- return;
-
- for_each_engine(to, dev_priv, id) {
- int idx;
- u16 signal_offset;
- u32 *tmp;
-
- if (engine == to)
- continue;
-
- signal_offset =
- (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
- tmp = error->semaphore->pages[0];
- idx = gen8_engine_sync_index(engine, to);
-
- ee->semaphore_mboxes[idx] = tmp[signal_offset];
- }
-}
-
static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
@@ -1172,11 +1172,12 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (INTEL_GEN(dev_priv) >= 6) {
ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
- ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
- if (INTEL_GEN(dev_priv) >= 8)
- gen8_record_semaphore_state(error, engine, ee);
- else
+ if (INTEL_GEN(dev_priv) >= 8) {
+ ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
+ } else {
gen6_record_semaphore_state(engine, ee);
+ ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
+ }
}
if (INTEL_GEN(dev_priv) >= 4) {
@@ -1239,6 +1240,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->hws = I915_READ(mmio);
}
+ ee->idle = intel_engine_is_idle(engine);
ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
ee->hangcheck_action = engine->hangcheck.action;
ee->hangcheck_stalled = engine->hangcheck.stalled;
@@ -1400,15 +1402,30 @@ static void request_record_user_bo(struct drm_i915_gem_request *request,
ee->user_bo_count = count;
}
+static struct drm_i915_error_object *
+capture_object(struct drm_i915_private *dev_priv,
+ struct drm_i915_gem_object *obj)
+{
+ if (obj && i915_gem_object_has_pages(obj)) {
+ struct i915_vma fake = {
+ .node = { .start = U64_MAX, .size = obj->base.size },
+ .size = obj->base.size,
+ .pages = obj->mm.pages,
+ .obj = obj,
+ };
+
+ return i915_error_object_create(dev_priv, &fake);
+ } else {
+ return NULL;
+ }
+}
+
static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct i915_gpu_state *error)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int i;
- error->semaphore =
- i915_error_object_create(dev_priv, dev_priv->semaphore);
-
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct intel_engine_cs *engine = dev_priv->engine[i];
struct drm_i915_error_engine *ee = &error->engine[i];
@@ -1474,6 +1491,9 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
ee->wa_ctx =
i915_error_object_create(dev_priv, engine->wa_ctx.vma);
+
+ ee->default_state =
+ capture_object(dev_priv, engine->default_state);
}
}
@@ -1559,15 +1579,25 @@ static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
error->pinned_bo = bo;
}
-static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error)
+static void capture_uc_state(struct i915_gpu_state *error)
{
- /* Capturing log buf contents won't be useful if logging was disabled */
- if (!dev_priv->guc.log.vma || (i915_modparams.guc_log_level < 0))
+ struct drm_i915_private *i915 = error->i915;
+ struct i915_error_uc *error_uc = &error->uc;
+
+ /* Capturing uC state won't be useful if there is no GuC */
+ if (!error->device_info.has_guc)
return;
- error->guc_log = i915_error_object_create(dev_priv,
- dev_priv->guc.log.vma);
+ error_uc->guc_fw = i915->guc.fw;
+ error_uc->huc_fw = i915->huc.fw;
+
+ /* Non-default firmware paths will be specified by the modparam.
+ * As modparams are generally accessible from userspace, make
+ * explicit copies of the firmware paths.
+ */
+ error_uc->guc_fw.path = kstrdup(i915->guc.fw.path, GFP_ATOMIC);
+ error_uc->huc_fw.path = kstrdup(i915->huc.fw.path, GFP_ATOMIC);
+ error_uc->guc_log = i915_error_object_create(i915, i915->guc.log.vma);
}
/* Capture all registers which don't fit into another category. */
@@ -1695,6 +1725,14 @@ static __always_inline void dup_param(const char *type, void *x)
*(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
}
+static void capture_params(struct i915_gpu_state *error)
+{
+ error->params = i915_modparams;
+#define DUP(T, x, ...) dup_param(#T, &error->params.x);
+ I915_PARAMS_FOR_EACH(DUP);
+#undef DUP
+}
+
static int capture(void *data)
{
struct i915_gpu_state *error = data;
@@ -1705,10 +1743,8 @@ static int capture(void *data)
ktime_to_timeval(ktime_sub(ktime_get(),
error->i915->gt.last_init_time));
- error->params = i915_modparams;
-#define DUP(T, x, ...) dup_param(#T, &error->params.x);
- I915_PARAMS_FOR_EACH(DUP);
-#undef DUP
+ capture_params(error);
+ capture_uc_state(error);
i915_capture_gen_state(error->i915, error);
i915_capture_reg_state(error->i915, error);
@@ -1716,7 +1752,6 @@ static int capture(void *data)
i915_gem_record_rings(error->i915, error);
i915_capture_active_buffers(error->i915, error);
i915_capture_pinned_buffers(error->i915, error);
- i915_gem_capture_guc_log_buffer(error->i915, error);
error->overlay = intel_overlay_capture_error_state(error->i915);
error->display = intel_display_capture_error_state(error->i915);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f8205841868b..3517c6548e2c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1068,6 +1068,9 @@ static void notify_ring(struct intel_engine_cs *engine)
struct drm_i915_gem_request *rq = NULL;
struct intel_wait *wait;
+ if (!engine->breadcrumbs.irq_armed)
+ return;
+
atomic_inc(&engine->irq_count);
set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
@@ -1101,7 +1104,8 @@ static void notify_ring(struct intel_engine_cs *engine)
if (wakeup)
wake_up_process(wait->tsk);
} else {
- __intel_engine_disarm_breadcrumbs(engine);
+ if (engine->breadcrumbs.irq_armed)
+ __intel_engine_disarm_breadcrumbs(engine);
}
spin_unlock(&engine->breadcrumbs.irq_lock);
@@ -1396,11 +1400,11 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
notify_ring(engine);
- tasklet |= i915_modparams.enable_guc_submission;
+ tasklet |= USES_GUC_SUBMISSION(engine->i915);
}
if (tasklet)
- tasklet_hi_schedule(&execlists->irq_tasklet);
+ tasklet_hi_schedule(&execlists->tasklet);
}
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
@@ -3064,7 +3068,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
i9xx_pipestat_irq_reset(dev_priv);
GEN3_IRQ_RESET(VLV_);
- dev_priv->irq_mask = ~0;
+ dev_priv->irq_mask = ~0u;
}
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -3089,7 +3093,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT;
- WARN_ON(dev_priv->irq_mask != ~0);
+ WARN_ON(dev_priv->irq_mask != ~0u);
dev_priv->irq_mask = ~enable_mask;
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index 49a079494b68..79f8ec756362 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -96,6 +96,11 @@ bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
{
- if (static_cpu_has(X86_FEATURE_XMM4_1))
+ /*
+ * Some hypervisors (e.g. KVM) don't support emulation of VEX-prefixed
+ * instructions. So don't enable movntdqa when running in a hypervisor guest.
+ */
+ if (static_cpu_has(X86_FEATURE_XMM4_1) &&
+ !boot_cpu_has(X86_FEATURE_HYPERVISOR))
static_branch_enable(&has_movntdqa);
}
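
has_movntdqa is a static key, so the fast WC-read path is patched in or out at boot and costs nothing when unavailable. A minimal sketch of the same gate under assumed names:

#include <linux/jump_label.h>
#include <asm/cpufeature.h>

static DEFINE_STATIC_KEY_FALSE(has_fast_path);

void my_init_early(void)
{
	if (static_cpu_has(X86_FEATURE_XMM4_1) &&
	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_enable(&has_fast_path);
}

bool my_copy(void *dst, const void *src, unsigned long len)
{
	if (static_branch_likely(&has_fast_path)) {
		/* SSE4.1 movntdqa copy would go here */
		return true;
	}
	return false;	/* caller falls back to plain memcpy() */
}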
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/i915_oa_bdw.c
index abdf4d0abcce..4abd2e8b5083 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.c
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.c
@@ -85,9 +85,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"d6de6f55-e526-4f79-a6a6-d7315c09044e",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
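
The strncpy() -> strlcpy() conversions here and below fix a subtle termination bug: the uuid field is char[UUID_STRING_LEN + 1], and with a bound of UUID_STRING_LEN strncpy() copies exactly the 36 UUID characters without ever writing a NUL, so the string is terminated only if the buffer happened to be pre-zeroed. strlcpy() always terminates within the given size. (The newly added cflgt3/cnl configs further down were generated before this conversion and still carry the old idiom.) Sketch:

#include <linux/string.h>
#include <linux/uuid.h>	/* UUID_STRING_LEN == 36 */

static void uuid_copy_demo(void)
{
	char uuid[UUID_STRING_LEN + 1];

	/* May leave uuid unterminated: 36 chars copied, no byte spent on NUL. */
	strncpy(uuid, "d6de6f55-e526-4f79-a6a6-d7315c09044e", UUID_STRING_LEN);

	/* Always terminated: at most sizeof(uuid) - 1 chars plus a NUL. */
	strlcpy(uuid, "d6de6f55-e526-4f79-a6a6-d7315c09044e", sizeof(uuid));
}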
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/i915_oa_bxt.c
index b69b900de0fe..cb6f304ec16a 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.c
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.c
@@ -83,9 +83,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"5ee72f5c-092f-421e-8b70-225f7c3e9612",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
index 368c87d7ee9a..8641ae30e343 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"74fb4902-d3d3-4237-9e90-cbdc68d0a446",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
new file mode 100644
index 000000000000..42ff06fe54a3
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
@@ -0,0 +1,109 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_cflgt3.h"
+
+static const struct i915_oa_reg b_counter_config_test_oa[] = {
+ { _MMIO(0x2740), 0x00000000 },
+ { _MMIO(0x2744), 0x00800000 },
+ { _MMIO(0x2714), 0xf0800000 },
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x2724), 0xf0800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2770), 0x00000004 },
+ { _MMIO(0x2774), 0x00000000 },
+ { _MMIO(0x2778), 0x00000003 },
+ { _MMIO(0x277c), 0x00000000 },
+ { _MMIO(0x2780), 0x00000007 },
+ { _MMIO(0x2784), 0x00000000 },
+ { _MMIO(0x2788), 0x00100002 },
+ { _MMIO(0x278c), 0x0000fff7 },
+ { _MMIO(0x2790), 0x00100002 },
+ { _MMIO(0x2794), 0x0000ffcf },
+ { _MMIO(0x2798), 0x00100082 },
+ { _MMIO(0x279c), 0x0000ffef },
+ { _MMIO(0x27a0), 0x001000c2 },
+ { _MMIO(0x27a4), 0x0000ffe7 },
+ { _MMIO(0x27a8), 0x00100001 },
+ { _MMIO(0x27ac), 0x0000ffe7 },
+};
+
+static const struct i915_oa_reg flex_eu_config_test_oa[] = {
+};
+
+static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x9840), 0x00000080 },
+ { _MMIO(0x9888), 0x11810000 },
+ { _MMIO(0x9888), 0x07810013 },
+ { _MMIO(0x9888), 0x1f810000 },
+ { _MMIO(0x9888), 0x1d810000 },
+ { _MMIO(0x9888), 0x1b930040 },
+ { _MMIO(0x9888), 0x07e54000 },
+ { _MMIO(0x9888), 0x1f908000 },
+ { _MMIO(0x9888), 0x11900000 },
+ { _MMIO(0x9888), 0x37900000 },
+ { _MMIO(0x9888), 0x53900000 },
+ { _MMIO(0x9888), 0x45900000 },
+ { _MMIO(0x9888), 0x33900000 },
+};
+
+static ssize_t
+show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "1\n");
+}
+
+void
+i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
+{
+ strncpy(dev_priv->perf.oa.test_config.uuid,
+ "577e8e2c-3fa0-4875-8743-3538d585e3b0",
+ UUID_STRING_LEN);
+ dev_priv->perf.oa.test_config.id = 1;
+
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+}
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
new file mode 100644
index 000000000000..c13b5aac01b9
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
@@ -0,0 +1,34 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_OA_CFLGT3_H__
+#define __I915_OA_CFLGT3_H__
+
+extern void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/i915_oa_chv.c
index 322a3f94cd16..556febb2c3c8 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.c
+++ b/drivers/gpu/drm/i915/i915_oa_chv.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"4a534b07-cba3-414d-8d60-874830e883aa",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c
new file mode 100644
index 000000000000..ff0ac3627cc4
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.c
@@ -0,0 +1,121 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_cnl.h"
+
+static const struct i915_oa_reg b_counter_config_test_oa[] = {
+ { _MMIO(0x2740), 0x00000000 },
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x2714), 0xf0800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2724), 0xf0800000 },
+ { _MMIO(0x2770), 0x00000004 },
+ { _MMIO(0x2774), 0x0000ffff },
+ { _MMIO(0x2778), 0x00000003 },
+ { _MMIO(0x277c), 0x0000ffff },
+ { _MMIO(0x2780), 0x00000007 },
+ { _MMIO(0x2784), 0x0000ffff },
+ { _MMIO(0x2788), 0x00100002 },
+ { _MMIO(0x278c), 0x0000fff7 },
+ { _MMIO(0x2790), 0x00100002 },
+ { _MMIO(0x2794), 0x0000ffcf },
+ { _MMIO(0x2798), 0x00100082 },
+ { _MMIO(0x279c), 0x0000ffef },
+ { _MMIO(0x27a0), 0x001000c2 },
+ { _MMIO(0x27a4), 0x0000ffe7 },
+ { _MMIO(0x27a8), 0x00100001 },
+ { _MMIO(0x27ac), 0x0000ffe7 },
+};
+
+static const struct i915_oa_reg flex_eu_config_test_oa[] = {
+};
+
+static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0xd04), 0x00000200 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x17060000 },
+ { _MMIO(0x9840), 0x00000000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x13034000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x07060066 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x05060000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x0f080040 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x07091000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x0f041000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x1d004000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x35000000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x49000000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x3d000000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x31000000 },
+};
+
+static ssize_t
+show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "1\n");
+}
+
+void
+i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
+{
+ strncpy(dev_priv->perf.oa.test_config.uuid,
+ "db41edd4-d8e7-4730-ad11-b9a2d6833503",
+ UUID_STRING_LEN);
+ dev_priv->perf.oa.test_config.id = 1;
+
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+}
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/i915_oa_cnl.h
new file mode 100644
index 000000000000..fb918b131105
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.h
@@ -0,0 +1,34 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_OA_CNL_H__
+#define __I915_OA_CNL_H__
+
+extern void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/i915_oa_glk.c
index 4ee527e4c926..971db587957c 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.c
+++ b/drivers/gpu/drm/i915/i915_oa_glk.c
@@ -83,9 +83,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"dd3fd789-e783-4204-8cd0-b671bbccb0cf",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c
index 56b03773bb9d..434a9b96d7ab 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.c
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.c
@@ -113,9 +113,9 @@ show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *b
void
i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"403d8832-1a27-4aa6-a64e-f5389ce7b212",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_render_basic;
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
index b6e7cc774136..2fa98a40bbc8 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"baa3c7e4-52b6-4b85-801e-465a94b746dd",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
index 5576afdd9a7e..f3cb6679a1bc 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"f1792f32-6db2-4b50-b4b2-557128f1688d",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
index 890d55879946..bf8b8cd8a50d 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
@@ -83,9 +83,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"1651949f-0ac0-4cb1-a06f-dafd74a407d1",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
index 85e51addf86a..ae534c7c8135 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"2b985803-d3c9-4629-8a4f-634bfecba0e8",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
index bce031ee4445..817fba2d82df 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"882fa433-1f4a-4a67-a962-c741888fe5f5",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
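The hunks above repeat a single fix across the per-platform OA config files: strncpy() with a length of UUID_STRING_LEN copies exactly the 36 UUID characters and never writes a terminating NUL, leaving the uuid field unterminated, whereas strlcpy() sized by the destination always terminates (the field is sized to hold the UUID plus the NUL, so nothing is truncated). A minimal userspace sketch of the difference, using snprintf() as a portable stand-in for the kernel's strlcpy() and illustrative buffer names:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char dst[8];
		const char *src = "12345678";	/* strlen(src) == sizeof(dst) */

		/* strncpy() fills all eight bytes and leaves dst unterminated;
		 * printing it now would read past the end of the buffer. */
		strncpy(dst, src, sizeof(dst));

		/* snprintf(), like the kernel's strlcpy(), always writes a NUL,
		 * truncating the copy to sizeof(dst) - 1 characters. */
		snprintf(dst, sizeof(dst), "%s", src);
		printf("%s\n", dst);	/* prints "1234567" */
		return 0;
	}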
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index b4faeb6aa2bd..b5f3eb4fa8a3 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -22,6 +22,8 @@
* IN THE SOFTWARE.
*/
+#include <drm/drm_print.h>
+
#include "i915_params.h"
#include "i915_drv.h"
@@ -46,17 +48,6 @@ i915_param_named_unsafe(panel_ignore_lid, int, 0600,
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
-i915_param_named_unsafe(semaphores, int, 0400,
- "Use semaphores for inter-ring sync "
- "(default: -1 (use per-chip defaults))");
-
-i915_param_named_unsafe(enable_rc6, int, 0400,
- "Enable power-saving render C-state 6. "
- "Different stages can be selected via bitmask values "
- "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
- "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
- "default: -1 (use per-chip default)");
-
i915_param_named_unsafe(enable_dc, int, 0400,
"Enable power-saving display C-states. "
"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
@@ -99,10 +90,6 @@ i915_param_named_unsafe(enable_ppgtt, int, 0400,
"Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
-i915_param_named_unsafe(enable_execlists, int, 0400,
- "Override execlists usage. "
- "(-1=auto [default], 0=disabled, 1=enabled)");
-
i915_param_named_unsafe(enable_psr, int, 0600,
"Enable PSR "
"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
@@ -162,13 +149,10 @@ i915_param_named_unsafe(edp_vswing, int, 0400,
"(0=use value from vbt [default], 1=low power swing(200mV),"
"2=default swing(400mV))");
-i915_param_named_unsafe(enable_guc_loading, int, 0400,
- "Enable GuC firmware loading "
- "(-1=auto, 0=never [default], 1=if available, 2=required)");
-
-i915_param_named_unsafe(enable_guc_submission, int, 0400,
- "Enable GuC submission "
- "(-1=auto, 0=never [default], 1=if available, 2=required)");
+i915_param_named_unsafe(enable_guc, int, 0400,
+ "Enable GuC load for GuC submission and/or HuC load. "
+ "Required functionality can be selected using bitmask values. "
+ "(-1=auto, 0=disable [default], 1=GuC submission, 2=HuC load)");
i915_param_named(guc_log_level, int, 0400,
"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
@@ -190,3 +174,34 @@ i915_param_named(enable_dpcd_backlight, bool, 0600,
i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
+
+static __always_inline void _print_param(struct drm_printer *p,
+ const char *name,
+ const char *type,
+ const void *x)
+{
+ if (!__builtin_strcmp(type, "bool"))
+ drm_printf(p, "i915.%s=%s\n", name, yesno(*(const bool *)x));
+ else if (!__builtin_strcmp(type, "int"))
+ drm_printf(p, "i915.%s=%d\n", name, *(const int *)x);
+ else if (!__builtin_strcmp(type, "unsigned int"))
+ drm_printf(p, "i915.%s=%u\n", name, *(const unsigned int *)x);
+ else if (!__builtin_strcmp(type, "char *"))
+ drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
+ else
+ BUILD_BUG();
+}
+
+/**
+ * i915_params_dump - dump i915 modparams
+ * @params: i915 modparams
+ * @p: the &drm_printer
+ *
+ * Pretty printer for i915 modparams.
+ */
+void i915_params_dump(const struct i915_params *params, struct drm_printer *p)
+{
+#define PRINT(T, x, ...) _print_param(p, #x, #T, &params->x);
+ I915_PARAMS_FOR_EACH(PRINT);
+#undef PRINT
+}
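For readers unfamiliar with the pattern, I915_PARAMS_FOR_EACH() is an X-macro: each call site defines a throwaway macro (PRINT above), expands the parameter list through it, and undefines it again, so the table of parameters is written exactly once. The __builtin_strcmp() calls in _print_param() compare stringified type names that are compile-time constants, so the compiler folds the branches away and any unhandled type trips BUILD_BUG(). A standalone sketch of the same technique, with an illustrative two-entry parameter list rather than the driver's real one:

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	#define PARAMS_FOR_EACH(param) \
		param(int, modeset, -1) \
		param(bool, enable_hangcheck, true)

	struct params {
	#define MEMBER(T, x, v) T x;
		PARAMS_FOR_EACH(MEMBER)
	#undef MEMBER
	};

	static void print_param(const char *name, const char *type, const void *x)
	{
		if (!strcmp(type, "bool"))
			printf("%s=%s\n", name, *(const bool *)x ? "yes" : "no");
		else if (!strcmp(type, "int"))
			printf("%s=%d\n", name, *(const int *)x);
	}

	int main(void)
	{
		const struct params p = {
	#define INIT(T, x, v) .x = (v),
			PARAMS_FOR_EACH(INIT)
	#undef INIT
		};

	#define PRINT(T, x, v) print_param(#x, #T, &p.x);
		PARAMS_FOR_EACH(PRINT)
	#undef PRINT
		return 0;
	}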
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index c7292268ed43..c96360398072 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -25,27 +25,29 @@
#ifndef _I915_PARAMS_H_
#define _I915_PARAMS_H_
+#include <linux/bitops.h>
#include <linux/cache.h> /* for __read_mostly */
+struct drm_printer;
+
+#define ENABLE_GUC_SUBMISSION BIT(0)
+#define ENABLE_GUC_LOAD_HUC BIT(1)
+
#define I915_PARAMS_FOR_EACH(param) \
param(char *, vbt_firmware, NULL) \
param(int, modeset, -1) \
param(int, panel_ignore_lid, 1) \
- param(int, semaphores, -1) \
param(int, lvds_channel_mode, 0) \
param(int, panel_use_ssc, -1) \
param(int, vbt_sdvo_panel_type, -1) \
- param(int, enable_rc6, -1) \
param(int, enable_dc, -1) \
param(int, enable_fbc, -1) \
param(int, enable_ppgtt, -1) \
- param(int, enable_execlists, -1) \
param(int, enable_psr, -1) \
param(int, disable_power_well, -1) \
param(int, enable_ips, 1) \
param(int, invert_brightness, 0) \
- param(int, enable_guc_loading, 0) \
- param(int, enable_guc_submission, 0) \
+ param(int, enable_guc, 0) \
param(int, guc_log_level, -1) \
param(char *, guc_firmware_path, NULL) \
param(char *, huc_firmware_path, NULL) \
@@ -77,5 +79,7 @@ struct i915_params {
extern struct i915_params i915_modparams __read_mostly;
+void i915_params_dump(const struct i915_params *params, struct drm_printer *p);
+
#endif
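The two removed GuC parameters are folded into the single enable_guc bitmask whose bits are defined just above. A hedged sketch of how a consumer might test those bits; the helper names here are illustrative, not the driver's:

	#include <stdbool.h>

	#define ENABLE_GUC_SUBMISSION	(1 << 0)	/* BIT(0) above */
	#define ENABLE_GUC_LOAD_HUC	(1 << 1)	/* BIT(1) above */

	/* i915.enable_guc=1 requests GuC submission, =2 HuC loading,
	 * =3 both; negative values mean per-platform auto. */
	static bool wants_guc_submission(int enable_guc)
	{
		return enable_guc > 0 && (enable_guc & ENABLE_GUC_SUBMISSION);
	}

	static bool wants_huc_load(int enable_guc)
	{
		return enable_guc > 0 && (enable_guc & ENABLE_GUC_LOAD_HUC);
	}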
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6458c309c039..36d48422b475 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -209,6 +209,8 @@ static const struct intel_device_info intel_gm45_info __initconst = {
.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING, \
.has_snoop = true, \
+ /* ilk does support rc6, but we do not implement [power] contexts */ \
+ .has_rc6 = 0, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
@@ -631,6 +633,8 @@ static const struct pci_device_id pciidlist[] = {
INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_CFL_U_GT1_IDS(&intel_coffeelake_gt1_info),
+ INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_CNL_U_GT2_IDS(&intel_cannonlake_gt2_info),
INTEL_CNL_Y_GT2_IDS(&intel_cannonlake_gt2_info),
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index d453756ca128..e42d9a4de322 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -207,6 +207,8 @@
#include "i915_oa_kblgt3.h"
#include "i915_oa_glk.h"
#include "i915_oa_cflgt2.h"
+#include "i915_oa_cflgt3.h"
+#include "i915_oa_cnl.h"
/* HW requires this to be a power of two, between 128k and 16M, though the driver
 * is currently generally designed assuming the largest 16M size is used such
@@ -1214,9 +1216,9 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- if (i915_modparams.enable_execlists)
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
- else {
+ } else {
struct intel_engine_cs *engine = dev_priv->engine[RCS];
struct intel_ring *ring;
int ret;
@@ -1260,7 +1262,7 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- if (i915_modparams.enable_execlists) {
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
} else {
struct intel_engine_cs *engine = dev_priv->engine[RCS];
@@ -1724,10 +1726,9 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
GFP_KERNEL);
}
- ret = i915_switch_context(req);
i915_add_request(req);
- return ret;
+ return 0;
}
/*
@@ -1851,7 +1852,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
* be read back from automatically triggered reports, as part of the
* RPT_ID field.
*/
- if (IS_GEN9(dev_priv)) {
+ if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) {
I915_WRITE(GEN8_OA_DEBUG,
_MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
@@ -1884,6 +1885,16 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
}
+static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
+{
+ /* Reset all contexts' slices/subslices configurations. */
+ gen8_configure_all_contexts(dev_priv, NULL, false);
+
+ /* Make sure we disable noa to save power. */
+ I915_WRITE(RPM_CONFIG1,
+ I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
+}
+
static void gen7_oa_enable(struct drm_i915_private *dev_priv)
{
/*
@@ -2679,8 +2690,8 @@ err:
static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
{
- return div_u64(1000000000ULL * (2ULL << exponent),
- dev_priv->perf.oa.timestamp_frequency);
+ return div64_u64(1000000000ULL * (2ULL << exponent),
+ 1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz);
}
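The rewritten helper derives the OA timer period from the command streamer timestamp frequency instead of the old per-platform perf field: period_ns = 10^9 * 2^(exponent + 1) / f_timestamp. A worked check, assuming the 12000 kHz timestamp clock that SKL/KBL/CFL parts report:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t oa_exponent_to_ns(int exponent, uint64_t ts_freq_khz)
	{
		return 1000000000ULL * (2ULL << exponent) / (1000ULL * ts_freq_khz);
	}

	int main(void)
	{
		/* exponent 0: 2 ticks at 12 MHz, roughly 166 ns between reports */
		printf("%llu ns\n", (unsigned long long)oa_exponent_to_ns(0, 12000));
		/* exponent 15: 65536 ticks, roughly 5.46 ms between reports */
		printf("%llu ns\n", (unsigned long long)oa_exponent_to_ns(15, 12000));
		return 0;
	}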
/**
@@ -2934,6 +2945,10 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
} else if (IS_COFFEELAKE(dev_priv)) {
if (IS_CFL_GT2(dev_priv))
i915_perf_load_test_config_cflgt2(dev_priv);
+ if (IS_CFL_GT3(dev_priv))
+ i915_perf_load_test_config_cflgt3(dev_priv);
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ i915_perf_load_test_config_cnl(dev_priv);
}
if (dev_priv->perf.oa.test_config.id == 0)
@@ -2991,7 +3006,7 @@ static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
int i;
for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
- if (flex_eu_regs[i].reg == addr)
+ if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
return true;
}
return false;
@@ -2999,31 +3014,47 @@ static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
{
- return (addr >= OASTARTTRIG1.reg && addr <= OASTARTTRIG8.reg) ||
- (addr >= OAREPORTTRIG1.reg && addr <= OAREPORTTRIG8.reg) ||
- (addr >= OACEC0_0.reg && addr <= OACEC7_1.reg);
+ return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) &&
+ addr <= i915_mmio_reg_offset(OASTARTTRIG8)) ||
+ (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) &&
+ addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) ||
+ (addr >= i915_mmio_reg_offset(OACEC0_0) &&
+ addr <= i915_mmio_reg_offset(OACEC7_1));
}
static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
- return addr == HALF_SLICE_CHICKEN2.reg ||
- (addr >= MICRO_BP0_0.reg && addr <= NOA_WRITE.reg) ||
- (addr >= OA_PERFCNT1_LO.reg && addr <= OA_PERFCNT2_HI.reg) ||
- (addr >= OA_PERFMATRIX_LO.reg && addr <= OA_PERFMATRIX_HI.reg);
+ return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) ||
+ (addr >= i915_mmio_reg_offset(MICRO_BP0_0) &&
+ addr <= i915_mmio_reg_offset(NOA_WRITE)) ||
+ (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) &&
+ addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) ||
+ (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) &&
+ addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI));
}
static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
return gen7_is_valid_mux_addr(dev_priv, addr) ||
- addr == WAIT_FOR_RC6_EXIT.reg ||
- (addr >= RPM_CONFIG0.reg && addr <= NOA_CONFIG(8).reg);
+ addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) ||
+ (addr >= i915_mmio_reg_offset(RPM_CONFIG0) &&
+ addr <= i915_mmio_reg_offset(NOA_CONFIG(8)));
+}
+
+static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+{
+ return gen8_is_valid_mux_addr(dev_priv, addr) ||
+ (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
+ addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
}
static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
return gen7_is_valid_mux_addr(dev_priv, addr) ||
(addr >= 0x25100 && addr <= 0x2FF90) ||
- addr == 0x9ec0;
+ (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) &&
+ addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) ||
+ addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0);
}
static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
@@ -3038,14 +3069,14 @@ static uint32_t mask_reg_value(u32 reg, u32 val)
* WaDisableSTUnitPowerOptimization workaround. Make sure the value
* programmed by userspace doesn't change this.
*/
- if (HALF_SLICE_CHICKEN2.reg == reg)
+ if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg)
val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
* indicated by its name and a bunch of selection fields used by OA
* configs.
*/
- if (WAIT_FOR_RC6_EXIT.reg == reg)
+ if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg)
val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
return val;
@@ -3392,8 +3423,6 @@ static struct ctl_table dev_root[] = {
*/
void i915_perf_init(struct drm_i915_private *dev_priv)
{
- dev_priv->perf.oa.timestamp_frequency = 0;
-
if (IS_HASWELL(dev_priv)) {
dev_priv->perf.oa.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
@@ -3409,70 +3438,68 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.oa_hw_tail_read =
gen7_oa_hw_tail_read;
- dev_priv->perf.oa.timestamp_frequency = 12500000;
-
dev_priv->perf.oa.oa_formats = hsw_oa_formats;
- } else if (i915_modparams.enable_execlists) {
+ } else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		/* Note that although we could theoretically also support the
* legacy ringbuffer mode on BDW (and earlier iterations of
* this driver, before upstreaming did this) it didn't seem
* worth the complexity to maintain now that BDW+ enable
* execlist mode by default.
*/
- dev_priv->perf.oa.ops.is_valid_b_counter_reg =
- gen7_is_valid_b_counter_addr;
- dev_priv->perf.oa.ops.is_valid_mux_reg =
- gen8_is_valid_mux_addr;
- dev_priv->perf.oa.ops.is_valid_flex_reg =
- gen8_is_valid_flex_addr;
+ dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
- dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
- dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
dev_priv->perf.oa.ops.read = gen8_oa_read;
dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
-
- if (IS_GEN8(dev_priv)) {
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
-
- dev_priv->perf.oa.timestamp_frequency = 12500000;
+ if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) {
+ dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+ gen7_is_valid_b_counter_addr;
+ dev_priv->perf.oa.ops.is_valid_mux_reg =
+ gen8_is_valid_mux_addr;
+ dev_priv->perf.oa.ops.is_valid_flex_reg =
+ gen8_is_valid_flex_addr;
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->perf.oa.ops.is_valid_mux_reg =
chv_is_valid_mux_addr;
}
- } else if (IS_GEN9(dev_priv)) {
+
+ dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
+ dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
+
+ if (IS_GEN8(dev_priv)) {
+ dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
+ dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
+
+ dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
+ } else {
+ dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
+ dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+
+ dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
+ }
+ } else if (IS_GEN10(dev_priv)) {
+ dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+ gen7_is_valid_b_counter_addr;
+ dev_priv->perf.oa.ops.is_valid_mux_reg =
+ gen10_is_valid_mux_addr;
+ dev_priv->perf.oa.ops.is_valid_flex_reg =
+ gen8_is_valid_flex_addr;
+
+ dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
+ dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;
+
dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
-
- switch (dev_priv->info.platform) {
- case INTEL_BROXTON:
- case INTEL_GEMINILAKE:
- dev_priv->perf.oa.timestamp_frequency = 19200000;
- break;
- case INTEL_SKYLAKE:
- case INTEL_KABYLAKE:
- case INTEL_COFFEELAKE:
- dev_priv->perf.oa.timestamp_frequency = 12000000;
- break;
- default:
- /* Leave timestamp_frequency to 0 so we can
- * detect unsupported platforms.
- */
- break;
- }
}
}
- if (dev_priv->perf.oa.timestamp_frequency) {
+ if (dev_priv->perf.oa.ops.enable_metric_set) {
hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
@@ -3482,8 +3509,8 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->perf.lock);
spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
- oa_sample_rate_hard_limit =
- dev_priv->perf.oa.timestamp_frequency / 2;
+ oa_sample_rate_hard_limit = 1000 *
+ (INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
mutex_init(&dev_priv->perf.metrics_lock);
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
new file mode 100644
index 000000000000..55a8a1e29424
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -0,0 +1,865 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/perf_event.h>
+#include <linux/pm_runtime.h>
+
+#include "i915_drv.h"
+#include "i915_pmu.h"
+#include "intel_ringbuffer.h"
+
+/* Frequency for the sampling timer for events which need it. */
+#define FREQUENCY 200
+#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
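+/* With FREQUENCY == 200, PERIOD above resolves to max(10 us, 5 ms) = 5,000,000 ns. */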
+
+#define ENGINE_SAMPLE_MASK \
+ (BIT(I915_SAMPLE_BUSY) | \
+ BIT(I915_SAMPLE_WAIT) | \
+ BIT(I915_SAMPLE_SEMA))
+
+#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)
+
+static cpumask_t i915_pmu_cpumask;
+
+static u8 engine_config_sample(u64 config)
+{
+ return config & I915_PMU_SAMPLE_MASK;
+}
+
+static u8 engine_event_sample(struct perf_event *event)
+{
+ return engine_config_sample(event->attr.config);
+}
+
+static u8 engine_event_class(struct perf_event *event)
+{
+ return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
+}
+
+static u8 engine_event_instance(struct perf_event *event)
+{
+ return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
+}
+
+static bool is_engine_config(u64 config)
+{
+ return config < __I915_PMU_OTHER(0);
+}
+
+static unsigned int config_enabled_bit(u64 config)
+{
+ if (is_engine_config(config))
+ return engine_config_sample(config);
+ else
+ return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
+}
+
+static u64 config_enabled_mask(u64 config)
+{
+ return BIT_ULL(config_enabled_bit(config));
+}
+
+static bool is_engine_event(struct perf_event *event)
+{
+ return is_engine_config(event->attr.config);
+}
+
+static unsigned int event_enabled_bit(struct perf_event *event)
+{
+ return config_enabled_bit(event->attr.config);
+}
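These helpers unpack a single u64 perf event config into its (class, instance, sample) fields. A hedged sketch of the inverse packing; the field widths below are illustrative, and the authoritative I915_PMU_SAMPLE_BITS/I915_PMU_CLASS_SHIFT values live in the uAPI header added by this series:

	#include <stdint.h>

	/* Illustrative widths; see include/uapi/drm/i915_drm.h for the
	 * real I915_PMU_* definitions. */
	#define SAMPLE_BITS	4
	#define INSTANCE_BITS	8
	#define CLASS_SHIFT	(SAMPLE_BITS + INSTANCE_BITS)

	static uint64_t engine_event(uint8_t class, uint8_t instance,
				     uint8_t sample)
	{
		return ((uint64_t)class << CLASS_SHIFT) |
		       ((uint64_t)instance << SAMPLE_BITS) |
		       sample;
	}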
+
+static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
+{
+ u64 enable;
+
+ /*
+ * Only some counters need the sampling timer.
+ *
+ * We start with a bitmask of all currently enabled events.
+ */
+ enable = i915->pmu.enable;
+
+ /*
+ * Mask out all the ones which do not need the timer, or in
+ * other words keep all the ones that could need the timer.
+ */
+ enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
+ config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
+ ENGINE_SAMPLE_MASK;
+
+ /*
+	 * When the GPU is idle, per-engine counters do not need to be
+	 * running, so clear those bits out.
+ */
+ if (!gpu_active)
+ enable &= ~ENGINE_SAMPLE_MASK;
+ /*
+	 * Also, when software busyness tracking is available we do not
+	 * need the timer for the I915_SAMPLE_BUSY counter.
+ *
+ * Use RCS as proxy for all engines.
+ */
+ else if (intel_engine_supports_stats(i915->engine[RCS]))
+ enable &= ~BIT(I915_SAMPLE_BUSY);
+
+ /*
+ * If some bits remain it means we need the sampling timer running.
+ */
+ return enable;
+}
+
+void i915_pmu_gt_parked(struct drm_i915_private *i915)
+{
+ if (!i915->pmu.base.event_init)
+ return;
+
+ spin_lock_irq(&i915->pmu.lock);
+ /*
+ * Signal sampling timer to stop if only engine events are enabled and
+ * GPU went idle.
+ */
+ i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
+ spin_unlock_irq(&i915->pmu.lock);
+}
+
+static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
+{
+ if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
+ i915->pmu.timer_enabled = true;
+ hrtimer_start_range_ns(&i915->pmu.timer,
+ ns_to_ktime(PERIOD), 0,
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
+void i915_pmu_gt_unparked(struct drm_i915_private *i915)
+{
+ if (!i915->pmu.base.event_init)
+ return;
+
+ spin_lock_irq(&i915->pmu.lock);
+ /*
+ * Re-enable sampling timer when GPU goes active.
+ */
+ __i915_pmu_maybe_start_timer(i915);
+ spin_unlock_irq(&i915->pmu.lock);
+}
+
+static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
+{
+ if (!fw)
+ intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+
+ return true;
+}
+
+static void
+update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
+{
+ sample->cur += mul_u32_u32(val, unit);
+}
+
+static void engines_sample(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ bool fw = false;
+
+ if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
+ return;
+
+ if (!dev_priv->gt.awake)
+ return;
+
+ if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ return;
+
+ for_each_engine(engine, dev_priv, id) {
+ u32 current_seqno = intel_engine_get_seqno(engine);
+ u32 last_seqno = intel_engine_last_submit(engine);
+ u32 val;
+
+ val = !i915_seqno_passed(current_seqno, last_seqno);
+
+ update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
+ PERIOD, val);
+
+ if (val && (engine->pmu.enable &
+ (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
+ fw = grab_forcewake(dev_priv, fw);
+
+ val = I915_READ_FW(RING_CTL(engine->mmio_base));
+ } else {
+ val = 0;
+ }
+
+ update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
+ PERIOD, !!(val & RING_WAIT));
+
+ update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
+ PERIOD, !!(val & RING_WAIT_SEMAPHORE));
+ }
+
+ if (fw)
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ intel_runtime_pm_put(dev_priv);
+}
+
+static void frequency_sample(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->pmu.enable &
+ config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
+ u32 val;
+
+ val = dev_priv->gt_pm.rps.cur_freq;
+ if (dev_priv->gt.awake &&
+ intel_runtime_pm_get_if_in_use(dev_priv)) {
+ val = intel_get_cagf(dev_priv,
+ I915_READ_NOTRACE(GEN6_RPSTAT1));
+ intel_runtime_pm_put(dev_priv);
+ }
+
+ update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
+ 1, intel_gpu_freq(dev_priv, val));
+ }
+
+ if (dev_priv->pmu.enable &
+ config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
+ update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
+ intel_gpu_freq(dev_priv,
+ dev_priv->gt_pm.rps.cur_freq));
+ }
+}
+
+static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
+{
+ struct drm_i915_private *i915 =
+ container_of(hrtimer, struct drm_i915_private, pmu.timer);
+
+ if (!READ_ONCE(i915->pmu.timer_enabled))
+ return HRTIMER_NORESTART;
+
+ engines_sample(i915);
+ frequency_sample(i915);
+
+ hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
+ return HRTIMER_RESTART;
+}
+
+static u64 count_interrupts(struct drm_i915_private *i915)
+{
+ /* open-coded kstat_irqs() */
+ struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
+ u64 sum = 0;
+ int cpu;
+
+ if (!desc || !desc->kstat_irqs)
+ return 0;
+
+ for_each_possible_cpu(cpu)
+ sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
+
+ return sum;
+}
+
+static void i915_pmu_event_destroy(struct perf_event *event)
+{
+ WARN_ON(event->parent);
+}
+
+static int engine_event_init(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+
+ if (!intel_engine_lookup_user(i915, engine_event_class(event),
+ engine_event_instance(event)))
+ return -ENODEV;
+
+ switch (engine_event_sample(event)) {
+ case I915_SAMPLE_BUSY:
+ case I915_SAMPLE_WAIT:
+ break;
+ case I915_SAMPLE_SEMA:
+ if (INTEL_GEN(i915) < 6)
+ return -ENODEV;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int i915_pmu_event_init(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ int ret;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* unsupported modes and filters */
+ if (event->attr.sample_period) /* no sampling */
+ return -EINVAL;
+
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ /* only allow running on one cpu at a time */
+ if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
+ return -EINVAL;
+
+ if (is_engine_event(event)) {
+ ret = engine_event_init(event);
+ } else {
+ ret = 0;
+ switch (event->attr.config) {
+ case I915_PMU_ACTUAL_FREQUENCY:
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ /* Requires a mutex for sampling! */
+ ret = -ENODEV;
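+			/* Fall-through: the gen < 6 check below also applies here. */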
+ case I915_PMU_REQUESTED_FREQUENCY:
+ if (INTEL_GEN(i915) < 6)
+ ret = -ENODEV;
+ break;
+ case I915_PMU_INTERRUPTS:
+ break;
+ case I915_PMU_RC6_RESIDENCY:
+ if (!HAS_RC6(i915))
+ ret = -ENODEV;
+ break;
+ default:
+ ret = -ENOENT;
+ break;
+ }
+ }
+ if (ret)
+ return ret;
+
+ if (!event->parent)
+ event->destroy = i915_pmu_event_destroy;
+
+ return 0;
+}
+
+static u64 __i915_pmu_event_read(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ u64 val = 0;
+
+ if (is_engine_event(event)) {
+ u8 sample = engine_event_sample(event);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+
+ if (WARN_ON_ONCE(!engine)) {
+ /* Do nothing */
+ } else if (sample == I915_SAMPLE_BUSY &&
+ engine->pmu.busy_stats) {
+ val = ktime_to_ns(intel_engine_get_busy_time(engine));
+ } else {
+ val = engine->pmu.sample[sample].cur;
+ }
+ } else {
+ switch (event->attr.config) {
+ case I915_PMU_ACTUAL_FREQUENCY:
+ val =
+ div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
+ FREQUENCY);
+ break;
+ case I915_PMU_REQUESTED_FREQUENCY:
+ val =
+ div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
+ FREQUENCY);
+ break;
+ case I915_PMU_INTERRUPTS:
+ val = count_interrupts(i915);
+ break;
+ case I915_PMU_RC6_RESIDENCY:
+ intel_runtime_pm_get(i915);
+ val = intel_rc6_residency_ns(i915,
+ IS_VALLEYVIEW(i915) ?
+ VLV_GT_RENDER_RC6 :
+ GEN6_GT_GFX_RC6);
+ if (HAS_RC6p(i915))
+ val += intel_rc6_residency_ns(i915,
+ GEN6_GT_GFX_RC6p);
+ if (HAS_RC6pp(i915))
+ val += intel_rc6_residency_ns(i915,
+ GEN6_GT_GFX_RC6pp);
+ intel_runtime_pm_put(i915);
+ break;
+ }
+ }
+
+ return val;
+}
+
+static void i915_pmu_event_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev, new;
+
+again:
+ prev = local64_read(&hwc->prev_count);
+ new = __i915_pmu_event_read(event);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
+ goto again;
+
+ local64_add(new - prev, &event->count);
+}
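The read path above turns an absolute counter into perf deltas without taking a lock: prev_count remembers the last published value, and the cmpxchg loop guarantees each delta is accumulated exactly once even with concurrent readers. A generic C11 sketch of the same pattern (the kernel version additionally re-samples the counter on every retry):

	#include <stdatomic.h>
	#include <stdint.h>

	static _Atomic uint64_t prev_count;
	static _Atomic uint64_t event_count;

	static void publish_delta(uint64_t new_val)
	{
		uint64_t prev = atomic_load(&prev_count);

		/* On failure prev is reloaded with the current value and the
		 * exchange retried, so no delta is lost or double-counted. */
		while (!atomic_compare_exchange_weak(&prev_count, &prev, new_val))
			;

		atomic_fetch_add(&event_count, new_val - prev);
	}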
+
+static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
+{
+ return intel_engine_supports_stats(engine) &&
+ (engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
+}
+
+static void i915_pmu_enable(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ unsigned int bit = event_enabled_bit(event);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->pmu.lock, flags);
+
+ /*
+ * Update the bitmask of enabled events and increment
+ * the event reference counter.
+ */
+ GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+ GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
+ i915->pmu.enable |= BIT_ULL(bit);
+ i915->pmu.enable_count[bit]++;
+
+ /*
+ * Start the sampling timer if needed and not already enabled.
+ */
+ __i915_pmu_maybe_start_timer(i915);
+
+ /*
+ * For per-engine events the bitmask and reference counting
+ * is stored per engine.
+ */
+ if (is_engine_event(event)) {
+ u8 sample = engine_event_sample(event);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+ GEM_BUG_ON(!engine);
+ engine->pmu.enable |= BIT(sample);
+
+ GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+ GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
+ if (engine->pmu.enable_count[sample]++ == 0) {
+ /*
+ * Enable engine busy stats tracking if needed or
+ * alternatively cancel the scheduled disable.
+ *
+			 * If a delayed disable was pending, cancel it; the
+			 * stats are then still enabled, so do not enable again.
+ */
+ if (engine_needs_busy_stats(engine) &&
+ !engine->pmu.busy_stats) {
+ engine->pmu.busy_stats = true;
+ if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
+ intel_enable_engine_stats(engine);
+ }
+ }
+ }
+
+ /*
+ * Store the current counter value so we can report the correct delta
+ * for all listeners. Even when the event was already enabled and has
+ * an existing non-zero value.
+ */
+ local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
+}
+
+static void __disable_busy_stats(struct work_struct *work)
+{
+ struct intel_engine_cs *engine =
+ container_of(work, typeof(*engine), pmu.disable_busy_stats.work);
+
+ intel_disable_engine_stats(engine);
+}
+
+static void i915_pmu_disable(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ unsigned int bit = event_enabled_bit(event);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->pmu.lock, flags);
+
+ if (is_engine_event(event)) {
+ u8 sample = engine_event_sample(event);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+ GEM_BUG_ON(!engine);
+ GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+ GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
+ /*
+ * Decrement the reference count and clear the enabled
+ * bitmask when the last listener on an event goes away.
+ */
+ if (--engine->pmu.enable_count[sample] == 0) {
+ engine->pmu.enable &= ~BIT(sample);
+ if (!engine_needs_busy_stats(engine) &&
+ engine->pmu.busy_stats) {
+ engine->pmu.busy_stats = false;
+ /*
+				 * Request a delayed disable so that the
+				 * rapid on/off event cycles seen when tools
+				 * such as perf stat start up are handled
+				 * gracefully.
+				 *
+				 * It also helps busy stats accuracy across
+				 * background CPU offline/online migration
+				 * events.
+ */
+ queue_delayed_work(system_wq,
+ &engine->pmu.disable_busy_stats,
+ round_jiffies_up_relative(HZ));
+ }
+ }
+ }
+
+ GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+ GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
+ /*
+ * Decrement the reference count and clear the enabled
+ * bitmask when the last listener on an event goes away.
+ */
+ if (--i915->pmu.enable_count[bit] == 0) {
+ i915->pmu.enable &= ~BIT_ULL(bit);
+ i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
+ }
+
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
+}
+
+static void i915_pmu_event_start(struct perf_event *event, int flags)
+{
+ i915_pmu_enable(event);
+ event->hw.state = 0;
+}
+
+static void i915_pmu_event_stop(struct perf_event *event, int flags)
+{
+ if (flags & PERF_EF_UPDATE)
+ i915_pmu_event_read(event);
+ i915_pmu_disable(event);
+ event->hw.state = PERF_HES_STOPPED;
+}
+
+static int i915_pmu_event_add(struct perf_event *event, int flags)
+{
+ if (flags & PERF_EF_START)
+ i915_pmu_event_start(event, flags);
+
+ return 0;
+}
+
+static void i915_pmu_event_del(struct perf_event *event, int flags)
+{
+ i915_pmu_event_stop(event, PERF_EF_UPDATE);
+}
+
+static int i915_pmu_event_event_idx(struct perf_event *event)
+{
+ return 0;
+}
+
+struct i915_str_attribute {
+ struct device_attribute attr;
+ const char *str;
+};
+
+static ssize_t i915_pmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i915_str_attribute *eattr;
+
+ eattr = container_of(attr, struct i915_str_attribute, attr);
+ return sprintf(buf, "%s\n", eattr->str);
+}
+
+#define I915_PMU_FORMAT_ATTR(_name, _config) \
+ (&((struct i915_str_attribute[]) { \
+ { .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
+ .str = _config, } \
+ })[0].attr.attr)
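The macro above relies on a C99 compound literal: it instantiates an anonymous one-element array of struct i915_str_attribute inline (with static storage duration at file scope) and yields the address of the embedded attribute, so a fully initialized sysfs attribute can be produced by a single expression inside an array initializer. A standalone sketch of the idiom with simplified types:

	#include <stdio.h>

	struct str_attribute {
		const char *name;
		const char *str;
	};

	/* Build the object inline and hand back a pointer to it. */
	#define STR_ATTR(_name, _str) \
		(&((struct str_attribute[]) { \
			{ .name = #_name, .str = _str }, \
		})[0])

	static struct str_attribute *attrs[] = {
		STR_ATTR(i915_eventid, "config:0-20"),
		NULL,
	};

	int main(void)
	{
		printf("%s = %s\n", attrs[0]->name, attrs[0]->str);
		return 0;
	}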
+
+static struct attribute *i915_pmu_format_attrs[] = {
+ I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
+ NULL,
+};
+
+static const struct attribute_group i915_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = i915_pmu_format_attrs,
+};
+
+struct i915_ext_attribute {
+ struct device_attribute attr;
+ unsigned long val;
+};
+
+static ssize_t i915_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i915_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct i915_ext_attribute, attr);
+ return sprintf(buf, "config=0x%lx\n", eattr->val);
+}
+
+#define I915_EVENT_ATTR(_name, _config) \
+ (&((struct i915_ext_attribute[]) { \
+ { .attr = __ATTR(_name, 0444, i915_pmu_event_show, NULL), \
+ .val = _config, } \
+ })[0].attr.attr)
+
+#define I915_EVENT_STR(_name, _str) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
+ .id = 0, \
+ .event_str = _str, } \
+ })[0].attr.attr)
+
+#define I915_EVENT(_name, _config, _unit) \
+ I915_EVENT_ATTR(_name, _config), \
+ I915_EVENT_STR(_name.unit, _unit)
+
+#define I915_ENGINE_EVENT(_name, _class, _instance, _sample) \
+ I915_EVENT_ATTR(_name, __I915_PMU_ENGINE(_class, _instance, _sample)), \
+ I915_EVENT_STR(_name.unit, "ns")
+
+#define I915_ENGINE_EVENTS(_name, _class, _instance) \
+ I915_ENGINE_EVENT(_name##_instance-busy, _class, _instance, I915_SAMPLE_BUSY), \
+ I915_ENGINE_EVENT(_name##_instance-sema, _class, _instance, I915_SAMPLE_SEMA), \
+ I915_ENGINE_EVENT(_name##_instance-wait, _class, _instance, I915_SAMPLE_WAIT)
+
+static struct attribute *i915_pmu_events_attrs[] = {
+ I915_ENGINE_EVENTS(rcs, I915_ENGINE_CLASS_RENDER, 0),
+ I915_ENGINE_EVENTS(bcs, I915_ENGINE_CLASS_COPY, 0),
+ I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 0),
+ I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 1),
+ I915_ENGINE_EVENTS(vecs, I915_ENGINE_CLASS_VIDEO_ENHANCE, 0),
+
+ I915_EVENT(actual-frequency, I915_PMU_ACTUAL_FREQUENCY, "MHz"),
+ I915_EVENT(requested-frequency, I915_PMU_REQUESTED_FREQUENCY, "MHz"),
+
+ I915_EVENT_ATTR(interrupts, I915_PMU_INTERRUPTS),
+
+ I915_EVENT(rc6-residency, I915_PMU_RC6_RESIDENCY, "ns"),
+
+ NULL,
+};
+
+static const struct attribute_group i915_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = i915_pmu_events_attrs,
+};
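With the events group in place the counters are discoverable by name through sysfs, so, assuming a kernel carrying this series, an invocation along the lines of `perf stat -a -e i915/rcs0-busy/ sleep 1` should report render engine busyness in nanoseconds, matching the unit attributes exported above.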
+
+static ssize_t
+i915_pmu_get_attr_cpumask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
+}
+
+static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);
+
+static struct attribute *i915_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group i915_pmu_cpumask_attr_group = {
+ .attrs = i915_cpumask_attrs,
+};
+
+static const struct attribute_group *i915_pmu_attr_groups[] = {
+ &i915_pmu_format_attr_group,
+ &i915_pmu_events_attr_group,
+ &i915_pmu_cpumask_attr_group,
+ NULL
+};
+
+static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
+
+ GEM_BUG_ON(!pmu->base.event_init);
+
+ /* Select the first online CPU as a designated reader. */
+ if (!cpumask_weight(&i915_pmu_cpumask))
+ cpumask_set_cpu(cpu, &i915_pmu_cpumask);
+
+ return 0;
+}
+
+static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
+ unsigned int target;
+
+ GEM_BUG_ON(!pmu->base.event_init);
+
+ if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
+ target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
+ /* Migrate events if there is a valid target */
+ if (target < nr_cpu_ids) {
+ cpumask_set_cpu(target, &i915_pmu_cpumask);
+ perf_pmu_migrate_context(&pmu->base, cpu, target);
+ }
+ }
+
+ return 0;
+}
+
+static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
+
+static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
+{
+ enum cpuhp_state slot;
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/x86/intel/i915:online",
+ i915_pmu_cpu_online,
+ i915_pmu_cpu_offline);
+ if (ret < 0)
+ return ret;
+
+ slot = ret;
+ ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
+ if (ret) {
+ cpuhp_remove_multi_state(slot);
+ return ret;
+ }
+
+ cpuhp_slot = slot;
+ return 0;
+}
+
+static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
+{
+ WARN_ON(cpuhp_slot == CPUHP_INVALID);
+ WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
+ cpuhp_remove_multi_state(cpuhp_slot);
+}
+
+void i915_pmu_register(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int ret;
+
+ if (INTEL_GEN(i915) <= 2) {
+ DRM_INFO("PMU not supported for this GPU.");
+ return;
+ }
+
+ i915->pmu.base.attr_groups = i915_pmu_attr_groups;
+ i915->pmu.base.task_ctx_nr = perf_invalid_context;
+ i915->pmu.base.event_init = i915_pmu_event_init;
+ i915->pmu.base.add = i915_pmu_event_add;
+ i915->pmu.base.del = i915_pmu_event_del;
+ i915->pmu.base.start = i915_pmu_event_start;
+ i915->pmu.base.stop = i915_pmu_event_stop;
+ i915->pmu.base.read = i915_pmu_event_read;
+ i915->pmu.base.event_idx = i915_pmu_event_event_idx;
+
+ spin_lock_init(&i915->pmu.lock);
+ hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ i915->pmu.timer.function = i915_sample;
+
+ for_each_engine(engine, i915, id)
+ INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats,
+ __disable_busy_stats);
+
+ ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
+ if (ret)
+ goto err;
+
+ ret = i915_pmu_register_cpuhp_state(i915);
+ if (ret)
+ goto err_unreg;
+
+ return;
+
+err_unreg:
+ perf_pmu_unregister(&i915->pmu.base);
+err:
+ i915->pmu.base.event_init = NULL;
+ DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
+}
+
+void i915_pmu_unregister(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (!i915->pmu.base.event_init)
+ return;
+
+ WARN_ON(i915->pmu.enable);
+
+ hrtimer_cancel(&i915->pmu.timer);
+
+ for_each_engine(engine, i915, id) {
+ GEM_BUG_ON(engine->pmu.busy_stats);
+ flush_delayed_work(&engine->pmu.disable_busy_stats);
+ }
+
+ i915_pmu_unregister_cpuhp_state(i915);
+
+ perf_pmu_unregister(&i915->pmu.base);
+ i915->pmu.base.event_init = NULL;
+}
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
new file mode 100644
index 000000000000..40c154d13565
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#ifndef __I915_PMU_H__
+#define __I915_PMU_H__
+
+enum {
+ __I915_SAMPLE_FREQ_ACT = 0,
+ __I915_SAMPLE_FREQ_REQ,
+ __I915_NUM_PMU_SAMPLERS
+};
+
+/**
+ * How many different events we track in the global PMU mask.
+ *
+ * It also determines the needed number of event reference counters.
+ */
+#define I915_PMU_MASK_BITS \
+ ((1 << I915_PMU_SAMPLE_BITS) + \
+ (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
+
+struct i915_pmu_sample {
+ u64 cur;
+};
+
+struct i915_pmu {
+ /**
+ * @node: List node for CPU hotplug handling.
+ */
+ struct hlist_node node;
+ /**
+ * @base: PMU base.
+ */
+ struct pmu base;
+ /**
+ * @lock: Lock protecting enable mask and ref count handling.
+ */
+ spinlock_t lock;
+ /**
+ * @timer: Timer for internal i915 PMU sampling.
+ */
+ struct hrtimer timer;
+ /**
+ * @enable: Bitmask of all currently enabled events.
+ *
+ * Bits are derived from uAPI event numbers in a way that low 16 bits
+	 * correspond to engine event _sample_ _type_ (I915_SAMPLE_BUSY is
+ * bit 0), and higher bits correspond to other events (for instance
+ * I915_PMU_ACTUAL_FREQUENCY is bit 16 etc).
+ *
+ * In other words, low 16 bits are not per engine but per engine
+ * sampler type, while the upper bits are directly mapped to other
+ * event types.
+ */
+ u64 enable;
+ /**
+ * @enable_count: Reference counts for the enabled events.
+ *
+ * Array indices are mapped in the same way as bits in the @enable field
+ * and they are used to control sampling on/off when multiple clients
+ * are using the PMU API.
+ */
+ unsigned int enable_count[I915_PMU_MASK_BITS];
+ /**
+ * @timer_enabled: Should the internal sampling timer be running.
+ */
+ bool timer_enabled;
+ /**
+ * @sample: Current and previous (raw) counters for sampling events.
+ *
+ * These counters are updated from the i915 PMU sampling timer.
+ *
+ * Only global counters are held here, while the per-engine ones are in
+ * struct intel_engine_cs.
+ */
+ struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
+};
+
+#ifdef CONFIG_PERF_EVENTS
+void i915_pmu_register(struct drm_i915_private *i915);
+void i915_pmu_unregister(struct drm_i915_private *i915);
+void i915_pmu_gt_parked(struct drm_i915_private *i915);
+void i915_pmu_gt_unparked(struct drm_i915_private *i915);
+#else
+static inline void i915_pmu_register(struct drm_i915_private *i915) {}
+static inline void i915_pmu_unregister(struct drm_i915_private *i915) {}
+static inline void i915_pmu_gt_parked(struct drm_i915_private *i915) {}
+static inline void i915_pmu_gt_unparked(struct drm_i915_private *i915) {}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7923dfd9963c..505c605eff98 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -186,6 +186,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VIDEO_ENHANCEMENT_CLASS 2
#define COPY_ENGINE_CLASS 3
#define OTHER_CLASS 4
+#define MAX_ENGINE_CLASS 4
+
+#define MAX_ENGINE_INSTANCE 1
/* PCI config space */
@@ -355,9 +358,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
-#define GEN8_CONFIG0 _MMIO(0xD00)
-#define GEN9_DEFAULT_FIXES (1 << 3 | 1 << 2 | 1 << 1)
-
#define GAC_ECO_BITS _MMIO(0x14090)
#define ECOBITS_SNB_BIT (1<<13)
#define ECOBITS_PPGTT_CACHE64B (3<<8)
@@ -382,6 +382,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_STOLEN_RESERVED_2M (1 << 7)
#define GEN8_STOLEN_RESERVED_4M (2 << 7)
#define GEN8_STOLEN_RESERVED_8M (3 << 7)
+#define GEN6_STOLEN_RESERVED_ENABLE (1 << 0)
/* VGA stuff */
@@ -1109,16 +1110,50 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OA_PERFCNT1_HI _MMIO(0x91BC)
#define OA_PERFCNT2_LO _MMIO(0x91C0)
#define OA_PERFCNT2_HI _MMIO(0x91C4)
+#define OA_PERFCNT3_LO _MMIO(0x91C8)
+#define OA_PERFCNT3_HI _MMIO(0x91CC)
+#define OA_PERFCNT4_LO _MMIO(0x91D8)
+#define OA_PERFCNT4_HI _MMIO(0x91DC)
#define OA_PERFMATRIX_LO _MMIO(0x91C8)
#define OA_PERFMATRIX_HI _MMIO(0x91CC)
/* RPM unit config (Gen8+) */
#define RPM_CONFIG0 _MMIO(0x0D00)
-#define RPM_CONFIG1 _MMIO(0x0D04)
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (1 << GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 0
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 1
+#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT 1
+#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK (0x3 << GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT)
-/* RPC unit config (Gen8+) */
-#define RPM_CONFIG _MMIO(0x0D08)
+#define RPM_CONFIG1 _MMIO(0x0D04)
+#define GEN10_GT_NOA_ENABLE (1 << 9)
+
+/* GPM unit config (Gen9+) */
+#define CTC_MODE _MMIO(0xA26C)
+#define CTC_SOURCE_PARAMETER_MASK 1
+#define CTC_SOURCE_CRYSTAL_CLOCK 0
+#define CTC_SOURCE_DIVIDE_LOGIC 1
+#define CTC_SHIFT_PARAMETER_SHIFT 1
+#define CTC_SHIFT_PARAMETER_MASK (0x3 << CTC_SHIFT_PARAMETER_SHIFT)
+
+/* RCP unit config (Gen8+) */
+#define RCP_CONFIG _MMIO(0x0D08)
+
+/* NOA (HSW) */
+#define HSW_MBVID2_NOA0 _MMIO(0x9E80)
+#define HSW_MBVID2_NOA1 _MMIO(0x9E84)
+#define HSW_MBVID2_NOA2 _MMIO(0x9E88)
+#define HSW_MBVID2_NOA3 _MMIO(0x9E8C)
+#define HSW_MBVID2_NOA4 _MMIO(0x9E90)
+#define HSW_MBVID2_NOA5 _MMIO(0x9E94)
+#define HSW_MBVID2_NOA6 _MMIO(0x9E98)
+#define HSW_MBVID2_NOA7 _MMIO(0x9E9C)
+#define HSW_MBVID2_NOA8 _MMIO(0x9EA0)
+#define HSW_MBVID2_NOA9 _MMIO(0x9EA4)
+
+#define HSW_MBVID2_MISR0 _MMIO(0x9EC0)
/* NOA (Gen8+) */
#define NOA_CONFIG(i) _MMIO(0x0D0C + (i) * 4)
@@ -2329,6 +2364,8 @@ enum i915_power_well_id {
#define ARB_MODE_SWIZZLE_BDW (1<<1)
#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100*(engine)->hw_id)
+#define GEN8_RING_FAULT_REG _MMIO(0x4094)
+#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
#define RING_FAULT_GTTSEL_MASK (1<<11)
#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
@@ -3241,6 +3278,7 @@ enum i915_power_well_id {
# define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */
# define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */
# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */
+# define PNV_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 24) /* pnv */
# define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */
# define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */
# define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */
@@ -3395,6 +3433,7 @@ enum i915_power_well_id {
#define ELK_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x48)
#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16)
#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4)
+#define G4X_STOLEN_RESERVED_ENABLE (1 << 0)
/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
#define DCLK _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5e04)
@@ -3816,9 +3855,13 @@ enum {
* GEN9 clock gating regs
*/
#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
+#define DARBF_GATING_DIS (1 << 27)
#define PWM2_GATING_DIS (1 << 14)
#define PWM1_GATING_DIS (1 << 13)
+#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
+#define BXT_GMBUS_GATING_DIS (1 << 14)
+
#define _CLKGATE_DIS_PSL_A 0x46520
#define _CLKGATE_DIS_PSL_B 0x46524
#define _CLKGATE_DIS_PSL_C 0x46528
@@ -3834,6 +3877,10 @@ enum {
*/
#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
#define SARBUNIT_CLKGATE_DIS (1 << 5)
+#define RCCUNIT_CLKGATE_DIS (1 << 7)
+
+#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
+#define VFUNIT_CLKGATE_DIS (1 << 20)
/*
* Display engine regs
@@ -6260,7 +6307,7 @@ enum {
#define _PLANE_CTL_2_A 0x70280
#define _PLANE_CTL_3_A 0x70380
#define PLANE_CTL_ENABLE (1 << 31)
-#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30)
+#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-GLK */
#define PLANE_CTL_FORMAT_MASK (0xf << 24)
#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24)
#define PLANE_CTL_FORMAT_NV12 ( 1 << 24)
@@ -6270,7 +6317,7 @@ enum {
#define PLANE_CTL_FORMAT_AYUV ( 8 << 24)
#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24)
#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24)
-#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23)
+#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */
#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21)
#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21)
@@ -6283,13 +6330,14 @@ enum {
#define PLANE_CTL_YUV422_VYUY ( 3 << 16)
#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
-#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13)
+#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */
#define PLANE_CTL_TILED_MASK (0x7 << 10)
#define PLANE_CTL_TILED_LINEAR ( 0 << 10)
#define PLANE_CTL_TILED_X ( 1 << 10)
#define PLANE_CTL_TILED_Y ( 4 << 10)
#define PLANE_CTL_TILED_YF ( 5 << 10)
-#define PLANE_CTL_ALPHA_MASK (0x3 << 4)
+#define PLANE_CTL_FLIP_HORIZONTAL ( 1 << 8)
+#define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */
#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
@@ -6329,6 +6377,10 @@ enum {
#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30)
#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23)
#define PLANE_COLOR_PLANE_GAMMA_DISABLE (1 << 13)
+#define PLANE_COLOR_ALPHA_MASK (0x3 << 4)
+#define PLANE_COLOR_ALPHA_DISABLE (0 << 4)
+#define PLANE_COLOR_ALPHA_SW_PREMULTIPLY (2 << 4)
+#define PLANE_COLOR_ALPHA_HW_PREMULTIPLY (3 << 4)
#define _PLANE_BUF_CFG_1_A 0x7027c
#define _PLANE_BUF_CFG_2_A 0x7037c
#define _PLANE_NV12_BUF_CFG_1_A 0x70278
@@ -7511,6 +7563,7 @@ enum {
#define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
#define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020)
+#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1<<31)
#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
@@ -7774,8 +7827,9 @@ enum {
#define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0x0D88)
#define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0x0D84)
#define FORCEWAKE_ACK_BLITTER_GEN9 _MMIO(0x130044)
-#define FORCEWAKE_KERNEL 0x1
-#define FORCEWAKE_USER 0x2
+#define FORCEWAKE_KERNEL BIT(0)
+#define FORCEWAKE_USER BIT(1)
+#define FORCEWAKE_KERNEL_FALLBACK BIT(15)
#define FORCEWAKE_MT_ACK _MMIO(0x130040)
#define ECOBUS _MMIO(0xa180)
#define FORCEWAKE_MT_ENABLE (1<<5)
@@ -7905,6 +7959,7 @@ enum {
#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098)
#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C)
#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0)
+#define GEN10_MEDIA_WAKE_RATE_LIMIT _MMIO(0xA0A0)
#define GEN6_RC_EVALUATION_INTERVAL _MMIO(0xA0A8)
#define GEN6_RC_IDLE_HYSTERSIS _MMIO(0xA0AC)
#define GEN6_RC_SLEEP _MMIO(0xA0B0)
@@ -8036,11 +8091,18 @@ enum {
#define CHV_EU311_PG_ENABLE (1<<1)
#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice)*0x4)
+#define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \
+ ((slice) % 3) * 0x4)
#define GEN9_PGCTL_SLICE_ACK (1 << 0)
#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice)*2))
+#define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F)
#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice)*0x8)
+#define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \
+ ((slice) % 3) * 0x8)
#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice)*0x8)
+#define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \
+ ((slice) % 3) * 0x8)
#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
@@ -8092,6 +8154,7 @@ enum {
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
#define THROTTLE_12_5 (7<<2)
+#define DISABLE_EARLY_EOT (1<<1)
#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
@@ -8838,6 +8901,12 @@ enum skl_power_gate {
#define ILK_TIMESTAMP_HI _MMIO(0x70070)
#define IVB_TIMESTAMP_CTR _MMIO(0x44070)
+#define GEN9_TIMESTAMP_OVERRIDE _MMIO(0x44074)
+#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT 0
+#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK 0x3ff
+#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT 12
+#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK (0xf << 12)
+
#define _PIPE_FRMTMSTMP_A 0x70048
#define PIPE_FRMTMSTMP(pipe) \
_MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A)
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index 78e1a1b168ff..9766e806dce6 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -99,6 +99,6 @@ __printf(2, 3)
bool __igt_timeout(unsigned long timeout, const char *fmt, ...);
#define igt_timeout(t, fmt, ...) \
- __igt_timeout((t), KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+ __igt_timeout((t), KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#endif /* !__I915_SELFTEST_H__ */
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index ac236b88c99c..3669f5eeb91e 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -303,6 +303,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
int pending;
debug_fence_assert(fence);
+ might_sleep_if(gfpflags_allow_blocking(gfp));
if (i915_sw_fence_done(signaler))
return 0;
@@ -419,6 +420,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
int ret;
debug_fence_assert(fence);
+ might_sleep_if(gfpflags_allow_blocking(gfp));
if (dma_fence_is_signaled(dma))
return 0;
@@ -465,6 +467,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
int ret = 0, pending;
debug_fence_assert(fence);
+ might_sleep_if(gfpflags_allow_blocking(gfp));
if (write) {
struct dma_fence **shared;
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index fb46ce3bd5f2..b33d2158c234 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -42,14 +42,30 @@ static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
static u32 calc_residency(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
- return DIV_ROUND_CLOSEST_ULL(intel_rc6_residency_us(dev_priv, reg),
- 1000);
+ u64 res;
+
+ intel_runtime_pm_get(dev_priv);
+ res = intel_rc6_residency_us(dev_priv, reg);
+ intel_runtime_pm_put(dev_priv);
+
+ return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%x\n", intel_rc6_enabled());
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ unsigned int mask;
+
+ mask = 0;
+ if (HAS_RC6(dev_priv))
+ mask |= BIT(0);
+ if (HAS_RC6p(dev_priv))
+ mask |= BIT(1);
+ if (HAS_RC6pp(dev_priv))
+ mask |= BIT(2);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", mask);
}
static ssize_t
@@ -252,14 +268,9 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else {
- u32 rpstat = I915_READ(GEN6_RPSTAT1);
- if (INTEL_GEN(dev_priv) >= 9)
- ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
- else
- ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
- ret = intel_gpu_freq(dev_priv, ret);
+ ret = intel_gpu_freq(dev_priv,
+ intel_get_cagf(dev_priv,
+ I915_READ(GEN6_RPSTAT1)));
}
mutex_unlock(&dev_priv->pcu_lock);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 4e76768ffa95..e1169c02eb2b 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -616,6 +616,7 @@ TRACE_EVENT(i915_gem_request_queue,
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, hw_id)
__field(u32, ring)
__field(u32, ctx)
__field(u32, seqno)
@@ -624,15 +625,16 @@ TRACE_EVENT(i915_gem_request_queue,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
+ __entry->hw_id = req->ctx->hw_id;
__entry->ring = req->engine->id;
__entry->ctx = req->fence.context;
__entry->seqno = req->fence.seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
- __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
- __entry->flags)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
+ __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
+ __entry->seqno, __entry->flags)
);
DECLARE_EVENT_CLASS(i915_gem_request,
@@ -641,23 +643,25 @@ DECLARE_EVENT_CLASS(i915_gem_request,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, ctx)
+ __field(u32, hw_id)
__field(u32, ring)
+ __field(u32, ctx)
__field(u32, seqno)
__field(u32, global)
),
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
+ __entry->hw_id = req->ctx->hw_id;
__entry->ring = req->engine->id;
__entry->ctx = req->fence.context;
__entry->seqno = req->fence.seqno;
__entry->global = req->global_seqno;
),
- TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
- __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
- __entry->global)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
+ __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
+ __entry->seqno, __entry->global)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
@@ -683,15 +687,17 @@ DECLARE_EVENT_CLASS(i915_gem_request_hw,
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, hw_id)
__field(u32, ring)
+ __field(u32, ctx)
__field(u32, seqno)
__field(u32, global_seqno)
- __field(u32, ctx)
__field(u32, port)
),
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
+ __entry->hw_id = req->ctx->hw_id;
__entry->ring = req->engine->id;
__entry->ctx = req->fence.context;
__entry->seqno = req->fence.seqno;
@@ -699,10 +705,10 @@ DECLARE_EVENT_CLASS(i915_gem_request_hw,
__entry->port = port;
),
- TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
- __entry->dev, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->global_seqno,
- __entry->port)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
+ __entry->dev, __entry->hw_id, __entry->ring,
+ __entry->ctx, __entry->seqno,
+ __entry->global_seqno, __entry->port)
);
DEFINE_EVENT(i915_gem_request_hw, i915_gem_request_in,
@@ -772,6 +778,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, hw_id)
__field(u32, ring)
__field(u32, ctx)
__field(u32, seqno)
@@ -787,6 +794,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
*/
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
+ __entry->hw_id = req->ctx->hw_id;
__entry->ring = req->engine->id;
__entry->ctx = req->fence.context;
__entry->seqno = req->fence.seqno;
@@ -794,10 +802,10 @@ TRACE_EVENT(i915_gem_request_wait_begin,
__entry->flags = flags;
),
- TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
- __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
- __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
- __entry->flags)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
+ __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
+ __entry->seqno, __entry->global,
+ !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
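
The tracepoint changes above all add a hw_id field and extend the matching TP_printk() format, keeping the stored record and its rendering in lockstep. A trivial userspace analogue, with invented values, showing the capture/format pairing:

#include <stdint.h>
#include <stdio.h>

struct req_trace {
	uint32_t dev, hw_id, ring, ctx, seqno, flags;
};

static void emit(const struct req_trace *e)
{
	/* every captured field appears, in order, in the format */
	printf("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x\n",
	       e->dev, e->hw_id, e->ring, e->ctx, e->seqno, e->flags);
}

int main(void)
{
	struct req_trace e = { 0, 7, 1, 42, 1000, 0x1 };

	emit(&e);
	return 0;
}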
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index af3d7cc53fa1..51dbfe5bb418 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -83,8 +83,11 @@
(typeof(ptr))(__v & -BIT(n)); \
})
-#define ptr_pack_bits(ptr, bits, n) \
- ((typeof(ptr))((unsigned long)(ptr) | (bits)))
+#define ptr_pack_bits(ptr, bits, n) ({ \
+ unsigned long __bits = (bits); \
+ GEM_BUG_ON(__bits & -BIT(n)); \
+ ((typeof(ptr))((unsigned long)(ptr) | __bits)); \
+})
#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
@@ -137,4 +140,19 @@ static inline void drain_delayed_work(struct delayed_work *dw)
} while (delayed_work_pending(dw));
}
+static inline const char *yesno(bool v)
+{
+ return v ? "yes" : "no";
+}
+
+static inline const char *onoff(bool v)
+{
+ return v ? "on" : "off";
+}
+
+static inline const char *enableddisabled(bool v)
+{
+ return v ? "enabled" : "disabled";
+}
+
#endif /* !__I915_UTILS_H */
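
The hardened ptr_pack_bits() above now asserts that the packed bits actually fit below the pointer's alignment instead of silently corrupting the address. A self-contained sketch of the low-bit packing technique, with hypothetical pack()/unpack() helpers (the asserts mirror the new GEM_BUG_ON()):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BIT(n) (1ul << (n))

static void *pack(void *ptr, unsigned long bits, unsigned int n)
{
	assert(((uintptr_t)ptr & (BIT(n) - 1)) == 0);	/* alignment holds */
	assert((bits & ~(BIT(n) - 1)) == 0);		/* bits fit below it */
	return (void *)((uintptr_t)ptr | bits);
}

static void *unpack(void *packed, unsigned int n, unsigned long *bits)
{
	*bits = (uintptr_t)packed & (BIT(n) - 1);
	return (void *)((uintptr_t)packed & ~(BIT(n) - 1));
}

int main(void)
{
	void *obj = aligned_alloc(16, 64);	/* 4 free low bits */
	unsigned long bits;
	void *orig = unpack(pack(obj, 0x3, 4), 4, &bits);

	assert(orig == obj && bits == 0x3);
	printf("ok\n");
	free(obj);
	return 0;
}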
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index fbfab2f33023..e0e7c48f45dc 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -142,6 +142,12 @@ vma_create(struct drm_i915_gem_object *obj,
i915_gem_object_get_stride(obj));
GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
+ /*
+ * We put the GGTT vma at the start of the vma-list, followed
+ * by the ppGTT vma. This allows us to break early when
+ * iterating over only the GGTT vma for an object, see
+ * for_each_ggtt_vma().
+ */
vma->flags |= I915_VMA_GGTT;
list_add(&vma->obj_link, &obj->vma_list);
} else {
@@ -305,7 +311,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
ptr = vma->iomap;
if (ptr == NULL) {
- ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
+ ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
vma->node.start,
vma->node.size);
if (ptr == NULL) {
@@ -322,6 +328,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
if (err)
goto err_unpin;
+ i915_vma_set_ggtt_write(vma);
return ptr;
err_unpin:
@@ -330,12 +337,24 @@ err:
return IO_ERR_PTR(err);
}
+void i915_vma_flush_writes(struct i915_vma *vma)
+{
+ if (!i915_vma_has_ggtt_write(vma))
+ return;
+
+ i915_gem_flush_ggtt_writes(vma->vm->i915);
+
+ i915_vma_unset_ggtt_write(vma);
+}
+
void i915_vma_unpin_iomap(struct i915_vma *vma)
{
lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
GEM_BUG_ON(vma->iomap == NULL);
+ i915_vma_flush_writes(vma);
+
i915_vma_unpin_fence(vma);
i915_vma_unpin(vma);
}
@@ -466,6 +485,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
u64 start, end;
int ret;
+ GEM_BUG_ON(i915_vma_is_closed(vma));
GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@@ -640,15 +660,17 @@ int __i915_vma_do_pin(struct i915_vma *vma,
if (ret)
goto err_unpin;
}
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
if (ret)
goto err_remove;
+ GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
+
if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
__i915_vma_set_map_and_fenceable(vma);
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
return 0;
@@ -656,6 +678,7 @@ err_remove:
if ((bound & I915_VMA_BIND_MASK) == 0) {
i915_vma_remove(vma);
GEM_BUG_ON(vma->pages);
+ GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
}
err_unpin:
__i915_vma_unpin(vma);
@@ -675,7 +698,9 @@ static void i915_vma_destroy(struct i915_vma *vma)
GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
+ list_del(&vma->obj_link);
list_del(&vma->vm_link);
+
if (!i915_vma_is_ggtt(vma))
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
@@ -687,7 +712,6 @@ void i915_vma_close(struct i915_vma *vma)
GEM_BUG_ON(i915_vma_is_closed(vma));
vma->flags |= I915_VMA_CLOSED;
- list_del(&vma->obj_link);
rb_erase(&vma->obj_node, &vma->obj->vma_tree);
if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
@@ -740,6 +764,7 @@ int i915_vma_unbind(struct i915_vma *vma)
/* First wait upon any activity as retiring the request may
* have side-effects such as unpinning or even unbinding this vma.
*/
+ might_sleep();
active = i915_vma_get_active(vma);
if (active) {
int idx;
@@ -786,6 +811,15 @@ int i915_vma_unbind(struct i915_vma *vma)
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
if (i915_vma_is_map_and_fenceable(vma)) {
+ /*
+ * Check that we have flushed all writes through the GGTT
+ * before the unbind, other due to non-strict nature of those
+ * indirect writes they may end up referencing the GGTT PTE
+ * after the unbind.
+ */
+ i915_vma_flush_writes(vma);
+ GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
+
/* release the fence reg _after_ flushing */
ret = i915_vma_put_fence(vma);
if (ret)
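
The i915_vma_flush_writes() plumbing above implements a classic dirty-flag pattern: mapping for write marks the VMA, and both unmap and unbind flush before the flag is cleared, so no posted write can outlive the PTE it travels through. A minimal sketch under those assumptions, with the hardware flush reduced to a stub and all names illustrative:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct mapping {
	bool ggtt_write;	/* unflushed writes may be pending */
};

static void flush_writes(struct mapping *m)
{
	if (!m->ggtt_write)
		return;
	/* real code would issue the hardware flush here */
	m->ggtt_write = false;
}

static void map_for_write(struct mapping *m) { m->ggtt_write = true; }

static void unbind(struct mapping *m)
{
	flush_writes(m);
	assert(!m->ggtt_write);	/* mirrors the GEM_BUG_ON() above */
}

int main(void)
{
	struct mapping m = { false };

	map_for_write(&m);
	unbind(&m);
	printf("ok\n");
	return 0;
}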
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 1e2bc9b3c3ac..fd5b84904f7c 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -90,6 +90,7 @@ struct i915_vma {
#define I915_VMA_CLOSED BIT(10)
#define I915_VMA_USERFAULT_BIT 11
#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
+#define I915_VMA_GGTT_WRITE BIT(12)
unsigned int active;
struct i915_gem_active last_read[I915_NUM_ENGINES];
@@ -138,6 +139,24 @@ static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
return vma->flags & I915_VMA_GGTT;
}
+static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_GGTT_WRITE;
+}
+
+static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ vma->flags |= I915_VMA_GGTT_WRITE;
+}
+
+static inline void i915_vma_unset_ggtt_write(struct i915_vma *vma)
+{
+ vma->flags &= ~I915_VMA_GGTT_WRITE;
+}
+
+void i915_vma_flush_writes(struct i915_vma *vma);
+
static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
return vma->flags & I915_VMA_CAN_FENCE;
@@ -389,5 +408,19 @@ i915_vma_unpin_fence(struct i915_vma *vma)
__i915_vma_unpin_fence(vma);
}
-#endif
+#define for_each_until(cond) if (cond) break; else
+
+/**
+ * for_each_ggtt_vma - Iterate over the GGTT VMA belonging to an object.
+ * @V: the #i915_vma iterator
+ * @OBJ: the #drm_i915_gem_object
+ *
+ * GGTT VMA are placed at the beginning of the object's vma_list, see
+ * vma_create(), so we can stop our walk as soon as we see a ppGTT VMA,
+ * or when the list is empty, of course.
+ */
+#define for_each_ggtt_vma(V, OBJ) \
+ list_for_each_entry(V, &(OBJ)->vma_list, obj_link) \
+ for_each_until(!i915_vma_is_ggtt(V))
+#endif
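
for_each_until() is the interesting trick here: hiding "if (cond) break; else" inside the iterator lets the caller write an ordinary loop body while the walk stops at the first element failing the predicate. A runnable userspace sketch with a hand-rolled list in place of list_for_each_entry():

#include <stdbool.h>
#include <stdio.h>

#define for_each_until(cond) if (cond) break; else

struct vma {
	bool is_ggtt;
	struct vma *next;
};

#define for_each_ggtt(v, head) \
	for (v = (head); v; v = v->next) \
		for_each_until(!v->is_ggtt)

int main(void)
{
	struct vma c = { false, NULL };	/* first ppGTT entry ends the walk */
	struct vma b = { true, &c };
	struct vma a = { true, &b };
	struct vma *v;
	int n = 0;

	for_each_ggtt(v, &a)
		n++;

	printf("%d\n", n);	/* prints "2": c is never visited */
	return 0;
}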
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 0ddba16fde1b..f1502a0188eb 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -102,13 +102,13 @@ static const struct dp_aud_n_m dp_aud_n_m[] = {
};
static const struct dp_aud_n_m *
-audio_config_dp_get_n_m(struct intel_crtc *intel_crtc, int rate)
+audio_config_dp_get_n_m(const struct intel_crtc_state *crtc_state, int rate)
{
int i;
for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
if (rate == dp_aud_n_m[i].sample_rate &&
- intel_crtc->config->port_clock == dp_aud_n_m[i].clock)
+ crtc_state->port_clock == dp_aud_n_m[i].clock)
return &dp_aud_n_m[i];
}
@@ -157,8 +157,10 @@ static const struct {
};
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
-static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted_mode)
+static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
int i;
for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
@@ -179,9 +181,11 @@ static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted
return hdmi_audio_clock[i].config;
}
-static int audio_config_hdmi_get_n(const struct drm_display_mode *adjusted_mode,
+static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
int rate)
{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
int i;
for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) {
@@ -220,7 +224,9 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
return true;
}
-static void g4x_audio_codec_disable(struct intel_encoder *encoder)
+static void g4x_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
uint32_t eldv, tmp;
@@ -239,11 +245,12 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder)
I915_WRITE(G4X_AUD_CNTL_ST, tmp);
}
-static void g4x_audio_codec_enable(struct drm_connector *connector,
- struct intel_encoder *encoder,
- const struct drm_display_mode *adjusted_mode)
+static void g4x_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_connector *connector = conn_state->connector;
uint8_t *eld = connector->eld;
uint32_t eldv;
uint32_t tmp;
@@ -279,16 +286,20 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
}
static void
-hsw_dp_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
- const struct drm_display_mode *adjusted_mode)
+hsw_dp_audio_config_update(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- int rate = acomp ? acomp->aud_sample_rate[port] : 0;
- const struct dp_aud_n_m *nm = audio_config_dp_get_n_m(intel_crtc, rate);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum port port = encoder->port;
+ enum pipe pipe = crtc->pipe;
+ const struct dp_aud_n_m *nm;
+ int rate;
u32 tmp;
+ rate = acomp ? acomp->aud_sample_rate[port] : 0;
+ nm = audio_config_dp_get_n_m(crtc_state, rate);
if (nm)
DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n);
else
@@ -323,23 +334,26 @@ hsw_dp_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
}
static void
-hsw_hdmi_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
- const struct drm_display_mode *adjusted_mode)
+hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- int rate = acomp ? acomp->aud_sample_rate[port] : 0;
- enum pipe pipe = intel_crtc->pipe;
- int n;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum port port = encoder->port;
+ enum pipe pipe = crtc->pipe;
+ int n, rate;
u32 tmp;
+ rate = acomp ? acomp->aud_sample_rate[port] : 0;
+
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
+ tmp |= audio_config_hdmi_pixel_clock(crtc_state);
- n = audio_config_hdmi_get_n(adjusted_mode, rate);
+ n = audio_config_hdmi_get_n(crtc_state, rate);
if (n != 0) {
DRM_DEBUG_KMS("using N %d\n", n);
@@ -363,20 +377,22 @@ hsw_hdmi_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
}
static void
-hsw_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
- const struct drm_display_mode *adjusted_mode)
+hsw_audio_config_update(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- hsw_dp_audio_config_update(intel_crtc, port, adjusted_mode);
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ hsw_dp_audio_config_update(encoder, crtc_state);
else
- hsw_hdmi_audio_config_update(intel_crtc, port, adjusted_mode);
+ hsw_hdmi_audio_config_update(encoder, crtc_state);
}
-static void hsw_audio_codec_disable(struct intel_encoder *encoder)
+static void hsw_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ enum pipe pipe = crtc->pipe;
uint32_t tmp;
DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
@@ -389,7 +405,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
+ if (intel_crtc_has_dp_encoder(old_crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
@@ -402,14 +418,14 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->av_mutex);
}
-static void hsw_audio_codec_enable(struct drm_connector *connector,
- struct intel_encoder *intel_encoder,
- const struct drm_display_mode *adjusted_mode)
+static void hsw_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
- enum pipe pipe = intel_crtc->pipe;
- enum port port = intel_encoder->port;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_connector *connector = conn_state->connector;
+ enum pipe pipe = crtc->pipe;
const uint8_t *eld = connector->eld;
uint32_t tmp;
int len, i;
@@ -448,17 +464,19 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
/* Enable timestamps */
- hsw_audio_config_update(intel_crtc, port, adjusted_mode);
+ hsw_audio_config_update(encoder, crtc_state);
mutex_unlock(&dev_priv->av_mutex);
}
-static void ilk_audio_codec_disable(struct intel_encoder *intel_encoder)
+static void ilk_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
- enum pipe pipe = intel_crtc->pipe;
- enum port port = intel_encoder->port;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ enum pipe pipe = crtc->pipe;
+ enum port port = encoder->port;
uint32_t tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
@@ -485,7 +503,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *intel_encoder)
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
+ if (intel_crtc_has_dp_encoder(old_crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(aud_config, tmp);
@@ -497,14 +515,15 @@ static void ilk_audio_codec_disable(struct intel_encoder *intel_encoder)
I915_WRITE(aud_cntrl_st2, tmp);
}
-static void ilk_audio_codec_enable(struct drm_connector *connector,
- struct intel_encoder *intel_encoder,
- const struct drm_display_mode *adjusted_mode)
+static void ilk_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
- enum pipe pipe = intel_crtc->pipe;
- enum port port = intel_encoder->port;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_connector *connector = conn_state->connector;
+ enum pipe pipe = crtc->pipe;
+ enum port port = encoder->port;
uint8_t *eld = connector->eld;
uint32_t tmp, eldv;
int len, i;
@@ -568,36 +587,36 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
+ if (intel_crtc_has_dp_encoder(crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
else
- tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
+ tmp |= audio_config_hdmi_pixel_clock(crtc_state);
I915_WRITE(aud_config, tmp);
}
/**
* intel_audio_codec_enable - Enable the audio codec for HD audio
- * @intel_encoder: encoder on which to enable audio
+ * @encoder: encoder on which to enable audio
* @crtc_state: pointer to the current crtc state.
* @conn_state: pointer to the current connector state.
*
* The enable sequences may only be performed after enabling the transcoder and
* port, and after completed link training.
*/
-void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
+void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
- struct drm_connector *connector;
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- enum port port = intel_encoder->port;
- enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_connector *connector = conn_state->connector;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ enum port port = encoder->port;
+ enum pipe pipe = crtc->pipe;
- connector = conn_state->connector;
- if (!connector || !connector->eld[0])
+ if (!connector->eld[0])
return;
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -609,19 +628,20 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
if (dev_priv->display.audio_codec_enable)
- dev_priv->display.audio_codec_enable(connector, intel_encoder,
- adjusted_mode);
+ dev_priv->display.audio_codec_enable(encoder,
+ crtc_state,
+ conn_state);
mutex_lock(&dev_priv->av_mutex);
- intel_encoder->audio_connector = connector;
+ encoder->audio_connector = connector;
/* referred in audio callbacks */
- dev_priv->av_enc_map[pipe] = intel_encoder;
+ dev_priv->av_enc_map[pipe] = encoder;
mutex_unlock(&dev_priv->av_mutex);
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
/* audio drivers expect pipe = -1 to indicate Non-MST cases */
- if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
pipe = -1;
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
(int) port, (int) pipe);
@@ -629,36 +649,41 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
intel_lpe_audio_notify(dev_priv, pipe, port, connector->eld,
crtc_state->port_clock,
- intel_encoder->type == INTEL_OUTPUT_DP);
+ intel_crtc_has_dp_encoder(crtc_state));
}
/**
* intel_audio_codec_disable - Disable the audio codec for HD audio
- * @intel_encoder: encoder on which to disable audio
+ * @encoder: encoder on which to disable audio
+ * @old_crtc_state: pointer to the old crtc state.
+ * @old_conn_state: pointer to the old connector state.
*
* The disable sequences must be performed before disabling the transcoder or
* port.
*/
-void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
+void intel_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- enum port port = intel_encoder->port;
- struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
if (dev_priv->display.audio_codec_disable)
- dev_priv->display.audio_codec_disable(intel_encoder);
+ dev_priv->display.audio_codec_disable(encoder,
+ old_crtc_state,
+ old_conn_state);
mutex_lock(&dev_priv->av_mutex);
- intel_encoder->audio_connector = NULL;
+ encoder->audio_connector = NULL;
dev_priv->av_enc_map[pipe] = NULL;
mutex_unlock(&dev_priv->av_mutex);
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
/* audio drivers expect pipe = -1 to indicate Non-MST cases */
- if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
+ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
pipe = -1;
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
(int) port, (int) pipe);
@@ -793,10 +818,9 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
int pipe, int rate)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
- struct intel_encoder *intel_encoder;
- struct intel_crtc *crtc;
- struct drm_display_mode *adjusted_mode;
struct i915_audio_component *acomp = dev_priv->audio_component;
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
int err = 0;
if (!HAS_DDI(dev_priv))
@@ -806,23 +830,19 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
mutex_lock(&dev_priv->av_mutex);
/* 1. get the pipe */
- intel_encoder = get_saved_enc(dev_priv, port, pipe);
- if (!intel_encoder || !intel_encoder->base.crtc) {
+ encoder = get_saved_enc(dev_priv, port, pipe);
+ if (!encoder || !encoder->base.crtc) {
DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
err = -ENODEV;
goto unlock;
}
- /* pipe passed from the audio driver will be -1 for Non-MST case */
- crtc = to_intel_crtc(intel_encoder->base.crtc);
- pipe = crtc->pipe;
-
- adjusted_mode = &crtc->config->base.adjusted_mode;
+ crtc = to_intel_crtc(encoder->base.crtc);
/* port must be valid now, otherwise the pipe will be invalid */
acomp->aud_sample_rate[port] = rate;
- hsw_audio_config_update(crtc, port, adjusted_mode);
+ hsw_audio_config_update(encoder, crtc->config);
unlock:
mutex_unlock(&dev_priv->av_mutex);
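
audio_config_dp_get_n_m() now keys its lookup off the atomic crtc state's port_clock rather than the legacy crtc->config pointer. A sketch of that (sample rate, port clock) table lookup; the N/M values below are invented for illustration, not real DP audio dividers:

#include <stddef.h>
#include <stdio.h>

struct aud_n_m {
	int sample_rate;	/* Hz */
	int clock;		/* port clock, kHz */
	int n, m;
};

static const struct aud_n_m table[] = {
	{ 48000, 162000, 1024, 10125 },	/* illustrative entries */
	{ 44100, 270000,  784, 15625 },
};

static const struct aud_n_m *lookup(int rate, int port_clock)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].sample_rate == rate &&
		    table[i].clock == port_clock)
			return &table[i];
	return NULL;	/* fall back to automatic N/M */
}

int main(void)
{
	const struct aud_n_m *nm = lookup(48000, 162000);

	if (nm)
		printf("N=%d M=%d\n", nm->n, nm->m);
	else
		printf("auto\n");
	return 0;
}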
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index fd23023df7c1..51108ffc28d1 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1234,6 +1234,30 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
info->hdmi_level_shift = hdmi_level_shift;
}
+ if (bdb_version >= 204) {
+ int max_tmds_clock;
+
+ switch (child->hdmi_max_data_rate) {
+ default:
+ MISSING_CASE(child->hdmi_max_data_rate);
+ /* fall through */
+ case HDMI_MAX_DATA_RATE_PLATFORM:
+ max_tmds_clock = 0;
+ break;
+ case HDMI_MAX_DATA_RATE_297:
+ max_tmds_clock = 297000;
+ break;
+ case HDMI_MAX_DATA_RATE_165:
+ max_tmds_clock = 165000;
+ break;
+ }
+
+ if (max_tmds_clock)
+ DRM_DEBUG_KMS("VBT HDMI max TMDS clock for port %c: %d kHz\n",
+ port_name(port), max_tmds_clock);
+ info->max_tmds_clock = max_tmds_clock;
+ }
+
/* Parse the I_boost config for SKL and above */
if (bdb_version >= 196 && child->iboost) {
info->dp_boost_level = translate_iboost(child->dp_iboost_level);
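
The new bdb_version >= 204 block maps a VBT enum to a TMDS clock limit in kHz, logging unknown values and falling through to the platform default. A compact sketch of that switch shape, with placeholder enum values rather than the real VBT encoding:

#include <stdio.h>

enum hdmi_max_rate { RATE_PLATFORM, RATE_297, RATE_165 };

static int max_tmds_clock(enum hdmi_max_rate rate)
{
	switch (rate) {
	default:
		fprintf(stderr, "missing case %d\n", rate);
		/* fall through */
	case RATE_PLATFORM:
		return 0;	/* no VBT limit: use the platform default */
	case RATE_297:
		return 297000;
	case RATE_165:
		return 165000;
	}
}

int main(void)
{
	printf("%d kHz\n", max_tmds_clock(RATE_165));
	return 0;
}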
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index bcbc7abe6693..58c624f982d9 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -27,6 +27,12 @@
#include "i915_drv.h"
+#ifdef CONFIG_SMP
+#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_cpu)
+#else
+#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL)
+#endif
+
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
struct intel_wait *wait;
@@ -36,8 +42,20 @@ static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
wait = b->irq_wait;
if (wait) {
+ /*
+ * N.B. Since task_asleep() and ttwu (try_to_wake_up) are not atomic, the
+ * waiter may actually go to sleep after the check, causing
+ * us to suppress a valid wakeup. We prefer to reduce the
+ * number of false positive missed_breadcrumb() warnings
+ * at the expense of a few false negatives, as it is easy
+ * to trigger a false positive under heavy load. Enough
+ * signal should remain from genuine missed_breadcrumb()
+ * for us to detect in CI.
+ */
+ bool was_asleep = task_asleep(wait->tsk);
+
result = ENGINE_WAKEUP_WAITER;
- if (wake_up_process(wait->tsk))
+ if (wake_up_process(wait->tsk) && was_asleep)
result |= ENGINE_WAKEUP_ASLEEP;
}
@@ -64,20 +82,21 @@ static unsigned long wait_timeout(void)
static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
- DRM_DEBUG_DRIVER("%s missed breadcrumb at %pS, irq posted? %s, current seqno=%x, last=%x\n",
- engine->name, __builtin_return_address(0),
- yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
- &engine->irq_posted)),
- intel_engine_get_seqno(engine),
- intel_engine_last_submit(engine));
+ if (drm_debug & DRM_UT_DRIVER) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ intel_engine_dump(engine, &p,
+ "%s missed breadcrumb at %pS\n",
+ engine->name, __builtin_return_address(0));
+ }
set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
static void intel_breadcrumbs_hangcheck(struct timer_list *t)
{
- struct intel_engine_cs *engine = from_timer(engine, t,
- breadcrumbs.hangcheck);
+ struct intel_engine_cs *engine =
+ from_timer(engine, t, breadcrumbs.hangcheck);
struct intel_breadcrumbs *b = &engine->breadcrumbs;
if (!b->irq_armed)
@@ -103,7 +122,7 @@ static void intel_breadcrumbs_hangcheck(struct timer_list *t)
*/
if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
missed_breadcrumb(engine);
- mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+ mod_timer(&b->fake_irq, jiffies + 1);
} else {
mod_timer(&b->hangcheck, wait_timeout());
}
@@ -123,7 +142,7 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t)
*/
spin_lock_irq(&b->irq_lock);
- if (!__intel_breadcrumbs_wakeup(b))
+ if (b->irq_armed && !__intel_breadcrumbs_wakeup(b))
__intel_engine_disarm_breadcrumbs(engine);
spin_unlock_irq(&b->irq_lock);
if (!b->irq_armed)
@@ -145,6 +164,14 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t)
static void irq_enable(struct intel_engine_cs *engine)
{
+ /*
+ * FIXME: Ideally we want this on the API boundary, but for the
+ * sake of testing with mock breadcrumbs (no HW so unable to
+ * enable irqs) we place it deep within the bowels, at the point
+ * of no return.
+ */
+ GEM_BUG_ON(!intel_irqs_enabled(engine->i915));
+
/* Enabling the IRQ may miss the generation of the interrupt, but
* we still need to force the barrier before reading the seqno,
* just in case.
@@ -171,39 +198,64 @@ void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
lockdep_assert_held(&b->irq_lock);
GEM_BUG_ON(b->irq_wait);
+ GEM_BUG_ON(!b->irq_armed);
- if (b->irq_enabled) {
+ GEM_BUG_ON(!b->irq_enabled);
+ if (!--b->irq_enabled)
irq_disable(engine);
- b->irq_enabled = false;
- }
b->irq_armed = false;
}
+void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ spin_lock_irq(&b->irq_lock);
+ if (!b->irq_enabled++)
+ irq_enable(engine);
+ GEM_BUG_ON(!b->irq_enabled); /* no overflow! */
+ spin_unlock_irq(&b->irq_lock);
+}
+
+void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ spin_lock_irq(&b->irq_lock);
+ GEM_BUG_ON(!b->irq_enabled); /* no underflow! */
+ if (!--b->irq_enabled)
+ irq_disable(engine);
+ spin_unlock_irq(&b->irq_lock);
+}
+
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct intel_wait *wait, *n, *first;
+ struct intel_wait *wait, *n;
if (!b->irq_armed)
goto wakeup_signaler;
- /* We only disarm the irq when we are idle (all requests completed),
+ /*
+ * We only disarm the irq when we are idle (all requests completed),
* so if the bottom-half remains asleep, it missed the request
* completion.
*/
+ if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP)
+ missed_breadcrumb(engine);
spin_lock_irq(&b->rb_lock);
spin_lock(&b->irq_lock);
- first = fetch_and_zero(&b->irq_wait);
- __intel_engine_disarm_breadcrumbs(engine);
+ b->irq_wait = NULL;
+ if (b->irq_armed)
+ __intel_engine_disarm_breadcrumbs(engine);
spin_unlock(&b->irq_lock);
rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
RB_CLEAR_NODE(&wait->node);
- if (wake_up_process(wait->tsk) && wait == first)
- missed_breadcrumb(engine);
+ wake_up_process(wait->tsk);
}
b->waiters = RB_ROOT;
@@ -249,6 +301,7 @@ static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
struct intel_engine_cs *engine =
container_of(b, struct intel_engine_cs, breadcrumbs);
struct drm_i915_private *i915 = engine->i915;
+ bool enabled;
lockdep_assert_held(&b->irq_lock);
if (b->irq_armed)
@@ -260,7 +313,6 @@ static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
* the irq.
*/
b->irq_armed = true;
- GEM_BUG_ON(b->irq_enabled);
if (I915_SELFTEST_ONLY(b->mock)) {
/* For our mock objects we want to avoid interaction
@@ -281,14 +333,15 @@ static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
*/
/* No interrupts? Kick the waiter every jiffie! */
- if (intel_irqs_enabled(i915)) {
- if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
- irq_enable(engine);
- b->irq_enabled = true;
+ enabled = false;
+ if (!b->irq_enabled++ &&
+ !test_bit(engine->id, &i915->gpu_error.test_irq_rings)) {
+ irq_enable(engine);
+ enabled = true;
}
enable_fake_irq(b);
- return true;
+ return enabled;
}
static inline struct intel_wait *to_wait(struct rb_node *node)
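
intel_engine_pin/unpin_breadcrumbs_irq() above turn b->irq_enabled into a reference count: the interrupt is enabled on the 0->1 transition and disabled on 1->0, with GEM_BUG_ON() guarding overflow and underflow. A lock-free userspace sketch of the counting (the kernel holds b->irq_lock around both paths):

#include <assert.h>
#include <stdio.h>

static unsigned int irq_enabled;

static void irq_enable(void)  { printf("irq on\n"); }
static void irq_disable(void) { printf("irq off\n"); }

static void pin_irq(void)
{
	if (!irq_enabled++)
		irq_enable();
	assert(irq_enabled);	/* no overflow */
}

static void unpin_irq(void)
{
	assert(irq_enabled);	/* no underflow */
	if (!--irq_enabled)
		irq_disable();
}

int main(void)
{
	pin_irq();
	pin_irq();	/* second user: no extra enable */
	unpin_irq();
	unpin_irq();	/* last user out: disable */
	return 0;
}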
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 60cf4e58389a..d77e2bec1e29 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -437,13 +437,45 @@ static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
return 200000;
}
+static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
+{
+ if (IS_VALLEYVIEW(dev_priv)) {
+ if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
+ return 2;
+ else if (cdclk >= 266667)
+ return 1;
+ else
+ return 0;
+ } else {
+ /*
+ * Specs are full of misinformation, but testing on actual
+ * hardware has shown that we just need to write the desired
+ * CCK divider into the Punit register.
+ */
+ return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
+ }
+}
+
static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
+ u32 val;
+
cdclk_state->vco = vlv_get_hpll_vco(dev_priv);
cdclk_state->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
CCK_DISPLAY_CLOCK_CONTROL,
cdclk_state->vco);
+
+ mutex_lock(&dev_priv->pcu_lock);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ mutex_unlock(&dev_priv->pcu_lock);
+
+ if (IS_VALLEYVIEW(dev_priv))
+ cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK) >>
+ DSPFREQGUAR_SHIFT;
+ else
+ cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
+ DSPFREQGUAR_SHIFT_CHV;
}
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
@@ -486,7 +518,19 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
{
int cdclk = cdclk_state->cdclk;
- u32 val, cmd;
+ u32 val, cmd = cdclk_state->voltage_level;
+
+ switch (cdclk) {
+ case 400000:
+ case 333333:
+ case 320000:
+ case 266667:
+ case 200000:
+ break;
+ default:
+ MISSING_CASE(cdclk);
+ return;
+ }
/* There are cases where we can end up here with power domains
* off and a CDCLK frequency other than the minimum, like when
@@ -496,13 +540,6 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
- if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
- cmd = 2;
- else if (cdclk == 266667)
- cmd = 1;
- else
- cmd = 0;
-
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK;
@@ -562,7 +599,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
{
int cdclk = cdclk_state->cdclk;
- u32 val, cmd;
+ u32 val, cmd = cdclk_state->voltage_level;
switch (cdclk) {
case 333333:
@@ -583,13 +620,6 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
- /*
- * Specs are full of misinformation, but testing on actual
- * hardware has shown that we just need to write the desired
- * CCK divider into the Punit register.
- */
- cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
-
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK_CHV;
@@ -621,6 +651,21 @@ static int bdw_calc_cdclk(int min_cdclk)
return 337500;
}
+static u8 bdw_calc_voltage_level(int cdclk)
+{
+ switch (cdclk) {
+ default:
+ case 337500:
+ return 2;
+ case 450000:
+ return 0;
+ case 540000:
+ return 1;
+ case 675000:
+ return 3;
+ }
+}
+
static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
@@ -639,13 +684,20 @@ static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_state->cdclk = 337500;
else
cdclk_state->cdclk = 675000;
+
+ /*
+ * Can't read this out :( Let's assume it's
+ * at least what the CDCLK frequency requires.
+ */
+ cdclk_state->voltage_level =
+ bdw_calc_voltage_level(cdclk_state->cdclk);
}
static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
{
int cdclk = cdclk_state->cdclk;
- uint32_t val, data;
+ uint32_t val;
int ret;
if (WARN((I915_READ(LCPLL_CTL) &
@@ -681,25 +733,21 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
val &= ~LCPLL_CLK_FREQ_MASK;
switch (cdclk) {
+ default:
+ MISSING_CASE(cdclk);
+ /* fall through */
+ case 337500:
+ val |= LCPLL_CLK_FREQ_337_5_BDW;
+ break;
case 450000:
val |= LCPLL_CLK_FREQ_450;
- data = 0;
break;
case 540000:
val |= LCPLL_CLK_FREQ_54O_BDW;
- data = 1;
- break;
- case 337500:
- val |= LCPLL_CLK_FREQ_337_5_BDW;
- data = 2;
break;
case 675000:
val |= LCPLL_CLK_FREQ_675_BDW;
- data = 3;
break;
- default:
- WARN(1, "invalid cdclk frequency\n");
- return;
}
I915_WRITE(LCPLL_CTL, val);
@@ -713,16 +761,13 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
DRM_ERROR("Switching back to LCPLL failed\n");
mutex_lock(&dev_priv->pcu_lock);
- sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
+ sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_state->voltage_level);
mutex_unlock(&dev_priv->pcu_lock);
I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
intel_update_cdclk(dev_priv);
-
- WARN(cdclk != dev_priv->cdclk.hw.cdclk,
- "cdclk requested %d kHz but got %d kHz\n",
- cdclk, dev_priv->cdclk.hw.cdclk);
}
static int skl_calc_cdclk(int min_cdclk, int vco)
@@ -748,6 +793,24 @@ static int skl_calc_cdclk(int min_cdclk, int vco)
}
}
+static u8 skl_calc_voltage_level(int cdclk)
+{
+ switch (cdclk) {
+ default:
+ case 308571:
+ case 337500:
+ return 0;
+ case 450000:
+ case 432000:
+ return 1;
+ case 540000:
+ return 2;
+ case 617143:
+ case 675000:
+ return 3;
+ }
+}
+
static void skl_dpll0_update(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
@@ -798,7 +861,7 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_state->cdclk = cdclk_state->ref;
if (cdclk_state->vco == 0)
- return;
+ goto out;
cdctl = I915_READ(CDCLK_CTL);
@@ -839,6 +902,14 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv,
break;
}
}
+
+ out:
+ /*
+ * Can't read this out :( Let's assume it's
+ * at least what the CDCLK frequency requires.
+ */
+ cdclk_state->voltage_level =
+ skl_calc_voltage_level(cdclk_state->cdclk);
}
/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
@@ -917,11 +988,9 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
- u32 freq_select, pcu_ack, cdclk_ctl;
+ u32 freq_select, cdclk_ctl;
int ret;
- WARN_ON((cdclk == 24000) != (vco == 0));
-
mutex_lock(&dev_priv->pcu_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
@@ -936,25 +1005,24 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
/* Choose frequency for this cdclk */
switch (cdclk) {
+ default:
+ WARN_ON(cdclk != dev_priv->cdclk.hw.ref);
+ WARN_ON(vco != 0);
+ /* fall through */
+ case 308571:
+ case 337500:
+ freq_select = CDCLK_FREQ_337_308;
+ break;
case 450000:
case 432000:
freq_select = CDCLK_FREQ_450_432;
- pcu_ack = 1;
break;
case 540000:
freq_select = CDCLK_FREQ_540;
- pcu_ack = 2;
- break;
- case 308571:
- case 337500:
- default:
- freq_select = CDCLK_FREQ_337_308;
- pcu_ack = 0;
break;
case 617143:
case 675000:
freq_select = CDCLK_FREQ_675_617;
- pcu_ack = 3;
break;
}
@@ -993,7 +1061,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
/* inform PCU of the change */
mutex_lock(&dev_priv->pcu_lock);
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
+ sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_state->voltage_level);
mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
@@ -1012,6 +1081,8 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
goto sanitize;
intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+
/* Is PLL enabled and locked ? */
if (dev_priv->cdclk.hw.vco == 0 ||
dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.ref)
@@ -1072,6 +1143,7 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
if (cdclk_state.vco == 0)
cdclk_state.vco = 8100000;
cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco);
+ cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
skl_set_cdclk(dev_priv, &cdclk_state);
}
@@ -1089,6 +1161,7 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = cdclk_state.ref;
cdclk_state.vco = 0;
+ cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
skl_set_cdclk(dev_priv, &cdclk_state);
}
@@ -1117,6 +1190,11 @@ static int glk_calc_cdclk(int min_cdclk)
return 79200;
}
+static u8 bxt_calc_voltage_level(int cdclk)
+{
+ return DIV_ROUND_UP(cdclk, 25000);
+}
+
static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
int ratio;
@@ -1127,6 +1205,7 @@ static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
switch (cdclk) {
default:
MISSING_CASE(cdclk);
+ /* fall through */
case 144000:
case 288000:
case 384000:
@@ -1151,6 +1230,7 @@ static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
switch (cdclk) {
default:
MISSING_CASE(cdclk);
+ /* fall through */
case 79200:
case 158400:
case 316800:
@@ -1191,7 +1271,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_state->cdclk = cdclk_state->ref;
if (cdclk_state->vco == 0)
- return;
+ goto out;
divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
@@ -1215,6 +1295,14 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
}
cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
+
+ out:
+ /*
+ * Can't read this out :( Let's assume it's
+ * at least what the CDCLK frequency requires.
+ */
+ cdclk_state->voltage_level =
+ bxt_calc_voltage_level(cdclk_state->cdclk);
}
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
@@ -1263,24 +1351,22 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
/* cdclk = vco / 2 / div{1,1.5,2,4} */
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
- case 8:
- divider = BXT_CDCLK_CD2X_DIV_SEL_4;
- break;
- case 4:
- divider = BXT_CDCLK_CD2X_DIV_SEL_2;
+ default:
+ WARN_ON(cdclk != dev_priv->cdclk.hw.ref);
+ WARN_ON(vco != 0);
+ /* fall through */
+ case 2:
+ divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
case 3:
WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
break;
- case 2:
- divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+ case 4:
+ divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
- default:
- WARN_ON(cdclk != dev_priv->cdclk.hw.ref);
- WARN_ON(vco != 0);
-
- divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+ case 8:
+ divider = BXT_CDCLK_CD2X_DIV_SEL_4;
break;
}
@@ -1319,7 +1405,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- DIV_ROUND_UP(cdclk, 25000));
+ cdclk_state->voltage_level);
mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
@@ -1336,6 +1422,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
u32 cdctl, expected;
intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
if (dev_priv->cdclk.hw.vco == 0 ||
dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.ref)
@@ -1411,6 +1498,7 @@ void bxt_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = bxt_calc_cdclk(0);
cdclk_state.vco = bxt_de_pll_vco(dev_priv, cdclk_state.cdclk);
}
+ cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
bxt_set_cdclk(dev_priv, &cdclk_state);
}
@@ -1428,6 +1516,7 @@ void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = cdclk_state.ref;
cdclk_state.vco = 0;
+ cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
bxt_set_cdclk(dev_priv, &cdclk_state);
}
@@ -1442,6 +1531,19 @@ static int cnl_calc_cdclk(int min_cdclk)
return 168000;
}
+static u8 cnl_calc_voltage_level(int cdclk)
+{
+ switch (cdclk) {
+ default:
+ case 168000:
+ return 0;
+ case 336000:
+ return 1;
+ case 528000:
+ return 2;
+ }
+}
+
static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
@@ -1475,7 +1577,7 @@ static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_state->cdclk = cdclk_state->ref;
if (cdclk_state->vco == 0)
- return;
+ goto out;
divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
@@ -1492,6 +1594,14 @@ static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
}
cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
+
+ out:
+ /*
+ * Can't read this out :( Let's assume it's
+ * at least what the CDCLK frequency requires.
+ */
+ cdclk_state->voltage_level =
+ cnl_calc_voltage_level(cdclk_state->cdclk);
}
static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
@@ -1532,7 +1642,7 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
- u32 val, divider, pcu_ack;
+ u32 val, divider;
int ret;
mutex_lock(&dev_priv->pcu_lock);
@@ -1549,30 +1659,15 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
/* cdclk = vco / 2 / div{1,2} */
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
- case 4:
- divider = BXT_CDCLK_CD2X_DIV_SEL_2;
- break;
- case 2:
- divider = BXT_CDCLK_CD2X_DIV_SEL_1;
- break;
default:
WARN_ON(cdclk != dev_priv->cdclk.hw.ref);
WARN_ON(vco != 0);
-
+ /* fall through */
+ case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
- }
-
- switch (cdclk) {
- case 528000:
- pcu_ack = 2;
- break;
- case 336000:
- pcu_ack = 1;
- break;
- case 168000:
- default:
- pcu_ack = 0;
+ case 4:
+ divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
}
@@ -1593,10 +1688,17 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
/* inform PCU of the change */
mutex_lock(&dev_priv->pcu_lock);
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
+ sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_state->voltage_level);
mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
+
+ /*
+ * Can't read out the voltage level :(
+ * Let's just assume everything is as expected.
+ */
+ dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
}
static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
@@ -1609,6 +1711,7 @@ static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
switch (cdclk) {
default:
MISSING_CASE(cdclk);
+ /* fall through */
case 168000:
case 336000:
ratio = dev_priv->cdclk.hw.ref == 19200 ? 35 : 28;
@@ -1626,6 +1729,7 @@ static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
u32 cdctl, expected;
intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
if (dev_priv->cdclk.hw.vco == 0 ||
dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.ref)
@@ -1685,6 +1789,7 @@ void cnl_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = cnl_calc_cdclk(0);
cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
+ cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
cnl_set_cdclk(dev_priv, &cdclk_state);
}
@@ -1702,22 +1807,48 @@ void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = cdclk_state.ref;
cdclk_state.vco = 0;
+ cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
cnl_set_cdclk(dev_priv, &cdclk_state);
}
/**
- * intel_cdclk_state_compare - Determine if two CDCLK states differ
+ * intel_cdclk_needs_modeset - Determine if two CDCLK states require a modeset on all pipes
* @a: first CDCLK state
* @b: second CDCLK state
*
* Returns:
- * True if the CDCLK states are identical, false if they differ.
+ * True if the CDCLK states require pipes to be off during reprogramming, false if not.
*/
-bool intel_cdclk_state_compare(const struct intel_cdclk_state *a,
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b)
{
- return memcmp(a, b, sizeof(*a)) == 0;
+ return a->cdclk != b->cdclk ||
+ a->vco != b->vco ||
+ a->ref != b->ref;
+}
+
+/**
+ * intel_cdclk_changed - Determine if two CDCLK states are different
+ * @a: first CDCLK state
+ * @b: second CDCLK state
+ *
+ * Returns:
+ * True if the CDCLK states don't match, false if they do.
+ */
+bool intel_cdclk_changed(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b)
+{
+ return intel_cdclk_needs_modeset(a, b) ||
+ a->voltage_level != b->voltage_level;
+}
+
+void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
+ const char *context)
+{
+ DRM_DEBUG_DRIVER("%s %d kHz, VCO %d kHz, ref %d kHz, voltage level %d\n",
+ context, cdclk_state->cdclk, cdclk_state->vco,
+ cdclk_state->ref, cdclk_state->voltage_level);
}
/**
@@ -1731,29 +1862,28 @@ bool intel_cdclk_state_compare(const struct intel_cdclk_state *a,
void intel_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
{
- if (intel_cdclk_state_compare(&dev_priv->cdclk.hw, cdclk_state))
+ if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state))
return;
if (WARN_ON_ONCE(!dev_priv->display.set_cdclk))
return;
- DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz, VCO %d kHz, ref %d kHz\n",
- cdclk_state->cdclk, cdclk_state->vco,
- cdclk_state->ref);
+ intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to");
dev_priv->display.set_cdclk(dev_priv, cdclk_state);
+
+ if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state),
+ "cdclk state doesn't match!\n")) {
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "[hw state]");
+ intel_dump_cdclk_state(cdclk_state, "[sw state]");
+ }
}
static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
int pixel_rate)
{
if (INTEL_GEN(dev_priv) >= 10)
- /*
- * FIXME: Switch to DIV_ROUND_UP(pixel_rate, 2)
- * once DDI clock voltage requirements are
- * handled correctly.
- */
- return pixel_rate;
+ return DIV_ROUND_UP(pixel_rate, 2);
else if (IS_GEMINILAKE(dev_priv))
/*
* FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
@@ -1783,7 +1913,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, crtc_state->pixel_rate);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+ if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state))
min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95);
/* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
@@ -1846,6 +1976,43 @@ static int intel_compute_min_cdclk(struct drm_atomic_state *state)
return min_cdclk;
}
+/*
+ * Note that this functions assumes that 0 is
+ * the lowest voltage value, and higher values
+ * correspond to increasingly higher voltages.
+ *
+ * Should that relationship no longer hold on
+ * future platforms this code will need to be
+ * adjusted.
+ */
+static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ u8 min_voltage_level;
+ int i;
+ enum pipe pipe;
+
+ memcpy(state->min_voltage_level, dev_priv->min_voltage_level,
+ sizeof(state->min_voltage_level));
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->base.enable)
+ state->min_voltage_level[i] =
+ crtc_state->min_voltage_level;
+ else
+ state->min_voltage_level[i] = 0;
+ }
+
+ min_voltage_level = 0;
+ for_each_pipe(dev_priv, pipe)
+ min_voltage_level = max(state->min_voltage_level[pipe],
+ min_voltage_level);
+
+ return min_voltage_level;
+}
+
static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -1859,11 +2026,15 @@ static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
intel_state->cdclk.logical.cdclk = cdclk;
+ intel_state->cdclk.logical.voltage_level =
+ vlv_calc_voltage_level(dev_priv, cdclk);
if (!intel_state->active_crtcs) {
cdclk = vlv_calc_cdclk(dev_priv, 0);
intel_state->cdclk.actual.cdclk = cdclk;
+ intel_state->cdclk.actual.voltage_level =
+ vlv_calc_voltage_level(dev_priv, cdclk);
} else {
intel_state->cdclk.actual =
intel_state->cdclk.logical;
@@ -1888,11 +2059,15 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
cdclk = bdw_calc_cdclk(min_cdclk);
intel_state->cdclk.logical.cdclk = cdclk;
+ intel_state->cdclk.logical.voltage_level =
+ bdw_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
cdclk = bdw_calc_cdclk(0);
intel_state->cdclk.actual.cdclk = cdclk;
+ intel_state->cdclk.actual.voltage_level =
+ bdw_calc_voltage_level(cdclk);
} else {
intel_state->cdclk.actual =
intel_state->cdclk.logical;
@@ -1923,12 +2098,16 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
+ intel_state->cdclk.logical.voltage_level =
+ skl_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
cdclk = skl_calc_cdclk(0, vco);
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
+ intel_state->cdclk.actual.voltage_level =
+ skl_calc_voltage_level(cdclk);
} else {
intel_state->cdclk.actual =
intel_state->cdclk.logical;
@@ -1957,6 +2136,8 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
+ intel_state->cdclk.logical.voltage_level =
+ bxt_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
if (IS_GEMINILAKE(dev_priv)) {
@@ -1969,6 +2150,8 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
+ intel_state->cdclk.actual.voltage_level =
+ bxt_calc_voltage_level(cdclk);
} else {
intel_state->cdclk.actual =
intel_state->cdclk.logical;
@@ -1992,6 +2175,9 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
+ intel_state->cdclk.logical.voltage_level =
+ max(cnl_calc_voltage_level(cdclk),
+ cnl_compute_min_voltage_level(intel_state));
if (!intel_state->active_crtcs) {
cdclk = cnl_calc_cdclk(0);
@@ -1999,6 +2185,8 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
+ intel_state->cdclk.actual.voltage_level =
+ cnl_calc_voltage_level(cdclk);
} else {
intel_state->cdclk.actual =
intel_state->cdclk.logical;
@@ -2012,12 +2200,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
int max_cdclk_freq = dev_priv->max_cdclk_freq;
if (INTEL_GEN(dev_priv) >= 10)
- /*
- * FIXME: Allow '2 * max_cdclk_freq'
- * once DDI clock voltage requirements are
- * handled correctly.
- */
- return max_cdclk_freq;
+ return 2 * max_cdclk_freq;
else if (IS_GEMINILAKE(dev_priv))
/*
* FIXME: Limiting to 99% as a temporary workaround. See
@@ -2116,10 +2299,6 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
{
dev_priv->display.get_cdclk(dev_priv, &dev_priv->cdclk.hw);
- DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
- dev_priv->cdclk.hw.cdclk, dev_priv->cdclk.hw.vco,
- dev_priv->cdclk.hw.ref);
-
/*
* 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
* Programmng [sic] note: bit[9:2] should be programmed to the number
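
Each of the modeset_calc_cdclk() hunks above follows the same shape: derive a voltage_level for the logical state, then recompute it for the actual state when no CRTCs are active, or copy the logical state wholesale otherwise. A minimal standalone model of that pattern; the frequencies and voltage thresholds are illustrative stand-ins, not the real platform tables:

/* Model of the logical/actual cdclk state pattern above; the numbers
 * are made up for illustration. Build with: cc -std=c99 -o cdclk cdclk.c
 */
#include <stdio.h>

struct cdclk_state {
	int cdclk;		/* kHz */
	int voltage_level;	/* opaque platform-specific level */
};

static int calc_cdclk(int min_cdclk)
{
	/* pick the smallest supported frequency >= min_cdclk */
	static const int freqs[] = { 144000, 288000, 384000, 528000 };
	for (int i = 0; i < 4; i++)
		if (freqs[i] >= min_cdclk)
			return freqs[i];
	return freqs[3];
}

static int calc_voltage_level(int cdclk)
{
	/* a faster cdclk demands a higher voltage level */
	return cdclk > 312000 ? 2 : cdclk > 144000 ? 1 : 0;
}

int main(void)
{
	int min_cdclk = 300000;	/* floor demanded by the planes/pipes */
	int active_crtcs = 0;	/* no active pipes in this example */

	struct cdclk_state logical = { .cdclk = calc_cdclk(min_cdclk) };
	logical.voltage_level = calc_voltage_level(logical.cdclk);

	struct cdclk_state actual = logical;
	if (!active_crtcs) {
		/* with no active pipes, drop the actual state to minimum */
		actual.cdclk = calc_cdclk(0);
		actual.voltage_level = calc_voltage_level(actual.cdclk);
	}

	printf("logical: %d kHz level %d, actual: %d kHz level %d\n",
	       logical.cdclk, logical.voltage_level,
	       actual.cdclk, actual.voltage_level);
	return 0;
}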
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index b8315bca852b..aa66e952a95d 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -370,7 +370,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
*/
if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled &&
(intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
- hsw_disable_ips(intel_crtc);
+ hsw_disable_ips(intel_crtc_state);
reenable_ips = true;
}
@@ -380,7 +380,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
i9xx_load_luts(crtc_state);
if (reenable_ips)
- hsw_enable_ips(intel_crtc);
+ hsw_enable_ips(intel_crtc_state);
}
static void bdw_load_degamma_lut(struct drm_crtc_state *state)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 437339f5d098..9f31aea51dff 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -119,6 +119,8 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
static void intel_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
+
pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
@@ -217,11 +219,9 @@ static void hsw_disable_crt(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_crtc *crtc = old_crtc_state->base.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- WARN_ON(!intel_crtc->config->has_pch_encoder);
+ WARN_ON(!old_crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
@@ -245,46 +245,42 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
}
static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- WARN_ON(!intel_crtc->config->has_pch_encoder);
+ WARN_ON(!crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
static void hsw_pre_enable_crt(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum pipe pipe = crtc->pipe;
- WARN_ON(!intel_crtc->config->has_pch_encoder);
+ WARN_ON(!crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- dev_priv->display.fdi_link_train(intel_crtc, pipe_config);
+ dev_priv->display.fdi_link_train(crtc, crtc_state);
}
static void hsw_enable_crt(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum pipe pipe = crtc->pipe;
- WARN_ON(!intel_crtc->config->has_pch_encoder);
+ WARN_ON(!crtc_state->has_pch_encoder);
- intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
+ intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON);
intel_wait_for_vblank(dev_priv, pipe);
intel_wait_for_vblank(dev_priv, pipe);
@@ -293,10 +289,10 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
}
static void intel_enable_crt(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
+ intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON);
}
static enum drm_mode_status
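
The hsw_*_crt() rewrites above all drop the crtc->config indirection in favour of the crtc state passed to the hook. A toy model of why the disable path in particular must do this; the types are invented for illustration:

/* Toy model of the hook-signature change above: state is passed in
 * explicitly rather than read back through a mutable crtc->config
 * pointer, so disable hooks still see the *old* state after the crtc
 * has been repointed at the new one.
 */
#include <assert.h>
#include <stdbool.h>

struct crtc_state { bool has_pch_encoder; };
struct crtc { const struct crtc_state *config; /* points at current state */ };

static void disable_hook(const struct crtc_state *old_state)
{
	/* correct: checks the state actually being torn down */
	assert(old_state->has_pch_encoder);
}

int main(void)
{
	struct crtc_state old = { .has_pch_encoder = true };
	struct crtc_state new = { .has_pch_encoder = false };
	struct crtc crtc = { .config = &old };

	crtc.config = &new;	/* the commit swaps in the new state... */
	disable_hook(&old);	/* ...but teardown must use the old one */
	return 0;
}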
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index da9de47562b8..7fe4aac0facc 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -37,16 +37,16 @@
#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
-#define I915_CSR_CNL "i915/cnl_dmc_ver1_04.bin"
-#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
+#define I915_CSR_CNL "i915/cnl_dmc_ver1_06.bin"
+#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 6)
-#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
+#define I915_CSR_KBL "i915/kbl_dmc_ver1_04.bin"
MODULE_FIRMWARE(I915_CSR_KBL);
-#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
+#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
-#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
+#define I915_CSR_SKL "i915/skl_dmc_ver1_27.bin"
MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 26)
+#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 27)
#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
MODULE_FIRMWARE(I915_CSR_BXT);
@@ -198,6 +198,7 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
si = bxt_stepping_info;
} else {
size = 0;
+ si = NULL;
}
if (INTEL_REVID(dev_priv) < size)
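
The firmware bumps above pair each DMC blob with a required CSR_VERSION. Assuming the usual major-in-the-high-half packing (the shift widths here are an assumption, not taken from intel_csr.h), version checks reduce to a single integer compare:

#include <assert.h>
#include <stdint.h>

/* assumed layout: major in the high half, minor in the low half */
#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(v)		((uint32_t)(v) >> 16)
#define CSR_VERSION_MINOR(v)		((v) & 0xffff)

int main(void)
{
	uint32_t required = CSR_VERSION(1, 27);	/* SKL after this patch */
	uint32_t loaded = CSR_VERSION(1, 26);	/* stale blob */

	/* packing makes "new enough?" one comparison */
	assert(loaded < required);
	assert(CSR_VERSION_MAJOR(required) == 1);
	assert(CSR_VERSION_MINOR(required) == 27);
	return 0;
}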
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 58a3755544b2..f51645a08dca 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -492,24 +492,6 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
{ 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
};
-enum port intel_ddi_get_encoder_port(struct intel_encoder *encoder)
-{
- switch (encoder->type) {
- case INTEL_OUTPUT_DP_MST:
- return enc_to_mst(&encoder->base)->primary->port;
- case INTEL_OUTPUT_DP:
- case INTEL_OUTPUT_EDP:
- case INTEL_OUTPUT_HDMI:
- case INTEL_OUTPUT_UNKNOWN:
- return enc_to_dig_port(&encoder->base)->port;
- case INTEL_OUTPUT_ANALOG:
- return PORT_E;
- default:
- MISSING_CASE(encoder->type);
- return PORT_A;
- }
-}
-
static const struct ddi_buf_trans *
bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
@@ -811,31 +793,24 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
* values in advance. This function programs the correct values for
* DP/eDP/FDI use cases.
*/
-static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
+static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
int i, n_entries;
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
const struct ddi_buf_trans *ddi_translations;
- switch (encoder->type) {
- case INTEL_OUTPUT_EDP:
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv,
+ &n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port,
&n_entries);
- break;
- case INTEL_OUTPUT_DP:
+ else
ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port,
&n_entries);
- break;
- case INTEL_OUTPUT_ANALOG:
- ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv,
- &n_entries);
- break;
- default:
- MISSING_CASE(encoder->type);
- return;
- }
/* If we're boosting the current, set bit 31 of trans1 */
if (IS_GEN9_BC(dev_priv) &&
@@ -861,7 +836,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
int n_entries;
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
const struct ddi_buf_trans *ddi_translations;
ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
@@ -937,7 +912,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
- intel_prepare_dp_ddi_buffers(encoder);
+ intel_prepare_dp_ddi_buffers(encoder, crtc_state);
}
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
@@ -1448,19 +1423,16 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
ddi_dotclock_get(pipe_config);
}
-static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id pll_id)
+static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state)
{
- struct intel_shared_dpll *pll;
struct intel_dpll_hw_state *state;
struct dpll clock;
/* For DDI ports we always use a shared PLL. */
- if (WARN_ON(pll_id == DPLL_ID_PRIVATE))
+ if (WARN_ON(!crtc_state->shared_dpll))
return 0;
- pll = &dev_priv->shared_dplls[pll_id];
- state = &pll->state.hw_state;
+ state = &crtc_state->dpll_hw_state;
clock.m1 = 2;
clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
@@ -1474,19 +1446,15 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
}
static void bxt_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
- enum intel_dpll_id pll_id = port;
-
- pipe_config->port_clock = bxt_calc_pll_link(dev_priv, pll_id);
+ pipe_config->port_clock = bxt_calc_pll_link(pipe_config);
ddi_dotclock_get(pipe_config);
}
-void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+static void intel_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -1504,33 +1472,34 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- int type = encoder->type;
- uint32_t temp;
+ u32 temp;
- if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
- WARN_ON(transcoder_is_dsi(cpu_transcoder));
+ if (!intel_crtc_has_dp_encoder(crtc_state))
+ return;
- temp = TRANS_MSA_SYNC_CLK;
- switch (crtc_state->pipe_bpp) {
- case 18:
- temp |= TRANS_MSA_6_BPC;
- break;
- case 24:
- temp |= TRANS_MSA_8_BPC;
- break;
- case 30:
- temp |= TRANS_MSA_10_BPC;
- break;
- case 36:
- temp |= TRANS_MSA_12_BPC;
- break;
- default:
- BUG();
- }
- I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
+ WARN_ON(transcoder_is_dsi(cpu_transcoder));
+
+ temp = TRANS_MSA_SYNC_CLK;
+ switch (crtc_state->pipe_bpp) {
+ case 18:
+ temp |= TRANS_MSA_6_BPC;
+ break;
+ case 24:
+ temp |= TRANS_MSA_8_BPC;
+ break;
+ case 30:
+ temp |= TRANS_MSA_10_BPC;
+ break;
+ case 36:
+ temp |= TRANS_MSA_12_BPC;
+ break;
+ default:
+ MISSING_CASE(crtc_state->pipe_bpp);
+ break;
}
+
+ I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
@@ -1540,6 +1509,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
uint32_t temp;
+
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (state == true)
temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
@@ -1555,8 +1525,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- enum port port = intel_ddi_get_encoder_port(encoder);
- int type = encoder->type;
+ enum port port = encoder->port;
uint32_t temp;
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
@@ -1611,7 +1580,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
}
}
- if (type == INTEL_OUTPUT_HDMI) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
if (crtc_state->has_hdmi_sink)
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
@@ -1621,19 +1590,15 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
temp |= TRANS_DDI_HDMI_SCRAMBLING_MASK;
if (crtc_state->hdmi_high_tmds_clock_ratio)
temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
- } else if (type == INTEL_OUTPUT_ANALOG) {
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
temp |= (crtc_state->fdi_lanes - 1) << 1;
- } else if (type == INTEL_OUTPUT_DP ||
- type == INTEL_OUTPUT_EDP) {
- temp |= TRANS_DDI_MODE_SELECT_DP_SST;
- temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
- } else if (type == INTEL_OUTPUT_DP_MST) {
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
} else {
- WARN(1, "Invalid encoder type %d for pipe %c\n",
- encoder->type, pipe_name(pipe));
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+ temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
@@ -1656,7 +1621,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder = intel_connector->encoder;
int type = intel_connector->base.connector_type;
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
enum pipe pipe = 0;
enum transcoder cpu_transcoder;
uint32_t tmp;
@@ -1715,9 +1680,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
+ enum pipe p;
u32 tmp;
- int i;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
@@ -1752,15 +1717,17 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
goto out;
}
- for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+ for_each_pipe(dev_priv, p) {
+ enum transcoder cpu_transcoder = (enum transcoder) p;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
TRANS_DDI_MODE_SELECT_DP_MST)
goto out;
- *pipe = i;
+ *pipe = p;
ret = true;
goto out;
@@ -1800,7 +1767,7 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
@@ -1836,8 +1803,8 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
- enum port port = intel_dig_port->port;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
uint8_t iboost;
if (type == INTEL_OUTPUT_HDMI)
@@ -1939,8 +1906,8 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
const struct cnl_ddi_buf_trans *ddi_translations;
+ enum port port = encoder->port;
int n_entries, ln;
u32 val;
@@ -2003,7 +1970,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
int width, rate, ln;
u32 val;
@@ -2122,13 +2089,13 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_shared_dpll *pll)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
uint32_t val;
if (WARN_ON(!pll))
return;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll_lock);
if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
@@ -2150,7 +2117,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
val = I915_READ(DPLL_CTRL2);
val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
- DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
+ DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
@@ -2166,7 +2133,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
static void intel_ddi_clk_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
if (IS_CANNONLAKE(dev_priv))
I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
@@ -2184,7 +2151,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int level = intel_ddi_dp_level(intel_dp);
@@ -2205,7 +2172,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(encoder, level, encoder->type);
else
- intel_prepare_dp_ddi_buffers(encoder);
+ intel_prepare_dp_ddi_buffers(encoder, crtc_state);
intel_ddi_init_dp_buf_reg(encoder);
if (!is_mst)
@@ -2222,7 +2189,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
int level = intel_ddi_hdmi_level(dev_priv, port);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
@@ -2254,6 +2221,19 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
+ /*
+ * When called from DP MST code:
+ * - conn_state will be NULL
+ * - encoder will be the main encoder (i.e. mst->primary)

+ * - the main connector associated with this port
+ * won't be active or linked to a crtc
+ * - crtc_state will be the state of the first stream to
+ * be activated on this port, and it may not be the same
+ * stream that will be deactivated last, but each stream
+ * should have a state that is identical when it comes to
+ * the DP link parameters
+ */
+
WARN_ON(crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
@@ -2267,7 +2247,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
static void intel_disable_ddi_buf(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
bool wait = false;
u32 val;
@@ -2294,12 +2274,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_dp *intel_dp = &dig_port->dp;
- /*
- * old_crtc_state and old_conn_state are NULL when called from
- * DP_MST. The main connector associated with this port is never
- * bound to a crtc for MST.
- */
- bool is_mst = !old_crtc_state;
+ bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST);
/*
* Power down sink before disabling the port, otherwise we end
@@ -2343,12 +2318,19 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
/*
- * old_crtc_state and old_conn_state are NULL when called from
- * DP_MST. The main connector associated with this port is never
- * bound to a crtc for MST.
+ * When called from DP MST code:
+ * - old_conn_state will be NULL
+ * - encoder will be the main encoder (i.e. mst->primary)
+ * - the main connector associated with this port
+ * won't be active or linked to a crtc
+ * - old_crtc_state will be the state of the last stream to
+ * be deactivated on this port, and it may not be the same
+ * stream that was activated last, but each stream
+ * should have a state that is identical when it comes to
+ * the DP link parameters
*/
- if (old_crtc_state &&
- intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
+
+ if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
intel_ddi_post_disable_hdmi(encoder,
old_crtc_state, old_conn_state);
else
@@ -2396,7 +2378,7 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
intel_dp_stop_link_train(intel_dp);
@@ -2415,7 +2397,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
intel_hdmi_handle_sink_scrambling(encoder,
conn_state->connector,
@@ -2450,7 +2432,8 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder);
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
intel_edp_drrs_disable(intel_dp, old_crtc_state);
intel_psr_disable(intel_dp, old_crtc_state);
@@ -2462,7 +2445,8 @@ static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder);
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
intel_hdmi_handle_sink_scrambling(encoder,
old_conn_state->connector,
@@ -2493,7 +2477,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
uint32_t val;
bool wait = false;
@@ -2534,24 +2518,31 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
udelay(600);
}
-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc)
+static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
{
- u32 temp;
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return false;
- if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
- temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
- return true;
- }
- return false;
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO))
+ return false;
+
+ return I915_READ(HSW_AUD_PIN_ELD_CP_VLD) &
+ AUDIO_OUTPUT_ENABLE(cpu_transcoder);
+}
+
+void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *crtc_state)
+{
+ if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
+ crtc_state->min_voltage_level = 2;
}
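
intel_ddi_compute_min_voltage_level() above supplies the per-pipe floor that the cdclk code maxes over all pipes. A worked check of the Cannonlake rule; the 810000 kHz figure assumes a DP HBR3 link as the example of a port clock above 594 MHz:

#include <assert.h>

/* mirrors the check above: only Cannonlake, only strictly above 594 MHz */
static int min_voltage_level(int is_cannonlake, int port_clock_khz)
{
	if (is_cannonlake && port_clock_khz > 594000)
		return 2;
	return 0;
}

int main(void)
{
	assert(min_voltage_level(1, 810000) == 2); /* e.g. DP HBR3 */
	assert(min_voltage_level(1, 594000) == 0); /* not strictly above */
	assert(min_voltage_level(0, 810000) == 0); /* other platforms */
	return 0;
}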
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_digital_port *intel_dig_port;
u32 temp, flags = 0;
@@ -2604,12 +2595,23 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->hdmi_high_tmds_clock_ratio = true;
/* fall through */
case TRANS_DDI_MODE_SELECT_DVI:
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
pipe_config->lane_count = 4;
break;
case TRANS_DDI_MODE_SELECT_FDI:
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
break;
case TRANS_DDI_MODE_SELECT_DP_SST:
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
+ else
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
+ pipe_config->lane_count =
+ ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+ intel_dp_get_m_n(intel_crtc, pipe_config);
+ break;
case TRANS_DDI_MODE_SELECT_DP_MST:
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
@@ -2619,7 +2621,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
}
pipe_config->has_audio =
- intel_ddi_is_audio_enabled(dev_priv, intel_crtc);
+ intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
@@ -2646,6 +2648,26 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
+
+ intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+}
+
+static enum intel_output_type
+intel_ddi_compute_output_type(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ switch (conn_state->connector->connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ return INTEL_OUTPUT_HDMI;
+ case DRM_MODE_CONNECTOR_eDP:
+ return INTEL_OUTPUT_EDP;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ return INTEL_OUTPUT_DP;
+ default:
+ MISSING_CASE(conn_state->connector->connector_type);
+ return INTEL_OUTPUT_UNUSED;
+ }
}
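
From here on, checks against encoder->type give way to the crtc state's output_types bitmask, which intel_ddi_get_config() now populates from TRANS_DDI_MODE_SELECT. A standalone sketch of the bitmask idiom; the enum and helpers are simplified stand-ins for the driver's:

#include <assert.h>
#include <stdbool.h>

enum output_type { OUTPUT_DP, OUTPUT_EDP, OUTPUT_HDMI, OUTPUT_ANALOG,
		   OUTPUT_DP_MST };

struct crtc_state { unsigned output_types; };

static bool crtc_has_type(const struct crtc_state *s, enum output_type t)
{
	return s->output_types & (1u << t);
}

static bool crtc_has_dp_encoder(const struct crtc_state *s)
{
	/* any of the DP-like outputs */
	return s->output_types &
		((1u << OUTPUT_DP) | (1u << OUTPUT_EDP) | (1u << OUTPUT_DP_MST));
}

int main(void)
{
	struct crtc_state s = { .output_types = 1u << OUTPUT_EDP };

	assert(crtc_has_type(&s, OUTPUT_EDP));
	assert(!crtc_has_type(&s, OUTPUT_HDMI));
	assert(crtc_has_dp_encoder(&s)); /* eDP counts as a DP encoder */
	return 0;
}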
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
@@ -2653,24 +2675,22 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int type = encoder->type;
- int port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
int ret;
- WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
-
if (port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
- if (type == INTEL_OUTPUT_HDMI)
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
else
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
if (IS_GEN9_LP(dev_priv) && ret)
pipe_config->lane_lat_optim_mask =
- bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
- pipe_config->lane_count);
+ bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+
+ intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
return ret;
@@ -2685,7 +2705,7 @@ static struct intel_connector *
intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
{
struct intel_connector *connector;
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
connector = intel_connector_alloc();
if (!connector)
@@ -2704,7 +2724,7 @@ static struct intel_connector *
intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
{
struct intel_connector *connector;
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
connector = intel_connector_alloc();
if (!connector)
@@ -2716,6 +2736,34 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
return connector;
}
+static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport)
+{
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+
+ if (dport->base.port != PORT_A)
+ return false;
+
+ if (dport->saved_port_bits & DDI_A_4_LANES)
+ return false;
+
+ /* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
+ * supported configuration
+ */
+ if (IS_GEN9_LP(dev_priv))
+ return true;
+
+ /* Cannonlake: Most SKUs don't support DDI_E, and the only
+ * one that does also has a full A/E split called
+ * DDI_F, which makes DDI_E useless. However, for
+ * this case let's trust the VBT info.
+ */
+ if (IS_CANNONLAKE(dev_priv) &&
+ !intel_bios_is_port_present(dev_priv, PORT_E))
+ return true;
+
+ return false;
+}
+
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
struct intel_digital_port *intel_dig_port;
@@ -2782,6 +2830,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
+ intel_encoder->compute_output_type = intel_ddi_compute_output_type;
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
if (IS_GEN9_LP(dev_priv))
@@ -2794,7 +2843,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->suspend = intel_dp_encoder_suspend;
intel_encoder->get_power_domains = intel_ddi_get_power_domains;
- intel_dig_port->port = port;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);
@@ -2825,23 +2873,20 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
}
/*
- * Bspec says that DDI_A_4_LANES is the only supported configuration
- * for Broxton. Yet some BIOS fail to set this bit on port A if eDP
- * wasn't lit up at boot. Force this bit on in our internal
- * configuration so that we use the proper lane count for our
- * calculations.
+ * Some BIOS might fail to set this bit on port A if eDP
+ * wasn't lit up at boot. Force this bit set when needed
+ * so we use the proper lane count for our calculations.
*/
- if (IS_GEN9_LP(dev_priv) && port == PORT_A) {
- if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
- DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
- intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
- max_lanes = 4;
- }
+ if (intel_ddi_a_force_4_lanes(intel_dig_port)) {
+ DRM_DEBUG_KMS("Forcing DDI_A_4_LANES for port A\n");
+ intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
+ max_lanes = 4;
}
+ intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = max_lanes;
- intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+ intel_encoder->type = INTEL_OUTPUT_DDI;
intel_encoder->power_domain = intel_port_to_power_domain(port);
intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 875d428ea75f..d28592e43512 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -22,6 +22,9 @@
*
*/
+#include <drm/drm_print.h>
+
+#include "intel_device_info.h"
#include "i915_drv.h"
#define PLATFORM_NAME(x) [INTEL_##x] = #x
@@ -67,21 +70,55 @@ const char *intel_platform_name(enum intel_platform platform)
return platform_names[platform];
}
-void intel_device_info_dump(struct drm_i915_private *dev_priv)
+void intel_device_info_dump_flags(const struct intel_device_info *info,
+ struct drm_printer *p)
{
- const struct intel_device_info *info = &dev_priv->info;
-
- DRM_DEBUG_DRIVER("i915 device info: platform=%s gen=%i pciid=0x%04x rev=0x%02x",
- intel_platform_name(info->platform),
- info->gen,
- dev_priv->drm.pdev->device,
- dev_priv->drm.pdev->revision);
-#define PRINT_FLAG(name) \
- DRM_DEBUG_DRIVER("i915 device info: " #name ": %s", yesno(info->name))
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}
+static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
+{
+ drm_printf(p, "slice mask: %04x\n", sseu->slice_mask);
+ drm_printf(p, "slice total: %u\n", hweight8(sseu->slice_mask));
+ drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu));
+ drm_printf(p, "subslice mask %04x\n", sseu->subslice_mask);
+ drm_printf(p, "subslice per slice: %u\n",
+ hweight8(sseu->subslice_mask));
+ drm_printf(p, "EU total: %u\n", sseu->eu_total);
+ drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
+ drm_printf(p, "has slice power gating: %s\n",
+ yesno(sseu->has_slice_pg));
+ drm_printf(p, "has subslice power gating: %s\n",
+ yesno(sseu->has_subslice_pg));
+ drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
+}
+
+void intel_device_info_dump_runtime(const struct intel_device_info *info,
+ struct drm_printer *p)
+{
+ sseu_dump(&info->sseu, p);
+
+ drm_printf(p, "CS timestamp frequency: %u kHz\n",
+ info->cs_timestamp_frequency_khz);
+}
+
+void intel_device_info_dump(const struct intel_device_info *info,
+ struct drm_printer *p)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(info, struct drm_i915_private, info);
+
+ drm_printf(p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
+ INTEL_DEVID(dev_priv),
+ INTEL_REVID(dev_priv),
+ intel_platform_name(info->platform),
+ info->gen);
+
+ intel_device_info_dump_flags(info, p);
+}
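
Replacing the DRM_DEBUG_DRIVER() calls with a drm_printer argument lets one formatter serve debugfs, dmesg, and error-state capture alike. A standalone model of that indirection; it mirrors the shape of drm_printf(), not the exact kernel implementation:

#include <stdarg.h>
#include <stdio.h>

struct drm_printer {
	void (*printfn)(struct drm_printer *p, const char *fmt, va_list args);
	void *arg;
};

static void drm_printf(struct drm_printer *p, const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	p->printfn(p, fmt, args);
	va_end(args);
}

static void file_printfn(struct drm_printer *p, const char *fmt, va_list args)
{
	vfprintf((FILE *)p->arg, fmt, args);
}

/* a dump helper written once against the printer... */
static void dump_flags(struct drm_printer *p, int has_csr, int has_ddi)
{
	drm_printf(p, "has_csr: %s\n", has_csr ? "yes" : "no");
	drm_printf(p, "has_ddi: %s\n", has_ddi ? "yes" : "no");
}

int main(void)
{
	/* ...works with any sink: stdout here, seq_file or dmesg in the kernel */
	struct drm_printer p = { .printfn = file_printfn, .arg = stdout };
	dump_flags(&p, 1, 1);
	return 0;
}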
+
static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
@@ -235,16 +272,6 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask & BIT(ss)))
info->has_pooled_eu = hweight8(sseu->subslice_mask) == 3;
- /*
- * There is a HW issue in 2x6 fused down parts that requires
- * Pooled EU to be enabled as a WA. The pool configuration
- * changes depending upon which subslice is fused down. This
- * doesn't affect if the device has all 3 subslices enabled.
- */
- /* WaEnablePooledEuFor2x6:bxt */
- info->has_pooled_eu |= (hweight8(sseu->subslice_mask) == 2 &&
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST));
-
sseu->min_eu_in_pool = 0;
if (info->has_pooled_eu) {
if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
@@ -329,7 +356,111 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->has_eu_pg = 0;
}
-/*
+static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
+{
+ u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
+ u32 base_freq, frac_freq;
+
+ base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
+ base_freq *= 1000;
+
+ frac_freq = ((ts_override &
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
+ frac_freq = 1000 / (frac_freq + 1);
+
+ return base_freq + frac_freq;
+}
+
+static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
+{
+ u32 f12_5_mhz = 12500;
+ u32 f19_2_mhz = 19200;
+ u32 f24_mhz = 24000;
+
+ if (INTEL_GEN(dev_priv) <= 4) {
+ /* PRMs say:
+ *
+ * "The value in this register increments once every 16
+ * hclks." (through the “Clocking Configuration”
+ * (“CLKCFG”) MCHBAR register)
+ */
+ return dev_priv->rawclk_freq / 16;
+ } else if (INTEL_GEN(dev_priv) <= 8) {
+ /* PRMs say:
+ *
+ * "The PCU TSC counts 10ns increments; this timestamp
+ * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
+ * rolling over every 1.5 hours).
+ */
+ return f12_5_mhz;
+ } else if (INTEL_GEN(dev_priv) <= 9) {
+ u32 ctc_reg = I915_READ(CTC_MODE);
+ u32 freq = 0;
+
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(dev_priv);
+ } else {
+ freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;
+
+ /* Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
+ */
+ freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
+ CTC_SHIFT_PARAMETER_SHIFT);
+ }
+
+ return freq;
+ } else if (INTEL_GEN(dev_priv) <= 10) {
+ u32 ctc_reg = I915_READ(CTC_MODE);
+ u32 freq = 0;
+ u32 rpm_config_reg = 0;
+
+ /* First figure out the reference frequency. There are 2 ways
+ * we can compute the frequency, either through the
+ * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
+ * tells us which one we should use.
+ */
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(dev_priv);
+ } else {
+ u32 crystal_clock;
+
+ rpm_config_reg = I915_READ(RPM_CONFIG0);
+ crystal_clock = (rpm_config_reg &
+ GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+ GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+ switch (crystal_clock) {
+ case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+ freq = f19_2_mhz;
+ break;
+ case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+ freq = f24_mhz;
+ break;
+ }
+
+ /* Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
+ */
+ freq >>= 3 - ((rpm_config_reg &
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+ }
+
+ return freq;
+ }
+
+ MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
+ return 0;
+}
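
A worked example of the gen9 branch above, under assumed register readouts: a 24 MHz crystal with a CTC shift field of 1 yields freq >>= 3 - 1, i.e. 24000 >> 2 = 6000 kHz for the command stream timestamp:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* assumed readouts: 24 MHz crystal, CTC shift field == 1 */
	uint32_t freq_khz = 24000;
	uint32_t ctc_shift_field = 1;

	/* the CS timestamp ticks slower than the crystal by 2^(3 - field) */
	freq_khz >>= 3 - ctc_shift_field;

	assert(freq_khz == 6000); /* 24000 >> 2 */
	return 0;
}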
+
+/**
+ * intel_device_info_runtime_init - initialize runtime info
+ * @info: intel device info struct
+ *
* Determine various intel_device_info fields at runtime.
*
* Use it when either:
@@ -342,12 +473,16 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
* - after the PCH has been detected,
* - before the first usage of the fields it can tweak.
*/
-void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
+void intel_device_info_runtime_init(struct intel_device_info *info)
{
- struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ struct drm_i915_private *dev_priv =
+ container_of(info, struct drm_i915_private, info);
enum pipe pipe;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 10) {
+ for_each_pipe(dev_priv, pipe)
+ info->num_scalers[pipe] = 2;
+ } else if (INTEL_GEN(dev_priv) == 9) {
info->num_scalers[PIPE_A] = 2;
info->num_scalers[PIPE_B] = 2;
info->num_scalers[PIPE_C] = 1;
@@ -447,19 +582,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 10)
gen10_sseu_info_init(dev_priv);
- DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
- DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
- DRM_DEBUG_DRIVER("subslice total: %u\n",
- sseu_subslice_total(&info->sseu));
- DRM_DEBUG_DRIVER("subslice mask %04x\n", info->sseu.subslice_mask);
- DRM_DEBUG_DRIVER("subslice per slice: %u\n",
- hweight8(info->sseu.subslice_mask));
- DRM_DEBUG_DRIVER("EU total: %u\n", info->sseu.eu_total);
- DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->sseu.eu_per_subslice);
- DRM_DEBUG_DRIVER("has slice power gating: %s\n",
- info->sseu.has_slice_pg ? "y" : "n");
- DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
- info->sseu.has_subslice_pg ? "y" : "n");
- DRM_DEBUG_DRIVER("has EU power gating: %s\n",
- info->sseu.has_eu_pg ? "y" : "n");
+ /* Initialize command stream timestamp frequency */
+ info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
new file mode 100644
index 000000000000..49cb27bd04c1
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DEVICE_INFO_H_
+#define _INTEL_DEVICE_INFO_H_
+
+#include "intel_display.h"
+
+struct drm_printer;
+struct drm_i915_private;
+
+/* Keep in gen based order, and chronological order within a gen */
+enum intel_platform {
+ INTEL_PLATFORM_UNINITIALIZED = 0,
+ /* gen2 */
+ INTEL_I830,
+ INTEL_I845G,
+ INTEL_I85X,
+ INTEL_I865G,
+ /* gen3 */
+ INTEL_I915G,
+ INTEL_I915GM,
+ INTEL_I945G,
+ INTEL_I945GM,
+ INTEL_G33,
+ INTEL_PINEVIEW,
+ /* gen4 */
+ INTEL_I965G,
+ INTEL_I965GM,
+ INTEL_G45,
+ INTEL_GM45,
+ /* gen5 */
+ INTEL_IRONLAKE,
+ /* gen6 */
+ INTEL_SANDYBRIDGE,
+ /* gen7 */
+ INTEL_IVYBRIDGE,
+ INTEL_VALLEYVIEW,
+ INTEL_HASWELL,
+ /* gen8 */
+ INTEL_BROADWELL,
+ INTEL_CHERRYVIEW,
+ /* gen9 */
+ INTEL_SKYLAKE,
+ INTEL_BROXTON,
+ INTEL_KABYLAKE,
+ INTEL_GEMINILAKE,
+ INTEL_COFFEELAKE,
+ /* gen10 */
+ INTEL_CANNONLAKE,
+ INTEL_MAX_PLATFORMS
+};
+
+#define DEV_INFO_FOR_EACH_FLAG(func) \
+ func(is_mobile); \
+ func(is_lp); \
+ func(is_alpha_support); \
+ /* Keep has_* in alphabetical order */ \
+ func(has_64bit_reloc); \
+ func(has_aliasing_ppgtt); \
+ func(has_csr); \
+ func(has_ddi); \
+ func(has_dp_mst); \
+ func(has_reset_engine); \
+ func(has_fbc); \
+ func(has_fpga_dbg); \
+ func(has_full_ppgtt); \
+ func(has_full_48bit_ppgtt); \
+ func(has_gmch_display); \
+ func(has_guc); \
+ func(has_guc_ct); \
+ func(has_hotplug); \
+ func(has_l3_dpf); \
+ func(has_llc); \
+ func(has_logical_ring_contexts); \
+ func(has_logical_ring_preemption); \
+ func(has_overlay); \
+ func(has_pooled_eu); \
+ func(has_psr); \
+ func(has_rc6); \
+ func(has_rc6p); \
+ func(has_resource_streamer); \
+ func(has_runtime_pm); \
+ func(has_snoop); \
+ func(unfenced_needs_alignment); \
+ func(cursor_needs_physical); \
+ func(hws_needs_physical); \
+ func(overlay_needs_physical); \
+ func(supports_tv); \
+ func(has_ipc);
+
+struct sseu_dev_info {
+ u8 slice_mask;
+ u8 subslice_mask;
+ u8 eu_total;
+ u8 eu_per_subslice;
+ u8 min_eu_in_pool;
+ /* For each slice, which subslice(s) have 7 EUs (bitfield)? */
+ u8 subslice_7eu[3];
+ u8 has_slice_pg:1;
+ u8 has_subslice_pg:1;
+ u8 has_eu_pg:1;
+};
+
+struct intel_device_info {
+ u16 device_id;
+ u16 gen_mask;
+
+ u8 gen;
+ u8 gt; /* GT number, 0 if undefined */
+ u8 num_rings;
+ u8 ring_mask; /* Rings supported by the HW */
+
+ enum intel_platform platform;
+ u32 platform_mask;
+
+ u32 display_mmio_offset;
+
+ u8 num_pipes;
+ u8 num_sprites[I915_MAX_PIPES];
+ u8 num_scalers[I915_MAX_PIPES];
+
+ unsigned int page_sizes; /* page sizes supported by the HW */
+
+#define DEFINE_FLAG(name) u8 name:1
+ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
+ u16 ddb_size; /* in blocks */
+
+ /* Register offsets for the various display pipes and transcoders */
+ int pipe_offsets[I915_MAX_TRANSCODERS];
+ int trans_offsets[I915_MAX_TRANSCODERS];
+ int palette_offsets[I915_MAX_PIPES];
+ int cursor_offsets[I915_MAX_PIPES];
+
+ /* Slice/subslice/EU info */
+ struct sseu_dev_info sseu;
+
+ u32 cs_timestamp_frequency_khz;
+
+ struct color_luts {
+ u16 degamma_lut_size;
+ u16 gamma_lut_size;
+ } color;
+};
+
+static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
+{
+ return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
+}
+
+const char *intel_platform_name(enum intel_platform platform);
+
+void intel_device_info_runtime_init(struct intel_device_info *info);
+void intel_device_info_dump(const struct intel_device_info *info,
+ struct drm_printer *p);
+void intel_device_info_dump_flags(const struct intel_device_info *info,
+ struct drm_printer *p);
+void intel_device_info_dump_runtime(const struct intel_device_info *info,
+ struct drm_printer *p);
+
+#endif
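
DEV_INFO_FOR_EACH_FLAG above is the classic X-macro: a single authoritative list expands both into the struct's bitfields (via DEFINE_FLAG) and into the dump loop (via PRINT_FLAG), so a flag can never be declared but forgotten in the debug output. A minimal standalone illustration of the technique:

#include <stdio.h>

/* single source of truth for the flag names */
#define FOR_EACH_FLAG(func) \
	func(has_csr); \
	func(has_ddi); \
	func(has_fbc);

struct device_info {
#define DEFINE_FLAG(name) unsigned char name:1
	FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
};

int main(void)
{
	struct device_info info = { .has_ddi = 1 };

	/* the same list expands into the printer */
#define PRINT_FLAG(name) printf("%s: %s\n", #name, info.name ? "yes" : "no")
	FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
	return 0;
}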
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 50f8443641b8..0cd355978ab4 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -219,10 +219,8 @@ intel_fdi_link_freq(struct drm_i915_private *dev_priv,
{
if (HAS_DDI(dev_priv))
return pipe_config->port_clock; /* SPLL */
- else if (IS_GEN5(dev_priv))
- return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
else
- return 270000;
+ return dev_priv->fdi_pll_freq;
}
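
The gen5 formula removed above, (feedback field + 2) * 10000, presumably now feeds a dev_priv->fdi_pll_freq computed once at init; that field is not shown in this hunk, so treat it as an inference. With a field readout of 25 it reproduces the 270000 kHz constant the other pre-DDI gens used:

#include <assert.h>

int main(void)
{
	/* assumed FDI_PLL_BIOS_0 feedback-clock field readout */
	unsigned fb_clock_field = 25;

	/* the formula removed above: (field + 2) * 10 MHz, in kHz */
	unsigned fdi_pll_freq_khz = (fb_clock_field + 2) * 10000;

	assert(fdi_pll_freq_khz == 270000); /* the old non-gen5 constant */
	return 0;
}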
static const struct intel_limit intel_limits_i8xx_dac = {
@@ -491,7 +489,7 @@ static const struct intel_limit intel_limits_bxt = {
};
static bool
-needs_modeset(struct drm_crtc_state *state)
+needs_modeset(const struct drm_crtc_state *state)
{
return drm_atomic_crtc_needs_modeset(state);
}
@@ -1040,28 +1038,14 @@ static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
wait_for_pipe_scanline_moving(crtc, true);
}
-/*
- * intel_wait_for_pipe_off - wait for pipe to turn off
- * @crtc: crtc whose pipe to wait for
- *
- * After disabling a pipe, we can't wait for vblank in the usual way,
- * spinning on the vblank interrupt status bit, since we won't actually
- * see an interrupt when the pipe is disabled.
- *
- * On Gen4 and above:
- * wait for the pipe register state bit to turn off
- *
- * Otherwise:
- * wait for the display line value to settle (it usually
- * ends up stopping at the start of the next frame).
- *
- */
-static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
+static void
+intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
if (INTEL_GEN(dev_priv) >= 4) {
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
@@ -1653,7 +1637,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
u32 port_mask;
i915_reg_t dpll_reg;
- switch (dport->port) {
+ switch (dport->base.port) {
case PORT_B:
port_mask = DPLL_PORTB_READY_MASK;
dpll_reg = DPLL(0);
@@ -1675,7 +1659,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
dpll_reg, port_mask, expected_mask,
1000))
WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
- port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
+ port_name(dport->base.port),
+ I915_READ(dpll_reg) & port_mask, expected_mask);
}
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -1823,27 +1808,18 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- WARN_ON(!crtc->config->has_pch_encoder);
-
if (HAS_PCH_LPT(dev_priv))
return PIPE_A;
else
return crtc->pipe;
}
-/**
- * intel_enable_pipe - enable a pipe, asserting requirements
- * @crtc: crtc responsible for the pipe
- *
- * Enable @crtc's pipe, making sure that various hardware specific requirements
- * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
- */
-static void intel_enable_pipe(struct intel_crtc *crtc)
+static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
i915_reg_t reg;
u32 val;
@@ -1857,12 +1833,12 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
* need the check.
*/
if (HAS_GMCH_DISPLAY(dev_priv)) {
- if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
+ if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
} else {
- if (crtc->config->has_pch_encoder) {
+ if (new_crtc_state->has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv,
intel_crtc_pch_transcoder(crtc));
@@ -1890,24 +1866,15 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
* when it's derived from the timestamps. So let's wait for the
* pipe to start properly before we call drm_crtc_vblank_on()
*/
- if (dev->max_vblank_count == 0)
+ if (dev_priv->drm.max_vblank_count == 0)
intel_wait_for_pipe_scanline_moving(crtc);
}
-/**
- * intel_disable_pipe - disable a pipe, asserting requirements
- * @crtc: crtc whose pipes is to be disabled
- *
- * Disable the pipe of @crtc, making sure that various hardware
- * specific requirements are met, if applicable, e.g. plane
- * disabled, panel fitter off, etc.
- *
- * Will wait until the pipe has shut down before returning.
- */
-static void intel_disable_pipe(struct intel_crtc *crtc)
+static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 val;
@@ -1929,7 +1896,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
* Double wide has implications for planes
* so best keep it disabled when not needed.
*/
- if (crtc->config->double_wide)
+ if (old_crtc_state->double_wide)
val &= ~PIPECONF_DOUBLE_WIDE;
/* Don't disable pipe or pipe PLLs if needed */
@@ -1938,7 +1905,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
I915_WRITE(reg, val);
if ((val & PIPECONF_ENABLE) == 0)
- intel_wait_for_pipe_off(crtc);
+ intel_wait_for_pipe_off(old_crtc_state);
}
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
@@ -2672,7 +2639,6 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj = NULL;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_framebuffer *fb = &plane_config->fb->base;
@@ -2688,7 +2654,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
- if (size_aligned * 2 > ggtt->stolen_usable_size)
+ if (size_aligned * 2 > dev_priv->stolen_usable_size)
return false;
mutex_lock(&dev->struct_mutex);
@@ -3107,6 +3073,12 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
unsigned int rotation = plane_state->base.rotation;
int ret;
+ if (rotation & DRM_MODE_REFLECT_X &&
+ fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
+ return -EINVAL;
+ }
+
if (!plane_state->base.visible)
return 0;
@@ -3241,16 +3213,16 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
}
-static void i9xx_update_primary_plane(struct intel_plane *primary,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static void i9xx_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
- enum plane plane = primary->plane;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 linear_offset;
u32 dspcntr = plane_state->ctl;
- i915_reg_t reg = DSPCNTR(plane);
+ i915_reg_t reg = DSPCNTR(i9xx_plane);
int x = plane_state->main.x;
int y = plane_state->main.y;
unsigned long irqflags;
@@ -3269,34 +3241,34 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
- I915_WRITE_FW(DSPSIZE(plane),
+ I915_WRITE_FW(DSPSIZE(i9xx_plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
- I915_WRITE_FW(DSPPOS(plane), 0);
- } else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
- I915_WRITE_FW(PRIMSIZE(plane),
+ I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
+ } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+ I915_WRITE_FW(PRIMSIZE(i9xx_plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
- I915_WRITE_FW(PRIMPOS(plane), 0);
- I915_WRITE_FW(PRIMCNSTALPHA(plane), 0);
+ I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
+ I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
}
I915_WRITE_FW(reg, dspcntr);
- I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
+ I915_WRITE_FW(DSPSTRIDE(i9xx_plane), fb->pitches[0]);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE_FW(DSPSURF(plane),
+ I915_WRITE_FW(DSPSURF(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
dspaddr_offset);
- I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
+ I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
} else if (INTEL_GEN(dev_priv) >= 4) {
- I915_WRITE_FW(DSPSURF(plane),
+ I915_WRITE_FW(DSPSURF(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
dspaddr_offset);
- I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
+ I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
+ I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
} else {
- I915_WRITE_FW(DSPADDR(plane),
+ I915_WRITE_FW(DSPADDR(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
dspaddr_offset);
}
@@ -3305,32 +3277,31 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void i9xx_disable_primary_plane(struct intel_plane *primary,
- struct intel_crtc *crtc)
+static void i9xx_disable_plane(struct intel_plane *plane,
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
- enum plane plane = primary->plane;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(DSPCNTR(plane), 0);
- if (INTEL_INFO(dev_priv)->gen >= 4)
- I915_WRITE_FW(DSPSURF(plane), 0);
+ I915_WRITE_FW(DSPCNTR(i9xx_plane), 0);
+ if (INTEL_GEN(dev_priv) >= 4)
+ I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
else
- I915_WRITE_FW(DSPADDR(plane), 0);
- POSTING_READ_FW(DSPCNTR(plane));
+ I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
+ POSTING_READ_FW(DSPCNTR(i9xx_plane));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static bool i9xx_plane_get_hw_state(struct intel_plane *primary)
+static bool i9xx_plane_get_hw_state(struct intel_plane *plane)
{
-
- struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
- enum plane plane = primary->plane;
- enum pipe pipe = primary->pipe;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ enum pipe pipe = plane->pipe;
bool ret;
/*
@@ -3342,7 +3313,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *primary)
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE;
+ ret = I915_READ(DSPCNTR(i9xx_plane)) & DISPLAY_PLANE_ENABLE;
intel_display_power_put(dev_priv, power_domain);
@@ -3415,20 +3386,11 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format)
case DRM_FORMAT_RGB565:
return PLANE_CTL_FORMAT_RGB_565;
case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
case DRM_FORMAT_XRGB8888:
- return PLANE_CTL_FORMAT_XRGB_8888;
- /*
- * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
- * to be already pre-multiplied. We need to add a knob (or a different
- * DRM_FORMAT) for user-space to configure that.
- */
- case DRM_FORMAT_ABGR8888:
- return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
- PLANE_CTL_ALPHA_SW_PREMULTIPLY;
case DRM_FORMAT_ARGB8888:
- return PLANE_CTL_FORMAT_XRGB_8888 |
- PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ return PLANE_CTL_FORMAT_XRGB_8888;
case DRM_FORMAT_XRGB2101010:
return PLANE_CTL_FORMAT_XRGB_2101010;
case DRM_FORMAT_XBGR2101010:
@@ -3448,6 +3410,33 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format)
return 0;
}
+/*
+ * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
+ * to be already pre-multiplied. We need to add a knob (or a different
+ * DRM_FORMAT) for user-space to configure that.
+ */
+static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
+{
+ switch (pixel_format) {
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB8888:
+ return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ default:
+ return PLANE_CTL_ALPHA_DISABLE;
+ }
+}
+
+static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format)
+{
+ switch (pixel_format) {
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB8888:
+ return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
+ default:
+ return PLANE_COLOR_ALPHA_DISABLE;
+ }
+}
+
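
[Editor's note: the hunks above pull per-format alpha handling out of skl_plane_ctl_format() into dedicated helpers: skl_plane_ctl_alpha() feeds the legacy PLANE_CTL register (pre-GLK), glk_plane_color_ctl_alpha() the PLANE_COLOR_CTL register used from GLK/CNL on. A minimal, compilable userspace sketch of the selection logic follows; the fourcc codes match the real DRM ones, but the register bit values are stand-ins rather than the i915_reg.h definitions.]

#include <stdint.h>
#include <stdio.h>

#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
				 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4')
#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4')
#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4')

/* Stand-in bit values; the real ones live in i915_reg.h. */
#define PLANE_CTL_ALPHA_DISABLE          (0u << 4)
#define PLANE_CTL_ALPHA_SW_PREMULTIPLY   (2u << 4)
#define PLANE_COLOR_ALPHA_DISABLE        (0u << 4)
#define PLANE_COLOR_ALPHA_SW_PREMULTIPLY (2u << 4)

/* Same shape as the two helpers added in the patch. */
static uint32_t skl_plane_ctl_alpha(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	default:
		return PLANE_CTL_ALPHA_DISABLE;
	}
}

static uint32_t glk_plane_color_ctl_alpha(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
	default:
		return PLANE_COLOR_ALPHA_DISABLE;
	}
}

int main(void)
{
	/* Alpha-carrying formats get SW premultiply; X formats disable alpha. */
	printf("AR24 -> PLANE_CTL 0x%x, PLANE_COLOR_CTL 0x%x\n",
	       skl_plane_ctl_alpha(DRM_FORMAT_ARGB8888),
	       glk_plane_color_ctl_alpha(DRM_FORMAT_ARGB8888));
	printf("XR24 -> PLANE_CTL 0x%x, PLANE_COLOR_CTL 0x%x\n",
	       skl_plane_ctl_alpha(DRM_FORMAT_XRGB8888),
	       glk_plane_color_ctl_alpha(DRM_FORMAT_XRGB8888));
	return 0;
}
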
static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
switch (fb_modifier) {
@@ -3470,9 +3459,9 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
return 0;
}
-static u32 skl_plane_ctl_rotation(unsigned int rotation)
+static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
- switch (rotation) {
+ switch (rotate) {
case DRM_MODE_ROTATE_0:
break;
/*
@@ -3486,7 +3475,22 @@ static u32 skl_plane_ctl_rotation(unsigned int rotation)
case DRM_MODE_ROTATE_270:
return PLANE_CTL_ROTATE_90;
default:
- MISSING_CASE(rotation);
+ MISSING_CASE(rotate);
+ }
+
+ return 0;
+}
+
+static u32 cnl_plane_ctl_flip(unsigned int reflect)
+{
+ switch (reflect) {
+ case 0:
+ break;
+ case DRM_MODE_REFLECT_X:
+ return PLANE_CTL_FLIP_HORIZONTAL;
+ case DRM_MODE_REFLECT_Y:
+ default:
+ MISSING_CASE(reflect);
}
return 0;
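
[Editor's note: with gen10 gaining horizontal flip, the old skl_plane_ctl_rotation() is split so that skl_plane_ctl_rotate() sees only the rotation half of the property and cnl_plane_ctl_flip() only the reflection half, as the skl_plane_ctl() hunk below shows. A small sketch of the decomposition; the DRM_MODE_* bit positions follow drm_blend.h, and the printed masks are what each helper would receive.]

#include <stdio.h>

/* Bit layout of the DRM "rotation" plane property (see drm_blend.h). */
#define DRM_MODE_ROTATE_0     (1u << 0)
#define DRM_MODE_ROTATE_90    (1u << 1)
#define DRM_MODE_ROTATE_180   (1u << 2)
#define DRM_MODE_ROTATE_270   (1u << 3)
#define DRM_MODE_ROTATE_MASK  0x0fu
#define DRM_MODE_REFLECT_X    (1u << 4)
#define DRM_MODE_REFLECT_Y    (1u << 5)
#define DRM_MODE_REFLECT_MASK 0x30u

int main(void)
{
	/* e.g. userspace asks for a 180 degree rotation plus horizontal flip */
	unsigned int rotation = DRM_MODE_ROTATE_180 | DRM_MODE_REFLECT_X;

	/* Each helper now sees only its own half of the property. */
	unsigned int rotate  = rotation & DRM_MODE_ROTATE_MASK;
	unsigned int reflect = rotation & DRM_MODE_REFLECT_MASK;

	printf("rotate bits 0x%x  -> skl_plane_ctl_rotate()\n", rotate);
	printf("reflect bits 0x%x -> cnl_plane_ctl_flip() (gen10+ only)\n",
	       reflect);
	return 0;
}
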
@@ -3504,7 +3508,8 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl = PLANE_CTL_ENABLE;
- if (!IS_GEMINILAKE(dev_priv) && !IS_CANNONLAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
+ plane_ctl |= skl_plane_ctl_alpha(fb->format->format);
plane_ctl |=
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE |
@@ -3513,7 +3518,11 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl |= skl_plane_ctl_format(fb->format->format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
- plane_ctl |= skl_plane_ctl_rotation(rotation);
+ plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ plane_ctl |= cnl_plane_ctl_flip(rotation &
+ DRM_MODE_REFLECT_MASK);
if (key->flags & I915_SET_COLORKEY_DESTINATION)
plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
@@ -3523,6 +3532,20 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
return plane_ctl;
}
+u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ u32 plane_color_ctl = 0;
+
+ plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
+ plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+ plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
+ plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
+
+ return plane_color_ctl;
+}
+
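
[Editor's note: glk_plane_color_ctl() composes the new per-plane PLANE_COLOR_CTL value on GLK/CNL, taking over the gamma/CSC enables that skl_plane_ctl() now skips on gen10+. A compilable sketch of the composition; the bit values are stand-ins for the i915_reg.h definitions.]

#include <stdint.h>
#include <stdio.h>

/* Stand-in PLANE_COLOR_CTL bits; real values are in i915_reg.h. */
#define PLANE_COLOR_PIPE_GAMMA_ENABLE    (1u << 30)
#define PLANE_COLOR_PIPE_CSC_ENABLE      (1u << 23)
#define PLANE_COLOR_PLANE_GAMMA_DISABLE  (1u << 13)
#define PLANE_COLOR_ALPHA_SW_PREMULTIPLY (2u << 4)

int main(void)
{
	uint32_t plane_color_ctl = 0;

	/* Mirrors glk_plane_color_ctl() above, for an ARGB framebuffer. */
	plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
	plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= PLANE_COLOR_ALPHA_SW_PREMULTIPLY;

	printf("PLANE_COLOR_CTL = 0x%08x\n", plane_color_ctl);
	return 0;
}
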
static int
__intel_display_resume(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -4465,7 +4488,7 @@ intel_trans_dp_port_sel(struct intel_crtc *crtc)
for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_EDP)
- return enc_to_dig_port(&encoder->base)->port;
+ return encoder->port;
}
return -1;
@@ -4816,12 +4839,13 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
}
}
-void hsw_enable_ips(struct intel_crtc *crtc)
+void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- if (!crtc->config->ips_enabled)
+ if (!crtc_state->ips_enabled)
return;
/*
@@ -4829,8 +4853,7 @@ void hsw_enable_ips(struct intel_crtc *crtc)
* This function is called from post_plane_update, which is run after
* a vblank wait.
*/
-
- assert_plane_enabled(to_intel_plane(crtc->base.primary));
+ WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
@@ -4856,16 +4879,15 @@ void hsw_enable_ips(struct intel_crtc *crtc)
}
}
-void hsw_disable_ips(struct intel_crtc *crtc)
+void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- if (!crtc->config->ips_enabled)
+ if (!crtc_state->ips_enabled)
return;
- assert_plane_enabled(to_intel_plane(crtc->base.primary));
-
if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
@@ -4910,7 +4932,8 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
* completely hide the primary plane.
*/
static void
-intel_post_enable_primary(struct drm_crtc *crtc)
+intel_post_enable_primary(struct drm_crtc *crtc,
+ const struct intel_crtc_state *new_crtc_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4918,14 +4941,6 @@ intel_post_enable_primary(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
/*
- * FIXME IPS should be fine as long as one plane is
- * enabled, but in practice it seems to have problems
- * when going from primary only to sprite only and vice
- * versa.
- */
- hsw_enable_ips(intel_crtc);
-
- /*
* Gen2 reports pipe underruns whenever all planes are disabled.
* So don't enable underrun reporting before at least some planes
* are enabled.
@@ -4940,9 +4955,9 @@ intel_post_enable_primary(struct drm_crtc *crtc)
intel_check_pch_fifo_underruns(dev_priv);
}
-/* FIXME move all this to pre_plane_update() with proper state tracking */
+/* FIXME get rid of this and use pre_plane_update */
static void
-intel_pre_disable_primary(struct drm_crtc *crtc)
+intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4951,32 +4966,12 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
- * So diasble underrun reporting before all the planes get disabled.
- * FIXME: Need to fix the logic to work when we turn off all planes
- * but leave the pipe running.
+ * So disable underrun reporting before all the planes get disabled.
*/
if (IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- /*
- * FIXME IPS should be fine as long as one plane is
- * enabled, but in practice it seems to have problems
- * when going from primary only to sprite only and vice
- * versa.
- */
- hsw_disable_ips(intel_crtc);
-}
-
-/* FIXME get rid of this and use pre_plane_update */
-static void
-intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
-
- intel_pre_disable_primary(crtc);
+ hsw_disable_ips(to_intel_crtc_state(crtc->state));
/*
* Vblank time updates from the shadow to live plane control register
@@ -4992,6 +4987,38 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
intel_wait_for_vblank(dev_priv, pipe);
}
+static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!old_crtc_state->ips_enabled)
+ return false;
+
+ if (needs_modeset(&new_crtc_state->base))
+ return true;
+
+ return !new_crtc_state->ips_enabled;
+}
+
+static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!new_crtc_state->ips_enabled)
+ return false;
+
+ if (needs_modeset(&new_crtc_state->base))
+ return true;
+
+ /*
+ * We can't read out IPS on Broadwell, so assume the worst
+ * and forcibly enable IPS on the first fastset.
+ */
+ if (new_crtc_state->update_pipe &&
+ old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
+ return true;
+
+ return !old_crtc_state->ips_enabled;
+}
+
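
[Editor's note: IPS toggling now hangs off these two predicates, called from intel_pre_plane_update()/intel_post_plane_update() in the hunks below, instead of the primary-plane enable/disable paths. Their decision table can be exercised standalone; in this sketch the crtc state is reduced to the booleans actually consulted, with needs_modeset() and the update_pipe/INHERITED test folded into plain flags.]

#include <stdbool.h>
#include <stdio.h>

/* Reduced crtc state: just the inputs the two predicates consult. */
struct state {
	bool ips_enabled;
	bool needs_modeset;	/* stands in for needs_modeset(&new->base) */
	bool first_fastset;	/* update_pipe && old mode INHERITED */
};

static bool pre_update_disable_ips(const struct state *old,
				   const struct state *new)
{
	if (!old->ips_enabled)
		return false;
	if (new->needs_modeset)
		return true;	/* pipe goes down anyway, turn IPS off first */
	return !new->ips_enabled;
}

static bool post_update_enable_ips(const struct state *old,
				   const struct state *new)
{
	if (!new->ips_enabled)
		return false;
	if (new->needs_modeset)
		return true;
	/* BDW can't read IPS state back; force it once on the first fastset. */
	if (new->first_fastset)
		return true;
	return !old->ips_enabled;
}

int main(void)
{
	struct state old = { .ips_enabled = true };
	struct state new = { .ips_enabled = true, .first_fastset = true };

	/* IPS stays logically enabled, yet the first fastset re-asserts it. */
	printf("disable before update: %d\n", pre_update_disable_ips(&old, &new));
	printf("enable after update:   %d\n", post_update_enable_ips(&old, &new));
	return 0;
}
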
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
@@ -5008,6 +5035,9 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
if (pipe_config->update_wm_post && pipe_config->base.active)
intel_update_watermarks(crtc);
+ if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
+ hsw_enable_ips(pipe_config);
+
if (old_pri_state) {
struct intel_plane_state *primary_state =
intel_atomic_get_new_plane_state(to_intel_atomic_state(old_state),
@@ -5020,7 +5050,7 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
if (primary_state->base.visible &&
(needs_modeset(&pipe_config->base) ||
!old_primary_state->base.visible))
- intel_post_enable_primary(&crtc->base);
+ intel_post_enable_primary(&crtc->base, pipe_config);
}
}
@@ -5038,6 +5068,9 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
+ if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
+ hsw_disable_ips(old_crtc_state);
+
if (old_pri_state) {
struct intel_plane_state *primary_state =
intel_atomic_get_new_plane_state(old_intel_state,
@@ -5046,10 +5079,13 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
to_intel_plane_state(old_pri_state);
intel_fbc_pre_update(crtc, pipe_config, primary_state);
-
- if (old_primary_state->base.visible &&
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So disable underrun reporting before all the planes get disabled.
+ */
+ if (IS_GEN2(dev_priv) && old_primary_state->base.visible &&
(modeset || !primary_state->base.visible))
- intel_pre_disable_primary(&crtc->base);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
}
/*
@@ -5312,7 +5348,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
- intel_enable_pipe(intel_crtc);
+ intel_enable_pipe(pipe_config);
if (intel_crtc->config->has_pch_encoder)
ironlake_pch_enable(pipe_config);
@@ -5431,7 +5467,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
if (!transcoder_is_dsi(cpu_transcoder))
- intel_enable_pipe(intel_crtc);
+ intel_enable_pipe(pipe_config);
if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(pipe_config);
@@ -5497,7 +5533,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
- intel_disable_pipe(intel_crtc);
+ intel_disable_pipe(old_crtc_state);
ironlake_pfit_disable(intel_crtc, false);
@@ -5549,7 +5585,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
if (!transcoder_is_dsi(cpu_transcoder))
- intel_disable_pipe(intel_crtc);
+ intel_disable_pipe(old_crtc_state);
if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
@@ -5727,7 +5763,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
dev_priv->display.initial_watermarks(old_intel_state,
pipe_config);
- intel_enable_pipe(intel_crtc);
+ intel_enable_pipe(pipe_config);
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
@@ -5786,7 +5822,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->config);
else
intel_update_watermarks(intel_crtc);
- intel_enable_pipe(intel_crtc);
+ intel_enable_pipe(pipe_config);
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
@@ -5830,7 +5866,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
- intel_disable_pipe(intel_crtc);
+ intel_disable_pipe(old_crtc_state);
i9xx_pfit_disable(intel_crtc);
@@ -5925,6 +5961,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
dev_priv->min_cdclk[intel_crtc->pipe] = 0;
+ dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
}
/*
@@ -6179,18 +6216,20 @@ retry:
return ret;
}
-static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *pipe_config)
+bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
- if (pipe_config->ips_force_disable)
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ /* IPS only exists on ULT machines and is tied to pipe A. */
+ if (!hsw_crtc_supports_ips(crtc))
return false;
- if (pipe_config->pipe_bpp > 24)
+ if (!i915_modparams.enable_ips)
return false;
- /* HSW can handle pixel rate up to cdclk? */
- if (IS_HASWELL(dev_priv))
- return true;
+ if (crtc_state->pipe_bpp > 24)
+ return false;
/*
* We compare against max which means we must take
@@ -6199,19 +6238,36 @@ static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
*
* Should measure whether using a lower cdclk w/o IPS
*/
- return pipe_config->pixel_rate <=
- dev_priv->max_cdclk_freq * 95 / 100;
+ if (IS_BROADWELL(dev_priv) &&
+ crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
+ return false;
+
+ return true;
}
-static void hsw_compute_ips_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv =
+ to_i915(crtc_state->base.crtc->dev);
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(crtc_state->base.state);
- pipe_config->ips_enabled = i915_modparams.enable_ips &&
- hsw_crtc_supports_ips(crtc) &&
- pipe_config_supports_ips(dev_priv, pipe_config);
+ if (!hsw_crtc_state_ips_capable(crtc_state))
+ return false;
+
+ if (crtc_state->ips_force_disable)
+ return false;
+
+ /* IPS should be fine as long as at least one plane is enabled. */
+ if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
+ return false;
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (IS_BROADWELL(dev_priv) &&
+ crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
+ return false;
+
+ return true;
}
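
[Editor's note: the old pipe_config_supports_ips() splits into hsw_crtc_state_ips_capable() for the static constraints (ULT hardware, module parameter, bpp) and hsw_compute_ips_config() for the atomic-state-dependent ones: force-disable, at least one non-cursor plane, and on Broadwell a pixel-rate bound against the logical cdclk instead of the platform maximum. A worked example of the 95% bound; the clock figures are purely illustrative.]

#include <stdio.h>

int main(void)
{
	/* Hypothetical BDW logical cdclk of 540 MHz, in kHz. */
	int cdclk = 540000;
	int pixel_rate = 533280;	/* some large single-pipe mode */

	/* The check added above: pixel rate must stay within 95% of cdclk. */
	int limit = cdclk * 95 / 100;	/* 513000 kHz */

	printf("limit = %d kHz -> IPS %s\n", limit,
	       pixel_rate > limit ? "not allowed" : "allowed");
	return 0;
}
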
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
@@ -6329,9 +6385,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
intel_crtc_compute_pixel_rate(pipe_config);
- if (HAS_IPS(dev_priv))
- hsw_compute_ips_config(crtc, pipe_config);
-
if (pipe_config->has_pch_encoder)
return ironlake_fdi_compute_config(crtc, pipe_config);
@@ -7388,15 +7441,16 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ enum pipe pipe = crtc->pipe;
u32 val, base, offset;
- int pipe = crtc->pipe, plane = crtc->plane;
int fourcc, pixel_format;
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
- val = I915_READ(DSPCNTR(plane));
- if (!(val & DISPLAY_PLANE_ENABLE))
+ if (!plane->get_hw_state(plane))
return;
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
@@ -7409,6 +7463,8 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->dev = dev;
+ val = I915_READ(DSPCNTR(i9xx_plane));
+
if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
@@ -7420,14 +7476,17 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fourcc = i9xx_format_to_fourcc(pixel_format);
fb->format = drm_format_info(fourcc);
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ offset = I915_READ(DSPOFFSET(i9xx_plane));
+ base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
if (plane_config->tiling)
- offset = I915_READ(DSPTILEOFF(plane));
+ offset = I915_READ(DSPTILEOFF(i9xx_plane));
else
- offset = I915_READ(DSPLINOFF(plane));
- base = I915_READ(DSPSURF(plane)) & 0xfffff000;
+ offset = I915_READ(DSPLINOFF(i9xx_plane));
+ base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
} else {
- base = I915_READ(DSPADDR(plane));
+ base = I915_READ(DSPADDR(i9xx_plane));
}
plane_config->base = base;
@@ -7435,15 +7494,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->width = ((val >> 16) & 0xfff) + 1;
fb->height = ((val >> 0) & 0xfff) + 1;
- val = I915_READ(DSPSTRIDE(pipe));
+ val = I915_READ(DSPSTRIDE(i9xx_plane));
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(fb, 0, fb->height);
plane_config->size = fb->pitches[0] * aligned_height;
- DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- pipe_name(pipe), plane, fb->width, fb->height,
+ DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.name, plane->base.name, fb->width, fb->height,
fb->format->cpp[0] * 8, base, fb->pitches[0],
plane_config->size);
@@ -7619,7 +7678,7 @@ static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
break;
case INTEL_OUTPUT_EDP:
has_panel = true;
- if (enc_to_dig_port(&encoder->base)->port == PORT_A)
+ if (encoder->port == PORT_A)
has_cpu_edp = true;
break;
default:
@@ -8412,13 +8471,18 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- u32 val, base, offset, stride_mult, tiling;
- int pipe = crtc->pipe;
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = crtc->pipe;
+ u32 val, base, offset, stride_mult, tiling, alpha;
int fourcc, pixel_format;
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
+ if (!plane->get_hw_state(plane))
+ return;
+
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -8429,14 +8493,19 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->dev = dev;
- val = I915_READ(PLANE_CTL(pipe, 0));
- if (!(val & PLANE_CTL_ENABLE))
- goto error;
+ val = I915_READ(PLANE_CTL(pipe, plane_id));
pixel_format = val & PLANE_CTL_FORMAT_MASK;
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
+ alpha &= PLANE_COLOR_ALPHA_MASK;
+ } else {
+ alpha = val & PLANE_CTL_ALPHA_MASK;
+ }
+
fourcc = skl_format_to_fourcc(pixel_format,
- val & PLANE_CTL_ORDER_RGBX,
- val & PLANE_CTL_ALPHA_MASK);
+ val & PLANE_CTL_ORDER_RGBX, alpha);
fb->format = drm_format_info(fourcc);
tiling = val & PLANE_CTL_TILED_MASK;
@@ -8465,16 +8534,16 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
goto error;
}
- base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
+ base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
plane_config->base = base;
- offset = I915_READ(PLANE_OFFSET(pipe, 0));
+ offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
- val = I915_READ(PLANE_SIZE(pipe, 0));
+ val = I915_READ(PLANE_SIZE(pipe, plane_id));
fb->height = ((val >> 16) & 0xfff) + 1;
fb->width = ((val >> 0) & 0x1fff) + 1;
- val = I915_READ(PLANE_STRIDE(pipe, 0));
+ val = I915_READ(PLANE_STRIDE(pipe, plane_id));
stride_mult = intel_fb_stride_alignment(fb, 0);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
@@ -8482,8 +8551,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->size = fb->pitches[0] * aligned_height;
- DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- pipe_name(pipe), fb->width, fb->height,
+ DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.name, plane->base.name, fb->width, fb->height,
fb->format->cpp[0] * 8, base, fb->pitches[0],
plane_config->size);
@@ -8518,74 +8587,6 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
}
}
-static void
-ironlake_get_initial_plane_config(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 val, base, offset;
- int pipe = crtc->pipe;
- int fourcc, pixel_format;
- unsigned int aligned_height;
- struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
-
- val = I915_READ(DSPCNTR(pipe));
- if (!(val & DISPLAY_PLANE_ENABLE))
- return;
-
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
- if (!intel_fb) {
- DRM_DEBUG_KMS("failed to alloc fb\n");
- return;
- }
-
- fb = &intel_fb->base;
-
- fb->dev = dev;
-
- if (INTEL_GEN(dev_priv) >= 4) {
- if (val & DISPPLANE_TILED) {
- plane_config->tiling = I915_TILING_X;
- fb->modifier = I915_FORMAT_MOD_X_TILED;
- }
- }
-
- pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
- fourcc = i9xx_format_to_fourcc(pixel_format);
- fb->format = drm_format_info(fourcc);
-
- base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- offset = I915_READ(DSPOFFSET(pipe));
- } else {
- if (plane_config->tiling)
- offset = I915_READ(DSPTILEOFF(pipe));
- else
- offset = I915_READ(DSPLINOFF(pipe));
- }
- plane_config->base = base;
-
- val = I915_READ(PIPESRC(pipe));
- fb->width = ((val >> 16) & 0xfff) + 1;
- fb->height = ((val >> 0) & 0xfff) + 1;
-
- val = I915_READ(DSPSTRIDE(pipe));
- fb->pitches[0] = val & 0xffffffc0;
-
- aligned_height = intel_fb_align_height(fb, 0, fb->height);
-
- plane_config->size = fb->pitches[0] * aligned_height;
-
- DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- pipe_name(pipe), fb->width, fb->height,
- fb->format->cpp[0] * 8, base, fb->pitches[0],
- plane_config->size);
-
- plane_config->fb = intel_fb;
-}
-
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -8843,7 +8844,9 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
/*
@@ -9217,9 +9220,18 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
ironlake_get_pfit_config(crtc, pipe_config);
}
- if (IS_HASWELL(dev_priv))
- pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
- (I915_READ(IPS_CTL) & IPS_ENABLE);
+ if (hsw_crtc_supports_ips(crtc)) {
+ if (IS_HASWELL(dev_priv))
+ pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
+ else {
+ /*
+ * We cannot read out IPS state on Broadwell; set it to
+ * true so we can force it to a defined state on the
+ * first commit.
+ */
+ pipe_config->ips_enabled = true;
+ }
+ }
if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
@@ -9300,11 +9312,12 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
u32 offset;
int ret;
- ret = drm_plane_helper_check_state(&plane_state->base,
- &plane_state->clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true);
+ ret = drm_atomic_helper_check_plane_state(&plane_state->base,
+ &crtc_state->base,
+ &plane_state->clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
if (ret)
return ret;
@@ -9706,111 +9719,27 @@ err:
return ERR_PTR(ret);
}
-static u32
-intel_framebuffer_pitch_for_width(int width, int bpp)
-{
- u32 pitch = DIV_ROUND_UP(width * bpp, 8);
- return ALIGN(pitch, 64);
-}
-
-static u32
-intel_framebuffer_size_for_mode(const struct drm_display_mode *mode, int bpp)
-{
- u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
- return PAGE_ALIGN(pitch * mode->vdisplay);
-}
-
-static struct drm_framebuffer *
-intel_framebuffer_create_for_mode(struct drm_device *dev,
- const struct drm_display_mode *mode,
- int depth, int bpp)
-{
- struct drm_framebuffer *fb;
- struct drm_i915_gem_object *obj;
- struct drm_mode_fb_cmd2 mode_cmd = { 0 };
-
- obj = i915_gem_object_create(to_i915(dev),
- intel_framebuffer_size_for_mode(mode, bpp));
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- mode_cmd.width = mode->hdisplay;
- mode_cmd.height = mode->vdisplay;
- mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
- bpp);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
-
- fb = intel_framebuffer_create(obj, &mode_cmd);
- if (IS_ERR(fb))
- i915_gem_object_put(obj);
-
- return fb;
-}
-
-static struct drm_framebuffer *
-mode_fits_in_fbdev(struct drm_device *dev,
- const struct drm_display_mode *mode)
-{
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj;
- struct drm_framebuffer *fb;
-
- if (!dev_priv->fbdev)
- return NULL;
-
- if (!dev_priv->fbdev->fb)
- return NULL;
-
- obj = dev_priv->fbdev->fb->obj;
- BUG_ON(!obj);
-
- fb = &dev_priv->fbdev->fb->base;
- if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
- fb->format->cpp[0] * 8))
- return NULL;
-
- if (obj->base.size < mode->vdisplay * fb->pitches[0])
- return NULL;
-
- drm_framebuffer_get(fb);
- return fb;
-#else
- return NULL;
-#endif
-}
-
-static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
- struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_framebuffer *fb,
- int x, int y)
+static int intel_modeset_disable_planes(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
{
+ struct drm_plane *plane;
struct drm_plane_state *plane_state;
- int hdisplay, vdisplay;
- int ret;
-
- plane_state = drm_atomic_get_plane_state(state, crtc->primary);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
-
- if (mode)
- drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay);
- else
- hdisplay = vdisplay = 0;
+ int ret, i;
- ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
+ ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
- drm_atomic_set_fb_for_plane(plane_state, fb);
- plane_state->crtc_x = 0;
- plane_state->crtc_y = 0;
- plane_state->crtc_w = hdisplay;
- plane_state->crtc_h = vdisplay;
- plane_state->src_x = x << 16;
- plane_state->src_y = y << 16;
- plane_state->src_w = hdisplay << 16;
- plane_state->src_h = vdisplay << 16;
+
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
+ if (plane_state->crtc != crtc)
+ continue;
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+ if (ret)
+ return ret;
+
+ drm_atomic_set_fb_for_plane(plane_state, NULL);
+ }
return 0;
}
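
[Editor's note: load detection no longer fabricates a framebuffer for the primary plane (the helpers removed above); intel_modeset_disable_planes() instead detaches every plane on the probed CRTC so the pipe can be driven without scanout. The loop's shape, reduced to a compilable sketch over a plain array; detach_plane() stands in for the drm_atomic_set_crtc_for_plane()/drm_atomic_set_fb_for_plane() pair.]

#include <stddef.h>
#include <stdio.h>

struct plane_state { int crtc_id; const char *fb; };

/* Stand-in for detaching a plane from its crtc and dropping its fb. */
static int detach_plane(struct plane_state *ps)
{
	ps->crtc_id = -1;
	ps->fb = NULL;
	return 0;
}

int main(void)
{
	struct plane_state planes[] = {
		{ .crtc_id = 0, .fb = "primary-fb" },
		{ .crtc_id = 1, .fb = "sprite-fb" },	/* other crtc: untouched */
		{ .crtc_id = 0, .fb = "cursor-fb" },
	};
	int probed_crtc = 0;

	/* Same filter as the for_each_new_plane_in_state() loop above. */
	for (size_t i = 0; i < sizeof(planes) / sizeof(planes[0]); i++) {
		if (planes[i].crtc_id != probed_crtc)
			continue;
		if (detach_plane(&planes[i]))
			return 1;
	}

	for (size_t i = 0; i < sizeof(planes) / sizeof(planes[0]); i++)
		printf("plane %zu: crtc %d fb %s\n", i, planes[i].crtc_id,
		       planes[i].fb ? planes[i].fb : "(none)");
	return 0;
}
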
@@ -9828,7 +9757,6 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_framebuffer *fb;
struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state = NULL, *restore_state = NULL;
struct drm_connector_state *connector_state;
@@ -9896,10 +9824,6 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
found:
intel_crtc = to_intel_crtc(crtc);
- ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
- if (ret)
- goto fail;
-
state = drm_atomic_state_alloc(dev);
restore_state = drm_atomic_state_alloc(dev);
if (!state || !restore_state) {
@@ -9931,39 +9855,17 @@ found:
if (!mode)
mode = &load_detect_mode;
- /* We need a framebuffer large enough to accommodate all accesses
- * that the plane may generate whilst we perform load detection.
- * We can not rely on the fbcon either being present (we get called
- * during its initialisation to detect all boot displays, or it may
- * not even exist) or that it is large enough to satisfy the
- * requested mode.
- */
- fb = mode_fits_in_fbdev(dev, mode);
- if (fb == NULL) {
- DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
- fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
- } else
- DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
- if (IS_ERR(fb)) {
- DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
- ret = PTR_ERR(fb);
- goto fail;
- }
-
- ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
- drm_framebuffer_put(fb);
+ ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
if (ret)
goto fail;
- ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
+ ret = intel_modeset_disable_planes(state, crtc);
if (ret)
goto fail;
ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
if (!ret)
ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
- if (!ret)
- ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
if (ret) {
DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
goto fail;
@@ -10472,6 +10374,9 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
pipe_config);
}
+ if (HAS_IPS(dev_priv))
+ pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
+
return ret;
}
@@ -10601,7 +10506,7 @@ static const char * const output_type_str[] = {
OUTPUT_TYPE(DP),
OUTPUT_TYPE(EDP),
OUTPUT_TYPE(DSI),
- OUTPUT_TYPE(UNKNOWN),
+ OUTPUT_TYPE(DDI),
OUTPUT_TYPE(DP_MST),
};
@@ -10772,13 +10677,13 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
switch (encoder->type) {
unsigned int port_mask;
- case INTEL_OUTPUT_UNKNOWN:
+ case INTEL_OUTPUT_DDI:
if (WARN_ON(!HAS_DDI(to_i915(dev))))
break;
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
- port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
+ port_mask = 1 << encoder->port;
/* the same port mustn't appear more than once */
if (used_ports & port_mask)
@@ -10788,7 +10693,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
break;
case INTEL_OUTPUT_DP_MST:
used_mst_ports |=
- 1 << enc_to_mst(&encoder->base)->primary->port;
+ 1 << encoder->port;
break;
default:
break;
@@ -10905,7 +10810,12 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
* Determine output_types before calling the .compute_config()
* hooks so that the hooks can use this information safely.
*/
- pipe_config->output_types |= 1 << encoder->type;
+ if (encoder->compute_output_type)
+ pipe_config->output_types |=
+ BIT(encoder->compute_output_type(encoder, pipe_config,
+ connector_state));
+ else
+ pipe_config->output_types |= BIT(encoder->type);
}
encoder_retry:
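
[Editor's note: since DDI encoders now report INTEL_OUTPUT_DDI rather than a concrete type, the effective output type can depend on the state being computed, hence the optional compute_output_type() hook with a fallback to the static encoder->type, as in the hunk above. The dispatch pattern in isolation; the types and the DP-vs-HDMI rule in the sketch are invented for illustration.]

#include <stdio.h>

enum output_type { OUTPUT_DDI, OUTPUT_DP, OUTPUT_HDMI };

struct encoder {
	enum output_type type;
	/* optional hook; NULL means the static type is authoritative */
	enum output_type (*compute_output_type)(const struct encoder *enc,
						int sink_is_dp);
};

static enum output_type ddi_compute_output_type(const struct encoder *enc,
						int sink_is_dp)
{
	(void)enc;
	return sink_is_dp ? OUTPUT_DP : OUTPUT_HDMI;	/* invented rule */
}

int main(void)
{
	struct encoder ddi = { OUTPUT_DDI, ddi_compute_output_type };
	struct encoder fixed = { OUTPUT_HDMI, NULL };
	unsigned int output_types = 0;

	/* Mirrors the hook-with-fallback added in the patch. */
	output_types |= 1u << (ddi.compute_output_type ?
			       ddi.compute_output_type(&ddi, 1) : ddi.type);
	output_types |= 1u << (fixed.compute_output_type ?
			       fixed.compute_output_type(&fixed, 1) : fixed.type);

	printf("output_types mask = 0x%x\n", output_types);
	return 0;
}
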
@@ -10969,31 +10879,6 @@ fail:
return ret;
}
-static void
-intel_modeset_update_crtc_state(struct drm_atomic_state *state)
-{
- struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
- int i;
-
- /* Double check state. */
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
-
- /*
- * Update legacy state to satisfy fbc code. This can
- * be removed when fbc uses the atomic state.
- */
- if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
- struct drm_plane_state *plane_state = crtc->primary->state;
-
- crtc->primary->fb = plane_state->fb;
- crtc->x = plane_state->src_x >> 16;
- crtc->y = plane_state->src_y >> 16;
- }
- }
-}
-
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
int diff;
@@ -11094,6 +10979,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
bool adjust)
{
bool ret = true;
+ bool fixup_inherited = adjust &&
+ (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
+ !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
#define PIPE_CONF_CHECK_X(name) \
if (current_config->name != pipe_config->name) { \
@@ -11113,6 +11001,31 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
ret = false; \
}
+#define PIPE_CONF_CHECK_BOOL(name) \
+ if (current_config->name != pipe_config->name) { \
+ pipe_config_err(adjust, __stringify(name), \
+ "(expected %s, found %s)\n", \
+ yesno(current_config->name), \
+ yesno(pipe_config->name)); \
+ ret = false; \
+ }
+
+/*
+ * Checks state where we only read out whether a feature is enabled, but not
+ * its entire state (like full infoframes or the ELD for audio). Such state
+ * requires a full modeset on bootup to fix up.
+ */
+#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) \
+ if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
+ PIPE_CONF_CHECK_BOOL(name); \
+ } else { \
+ pipe_config_err(adjust, __stringify(name), \
+ "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
+ yesno(current_config->name), \
+ yesno(pipe_config->name)); \
+ ret = false; \
+ }
+
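
[Editor's note: PIPE_CONF_CHECK_BOOL_INCOMPLETE() exists because of the partial readout just described: we learn whether audio or infoframes are enabled, not their contents, so on a BIOS-inherited config anything other than "both off" cannot be verified and must force a modeset. The macro's predicate as a plain function, a compilable sketch rather than the literal expansion:]

#include <stdbool.h>
#include <stdio.h>

/*
 * Returns true when the field can be compared exactly. When fixing up a
 * BIOS-inherited config, only "both off" is exactly comparable; anything
 * else falls back to forcing a modeset.
 */
static bool bool_state_comparable(bool fixup_inherited, bool cur, bool new)
{
	return !fixup_inherited || (!cur && !new);
}

int main(void)
{
	/* e.g. has_audio: BIOS left it on, our computed state wants it on */
	bool fixup_inherited = true, cur = true, new = true;

	if (bool_state_comparable(fixup_inherited, cur, new))
		printf("compare exactly: %s\n",
		       cur == new ? "match" : "mismatch");
	else
		printf("cannot verify, forcing modeset\n");
	return 0;
}
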
#define PIPE_CONF_CHECK_P(name) \
if (current_config->name != pipe_config->name) { \
pipe_config_err(adjust, __stringify(name), \
@@ -11198,7 +11111,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(cpu_transcoder);
- PIPE_CONF_CHECK_I(has_pch_encoder);
+ PIPE_CONF_CHECK_BOOL(has_pch_encoder);
PIPE_CONF_CHECK_I(fdi_lanes);
PIPE_CONF_CHECK_M_N(fdi_m_n);
@@ -11230,17 +11143,17 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
PIPE_CONF_CHECK_I(pixel_multiplier);
- PIPE_CONF_CHECK_I(has_hdmi_sink);
+ PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- PIPE_CONF_CHECK_I(limited_color_range);
+ PIPE_CONF_CHECK_BOOL(limited_color_range);
- PIPE_CONF_CHECK_I(hdmi_scrambling);
- PIPE_CONF_CHECK_I(hdmi_high_tmds_clock_ratio);
- PIPE_CONF_CHECK_I(has_infoframe);
- PIPE_CONF_CHECK_I(ycbcr420);
+ PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
+ PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
+ PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
+ PIPE_CONF_CHECK_BOOL(ycbcr420);
- PIPE_CONF_CHECK_I(has_audio);
+ PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
@@ -11266,7 +11179,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
- PIPE_CONF_CHECK_I(pch_pfit.enabled);
+ PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
PIPE_CONF_CHECK_X(pch_pfit.pos);
PIPE_CONF_CHECK_X(pch_pfit.size);
@@ -11276,11 +11189,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
}
- /* BDW+ don't expose a synchronous way to read the state */
- if (IS_HASWELL(dev_priv))
- PIPE_CONF_CHECK_I(ips_enabled);
-
- PIPE_CONF_CHECK_I(double_wide);
+ PIPE_CONF_CHECK_BOOL(double_wide);
PIPE_CONF_CHECK_P(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
@@ -11314,8 +11223,12 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
+ PIPE_CONF_CHECK_I(min_voltage_level);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
+#undef PIPE_CONF_CHECK_BOOL
+#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
@@ -11582,10 +11495,8 @@ verify_crtc_state(struct drm_crtc *crtc,
"Encoder connected to wrong pipe %c\n",
pipe_name(pipe));
- if (active) {
- pipe_config->output_types |= 1 << encoder->type;
+ if (active)
encoder->get_config(encoder, pipe_config);
- }
}
intel_crtc_compute_pixel_rate(pipe_config);
@@ -11607,6 +11518,18 @@ verify_crtc_state(struct drm_crtc *crtc,
}
static void
+intel_verify_planes(struct intel_atomic_state *state)
+{
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane,
+ plane_state, i)
+ assert_plane(plane, plane_state->base.visible);
+}
+
+static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct drm_crtc *crtc,
@@ -11956,16 +11879,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* holding all the crtc locks, even if we don't end up
* touching the hardware
*/
- if (!intel_cdclk_state_compare(&dev_priv->cdclk.logical,
- &intel_state->cdclk.logical)) {
+ if (intel_cdclk_changed(&dev_priv->cdclk.logical,
+ &intel_state->cdclk.logical)) {
ret = intel_lock_all_pipes(state);
if (ret < 0)
return ret;
}
/* All pipes must be switched off while we change the cdclk. */
- if (!intel_cdclk_state_compare(&dev_priv->cdclk.actual,
- &intel_state->cdclk.actual)) {
+ if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
+ &intel_state->cdclk.actual)) {
ret = intel_modeset_all_pipes(state);
if (ret < 0)
return ret;
@@ -11974,6 +11897,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
intel_state->cdclk.logical.cdclk,
intel_state->cdclk.actual.cdclk);
+ DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
+ intel_state->cdclk.logical.voltage_level,
+ intel_state->cdclk.actual.voltage_level);
} else {
to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
}
@@ -12085,7 +12011,7 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
return ret;
- intel_fbc_choose_crtc(dev_priv, state);
+ intel_fbc_choose_crtc(dev_priv, intel_state);
return calc_watermark_data(state);
}
@@ -12325,9 +12251,9 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
}
}
- /* Only after disabling all output pipelines that will be changed can we
- * update the the output configuration. */
- intel_modeset_update_crtc_state(state);
+ /* FIXME: Eventually get rid of our intel_crtc->config pointer */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
+ to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
if (intel_state->modeset) {
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
@@ -12396,6 +12322,9 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
}
+ if (intel_state->modeset)
+ intel_verify_planes(intel_state);
+
if (intel_state->modeset && intel_can_enable_sagv(state))
intel_enable_sagv(dev_priv);
@@ -12542,6 +12471,9 @@ static int intel_atomic_commit(struct drm_device *dev,
if (intel_state->modeset) {
memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
sizeof(intel_state->min_cdclk));
+ memcpy(dev_priv->min_voltage_level,
+ intel_state->min_voltage_level,
+ sizeof(intel_state->min_voltage_level));
dev_priv->active_crtcs = intel_state->active_crtcs;
dev_priv->cdclk.logical = intel_state->cdclk.logical;
dev_priv->cdclk.actual = intel_state->cdclk.actual;
@@ -12783,7 +12715,7 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
- if (IS_GEMINILAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
max_dotclk *= 2;
if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
@@ -12822,10 +12754,11 @@ intel_check_primary_plane(struct intel_plane *plane,
can_position = true;
}
- ret = drm_plane_helper_check_state(&state->base,
- &state->clip,
- min_scale, max_scale,
- can_position, true);
+ ret = drm_atomic_helper_check_plane_state(&state->base,
+ &crtc_state->base,
+ &state->clip,
+ min_scale, max_scale,
+ can_position, true);
if (ret)
return ret;
@@ -12846,6 +12779,9 @@ intel_check_primary_plane(struct intel_plane *plane,
state->ctl = i9xx_plane_ctl(crtc_state, state);
}
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ state->color_ctl = glk_plane_color_ctl(crtc_state, state);
+
return 0;
}
@@ -12890,6 +12826,7 @@ out:
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_crtc_state->state);
@@ -12897,6 +12834,20 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
intel_pipe_update_end(new_crtc_state);
+
+ if (new_crtc_state->update_pipe &&
+ !needs_modeset(&new_crtc_state->base) &&
+ old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED) {
+ if (!IS_GEN2(dev_priv))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, intel_crtc->pipe, true);
+
+ if (new_crtc_state->has_pch_encoder) {
+ enum pipe pch_transcoder =
+ intel_crtc_pch_transcoder(intel_crtc);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
+ }
+ }
}
/**
@@ -13198,9 +13149,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
* port is hooked to pipe B. Hence we want plane A feeding pipe B.
*/
if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
- primary->plane = (enum plane) !pipe;
+ primary->i9xx_plane = (enum i9xx_plane_id) !pipe;
else
- primary->plane = (enum plane) pipe;
+ primary->i9xx_plane = (enum i9xx_plane_id) pipe;
primary->id = PLANE_PRIMARY;
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
@@ -13229,16 +13180,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
num_formats = ARRAY_SIZE(i965_primary_formats);
modifiers = i9xx_format_modifiers;
- primary->update_plane = i9xx_update_primary_plane;
- primary->disable_plane = i9xx_disable_primary_plane;
+ primary->update_plane = i9xx_update_plane;
+ primary->disable_plane = i9xx_disable_plane;
primary->get_hw_state = i9xx_plane_get_hw_state;
} else {
intel_primary_formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
modifiers = i9xx_format_modifiers;
- primary->update_plane = i9xx_update_primary_plane;
- primary->disable_plane = i9xx_disable_primary_plane;
+ primary->update_plane = i9xx_update_plane;
+ primary->disable_plane = i9xx_disable_plane;
primary->get_hw_state = i9xx_plane_get_hw_state;
}
@@ -13262,11 +13213,17 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
- "plane %c", plane_name(primary->plane));
+ "plane %c",
+ plane_name(primary->i9xx_plane));
if (ret)
goto fail;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 10) {
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
+ DRM_MODE_REFLECT_X;
+ } else if (INTEL_GEN(dev_priv) >= 9) {
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
@@ -13322,7 +13279,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
cursor->can_scale = false;
cursor->max_downscale = 1;
cursor->pipe = pipe;
- cursor->plane = pipe;
+ cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
cursor->id = PLANE_CURSOR;
cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
@@ -13450,14 +13407,13 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
goto fail;
intel_crtc->pipe = pipe;
- intel_crtc->plane = primary->plane;
/* initialize shared scalers */
intel_crtc_init_scalers(intel_crtc, crtc_state);
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
- dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
- dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc;
+ dev_priv->plane_to_crtc_mapping[primary->i9xx_plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[primary->i9xx_plane] = intel_crtc;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
@@ -14137,7 +14093,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
} else if (HAS_DDI(dev_priv)) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_initial_plane_config =
- ironlake_get_initial_plane_config;
+ i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
@@ -14145,7 +14101,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
} else if (HAS_PCH_SPLIT(dev_priv)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.get_initial_plane_config =
- ironlake_get_initial_plane_config;
+ i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
@@ -14384,6 +14340,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
}
@@ -14463,6 +14420,8 @@ retry:
cs->wm.need_postvbl_update = true;
dev_priv->display.optimize_watermarks(intel_state, cs);
+
+ to_intel_crtc_state(crtc->state)->wm = cs->wm;
}
put_state:
@@ -14472,6 +14431,22 @@ fail:
drm_modeset_acquire_fini(&ctx);
}
+static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
+{
+ if (IS_GEN5(dev_priv)) {
+ u32 fdi_pll_clk =
+ I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
+
+ dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
+ } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
+ dev_priv->fdi_pll_freq = 270000;
+ } else {
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
+}
+
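
[Editor's note: a worked example of the gen5 arithmetic above: the clock field read from FDI_PLL_BIOS_0 is biased by 2 and scaled by 10000, so a raw field of 25 (0x19) yields (25 + 2) * 10000 = 270000 kHz, matching the fixed 270000 used for gen6/IVB. The register value and field mask below are illustrative stand-ins.]

#include <stdio.h>

#define FDI_PLL_FB_CLOCK_MASK 0xffu	/* stand-in field mask */

int main(void)
{
	unsigned int fdi_pll_bios_0 = 0x19;	/* illustrative register value */
	unsigned int fdi_pll_clk = fdi_pll_bios_0 & FDI_PLL_FB_CLOCK_MASK;

	/* Gen5: frequency derives from the BIOS-programmed feedback clock. */
	unsigned int fdi_pll_freq = (fdi_pll_clk + 2) * 10000;

	printf("FDI PLL freq = %u kHz\n", fdi_pll_freq);	/* 270000 */
	return 0;
}
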
int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -14544,7 +14519,7 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
}
- dev->mode_config.fb_base = ggtt->mappable_base;
+ dev->mode_config.fb_base = ggtt->gmadr.start;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
INTEL_INFO(dev_priv)->num_pipes,
@@ -14561,6 +14536,7 @@ int intel_modeset_init(struct drm_device *dev)
}
intel_shared_dpll_init(dev);
+ intel_update_fdi_pll_freq(dev_priv);
intel_update_czclk(dev_priv);
intel_modeset_init_hw(dev);
@@ -14612,6 +14588,7 @@ int intel_modeset_init(struct drm_device *dev)
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
/* 640x480@60Hz, ~25175 kHz */
struct dpll clock = {
.m1 = 18,
@@ -14675,6 +14652,8 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
POSTING_READ(PIPECONF(pipe));
+
+ intel_wait_for_pipe_scanline_moving(crtc);
}
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -14700,11 +14679,11 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
}
static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
- struct intel_plane *primary)
+ struct intel_plane *plane)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum plane plane = primary->plane;
- u32 val = I915_READ(DSPCNTR(plane));
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 val = I915_READ(DSPCNTR(i9xx_plane));
return (val & DISPLAY_PLANE_ENABLE) == 0 ||
(val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
@@ -14768,7 +14747,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
/* Clear any frame start delays used for debugging left by the BIOS */
- if (!transcoder_is_dsi(cpu_transcoder)) {
+ if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
i915_reg_t reg = PIPECONF(cpu_transcoder);
I915_WRITE(reg,
@@ -14866,8 +14845,6 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
- /* Enabled encoders without active connectors will be fixed in
- * the crtc fixup. */
}
void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
@@ -14980,7 +14957,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc_state = to_intel_crtc_state(crtc->base.state);
encoder->base.crtc = &crtc->base;
- crtc_state->output_types |= 1 << encoder->type;
encoder->get_config(encoder, crtc_state);
} else {
encoder->base.crtc = NULL;
@@ -15059,6 +15035,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
+ dev_priv->min_voltage_level[crtc->pipe] =
+ crtc_state->min_voltage_level;
intel_pipe_config_sanity_check(dev_priv, crtc_state);
}
@@ -15082,6 +15060,23 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
}
}
+static void intel_early_display_was(struct drm_i915_private *dev_priv)
+{
+ /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+ DARBF_GATING_DIS);
+
+ if (IS_HASWELL(dev_priv)) {
+ /*
+ * WaRsPkgCStateDisplayPMReq:hsw
+ * System hang if this isn't done before disabling all planes!
+ */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+ }
+}
+
/* Scan out the current hw modeset state,
* and sanitizes it to the current state
*/
@@ -15095,15 +15090,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct intel_encoder *encoder;
int i;
- if (IS_HASWELL(dev_priv)) {
- /*
- * WaRsPkgCStateDisplayPMReq:hsw
- * System hang if this isn't done before disabling all planes!
- */
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
- }
-
+ intel_early_display_was(dev_priv);
intel_modeset_readout_hw_state(dev);
/* HW state is read out, now we need to sanitize this mess. */
@@ -15197,17 +15184,6 @@ void intel_display_resume(struct drm_device *dev)
drm_atomic_state_put(state);
}
-void intel_modeset_gem_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- intel_init_gt_powersave(dev_priv);
-
- intel_init_clock_gating(dev_priv);
-
- intel_setup_overlay(dev_priv);
-}
-
int intel_connector_register(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -15236,10 +15212,7 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- /* First disable polling... */
- drm_kms_helper_poll_fini(dev);
-
- /* Then kill the work that may have been queued by hpd. */
+ /* Kill all the work that may have been queued by hpd. */
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->modeset_retry_work.func)
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
new file mode 100644
index 000000000000..a0d2b6169361
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright © 2006-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DISPLAY_H_
+#define _INTEL_DISPLAY_H_
+
+enum pipe {
+ INVALID_PIPE = -1,
+
+ PIPE_A = 0,
+ PIPE_B,
+ PIPE_C,
+ _PIPE_EDP,
+
+ I915_MAX_PIPES = _PIPE_EDP
+};
+
+#define pipe_name(p) ((p) + 'A')
+
+enum transcoder {
+ TRANSCODER_A = 0,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP,
+ TRANSCODER_DSI_A,
+ TRANSCODER_DSI_C,
+
+ I915_MAX_TRANSCODERS
+};
+
+static inline const char *transcoder_name(enum transcoder transcoder)
+{
+ switch (transcoder) {
+ case TRANSCODER_A:
+ return "A";
+ case TRANSCODER_B:
+ return "B";
+ case TRANSCODER_C:
+ return "C";
+ case TRANSCODER_EDP:
+ return "EDP";
+ case TRANSCODER_DSI_A:
+ return "DSI A";
+ case TRANSCODER_DSI_C:
+ return "DSI C";
+ default:
+ return "<invalid>";
+ }
+}
+
+static inline bool transcoder_is_dsi(enum transcoder transcoder)
+{
+ return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
+}
+
+/*
+ * Global legacy plane identifier. Valid only for primary/sprite
+ * planes on pre-g4x, and only for primary planes on g4x-bdw.
+ */
+enum i9xx_plane_id {
+ PLANE_A,
+ PLANE_B,
+ PLANE_C,
+};
+
+#define plane_name(p) ((p) + 'A')
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+
+/*
+ * Per-pipe plane identifier.
+ * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
+ * number of planes per CRTC. Not all platforms really have this many planes,
+ * which means some arrays of size I915_MAX_PLANES may have unused entries
+ * between the topmost sprite plane and the cursor plane.
+ *
+ * This is expected to be passed to various register macros
+ * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
+ */
+enum plane_id {
+ PLANE_PRIMARY,
+ PLANE_SPRITE0,
+ PLANE_SPRITE1,
+ PLANE_SPRITE2,
+ PLANE_CURSOR,
+
+ I915_MAX_PLANES,
+};
+
+#define for_each_plane_id_on_crtc(__crtc, __p) \
+ for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
+ for_each_if((__crtc)->plane_ids_mask & BIT(__p))
+
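
[Editor's note: a self-contained sketch of how the iterator macro above walks a CRTC's plane mask. for_each_if() is the kernel helper that turns a condition into a for-compatible statement; it is reproduced here so the sketch compiles on its own.]

#include <stdio.h>

#define BIT(n) (1u << (n))
#define for_each_if(condition) if (!(condition)) {} else

enum plane_id {
	PLANE_PRIMARY,
	PLANE_SPRITE0,
	PLANE_SPRITE1,
	PLANE_SPRITE2,
	PLANE_CURSOR,
	I915_MAX_PLANES,
};

struct crtc { unsigned int plane_ids_mask; };

#define for_each_plane_id_on_crtc(__crtc, __p) \
	for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
		for_each_if((__crtc)->plane_ids_mask & BIT(__p))

int main(void)
{
	/* e.g. a pipe with a primary, one sprite, and a cursor */
	struct crtc crtc = {
		.plane_ids_mask = BIT(PLANE_PRIMARY) | BIT(PLANE_SPRITE0) |
				  BIT(PLANE_CURSOR),
	};
	enum plane_id p;

	for_each_plane_id_on_crtc(&crtc, p)
		printf("plane id %d present\n", p);
	return 0;
}
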
+enum port {
+ PORT_NONE = -1,
+
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+
+ I915_MAX_PORTS
+};
+
+#define port_name(p) ((p) + 'A')
+
+enum dpio_channel {
+ DPIO_CH0,
+ DPIO_CH1
+};
+
+enum dpio_phy {
+ DPIO_PHY0,
+ DPIO_PHY1,
+ DPIO_PHY2,
+};
+
+#define I915_NUM_PHYS_VLV 2
+
+enum intel_display_power_domain {
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_A_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_B_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_C_PANEL_FITTER,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_TRANSCODER_DSI_A,
+ POWER_DOMAIN_TRANSCODER_DSI_C,
+ POWER_DOMAIN_PORT_DDI_A_LANES,
+ POWER_DOMAIN_PORT_DDI_B_LANES,
+ POWER_DOMAIN_PORT_DDI_C_LANES,
+ POWER_DOMAIN_PORT_DDI_D_LANES,
+ POWER_DOMAIN_PORT_DDI_E_LANES,
+ POWER_DOMAIN_PORT_DDI_A_IO,
+ POWER_DOMAIN_PORT_DDI_B_IO,
+ POWER_DOMAIN_PORT_DDI_C_IO,
+ POWER_DOMAIN_PORT_DDI_D_IO,
+ POWER_DOMAIN_PORT_DDI_E_IO,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_PORT_OTHER,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO,
+ POWER_DOMAIN_PLLS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_INIT,
+
+ POWER_DOMAIN_NUM,
+};
+
+#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
+ ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+ ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+ (tran) + POWER_DOMAIN_TRANSCODER_A)
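+
+These mappings rely on the power-domain enum declaring the pipe, panel-fitter
+and transcoder domains in the same order as the corresponding enums, e.g.
+POWER_DOMAIN_TRANSCODER(TRANSCODER_B) == TRANSCODER_B +
+POWER_DOMAIN_TRANSCODER_A == POWER_DOMAIN_TRANSCODER_B, with TRANSCODER_EDP
+special-cased explicitly.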
+
+/* Used by DP and FDI links */
+struct intel_link_m_n {
+ u32 tu;
+ u32 gmch_m;
+ u32 gmch_n;
+ u32 link_m;
+ u32 link_n;
+};
+
+#define for_each_pipe(__dev_priv, __p) \
+ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+
+#define for_each_pipe_masked(__dev_priv, __p, __mask) \
+ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
+ for_each_if((__mask) & BIT(__p))
+
+#define for_each_universal_plane(__dev_priv, __pipe, __p) \
+ for ((__p) = 0; \
+ (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
+ (__p)++)
+
+#define for_each_sprite(__dev_priv, __p, __s) \
+ for ((__s) = 0; \
+ (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
+ (__s)++)
+
+#define for_each_port_masked(__port, __ports_mask) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+ for_each_if((__ports_mask) & BIT(__port))
+
+#define for_each_crtc(dev, crtc) \
+ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+
+#define for_each_intel_plane(dev, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head)
+
+#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head) \
+ for_each_if((plane_mask) & \
+ BIT(drm_plane_index(&intel_plane->base)))
+
+#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head) \
+ for_each_if((intel_plane)->pipe == (intel_crtc)->pipe)
+
+#define for_each_intel_crtc(dev, intel_crtc) \
+ list_for_each_entry(intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head)
+
+#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
+ list_for_each_entry(intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+ for_each_if((crtc_mask) & BIT(drm_crtc_index(&intel_crtc->base)))
+
+#define for_each_intel_encoder(dev, intel_encoder) \
+ list_for_each_entry(intel_encoder, \
+ &(dev)->mode_config.encoder_list, \
+ base.head)
+
+#define for_each_intel_connector_iter(intel_connector, iter) \
+ while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
+
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+ for_each_if((intel_encoder)->base.crtc == (__crtc))
+
+#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
+ list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
+ for_each_if((intel_connector)->base.encoder == (__encoder))
+
+#define for_each_power_domain(domain, mask) \
+ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+ for_each_if(BIT_ULL(domain) & (mask))
+
+#define for_each_power_well(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells < \
+ (__dev_priv)->power_domains.power_well_count; \
+ (__power_well)++)
+
+#define for_each_power_well_rev(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
+ (__dev_priv)->power_domains.power_well_count - 1; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
+ (__power_well)--)
+
+#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well(__dev_priv, __power_well) \
+ for_each_if((__power_well)->domains & (__domain_mask))
+
+#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well_rev(__dev_priv, __power_well) \
+ for_each_if((__power_well)->domains & (__domain_mask))
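+
+A sketch of the masked variant; enable_well() here is a hypothetical
+stand-in for whatever per-well callback the caller actually needs:
+
+	struct i915_power_well *power_well;
+
+	/* Sketch: walk only the wells whose domain mask covers VGA.
+	 * enable_well() is a stand-in, not a real driver helper. */
+	for_each_power_domain_well(dev_priv, power_well,
+				   BIT_ULL(POWER_DOMAIN_VGA))
+		enable_well(dev_priv, power_well);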
+
+#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
+
+#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(crtc)
+
+#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
+ (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
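+
+These mirror the core drm for_each_new_*_in_state() iterators but hand back
+the intel_* wrapper types directly; a sketch, assuming state is the
+intel_atomic_state being committed:
+
+	struct intel_crtc *crtc;
+	struct intel_crtc_state *new_crtc_state;
+	int i;
+
+	/* Sketch: walk every CRTC carried in this atomic state. */
+	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+		if (new_crtc_state->base.active)
+			DRM_DEBUG_KMS("pipe %c active\n",
+				      pipe_name(crtc->pipe));
+	}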
+
+void intel_link_compute_m_n(int bpp, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n,
+ bool reduce_m_n);
+
+#endif
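+
+For reference, the two M/N pairs describe two link ratios: the data M/N
+(payload vs. total link bandwidth) and the link M/N (pixel clock vs. link
+clock). A sketch of the conventional arithmetic, clocks in kHz:
+
+	/* data M/N ~ (pixel_clock * bpp) / (link_clock * nlanes * 8)
+	 * link M/N ~  pixel_clock / link_clock
+	 * e.g. 1080p60 (148500 kHz) at 24 bpp over 4 lanes of HBR
+	 * (270000 kHz) gives a data ratio of
+	 * 148500 * 24 / (270000 * 4 * 8) ~= 0.4125. */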
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 158438bb0389..35c5299feab6 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -129,11 +129,13 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
-static void intel_dp_link_down(struct intel_dp *intel_dp);
+static void intel_dp_link_down(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
-static void vlv_steal_power_sequencer(struct drm_device *dev,
+static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
@@ -221,7 +223,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->port;
+ enum port port = dig_port->base.port;
const int *source_rates;
int size;
u32 voltage;
@@ -427,24 +429,19 @@ static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
}
static void
-intel_dp_init_panel_power_sequencer(struct drm_device *dev,
- struct intel_dp *intel_dp);
+intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
-intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp,
+intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
bool force_disable_vdd);
static void
-intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
+intel_dp_pps_init(struct intel_dp *intel_dp);
static void pps_lock(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &intel_dig_port->base;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
/*
- * See vlv_power_sequencer_reset() why we need
+ * See intel_power_sequencer_reset() for why we need
* a power domain reference here.
*/
intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
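 
 The pattern repeated throughout this patch leans on the existing
 intel_dp_to_dev() helper; in outline:
 
 	/* Chain being collapsed (helpers defined elsewhere in the driver):
 	 *   intel_dp_to_dev(intel_dp)
 	 *     ~= dp_to_dig_port(intel_dp)->base.base.dev
 	 * so to_i915(intel_dp_to_dev(intel_dp)) replaces the old
 	 * dig_port -> encoder -> drm_device -> i915 locals. */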
@@ -454,10 +451,7 @@ static void pps_lock(struct intel_dp *intel_dp)
static void pps_unlock(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &intel_dig_port->base;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
mutex_unlock(&dev_priv->pps_mutex);
@@ -467,8 +461,8 @@ static void pps_unlock(struct intel_dp *intel_dp)
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum pipe pipe = intel_dp->pps_pipe;
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = DPIO_PHY(pipe);
@@ -477,11 +471,11 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
"skipping pipe %c power seqeuncer kick due to port %c being active\n",
- pipe_name(pipe), port_name(intel_dig_port->port)))
+ pipe_name(pipe), port_name(intel_dig_port->base.port)))
return;
DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
- pipe_name(pipe), port_name(intel_dig_port->port));
+ pipe_name(pipe), port_name(intel_dig_port->base.port));
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
@@ -578,9 +572,8 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -603,16 +596,16 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
if (WARN_ON(pipe == INVALID_PIPE))
pipe = PIPE_A;
- vlv_steal_power_sequencer(dev, pipe);
+ vlv_steal_power_sequencer(dev_priv, pipe);
intel_dp->pps_pipe = pipe;
DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
pipe_name(intel_dp->pps_pipe),
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
/* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
+ intel_dp_init_panel_power_sequencer(intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
/*
* Even vdd force doesn't work until we've made
@@ -626,9 +619,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -649,7 +640,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
* Only the HW needs to be reprogrammed, the SW state is fixed and
* has been setup during connector init.
*/
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
+ intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
return 0;
}
@@ -701,10 +692,9 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -731,13 +721,12 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
port_name(port), pipe_name(intel_dp->pps_pipe));
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
+ intel_dp_init_panel_power_sequencer(intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
@@ -754,15 +743,20 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
* should use them always.
*/
- for_each_intel_encoder(dev, encoder) {
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp;
if (encoder->type != INTEL_OUTPUT_DP &&
- encoder->type != INTEL_OUTPUT_EDP)
+ encoder->type != INTEL_OUTPUT_EDP &&
+ encoder->type != INTEL_OUTPUT_DDI)
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
+ /* Skip pure DVI/HDMI DDI encoders */
+ if (!i915_mmio_reg_valid(intel_dp->output_reg))
+ continue;
+
WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
if (encoder->type != INTEL_OUTPUT_EDP)
@@ -783,10 +777,10 @@ struct pps_registers {
i915_reg_t pp_div;
};
-static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
- struct intel_dp *intel_dp,
+static void intel_pps_get_registers(struct intel_dp *intel_dp,
struct pps_registers *regs)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
int pps_idx = 0;
memset(regs, 0, sizeof(*regs));
@@ -809,8 +803,7 @@ _pp_ctrl_reg(struct intel_dp *intel_dp)
{
struct pps_registers regs;
- intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
- &regs);
+ intel_pps_get_registers(intel_dp, &regs);
return regs.pp_ctrl;
}
@@ -820,8 +813,7 @@ _pp_stat_reg(struct intel_dp *intel_dp)
{
struct pps_registers regs;
- intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
- &regs);
+ intel_pps_get_registers(intel_dp, &regs);
return regs.pp_stat;
}
@@ -833,8 +825,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
{
struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
edp_notifier);
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
return 0;
@@ -864,8 +855,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -878,8 +868,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -893,8 +882,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (!intel_dp_is_edp(intel_dp))
return;
@@ -910,9 +898,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t status;
bool done;
@@ -959,7 +945,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* like to run at 2MHz. So, take the cdclk or PCH rawclk value and
* divide by 2000 and use that
*/
- if (intel_dig_port->port == PORT_A)
+ if (intel_dig_port->base.port == PORT_A)
return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
else
return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
@@ -970,7 +956,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
- if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
+ if (intel_dig_port->base.port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
@@ -1440,7 +1426,7 @@ static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = intel_aux_port(dev_priv,
- dp_to_dig_port(intel_dp)->port);
+ dp_to_dig_port(intel_dp)->base.port);
int i;
intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
@@ -1458,7 +1444,7 @@ static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
intel_aux_reg_init(intel_dp);
drm_dp_aux_init(&intel_dp->aux);
@@ -1479,8 +1465,7 @@ static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct dp_link_dpll *divisor = NULL;
int i, count = 0;
@@ -1628,7 +1613,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = encoder->port;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
@@ -1658,7 +1643,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;
pipe_config->has_drrs = false;
- if (port == PORT_A)
+ if (IS_G4X(dev_priv) || port == PORT_A)
pipe_config->has_audio = false;
else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
pipe_config->has_audio = intel_dp->has_audio;
@@ -1692,6 +1677,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
conn_state->scaling_mode);
}
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return false;
+
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
@@ -1849,11 +1838,10 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
static void intel_dp_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = dp_to_dig_port(intel_dp)->port;
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ enum port port = encoder->port;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
@@ -1940,20 +1928,18 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
-static void intel_pps_verify_state(struct drm_i915_private *dev_priv,
- struct intel_dp *intel_dp);
+static void intel_pps_verify_state(struct intel_dp *intel_dp);
static void wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
i915_reg_t pp_stat_reg, pp_ctrl_reg;
lockdep_assert_held(&dev_priv->pps_mutex);
- intel_pps_verify_state(dev_priv, intel_dp);
+ intel_pps_verify_state(intel_dp);
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
@@ -2024,8 +2010,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 control;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2046,9 +2031,8 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
*/
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->want_panel_vdd;
@@ -2067,7 +2051,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -2087,7 +2071,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
*/
if (!edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
msleep(intel_dp->panel_power_up_delay);
}
@@ -2113,13 +2097,12 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
pps_unlock(intel_dp);
I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
- port_name(dp_to_dig_port(intel_dp)->port));
+ port_name(dp_to_dig_port(intel_dp)->base.port));
}
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port =
dp_to_dig_port(intel_dp);
u32 pp;
@@ -2133,7 +2116,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -2193,7 +2176,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
return;
I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
- port_name(dp_to_dig_port(intel_dp)->port));
+ port_name(dp_to_dig_port(intel_dp)->base.port));
intel_dp->want_panel_vdd = false;
@@ -2205,8 +2188,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
static void edp_panel_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2216,11 +2198,11 @@ static void edp_panel_on(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
- port_name(dp_to_dig_port(intel_dp)->port));
+ port_name(dp_to_dig_port(intel_dp)->base.port));
if (WARN(edp_have_panel_power(intel_dp),
"eDP port %c panel power already on\n",
- port_name(dp_to_dig_port(intel_dp)->port)))
+ port_name(dp_to_dig_port(intel_dp)->base.port)))
return;
wait_panel_power_cycle(intel_dp);
@@ -2264,8 +2246,7 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
static void edp_panel_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2275,10 +2256,10 @@ static void edp_panel_off(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
- port_name(dp_to_dig_port(intel_dp)->port));
+ port_name(dp_to_dig_port(intel_dp)->base.port));
WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
- port_name(dp_to_dig_port(intel_dp)->port));
+ port_name(dp_to_dig_port(intel_dp)->base.port));
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -2313,9 +2294,7 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2358,8 +2337,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2430,7 +2408,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
I915_STATE_WARN(cur_state != state,
"DP port %c state assertion failure (expected %s, current %s)\n",
- port_name(dig_port->port),
+ port_name(dig_port->base.port),
onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
@@ -2486,10 +2464,10 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
udelay(200);
}
-static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
+static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -2505,6 +2483,21 @@ static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
udelay(200);
}
+static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
+{
+ /*
+ * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
+ * be capable of signalling downstream hpd with a long pulse.
+ * Whether or not that means D3 is safe to use is not clear,
+ * but let's assume so until proven otherwise.
+ *
+ * FIXME should really check all downstream ports...
+ */
+ return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
+ intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
+ intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
+}
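+
+A short reading of the check: DP_DPCD_REV == 0x11 is DPCD r1.1,
+DP_DWN_STRM_PORT_PRESENT flags a branch device, and DP_DS_PORT_HPD marks the
+first downstream port as HPD-capable; only that combination keeps the sink
+in D0.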
+
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
@@ -2515,6 +2508,9 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
return;
if (mode != DRM_MODE_DPMS_ON) {
+ if (downstream_hpd_needs_d0(intel_dp))
+ return;
+
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
DP_SET_POWER_D3);
} else {
@@ -2544,10 +2540,9 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = dp_to_dig_port(intel_dp)->port;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ enum port port = encoder->port;
u32 tmp;
bool ret;
@@ -2596,12 +2591,16 @@ out:
static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
u32 tmp, flags = 0;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = dp_to_dig_port(intel_dp)->port;
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ enum port port = encoder->port;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
+ else
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
tmp = I915_READ(intel_dp->output_reg);
@@ -2680,7 +2679,8 @@ static void intel_disable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder);
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
@@ -2694,12 +2694,10 @@ static void g4x_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
intel_disable_dp(encoder, old_crtc_state, old_conn_state);
/* disable the port before the pipe on g4x */
- intel_dp_link_down(intel_dp);
+ intel_dp_link_down(encoder, old_crtc_state);
}
static void ilk_disable_dp(struct intel_encoder *encoder,
@@ -2725,38 +2723,34 @@ static void ilk_post_disable_dp(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = encoder->port;
- intel_dp_link_down(intel_dp);
+ intel_dp_link_down(encoder, old_crtc_state);
/* Only ilk+ has port A */
if (port == PORT_A)
- ironlake_edp_pll_off(intel_dp);
+ ironlake_edp_pll_off(intel_dp, old_crtc_state);
}
static void vlv_post_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
- intel_dp_link_down(intel_dp);
+ intel_dp_link_down(encoder, old_crtc_state);
}
static void chv_post_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_dp_link_down(intel_dp);
+ intel_dp_link_down(encoder, old_crtc_state);
mutex_lock(&dev_priv->sb_lock);
/* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, true);
+ chv_data_lane_soft_reset(encoder, old_crtc_state, true);
mutex_unlock(&dev_priv->sb_lock);
}
@@ -2766,10 +2760,9 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t *DP,
uint8_t dp_train_pat)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
@@ -2852,8 +2845,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
static void intel_dp_enable_port(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
/* enable with pattern 1 (as per spec) */
@@ -2877,10 +2869,9 @@ static void intel_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
enum pipe pipe = crtc->pipe;
@@ -2890,7 +2881,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
pps_lock(intel_dp);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_init_panel_power_sequencer(intel_dp);
+ vlv_init_panel_power_sequencer(encoder, pipe_config);
intel_dp_enable_port(intel_dp, pipe_config);
@@ -2944,7 +2935,7 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = encoder->port;
intel_dp_prepare(encoder, pipe_config);
@@ -2977,22 +2968,21 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
* from a port.
*/
DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
- pipe_name(pipe), port_name(intel_dig_port->port));
+ pipe_name(pipe), port_name(intel_dig_port->base.port));
I915_WRITE(pp_on_reg, 0);
POSTING_READ(pp_on_reg);
intel_dp->pps_pipe = INVALID_PIPE;
}
-static void vlv_steal_power_sequencer(struct drm_device *dev,
+static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
lockdep_assert_held(&dev_priv->pps_mutex);
- for_each_intel_encoder(dev, encoder) {
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp;
enum port port;
@@ -3001,7 +2991,7 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
- port = dp_to_dig_port(intel_dp)->port;
+ port = dp_to_dig_port(intel_dp)->base.port;
WARN(intel_dp->active_pipe == pipe,
"stealing pipe %c power sequencer from active (e)DP port %c\n",
@@ -3018,13 +3008,12 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
}
}
-static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
+static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &intel_dig_port->base;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -3044,7 +3033,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
* We may be stealing the power
* sequencer from another port.
*/
- vlv_steal_power_sequencer(dev, crtc->pipe);
+ vlv_steal_power_sequencer(dev_priv, crtc->pipe);
intel_dp->active_pipe = crtc->pipe;
@@ -3055,18 +3044,18 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
intel_dp->pps_pipe = crtc->pipe;
DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
- pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
+ pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
/* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
+ intel_dp_init_panel_power_sequencer(intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- vlv_phy_pre_encoder_enable(encoder);
+ vlv_phy_pre_encoder_enable(encoder, pipe_config);
intel_enable_dp(encoder, pipe_config, conn_state);
}
@@ -3077,14 +3066,14 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
{
intel_dp_prepare(encoder, pipe_config);
- vlv_phy_pre_pll_enable(encoder);
+ vlv_phy_pre_pll_enable(encoder, pipe_config);
}
static void chv_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- chv_phy_pre_encoder_enable(encoder);
+ chv_phy_pre_encoder_enable(encoder, pipe_config);
intel_enable_dp(encoder, pipe_config, conn_state);
@@ -3098,14 +3087,14 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
{
intel_dp_prepare(encoder, pipe_config);
- chv_phy_pre_pll_enable(encoder);
+ chv_phy_pre_pll_enable(encoder, pipe_config);
}
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
- chv_phy_post_pll_disable(encoder);
+ chv_phy_post_pll_disable(encoder, old_crtc_state);
}
/*
@@ -3153,7 +3142,7 @@ uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
if (INTEL_GEN(dev_priv) >= 9) {
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
@@ -3172,7 +3161,7 @@ uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
if (INTEL_GEN(dev_priv) >= 9) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
@@ -3505,10 +3494,9 @@ gen7_edp_signal_levels(uint8_t train_set)
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- enum port port = intel_dig_port->port;
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ enum port port = intel_dig_port->base.port;
uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
@@ -3563,10 +3551,9 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
uint32_t val;
if (!HAS_DDI(dev_priv))
@@ -3595,13 +3582,13 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
}
static void
-intel_dp_link_down(struct intel_dp *intel_dp)
+intel_dp_link_down(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
- enum port port = intel_dig_port->port;
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ enum port port = encoder->port;
uint32_t DP = intel_dp->DP;
if (WARN_ON(HAS_DDI(dev_priv)))
@@ -3747,11 +3734,11 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
sizeof(intel_dp->edp_dpcd))
- DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
+ DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
- /* Intermediate frequency support */
- if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDp v1.4 or higher */
+ /* Read the eDP 1.4+ supported link rates. */
+ if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
int i;
@@ -3775,6 +3762,10 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
intel_dp->num_sink_rates = i;
}
+ /*
+ * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
+ * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
+ */
if (intel_dp->num_sink_rates)
intel_dp->use_rate_select = true;
else
@@ -3874,11 +3865,12 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
intel_dp->is_mst);
}
-static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
+static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state, bool disable_wa)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
u8 buf;
int ret = 0;
int count = 0;
@@ -3914,15 +3906,17 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
}
out:
- hsw_enable_ips(intel_crtc);
+ if (disable_wa)
+ hsw_enable_ips(crtc_state);
return ret;
}
-static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
+static int intel_dp_sink_crc_start(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
u8 buf;
int ret;
@@ -3936,16 +3930,16 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
return -EIO;
if (buf & DP_TEST_SINK_START) {
- ret = intel_dp_sink_crc_stop(intel_dp);
+ ret = intel_dp_sink_crc_stop(intel_dp, crtc_state, false);
if (ret)
return ret;
}
- hsw_disable_ips(intel_crtc);
+ hsw_disable_ips(crtc_state);
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
buf | DP_TEST_SINK_START) < 0) {
- hsw_enable_ips(intel_crtc);
+ hsw_enable_ips(crtc_state);
return -EIO;
}
@@ -3953,16 +3947,16 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
return 0;
}
-int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+int intel_dp_sink_crc(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, u8 *crc)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
u8 buf;
int count, ret;
int attempts = 6;
- ret = intel_dp_sink_crc_start(intel_dp);
+ ret = intel_dp_sink_crc_start(intel_dp, crtc_state);
if (ret)
return ret;
@@ -3990,7 +3984,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
}
stop:
- intel_dp_sink_crc_stop(intel_dp);
+ intel_dp_sink_crc_stop(intel_dp, crtc_state, true);
return ret;
}
@@ -4285,21 +4279,29 @@ intel_dp_retrain_link(struct intel_dp *intel_dp)
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_connector_state *conn_state =
+ intel_dp->attached_connector->base.state;
u8 link_status[DP_LINK_STATUS_SIZE];
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("Failed to get link status\n");
return;
}
- if (!intel_encoder->base.crtc)
+ if (!conn_state->crtc)
+ return;
+
+ WARN_ON(!drm_modeset_is_locked(&conn_state->crtc->mutex));
+
+ if (!conn_state->crtc->state->active)
return;
- if (!to_intel_crtc(intel_encoder->base.crtc)->active)
+ if (conn_state->commit &&
+ !try_wait_for_completion(&conn_state->commit->hw_done))
return;
/*
@@ -4335,8 +4337,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u8 sink_irq_vector = 0;
u8 old_sink_count = intel_dp->sink_count;
bool ret;
@@ -4375,13 +4376,12 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
/* Send a Hotplug Uevent to userspace to start modeset */
- drm_kms_helper_hotplug_event(intel_encoder->base.dev);
+ drm_kms_helper_hotplug_event(&dev_priv->drm);
}
return true;
@@ -4445,8 +4445,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum drm_connector_status status;
status = intel_panel_detect(dev_priv);
@@ -4461,7 +4460,7 @@ static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
{
u32 bit;
- switch (port->port) {
+ switch (port->base.port) {
case PORT_B:
bit = SDE_PORTB_HOTPLUG;
break;
@@ -4472,7 +4471,7 @@ static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
bit = SDE_PORTD_HOTPLUG;
break;
default:
- MISSING_CASE(port->port);
+ MISSING_CASE(port->base.port);
return false;
}
@@ -4484,7 +4483,7 @@ static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
{
u32 bit;
- switch (port->port) {
+ switch (port->base.port) {
case PORT_B:
bit = SDE_PORTB_HOTPLUG_CPT;
break;
@@ -4495,7 +4494,7 @@ static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
bit = SDE_PORTD_HOTPLUG_CPT;
break;
default:
- MISSING_CASE(port->port);
+ MISSING_CASE(port->base.port);
return false;
}
@@ -4507,7 +4506,7 @@ static bool spt_digital_port_connected(struct drm_i915_private *dev_priv,
{
u32 bit;
- switch (port->port) {
+ switch (port->base.port) {
case PORT_A:
bit = SDE_PORTA_HOTPLUG_SPT;
break;
@@ -4526,7 +4525,7 @@ static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
{
u32 bit;
- switch (port->port) {
+ switch (port->base.port) {
case PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
break;
@@ -4537,7 +4536,7 @@ static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
break;
default:
- MISSING_CASE(port->port);
+ MISSING_CASE(port->base.port);
return false;
}
@@ -4549,7 +4548,7 @@ static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
{
u32 bit;
- switch (port->port) {
+ switch (port->base.port) {
case PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
break;
@@ -4560,7 +4559,7 @@ static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
break;
default:
- MISSING_CASE(port->port);
+ MISSING_CASE(port->base.port);
return false;
}
@@ -4570,7 +4569,7 @@ static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
static bool ilk_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
- if (port->port == PORT_A)
+ if (port->base.port == PORT_A)
return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
else
return ibx_digital_port_connected(dev_priv, port);
@@ -4579,7 +4578,7 @@ static bool ilk_digital_port_connected(struct drm_i915_private *dev_priv,
static bool snb_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
- if (port->port == PORT_A)
+ if (port->base.port == PORT_A)
return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
else
return cpt_digital_port_connected(dev_priv, port);
@@ -4588,7 +4587,7 @@ static bool snb_digital_port_connected(struct drm_i915_private *dev_priv,
static bool ivb_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
- if (port->port == PORT_A)
+ if (port->base.port == PORT_A)
return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
else
return cpt_digital_port_connected(dev_priv, port);
@@ -4597,7 +4596,7 @@ static bool ivb_digital_port_connected(struct drm_i915_private *dev_priv,
static bool bdw_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
- if (port->port == PORT_A)
+ if (port->base.port == PORT_A)
return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
else
return cpt_digital_port_connected(dev_priv, port);
@@ -4702,24 +4701,21 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
}
static int
-intel_dp_long_pulse(struct intel_connector *intel_connector)
+intel_dp_long_pulse(struct intel_connector *connector)
{
- struct drm_connector *connector = &intel_connector->base;
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
enum drm_connector_status status;
u8 sink_irq_vector = 0;
- WARN_ON(!drm_modeset_is_locked(&connector->dev->mode_config.connection_mutex));
+ WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
- intel_display_power_get(to_i915(dev), intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
/* Can't disconnect eDP, but you can close the lid... */
if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
- else if (intel_digital_port_connected(to_i915(dev),
+ else if (intel_digital_port_connected(dev_priv,
dp_to_dig_port(intel_dp)))
status = intel_dp_detect_dpcd(intel_dp);
else
@@ -4740,9 +4736,6 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
goto out;
}
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DP;
-
if (intel_dp->reset_link_params) {
/* Initial max link lane count */
intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
@@ -4793,7 +4786,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp->aux.i2c_defer_count = 0;
intel_dp_set_edid(intel_dp);
- if (intel_dp_is_edp(intel_dp) || intel_connector->detect_edid)
+ if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
status = connector_status_connected;
intel_dp->detect_done = true;
@@ -4816,7 +4809,7 @@ out:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
- intel_display_power_put(to_i915(dev), intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
return status;
}
@@ -4832,8 +4825,19 @@ intel_dp_detect(struct drm_connector *connector,
connector->base.id, connector->name);
/* If full detect is not performed yet, do a full detect */
- if (!intel_dp->detect_done)
+ if (!intel_dp->detect_done) {
+ struct drm_crtc *crtc;
+ int ret;
+
+ crtc = connector->state->crtc;
+ if (crtc) {
+ ret = drm_modeset_lock(&crtc->mutex, ctx);
+ if (ret)
+ return ret;
+ }
+
status = intel_dp_long_pulse(intel_dp->attached_connector);
+ }
intel_dp->detect_done = false;
@@ -4859,9 +4863,6 @@ intel_dp_force(struct drm_connector *connector)
intel_dp_set_edid(intel_dp);
intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
-
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DP;
}
static int intel_dp_get_modes(struct drm_connector *connector)
@@ -4986,9 +4987,7 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -5041,7 +5040,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
if (intel_dp_is_edp(intel_dp)) {
/* Reinit the power sequencer, in case BIOS did something with it. */
- intel_dp_pps_init(encoder->dev, intel_dp);
+ intel_dp_pps_init(intel_dp);
intel_edp_panel_vdd_sanitize(intel_dp);
}
@@ -5076,14 +5075,9 @@ enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum irqreturn ret = IRQ_NONE;
- if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
- intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
- intel_dig_port->base.type = INTEL_OUTPUT_DP;
-
if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
/*
* vdd off can generate a long pulse on eDP which
@@ -5092,12 +5086,12 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
* "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
*/
DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
return IRQ_HANDLED;
}
DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
- port_name(intel_dig_port->port),
+ port_name(intel_dig_port->base.port),
long_hpd ? "long" : "short");
if (long_hpd) {
@@ -5125,7 +5119,38 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
}
if (!intel_dp->is_mst) {
- if (!intel_dp_short_pulse(intel_dp)) {
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector *connector = &intel_dp->attached_connector->base;
+ struct drm_crtc *crtc;
+ int iret;
+ bool handled = false;
+
+ drm_modeset_acquire_init(&ctx, 0);
+retry:
+ iret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, &ctx);
+ if (iret)
+ goto err;
+
+ crtc = connector->state->crtc;
+ if (crtc) {
+ iret = drm_modeset_lock(&crtc->mutex, &ctx);
+ if (iret)
+ goto err;
+ }
+
+ handled = intel_dp_short_pulse(intel_dp);
+
+err:
+ if (iret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ WARN(iret, "Acquiring modeset locks failed with %i\n", iret);
+
+ if (!handled) {
intel_dp->detect_done = false;
goto put_power;
}
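 
 The block above is the standard drm_modeset_acquire_ctx protocol: take each
 lock through the context, and on -EDEADLK drop everything with
 drm_modeset_backoff() and retry, so two paths locking in opposite orders
 back off instead of deadlocking.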
@@ -5159,8 +5184,11 @@ static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
+
+ if (!IS_G4X(dev_priv) && port != PORT_A)
+ intel_attach_force_audio_property(connector);
- intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
if (intel_dp_is_edp(intel_dp)) {
@@ -5185,13 +5213,13 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
}
static void
-intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dp *intel_dp, struct edp_power_seq *seq)
+intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
struct pps_registers regs;
- intel_pps_get_registers(dev_priv, intel_dp, &regs);
+ intel_pps_get_registers(intel_dp, &regs);
/* Workaround: Need to write PP_CONTROL with the unlock key as
* the very first thing. */
@@ -5235,13 +5263,12 @@ intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
}
static void
-intel_pps_verify_state(struct drm_i915_private *dev_priv,
- struct intel_dp *intel_dp)
+intel_pps_verify_state(struct intel_dp *intel_dp)
{
struct edp_power_seq hw;
struct edp_power_seq *sw = &intel_dp->pps_delays;
- intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
+ intel_pps_readout_hw_state(intel_dp, &hw);
if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
@@ -5252,10 +5279,9 @@ intel_pps_verify_state(struct drm_i915_private *dev_priv,
}
static void
-intel_dp_init_panel_power_sequencer(struct drm_device *dev,
- struct intel_dp *intel_dp)
+intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct edp_power_seq cur, vbt, spec,
*final = &intel_dp->pps_delays;
@@ -5265,7 +5291,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
if (final->t11_t12 != 0)
return;
- intel_pps_readout_hw_state(dev_priv, intel_dp, &cur);
+ intel_pps_readout_hw_state(intel_dp, &cur);
intel_pps_dump_state("cur", &cur);
@@ -5336,23 +5362,28 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
*/
final->t8 = 1;
final->t9 = 1;
+
+ /*
+ * HW has only a 100 msec granularity for t11_t12, so round it up
+ * accordingly.
+ */
+ final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
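A worked instance of that rounding, assuming the panel power delays are kept in the usual 100 usec units (values illustrative):

/* 100 msec == 100 * 10 units of 100 usec:
 *   t11_t12 = 5100 (510 ms) -> roundup(5100, 1000) = 6000 (600 ms)
 *   t11_t12 = 6000 (600 ms) -> roundup(6000, 1000) = 6000 (unchanged)
 */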
static void
-intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp,
+intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
bool force_disable_vdd)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp_on, pp_off, pp_div, port_sel = 0;
int div = dev_priv->rawclk_freq / 1000;
struct pps_registers regs;
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
lockdep_assert_held(&dev_priv->pps_mutex);
- intel_pps_get_registers(dev_priv, intel_dp, &regs);
+ intel_pps_get_registers(intel_dp, &regs);
/*
* On some VLV machines the BIOS can leave the VDD
@@ -5424,16 +5455,15 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_READ(regs.pp_div));
}
-static void intel_dp_pps_init(struct drm_device *dev,
- struct intel_dp *intel_dp)
+static void intel_dp_pps_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_initial_power_sequencer_setup(intel_dp);
} else {
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
+ intel_dp_init_panel_power_sequencer(intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
}
@@ -5472,7 +5502,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
dig_port = dp_to_dig_port(intel_dp);
encoder = &dig_port->base;
- intel_crtc = to_intel_crtc(encoder->base.crtc);
if (!intel_crtc) {
DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
@@ -5545,8 +5574,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (!crtc_state->has_drrs) {
DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
@@ -5581,8 +5609,7 @@ unlock:
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (!old_crtc_state->has_drrs)
return;
@@ -5765,7 +5792,7 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
/**
* intel_dp_drrs_init - Init basic DRRS work and mutex.
- * @intel_connector: eDP connector
+ * @connector: eDP connector
* @fixed_mode: preferred mode of panel
*
* This function is called only once at driver load to initialize basic
@@ -5777,12 +5804,10 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
* from VBT setting).
*/
static struct drm_display_mode *
-intel_dp_drrs_init(struct intel_connector *intel_connector,
- struct drm_display_mode *fixed_mode)
+intel_dp_drrs_init(struct intel_connector *connector,
+ struct drm_display_mode *fixed_mode)
{
- struct drm_connector *connector = &intel_connector->base;
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_display_mode *downclock_mode = NULL;
INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
@@ -5798,8 +5823,8 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
return NULL;
}
- downclock_mode = intel_find_panel_downclock
- (dev_priv, fixed_mode, connector);
+ downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode,
+ &connector->base);
if (!downclock_mode) {
DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
@@ -5816,11 +5841,9 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector)
{
- struct drm_connector *connector = &intel_connector->base;
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_connector *connector = &intel_connector->base;
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *alt_fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
@@ -5838,7 +5861,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* eDP and LVDS bail out early in this case to prevent interfering
* with an already powered-on LVDS power sequencer.
*/
- if (intel_get_lvds_encoder(dev)) {
+ if (intel_get_lvds_encoder(&dev_priv->drm)) {
WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
DRM_INFO("LVDS was detected, not registering eDP\n");
@@ -5848,7 +5871,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
pps_lock(intel_dp);
intel_dp_init_panel_power_timestamps(intel_dp);
- intel_dp_pps_init(dev, intel_dp);
+ intel_dp_pps_init(intel_dp);
intel_edp_panel_vdd_sanitize(intel_dp);
pps_unlock(intel_dp);
@@ -5868,7 +5891,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (drm_add_edid_modes(connector, edid)) {
drm_mode_connector_update_edid_property(connector,
edid);
- drm_edid_to_eld(connector, edid);
} else {
kfree(edid);
edid = ERR_PTR(-EINVAL);
@@ -5949,9 +5971,9 @@ intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port)
struct intel_encoder *encoder = &intel_dig_port->base;
struct intel_dp *intel_dp = &intel_dig_port->dp;
- encoder->hpd_pin = intel_hpd_pin(intel_dig_port->port);
+ encoder->hpd_pin = intel_hpd_pin(encoder->port);
- switch (intel_dig_port->port) {
+ switch (encoder->port) {
case PORT_A:
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_A;
break;
@@ -5969,7 +5991,7 @@ intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port)
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
break;
default:
- MISSING_CASE(intel_dig_port->port);
+ MISSING_CASE(encoder->port);
}
}
@@ -6005,7 +6027,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_encoder->port;
int type;
/* Initialize the work for modeset in case of link train failure */
@@ -6074,7 +6096,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
- connector->interlace_allowed = true;
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+ connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
intel_dp_init_connector_port_info(intel_dig_port);
@@ -6175,7 +6198,6 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->disable = g4x_disable_dp;
}
- intel_dig_port->port = port;
intel_dig_port->dp.output_reg = output_reg;
intel_dig_port->max_lanes = 4;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 772521440a9f..c3de0918ee13 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -34,6 +34,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -87,6 +88,12 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->dp_m_n.tu = slots;
+ if (IS_GEN9_LP(dev_priv))
+ pipe_config->lane_lat_optim_mask =
+ bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+
+ intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+
return true;
}
@@ -142,7 +149,8 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
DRM_ERROR("failed to update payload %d\n", ret);
}
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder);
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
}
static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
@@ -172,13 +180,27 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
intel_dp->active_mst_links--;
intel_mst->connector = NULL;
- if (intel_dp->active_mst_links == 0) {
+ if (intel_dp->active_mst_links == 0)
intel_dig_port->base.post_disable(&intel_dig_port->base,
- NULL, NULL);
- }
+ old_crtc_state, NULL);
+
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
}
+static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ if (intel_dp->active_mst_links == 0 &&
+ intel_dig_port->base.pre_pll_enable)
+ intel_dig_port->base.pre_pll_enable(&intel_dig_port->base,
+ pipe_config, NULL);
+}
+
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
@@ -187,7 +209,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int ret;
@@ -231,7 +253,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
int ret;
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
@@ -265,48 +287,8 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- u32 temp, flags = 0;
-
- pipe_config->has_audio =
- intel_ddi_is_audio_enabled(dev_priv, crtc);
-
- temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if (temp & TRANS_DDI_PHSYNC)
- flags |= DRM_MODE_FLAG_PHSYNC;
- else
- flags |= DRM_MODE_FLAG_NHSYNC;
- if (temp & TRANS_DDI_PVSYNC)
- flags |= DRM_MODE_FLAG_PVSYNC;
- else
- flags |= DRM_MODE_FLAG_NVSYNC;
-
- switch (temp & TRANS_DDI_BPC_MASK) {
- case TRANS_DDI_BPC_6:
- pipe_config->pipe_bpp = 18;
- break;
- case TRANS_DDI_BPC_8:
- pipe_config->pipe_bpp = 24;
- break;
- case TRANS_DDI_BPC_10:
- pipe_config->pipe_bpp = 30;
- break;
- case TRANS_DDI_BPC_12:
- pipe_config->pipe_bpp = 36;
- break;
- default:
- break;
- }
- pipe_config->base.adjusted_mode.flags |= flags;
-
- pipe_config->lane_count =
- ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
-
- intel_dp_get_m_n(crtc, pipe_config);
- intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
+ intel_ddi_get_config(&intel_dig_port->base, pipe_config);
}
static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
@@ -570,13 +552,14 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->power_domain = intel_dig_port->base.power_domain;
- intel_encoder->port = intel_dig_port->port;
+ intel_encoder->port = intel_dig_port->base.port;
intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
+ intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index de38d014ed39..76473e9836c6 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -466,21 +466,21 @@ void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
lockdep_assert_held(&dev_priv->power_domains.lock);
- if (rcomp_phy != -1) {
+ was_enabled = true;
+ if (rcomp_phy != -1)
was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
- /*
- * We need to copy the GRC calibration value from rcomp_phy,
- * so make sure it's powered up.
- */
- if (!was_enabled)
- _bxt_ddi_phy_init(dev_priv, rcomp_phy);
- }
+ /*
+ * We need to copy the GRC calibration value from rcomp_phy,
+ * so make sure it's powered up.
+ */
+ if (!was_enabled)
+ _bxt_ddi_phy_init(dev_priv, rcomp_phy);
_bxt_ddi_phy_init(dev_priv, phy);
- if (rcomp_phy != -1 && !was_enabled)
- bxt_ddi_phy_uninit(dev_priv, phy_info->rcomp_phy);
+ if (!was_enabled)
+ bxt_ddi_phy_uninit(dev_priv, rcomp_phy);
}
static bool __printf(6, 7)
@@ -567,8 +567,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
}
uint8_t
-bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
- uint8_t lane_count)
+bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count)
{
switch (lane_count) {
case 1:
@@ -587,9 +586,8 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
uint8_t lane_lat_optim_mask)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- enum port port = dport->port;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
enum dpio_phy phy;
enum dpio_channel ch;
int lane;
@@ -614,9 +612,8 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
uint8_t
bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- enum port port = dport->port;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
enum dpio_phy phy;
enum dpio_channel ch;
int lane;
@@ -642,7 +639,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
u32 val;
@@ -734,11 +731,12 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
}
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
uint32_t val;
@@ -777,17 +775,16 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
}
}
-void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
- enum pipe pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
unsigned int lane_mask =
- intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
+ intel_dp_unused_lane_mask(crtc_state->lane_count);
u32 val;
/*
@@ -803,7 +800,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
mutex_lock(&dev_priv->sb_lock);
/* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, true);
+ chv_data_lane_soft_reset(encoder, crtc_state, true);
/* program left/right clock distribution */
if (pipe != PIPE_B) {
@@ -833,7 +830,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
- if (intel_crtc->config->lane_count > 2) {
+ if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
@@ -858,16 +855,15 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock);
}
-void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
int data, i, stagger;
u32 val;
@@ -878,16 +874,16 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
- if (intel_crtc->config->lane_count > 2) {
+ if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
}
/* Program Tx lane latency optimal setting */
- for (i = 0; i < intel_crtc->config->lane_count; i++) {
+ for (i = 0; i < crtc_state->lane_count; i++) {
/* Set the upar bit */
- if (intel_crtc->config->lane_count == 1)
+ if (crtc_state->lane_count == 1)
data = 0x0;
else
data = (i == 1) ? 0x0 : 0x1;
@@ -896,13 +892,13 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
}
/* Data lane stagger programming */
- if (intel_crtc->config->port_clock > 270000)
+ if (crtc_state->port_clock > 270000)
stagger = 0x18;
- else if (intel_crtc->config->port_clock > 135000)
+ else if (crtc_state->port_clock > 135000)
stagger = 0xd;
- else if (intel_crtc->config->port_clock > 67500)
+ else if (crtc_state->port_clock > 67500)
stagger = 0x7;
- else if (intel_crtc->config->port_clock > 33750)
+ else if (crtc_state->port_clock > 33750)
stagger = 0x4;
else
stagger = 0x2;
@@ -911,7 +907,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
- if (intel_crtc->config->lane_count > 2) {
+ if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
@@ -924,7 +920,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
DPIO_TX1_STAGGER_MULT(6) |
DPIO_TX2_STAGGER_MULT(0));
- if (intel_crtc->config->lane_count > 2) {
+ if (crtc_state->lane_count > 2) {
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
@@ -934,7 +930,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
}
/* Deassert data lane reset */
- chv_data_lane_soft_reset(encoder, false);
+ chv_data_lane_soft_reset(encoder, crtc_state, false);
mutex_unlock(&dev_priv->sb_lock);
}
@@ -950,10 +946,11 @@ void chv_phy_release_cl2_override(struct intel_encoder *encoder)
}
}
-void chv_phy_post_pll_disable(struct intel_encoder *encoder)
+void chv_phy_post_pll_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(old_crtc_state->base.crtc)->pipe;
u32 val;
mutex_lock(&dev_priv->sb_lock);
@@ -991,7 +988,7 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
@@ -1009,15 +1006,14 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
mutex_unlock(&dev_priv->sb_lock);
}
-void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->sb_lock);
@@ -1037,15 +1033,15 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock);
}
-void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u32 val;
mutex_lock(&dev_priv->sb_lock);
@@ -1067,14 +1063,14 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock);
}
-void vlv_phy_reset_lanes(struct intel_encoder *encoder)
+void vlv_phy_reset_lanes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index df808a94c511..51c5ae4e9116 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -813,15 +813,11 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (encoder->type == INTEL_OUTPUT_HDMI) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
-
- } else if (encoder->type == INTEL_OUTPUT_DP ||
- encoder->type == INTEL_OUTPUT_DP_MST ||
- encoder->type == INTEL_OUTPUT_EDP) {
+ } else if (intel_crtc_has_dp_encoder(crtc_state)) {
pll = hsw_ddi_dp_get_dpll(encoder, clock);
-
- } else if (encoder->type == INTEL_OUTPUT_ANALOG) {
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
if (WARN_ON(crtc_state->port_clock / 2 != 135000))
return NULL;
@@ -1369,15 +1365,13 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
- if (encoder->type == INTEL_OUTPUT_HDMI) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
return NULL;
}
- } else if (encoder->type == INTEL_OUTPUT_DP ||
- encoder->type == INTEL_OUTPUT_DP_MST ||
- encoder->type == INTEL_OUTPUT_EDP) {
+ } else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
@@ -1388,7 +1382,7 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
return NULL;
}
- if (encoder->type == INTEL_OUTPUT_EDP)
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
pll = intel_find_shared_dpll(crtc, crtc_state,
DPLL_ID_SKL_DPLL0,
DPLL_ID_SKL_DPLL0);
@@ -1808,18 +1802,15 @@ bxt_get_dpll(struct intel_crtc *crtc,
{
struct intel_dpll_hw_state dpll_hw_state = { };
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_digital_port *intel_dig_port;
struct intel_shared_dpll *pll;
int i, clock = crtc_state->port_clock;
- if (encoder->type == INTEL_OUTPUT_HDMI &&
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
!bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
&dpll_hw_state))
return NULL;
- if ((encoder->type == INTEL_OUTPUT_DP ||
- encoder->type == INTEL_OUTPUT_EDP ||
- encoder->type == INTEL_OUTPUT_DP_MST) &&
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
!bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
return NULL;
@@ -1828,15 +1819,8 @@ bxt_get_dpll(struct intel_crtc *crtc,
crtc_state->dpll_hw_state = dpll_hw_state;
- if (encoder->type == INTEL_OUTPUT_DP_MST) {
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
-
- intel_dig_port = intel_mst->primary;
- } else
- intel_dig_port = enc_to_dig_port(&encoder->base);
-
/* 1:1 mapping between ports and PLLs */
- i = (enum intel_dpll_id) intel_dig_port->port;
+ i = (enum intel_dpll_id) encoder->port;
pll = intel_get_shared_dpll_by_id(dev_priv, i);
DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
@@ -2008,8 +1992,8 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
* requirement, follow the Display Voltage Frequency Switching
* Sequence Before Frequency Change
*
- * FIXME: (DVFS) is used to adjust the display voltage to match the
- * display clock frequencies
+ * Note: DVFS is actually handled via the cdclk code paths,
+ * hence we do nothing here.
*/
/* 6. Enable DPLL in DPLL_ENABLE. */
@@ -2030,8 +2014,8 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
* requirement, follow the Display Voltage Frequency Switching
* Sequence After Frequency Change
*
- * FIXME: (DVFS) is used to adjust the display voltage to match the
- * display clock frequencies
+ * Note: DVFS is actually handled via the cdclk code paths,
+ * hence we do nothing here.
*/
/*
@@ -2055,8 +2039,8 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
* requirement, follow the Display Voltage Frequency Switching
* Sequence Before Frequency Change
*
- * FIXME: (DVFS) is used to adjust the display voltage to match the
- * display clock frequencies
+ * Note: DVFS is actually handled via the cdclk code paths,
+ * hence we do nothing here.
*/
/* 3. Disable DPLL through DPLL_ENABLE. */
@@ -2077,8 +2061,8 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
* requirement, follow the Display Voltage Frequency Switching
* Sequence After Frequency Change
*
- * FIXME: (DVFS) is used to adjust the display voltage to match the
- * display clock frequencies
+ * Note: DVFS is actually handled via the cdclk code paths,
+ * hence we do nothing here.
*/
/* 6. Disable DPLL power in DPLL_ENABLE. */
@@ -2126,10 +2110,8 @@ out:
return ret;
}
-static void cnl_wrpll_get_multipliers(unsigned int bestdiv,
- unsigned int *pdiv,
- unsigned int *qdiv,
- unsigned int *kdiv)
+static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
+ int *qdiv, int *kdiv)
{
/* even dividers */
if (bestdiv % 2 == 0) {
@@ -2167,10 +2149,12 @@ static void cnl_wrpll_get_multipliers(unsigned int bestdiv,
}
}
-static void cnl_wrpll_params_populate(struct skl_wrpll_params *params, uint32_t dco_freq,
- uint32_t ref_freq, uint32_t pdiv, uint32_t qdiv,
- uint32_t kdiv)
+static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
+ u32 dco_freq, u32 ref_freq,
+ int pdiv, int qdiv, int kdiv)
{
+ u32 dco;
+
switch (kdiv) {
case 1:
params->kdiv = 1;
@@ -2202,39 +2186,35 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params, uint32_t
WARN(1, "Incorrect PDiv\n");
}
- if (kdiv != 2)
- qdiv = 1;
+ WARN_ON(kdiv != 2 && qdiv != 1);
params->qdiv_ratio = qdiv;
params->qdiv_mode = (qdiv == 1) ? 0 : 1;
- params->dco_integer = div_u64(dco_freq, ref_freq);
- params->dco_fraction = div_u64((div_u64((uint64_t)dco_freq<<15, (uint64_t)ref_freq) -
- ((uint64_t)params->dco_integer<<15)) * 0x8000, 0x8000);
+ dco = div_u64((u64)dco_freq << 15, ref_freq);
+
+ params->dco_integer = dco >> 15;
+ params->dco_fraction = dco & 0x7fff;
}
static bool
-cnl_ddi_calculate_wrpll(int clock /* in Hz */,
+cnl_ddi_calculate_wrpll(int clock,
struct drm_i915_private *dev_priv,
struct skl_wrpll_params *wrpll_params)
{
- uint64_t afe_clock = clock * 5 / KHz(1); /* clocks in kHz */
- unsigned int dco_min = 7998 * KHz(1);
- unsigned int dco_max = 10000 * KHz(1);
- unsigned int dco_mid = (dco_min + dco_max) / 2;
-
+ u32 afe_clock = clock * 5;
+ u32 dco_min = 7998000;
+ u32 dco_max = 10000000;
+ u32 dco_mid = (dco_min + dco_max) / 2;
static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
18, 20, 24, 28, 30, 32, 36, 40,
42, 44, 48, 50, 52, 54, 56, 60,
64, 66, 68, 70, 72, 76, 78, 80,
84, 88, 90, 92, 96, 98, 100, 102,
3, 5, 7, 9, 15, 21 };
- unsigned int d, dco;
- unsigned int dco_centrality = 0;
- unsigned int best_dco_centrality = 999999;
- unsigned int best_div = 0;
- unsigned int best_dco = 0;
- unsigned int pdiv = 0, qdiv = 0, kdiv = 0;
+ u32 dco, best_dco = 0, dco_centrality = 0;
+ u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
+ int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
for (d = 0; d < ARRAY_SIZE(dividers); d++) {
dco = afe_clock * dividers[d];
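For reference, a worked pass through the divider search and the fixed-point split above, assuming a 24000 kHz reference clock and a 594000 kHz HDMI port clock (numbers illustrative):

/* afe_clock = 5 * 594000 = 2970000 kHz
 *
 * Only divider 3 puts the DCO inside [7998000, 10000000] kHz:
 *   dco = 2970000 * 3 = 8910000 kHz
 *   dco_centrality = |8910000 - 8999000| = 89000
 * best_div = 3 is odd, so pdiv = 3, qdiv = 1, kdiv = 1.
 *
 * The DCO ratio is then split into 15-bit fixed point:
 *   dco          = (8910000 << 15) / 24000 = 12165120
 *   dco_integer  = 12165120 >> 15          = 371
 *   dco_fraction = 12165120 & 0x7fff       = 8192  (0.25 * 2^15)
 */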
@@ -2271,7 +2251,7 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
- if (!cnl_ddi_calculate_wrpll(clock * 1000, dev_priv, &wrpll_params))
+ if (!cnl_ddi_calculate_wrpll(clock, dev_priv, &wrpll_params))
return false;
cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
@@ -2281,7 +2261,6 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
- wrpll_params.central_freq |
DPLL_CFGCR1_CENTRAL_FREQ;
memset(&crtc_state->dpll_hw_state, 0,
@@ -2345,15 +2324,13 @@ cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
- if (encoder->type == INTEL_OUTPUT_HDMI) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
bret = cnl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
return NULL;
}
- } else if (encoder->type == INTEL_OUTPUT_DP ||
- encoder->type == INTEL_OUTPUT_DP_MST ||
- encoder->type == INTEL_OUTPUT_EDP) {
+ } else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = cnl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
@@ -2361,8 +2338,8 @@ cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
}
crtc_state->dpll_hw_state = dpll_hw_state;
} else {
- DRM_DEBUG_KMS("Skip DPLL setup for encoder %d\n",
- encoder->type);
+ DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
+ crtc_state->output_types);
return NULL;
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 5d77f75a9f9c..30f791f89d64 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -47,14 +47,12 @@
* contexts. Note that it's important that we check the condition again after
* having timed out, since the timeout could be due to preemption or similar and
* we've never had a chance to check the condition before the timeout.
- *
- * TODO: When modesetting has fully transitioned to atomic, the below
- * drm_can_sleep() can be removed and in_atomic()/!in_atomic() asserts
- * added.
*/
-#define _wait_for(COND, US, W) ({ \
+#define _wait_for(COND, US, Wmin, Wmax) ({ \
unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \
+ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
int ret__; \
+ might_sleep(); \
for (;;) { \
bool expired__ = time_after(jiffies, timeout__); \
if (COND) { \
@@ -65,16 +63,14 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if ((W) && drm_can_sleep()) { \
- usleep_range((W), (W)*2); \
- } else { \
- cpu_relax(); \
- } \
+ usleep_range(wait__, wait__ * 2); \
+ if (wait__ < (Wmax)) \
+ wait__ <<= 1; \
} \
ret__; \
})
-#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000)
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
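The net effect of the new Wmin/Wmax pair is an exponential poll backoff; spelled out for the common wait_for() case (timings approximate, since usleep_range() may sleep up to twice the value):

/* wait_for(COND, 10) == _wait_for(COND, 10000, 10, 1000):
 * between checks of COND it calls usleep_range(w, 2 * w) with
 *   w = 10, 20, 40, 80, 160, 320, 640, 1280, 1280, ... usec
 * (w doubles only while w < 1000, so it settles at 1280), until
 * the 10 ms timeout expires.
 */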
@@ -123,7 +119,7 @@
int ret__; \
BUILD_BUG_ON(!__builtin_constant_p(US)); \
if ((US) > 10) \
- ret__ = _wait_for((COND), (US), 10); \
+ ret__ = _wait_for((COND), (US), 10, 10); \
else \
ret__ = _wait_for_atomic((COND), (US), 0); \
ret__; \
@@ -173,7 +169,7 @@ enum intel_output_type {
INTEL_OUTPUT_DP = 7,
INTEL_OUTPUT_EDP = 8,
INTEL_OUTPUT_DSI = 9,
- INTEL_OUTPUT_UNKNOWN = 10,
+ INTEL_OUTPUT_DDI = 10,
INTEL_OUTPUT_DP_MST = 11,
};
@@ -216,6 +212,9 @@ struct intel_encoder {
enum port port;
unsigned int cloneable;
void (*hot_plug)(struct intel_encoder *);
+ enum intel_output_type (*compute_output_type)(struct intel_encoder *,
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
bool (*compute_config)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
@@ -386,6 +385,8 @@ struct intel_atomic_state {
unsigned int active_crtcs;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
+ /* minimum acceptable voltage level for each pipe */
+ u8 min_voltage_level[I915_MAX_PIPES];
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
@@ -420,6 +421,9 @@ struct intel_plane_state {
/* plane control register */
u32 ctl;
+ /* plane color control register */
+ u32 color_ctl;
+
/*
* scaler_id
* = -1 : not using a scaler
@@ -738,6 +742,9 @@ struct intel_crtc_state {
*/
uint8_t lane_lat_optim_mask;
+ /* minimum acceptable voltage level */
+ u8 min_voltage_level;
+
/* Panel fitter controls for gen2-gen4 + VLV */
struct {
u32 control;
@@ -795,7 +802,6 @@ struct intel_crtc_state {
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
- enum plane plane;
/*
* Whether the crtc and the connected output pipeline is active. Implies
* that crtc->enabled is set, i.e. the current mode configuration has
@@ -840,7 +846,7 @@ struct intel_crtc {
struct intel_plane {
struct drm_plane base;
- u8 plane;
+ enum i9xx_plane_id i9xx_plane;
enum plane_id id;
enum pipe pipe;
bool can_scale;
@@ -1049,7 +1055,6 @@ struct intel_lspcon {
struct intel_digital_port {
struct intel_encoder base;
- enum port port;
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
@@ -1081,7 +1086,7 @@ struct intel_dp_mst_encoder {
static inline enum dpio_channel
vlv_dport_to_channel(struct intel_digital_port *dport)
{
- switch (dport->port) {
+ switch (dport->base.port) {
case PORT_B:
case PORT_D:
return DPIO_CH0;
@@ -1095,7 +1100,7 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
static inline enum dpio_phy
vlv_dport_to_phy(struct intel_digital_port *dport)
{
- switch (dport->port) {
+ switch (dport->base.port) {
case PORT_B:
case PORT_C:
return DPIO_PHY0;
@@ -1127,7 +1132,7 @@ intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
}
static inline struct intel_crtc *
-intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum plane plane)
+intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum i9xx_plane_id plane)
{
return dev_priv->plane_to_crtc_mapping[plane];
}
@@ -1148,7 +1153,7 @@ enc_to_dig_port(struct drm_encoder *encoder)
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
switch (intel_encoder->type) {
- case INTEL_OUTPUT_UNKNOWN:
+ case INTEL_OUTPUT_DDI:
WARN_ON(!HAS_DDI(to_i915(encoder->dev)));
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
@@ -1272,7 +1277,6 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
void hsw_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
-enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
@@ -1284,15 +1288,13 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
-void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
+void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *crtc_state);
u32 bxt_signal_levels(struct intel_dp *intel_dp);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
@@ -1305,7 +1307,9 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
-void intel_audio_codec_disable(struct intel_encoder *encoder);
+void intel_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
@@ -1323,10 +1327,14 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_state_compare(const struct intel_cdclk_state *a,
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b);
+bool intel_cdclk_changed(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b);
void intel_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state);
+void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
+ const char *context);
/* intel_display.c */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
@@ -1478,8 +1486,9 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
bool intel_crtc_active(struct intel_crtc *crtc);
-void hsw_enable_ips(struct intel_crtc *crtc);
-void hsw_disable_ips(struct intel_crtc *crtc);
+bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
+void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
+void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
@@ -1492,6 +1501,8 @@ static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
return i915_ggtt_offset(state->vma);
}
+u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
@@ -1522,7 +1533,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
+int intel_dp_sink_crc(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
@@ -1641,7 +1653,7 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
/* intel_fbc.c */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
- struct drm_atomic_state *state);
+ struct intel_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
void intel_fbc_pre_update(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
@@ -1869,7 +1881,6 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
@@ -1897,15 +1908,10 @@ bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *ddb,
int ignore);
bool ilk_disable_lp_wm(struct drm_device *dev);
-int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate);
void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv);
-static inline int intel_rc6_enabled(void)
-{
- return i915_modparams.enable_rc6;
-}
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 83f15848098a..f67d321376e4 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -662,11 +662,11 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
}
}
-static void intel_dsi_port_enable(struct intel_encoder *encoder)
+static void intel_dsi_port_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -705,7 +705,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
if (IS_BROXTON(dev_priv))
temp |= LANE_CONFIGURATION_DUAL_LINK_A;
else
- temp |= intel_crtc->pipe ?
+ temp |= crtc->pipe ?
LANE_CONFIGURATION_DUAL_LINK_B :
LANE_CONFIGURATION_DUAL_LINK_A;
}
@@ -875,7 +875,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
- intel_dsi_port_enable(encoder);
+ intel_dsi_port_enable(encoder, pipe_config);
}
intel_panel_enable_backlight(pipe_config, conn_state);
@@ -1082,7 +1082,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
struct drm_display_mode *adjusted_mode_sw;
- struct intel_crtc *intel_crtc;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
unsigned int lane_count = intel_dsi->lane_count;
unsigned int bpp, fmt;
@@ -1093,8 +1093,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
crtc_hblank_start_sw, crtc_hblank_end_sw;
/* FIXME: hw readout should not depend on SW state */
- intel_crtc = to_intel_crtc(encoder->base.crtc);
- adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode;
+ adjusted_mode_sw = &crtc->config->base.adjusted_mode;
/*
* At least one port is active, as encoder->get_config is called only if
@@ -1243,6 +1242,8 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
u32 pclk;
DRM_DEBUG_KMS("\n");
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
+
if (IS_GEN9_LP(dev_priv))
bxt_dsi_get_pipe_config(encoder, pipe_config);
@@ -1665,6 +1666,27 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
+static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
+ enum i9xx_plane_id plane;
+ u32 val;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (connector->encoder->crtc_mask == BIT(PIPE_B))
+ plane = PLANE_B;
+ else
+ plane = PLANE_A;
+
+ val = I915_READ(DSPCNTR(plane));
+ if (val & DISPPLANE_ROTATE_180)
+ orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+ }
+
+ return orientation;
+}
+
static void intel_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1680,6 +1702,13 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
allowed_scalers);
connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
+
+ connector->base.display_info.panel_orientation =
+ intel_dsi_get_panel_orientation(connector);
+ drm_connector_init_panel_orientation_property(
+ &connector->base,
+ connector->panel.fixed_mode->hdisplay,
+ connector->panel.fixed_mode->vdisplay);
}
}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 53c9b763f4ce..754baa00bea9 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -159,6 +159,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp, flags = 0;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO);
+
tmp = I915_READ(intel_dvo->dev.dvo_reg);
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 6074e04dc99f..6bb51a502b8b 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -37,8 +37,6 @@
* Resource Streamer, is 66944 bytes, which rounds to 17 pages.
*/
#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
-/* Same as Haswell, but 72064 bytes now. */
-#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
@@ -50,6 +48,8 @@ struct engine_class_info {
const char *name;
int (*init_legacy)(struct intel_engine_cs *engine);
int (*init_execlists)(struct intel_engine_cs *engine);
+
+ u8 uabi_class;
};
static const struct engine_class_info intel_engine_classes[] = {
@@ -57,21 +57,25 @@ static const struct engine_class_info intel_engine_classes[] = {
.name = "rcs",
.init_execlists = logical_render_ring_init,
.init_legacy = intel_init_render_ring_buffer,
+ .uabi_class = I915_ENGINE_CLASS_RENDER,
},
[COPY_ENGINE_CLASS] = {
.name = "bcs",
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_blt_ring_buffer,
+ .uabi_class = I915_ENGINE_CLASS_COPY,
},
[VIDEO_DECODE_CLASS] = {
.name = "vcs",
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_bsd_ring_buffer,
+ .uabi_class = I915_ENGINE_CLASS_VIDEO,
},
[VIDEO_ENHANCEMENT_CLASS] = {
.name = "vecs",
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_vebox_ring_buffer,
+ .uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
},
};
@@ -158,9 +162,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
case 9:
return GEN9_LR_CONTEXT_RENDER_SIZE;
case 8:
- return i915_modparams.enable_execlists ?
- GEN8_LR_CONTEXT_RENDER_SIZE :
- GEN8_CXT_TOTAL_SIZE;
+ return GEN8_LR_CONTEXT_RENDER_SIZE;
case 7:
if (IS_HASWELL(dev_priv))
return HSW_CXT_TOTAL_SIZE;
@@ -203,6 +205,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
class_info = &intel_engine_classes[info->class];
+ if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
+ return -EINVAL;
+
+ if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
+ return -EINVAL;
+
+ if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
+ return -EINVAL;
+
GEM_BUG_ON(dev_priv->engine[id]);
engine = kzalloc(sizeof(*engine), GFP_KERNEL);
if (!engine)
@@ -213,13 +224,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
class_info->name, info->instance) >=
sizeof(engine->name));
- engine->uabi_id = info->uabi_id;
engine->hw_id = engine->guc_id = info->hw_id;
engine->mmio_base = info->mmio_base;
engine->irq_shift = info->irq_shift;
engine->class = info->class;
engine->instance = info->instance;
+ engine->uabi_id = info->uabi_id;
+ engine->uabi_class = class_info->uabi_class;
+
engine->context_size = __intel_engine_context_size(dev_priv,
engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
@@ -228,8 +241,11 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
+ spin_lock_init(&engine->stats.lock);
+
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+ dev_priv->engine_class[info->class][info->instance] = engine;
dev_priv->engine[id] = engine;
return 0;
}
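With the class/instance table filled in above, an engine can be resolved from a (class, instance) pair; a hedged sketch of such a lookup (the helper name is illustrative, not from this series):

static struct intel_engine_cs *
engine_by_class_instance(struct drm_i915_private *i915, u8 class, u8 instance)
{
	if (class > MAX_ENGINE_CLASS || instance > MAX_ENGINE_INSTANCE)
		return NULL;

	return i915->engine_class[class][instance];
}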
@@ -281,6 +297,8 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
device_info->num_rings = hweight32(mask);
+ i915_check_and_clear_faults(dev_priv);
+
return 0;
cleanup:
@@ -306,7 +324,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
&intel_engine_classes[engine->class];
int (*init)(struct intel_engine_cs *engine);
- if (i915_modparams.enable_execlists)
+ if (HAS_EXECLISTS(dev_priv))
init = class_info->init_execlists;
else
init = class_info->init_legacy;
@@ -356,18 +374,6 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
if (HAS_VEBOX(dev_priv))
I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
}
- if (dev_priv->semaphore) {
- struct page *page = i915_vma_first_page(dev_priv->semaphore);
- void *semaphores;
-
- /* Semaphores are in noncoherent memory, flush to be safe */
- semaphores = kmap_atomic(page);
- memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
- 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
- drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
- I915_NUM_ENGINES * gen8_semaphore_seqno_size);
- kunmap_atomic(semaphores);
- }
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
@@ -620,7 +626,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* Similarly the preempt context must always be available so that
* we can interrupt the engine at any time.
*/
- if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) {
+ if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
ring = engine->context_pin(engine,
engine->i915->preempt_context);
if (IS_ERR(ring)) {
@@ -633,25 +639,19 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
if (ret)
goto err_unpin_preempt;
- ret = i915_gem_render_state_init(engine);
- if (ret)
- goto err_breadcrumbs;
-
if (HWS_NEEDS_PHYSICAL(engine->i915))
ret = init_phys_status_page(engine);
else
ret = init_status_page(engine);
if (ret)
- goto err_rs_fini;
+ goto err_breadcrumbs;
return 0;
-err_rs_fini:
- i915_gem_render_state_fini(engine);
err_breadcrumbs:
intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
- if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+ if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
engine->context_unpin(engine, engine->i915->preempt_context);
err_unpin_kernel:
engine->context_unpin(engine, engine->i915->kernel_context);
@@ -674,12 +674,14 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
else
cleanup_status_page(engine);
- i915_gem_render_state_fini(engine);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
- if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+ if (engine->default_state)
+ i915_gem_object_put(engine->default_state);
+
+ if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
engine->context_unpin(engine, engine->i915->preempt_context);
engine->context_unpin(engine, engine->i915->kernel_context);
}
@@ -1014,22 +1016,6 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
- /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
- GEN9_DG_MIRROR_FIX_ENABLE);
-
- /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
- GEN9_RHWO_OPTIMIZATION_DISABLE);
- /*
- * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
- * but we do that in per ctx batchbuffer as there is an issue
- * with this register not getting restored on ctx restore
- */
- }
-
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
@@ -1045,11 +1031,6 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
- /* WaDisableMaskBasedCammingInRCC:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
- PIXEL_MASK_CAMMING_DISABLE);
-
/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
@@ -1079,14 +1060,22 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
if (IS_SKYLAKE(dev_priv) ||
IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+ IS_COFFEELAKE(dev_priv))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+ /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+ if (IS_GEN9_LP(dev_priv)) {
+ u32 val = I915_READ(GEN8_L3SQCREG1);
+
+ val &= ~L3_PRIO_CREDITS_MASK;
+ val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
+ I915_WRITE(GEN8_L3SQCREG1, val);
+ }
+
/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_FLUSH_COHERENT_LINES));
@@ -1210,66 +1199,22 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
if (ret)
return ret;
- /* WaStoreMultiplePTEenable:bxt */
- /* This is a requirement according to Hardware specification */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
-
- /* WaSetClckGatingDisableMedia:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
- ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
- }
-
/* WaDisableThreadStallDopClockGating:bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
STALL_DOP_GATING_DISABLE);
/* WaDisablePooledEuLoadBalancingFix:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
- I915_WRITE(FF_SLICE_CS_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
- }
-
- /* WaDisableSbeCacheDispatchPortSharing:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
- WA_SET_BIT_MASKED(
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
- }
-
- /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
- /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
- /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
- /* WaDisableLSQCROPERFforOCL:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
- if (ret)
- return ret;
-
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
- if (ret)
- return ret;
- }
-
- /* WaProgramL3SqcReg1DefaultForPerf:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
- u32 val = I915_READ(GEN8_L3SQCREG1);
- val &= ~L3_PRIO_CREDITS_MASK;
- val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
- I915_WRITE(GEN8_L3SQCREG1, val);
- }
+ I915_WRITE(FF_SLICE_CS_CHICKEN2,
+ _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
/* WaToEnableHwFixForPushConstHWBug:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaInPlaceDecompressionHang:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
return 0;
}
@@ -1327,6 +1272,9 @@ static int cnl_init_workarounds(struct intel_engine_cs *engine)
if (ret)
return ret;
+ /* WaDisableEarlyEOT:cnl */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
+
return 0;
}
@@ -1573,10 +1521,8 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- if (READ_ONCE(dev_priv->gt.active_requests))
- return false;
-
- /* If the driver is wedged, HW state may be very inconsistent and
+ /*
+ * If the driver is wedged, HW state may be very inconsistent and
* report that it is still busy, even though we have stopped using it.
*/
if (i915_terminally_wedged(&dev_priv->gpu_error))
@@ -1590,6 +1536,34 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
return true;
}
+/**
+ * intel_engine_has_kernel_context:
+ * @engine: the engine
+ *
+ * Returns true if the last context to be executed on this engine (or, if the
+ * engine is already idle, the last context that was executed on it) is the
+ * kernel context (#i915.kernel_context).
+ */
+bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
+{
+ const struct i915_gem_context * const kernel_context =
+ engine->i915->kernel_context;
+ struct drm_i915_gem_request *rq;
+
+ lockdep_assert_held(&engine->i915->drm.struct_mutex);
+
+ /*
+ * Check the last context seen by the engine. If active, it will be
+ * the last request that remains in the timeline. When idle, it is
+ * the last executed context as tracked by retirement.
+ */
+ rq = __i915_gem_active_peek(&engine->timeline->last_request);
+ if (rq)
+ return rq->ctx == kernel_context;
+ else
+ return engine->last_retired_context == kernel_context;
+}
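
A minimal caller sketch (illustrative only, not part of this patch; the helper
name is hypothetical) showing how the predicate gates a switch back to the
kernel context before idling:

	static int example_switch_to_kernel_context(struct intel_engine_cs *engine)
	{
		lockdep_assert_held(&engine->i915->drm.struct_mutex);

		/* Already idling on the kernel context, nothing to flush. */
		if (intel_engine_has_kernel_context(engine))
			return 0;

		/* A request against i915->kernel_context would be queued here. */
		return 0;
	}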
+
void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -1599,19 +1573,63 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
engine->set_default_submission(engine);
}
-void intel_engines_mark_idle(struct drm_i915_private *i915)
+/**
+ * intel_engines_park: called when the GT is transitioning from busy->idle
+ * @i915: the i915 device
+ *
+ * The GT is now idle and about to go to sleep (maybe never to wake again?).
+ * Time for us to tidy and put away our toys (release resources back to the
+ * system).
+ */
+void intel_engines_park(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
+ /* Flush the residual irq tasklets first. */
intel_engine_disarm_breadcrumbs(engine);
+ tasklet_kill(&engine->execlists.tasklet);
+
+ /*
+ * We are committed now to parking the engines, make sure there
+ * will be no more interrupts arriving later and the engines
+ * are truly idle.
+ */
+ if (wait_for(intel_engine_is_idle(engine), 10)) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ dev_err(i915->drm.dev,
+ "%s is not idle before parking\n",
+ engine->name);
+ intel_engine_dump(engine, &p, NULL);
+ }
+
+ if (engine->park)
+ engine->park(engine);
+
i915_gem_batch_pool_fini(&engine->batch_pool);
- tasklet_kill(&engine->execlists.irq_tasklet);
engine->execlists.no_priolist = false;
}
}
+/**
+ * intel_engines_unpark: called when the GT is transitioning from idle->busy
+ * @i915: the i915 device
+ *
+ * The GT was idle and now about to fire up with some new user requests.
+ */
+void intel_engines_unpark(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id) {
+ if (engine->unpark)
+ engine->unpark(engine);
+ }
+}
+
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
switch (INTEL_GEN(engine->i915)) {
@@ -1627,6 +1645,20 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
}
}
+unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int which;
+
+ which = 0;
+ for_each_engine(engine, i915, id)
+ if (engine->default_state)
+ which |= BIT(engine->uabi_class);
+
+ return which;
+}
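
The return value is a bitmask indexed by uabi class, so a caller can test a
single class directly; a small usage sketch (variable names are illustrative):

	unsigned int isolated = intel_engines_has_context_isolation(i915);
	bool render_isolated = isolated & BIT(I915_ENGINE_CLASS_RENDER);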
+
static void print_request(struct drm_printer *m,
struct drm_i915_gem_request *rq,
const char *prefix)
@@ -1640,7 +1672,38 @@ static void print_request(struct drm_printer *m,
rq->timeline->common->name);
}
-void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
+static void hexdump(struct drm_printer *m, const void *buf, size_t len)
+{
+ const size_t rowsize = 8 * sizeof(u32);
+ const void *prev = NULL;
+ bool skip = false;
+ size_t pos;
+
+ for (pos = 0; pos < len; pos += rowsize) {
+ char line[128];
+
+ if (prev && !memcmp(prev, buf + pos, rowsize)) {
+ if (!skip) {
+ drm_printf(m, "*\n");
+ skip = true;
+ }
+ continue;
+ }
+
+ WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
+ rowsize, sizeof(u32),
+ line, sizeof(line),
+ false) >= sizeof(line));
+ drm_printf(m, "%08zx %s\n", pos, line);
+
+ prev = buf + pos;
+ skip = false;
+ }
+}
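
For reference, the output format follows the hexdump(1) convention: an offset
column, eight 32-bit words per row, and a lone '*' standing in for runs of
rows identical to the previous one. A purely illustrative sample:

	00000000 00000001 00000000 00000000 00000000 00000000 00000000 00000000 00000000
	*
	00000040 0000c0de 00000000 00000000 00000000 00000000 00000000 00000000 00000000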
+
+void intel_engine_dump(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ const char *header, ...)
{
struct intel_breadcrumbs * const b = &engine->breadcrumbs;
const struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -1648,17 +1711,29 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_request *rq;
struct rb_node *rb;
+ char hdr[80];
u64 addr;
- drm_printf(m, "%s\n", engine->name);
+ if (header) {
+ va_list ap;
+
+ va_start(ap, header);
+ drm_vprintf(m, header, &ap);
+ va_end(ap);
+ }
+
+ if (i915_terminally_wedged(&engine->i915->gpu_error))
+ drm_printf(m, "*** WEDGED ***\n");
+
drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
intel_engine_get_seqno(engine),
intel_engine_last_submit(engine),
engine->hangcheck.seqno,
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
engine->timeline->inflight_seqnos);
- drm_printf(m, "\tReset count: %d\n",
- i915_reset_engine_count(error, engine));
+ drm_printf(m, "\tReset count: %d (global %d)\n",
+ i915_reset_engine_count(error, engine),
+ i915_reset_count(error));
rcu_read_lock();
@@ -1693,9 +1768,23 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
rq ? rq->ring->tail : 0);
- drm_printf(m, "\tRING_CTL: 0x%08x [%s]\n",
+ drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
I915_READ(RING_CTL(engine->mmio_base)),
- I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
+ I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
+ if (INTEL_GEN(engine->i915) > 2) {
+ drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
+ I915_READ(RING_MI_MODE(engine->mmio_base)),
+ I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
+ }
+ if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
+ drm_printf(m, "\tSYNC_0: 0x%08x\n",
+ I915_READ(RING_SYNC_0(engine->mmio_base)));
+ drm_printf(m, "\tSYNC_1: 0x%08x\n",
+ I915_READ(RING_SYNC_1(engine->mmio_base)));
+ if (HAS_VEBOX(dev_priv))
+ drm_printf(m, "\tSYNC_2: 0x%08x\n",
+ I915_READ(RING_SYNC_2(engine->mmio_base)));
+ }
rcu_read_unlock();
@@ -1705,8 +1794,26 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
addr = intel_engine_get_last_batch_head(engine);
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
+ if (INTEL_GEN(dev_priv) >= 8)
+ addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
+ RING_DMA_FADD_UDW(engine->mmio_base));
+ else if (INTEL_GEN(dev_priv) >= 4)
+ addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+ else
+ addr = I915_READ(DMA_FADD_I8XX);
+ drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+ if (INTEL_GEN(dev_priv) >= 4) {
+ drm_printf(m, "\tIPEIR: 0x%08x\n",
+ I915_READ(RING_IPEIR(engine->mmio_base)));
+ drm_printf(m, "\tIPEHR: 0x%08x\n",
+ I915_READ(RING_IPEHR(engine->mmio_base)));
+ } else {
+ drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
+ drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
+ }
- if (i915_modparams.enable_execlists) {
+ if (HAS_EXECLISTS(dev_priv)) {
const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
u32 ptr, read, write;
unsigned int idx;
@@ -1746,12 +1853,12 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
rq = port_unpack(&execlists->port[idx], &count);
if (rq) {
- drm_printf(m, "\t\tELSP[%d] count=%d, ",
- idx, count);
- print_request(m, rq, "rq: ");
+ snprintf(hdr, sizeof(hdr),
+ "\t\tELSP[%d] count=%d, rq: ",
+ idx, count);
+ print_request(m, rq, hdr);
} else {
- drm_printf(m, "\t\tELSP[%d] idle\n",
- idx);
+ drm_printf(m, "\t\tELSP[%d] idle\n", idx);
}
}
drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
@@ -1786,7 +1893,129 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
}
spin_unlock_irq(&b->rb_lock);
- drm_printf(m, "\n");
+ if (INTEL_GEN(dev_priv) >= 6)
+ drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
+
+ drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
+ engine->irq_posted,
+ yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
+ &engine->irq_posted)),
+ yesno(test_bit(ENGINE_IRQ_EXECLIST,
+ &engine->irq_posted)));
+
+ drm_printf(m, "HWSP:\n");
+ hexdump(m, engine->status_page.page_addr, PAGE_SIZE);
+
+ drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
+}
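
With the new varargs header, a caller can prefix the dump in a single call; a
short sketch (the trigger message is illustrative), reusing the same
drm_debug_printer() already seen in intel_engines_park() above:

	struct drm_printer p = drm_debug_printer(__func__);

	intel_engine_dump(engine, &p, "%s: hangcheck fired\n", engine->name);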
+
+static u8 user_class_map[] = {
+ [I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
+ [I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
+ [I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
+ [I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
+};
+
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
+{
+ if (class >= ARRAY_SIZE(user_class_map))
+ return NULL;
+
+ class = user_class_map[class];
+
+ GEM_BUG_ON(class > MAX_ENGINE_CLASS);
+
+ if (instance > MAX_ENGINE_INSTANCE)
+ return NULL;
+
+ return i915->engine_class[class][instance];
+}
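
A hedged sketch of resolving an engine from userspace (class, instance)
coordinates, e.g. while validating a hypothetical query ioctl:

	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915, I915_ENGINE_CLASS_VIDEO, 0);
	if (!engine)
		return -ENOENT; /* no such engine on this platform */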
+
+/**
+ * intel_enable_engine_stats() - Enable engine busy tracking on engine
+ * @engine: engine to enable stats collection
+ *
+ * Start collecting the engine busyness data for @engine.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int intel_enable_engine_stats(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (!intel_engine_supports_stats(engine))
+ return -ENODEV;
+
+ spin_lock_irqsave(&engine->stats.lock, flags);
+ if (engine->stats.enabled == ~0)
+ goto busy;
+ if (engine->stats.enabled++ == 0)
+ engine->stats.enabled_at = ktime_get();
+ spin_unlock_irqrestore(&engine->stats.lock, flags);
+
+ return 0;
+
+busy:
+ spin_unlock_irqrestore(&engine->stats.lock, flags);
+
+ return -EBUSY;
+}
+
+static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
+{
+ ktime_t total = engine->stats.total;
+
+ /*
+ * If the engine is executing something at the moment
+ * add it to the total.
+ */
+ if (engine->stats.active)
+ total = ktime_add(total,
+ ktime_sub(ktime_get(), engine->stats.start));
+
+ return total;
+}
+
+/**
+ * intel_engine_get_busy_time() - Return current accumulated engine busyness
+ * @engine: engine to report on
+ *
+ * Returns accumulated time @engine was busy since engine stats were enabled.
+ */
+ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
+{
+ ktime_t total;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->stats.lock, flags);
+ total = __intel_engine_get_busy_time(engine);
+ spin_unlock_irqrestore(&engine->stats.lock, flags);
+
+ return total;
+}
+
+/**
+ * intel_disable_engine_stats() - Disable engine busy tracking on engine
+ * @engine: engine to disable stats collection
+ *
+ * Stops collecting the engine busyness data for @engine.
+ */
+void intel_disable_engine_stats(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (!intel_engine_supports_stats(engine))
+ return;
+
+ spin_lock_irqsave(&engine->stats.lock, flags);
+ WARN_ON_ONCE(engine->stats.enabled == 0);
+ if (--engine->stats.enabled == 0) {
+ engine->stats.total = __intel_engine_get_busy_time(engine);
+ engine->stats.active = 0;
+ }
+ spin_unlock_irqrestore(&engine->stats.lock, flags);
}
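
Taken together, the enable/get/disable triplet supports interval sampling. A
minimal sketch of a busyness sampler built on top of it (the sampler itself is
illustrative, not part of this patch):

	static u64 example_engine_busy_pct(struct intel_engine_cs *engine,
					   unsigned int sample_ms)
	{
		ktime_t busy0, busy1, wall0, wall1;

		if (intel_enable_engine_stats(engine))
			return 0; /* stats unsupported on this engine */

		wall0 = ktime_get();
		busy0 = intel_engine_get_busy_time(engine);
		msleep(sample_ms); /* needs linux/delay.h */
		busy1 = intel_engine_get_busy_time(engine);
		wall1 = ktime_get();

		intel_disable_engine_stats(engine);

		/* busy time as a percentage of wall time over the interval */
		return div64_u64(100 * ktime_to_ns(ktime_sub(busy1, busy0)),
				 ktime_to_ns(ktime_sub(wall1, wall0)));
	}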
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 1a0f5e0c8d10..f88c1b5dae4c 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -151,7 +151,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane);
+ fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
}
@@ -177,7 +177,7 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
u32 dpfc_ctl;
- dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
+ dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
if (params->fb.format->cpp[0] == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
else
@@ -224,7 +224,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;
- dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
+ dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
if (params->fb.format->cpp[0] == 2)
threshold++;
@@ -306,7 +306,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
dpfc_ctl = 0;
if (IS_IVYBRIDGE(dev_priv))
- dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);
+ dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);
if (params->fb.format->cpp[0] == 2)
threshold++;
@@ -531,7 +531,6 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
int size,
int fb_cpp)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
int compression_threshold = 1;
int ret;
u64 end;
@@ -541,7 +540,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
* If we enable FBC using a CFB on that memory range we'll get FIFO
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
- end = ggtt->stolen_size - 8 * 1024 * 1024;
+ end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
else
end = U64_MAX;
@@ -615,10 +614,16 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
fbc->compressed_llb = compressed_llb;
+ GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
+ fbc->compressed_fb.start,
+ U32_MAX));
+ GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
+ fbc->compressed_llb->start,
+ U32_MAX));
I915_WRITE(FBC_CFB_BASE,
- dev_priv->mm.stolen_base + fbc->compressed_fb.start);
+ dev_priv->dsm.start + fbc->compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
- dev_priv->mm.stolen_base + compressed_llb->start);
+ dev_priv->dsm.start + compressed_llb->start);
}
DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
@@ -890,7 +895,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->vma = cache->vma;
params->crtc.pipe = crtc->pipe;
- params->crtc.plane = crtc->plane;
+ params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);
params->fb.format = cache->fb.format;
@@ -1054,11 +1059,11 @@ out:
* enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
*/
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
- struct drm_atomic_state *state)
+ struct intel_atomic_state *state)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- struct drm_plane *plane;
- struct drm_plane_state *plane_state;
+ struct intel_plane *plane;
+ struct intel_plane_state *plane_state;
bool crtc_chosen = false;
int i;
@@ -1066,7 +1071,7 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
/* Does this atomic commit involve the CRTC currently tied to FBC? */
if (fbc->crtc &&
- !drm_atomic_get_existing_crtc_state(state, &fbc->crtc->base))
+ !intel_atomic_get_new_crtc_state(state, fbc->crtc))
goto out;
if (!intel_fbc_can_enable(dev_priv))
@@ -1076,25 +1081,22 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
* plane. We could go for fancier schemes such as checking the plane
* size, but this would just affect the few platforms that don't tie FBC
* to pipe or plane A. */
- for_each_new_plane_in_state(state, plane, plane_state, i) {
- struct intel_plane_state *intel_plane_state =
- to_intel_plane_state(plane_state);
- struct intel_crtc_state *intel_crtc_state;
- struct intel_crtc *crtc = to_intel_crtc(plane_state->crtc);
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
- if (!intel_plane_state->base.visible)
+ if (!plane_state->base.visible)
continue;
if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
continue;
- if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
+ if (fbc_on_plane_a_only(dev_priv) && plane->i9xx_plane != PLANE_A)
continue;
- intel_crtc_state = to_intel_crtc_state(
- drm_atomic_get_existing_crtc_state(state, &crtc->base));
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- intel_crtc_state->enable_fbc = true;
+ crtc_state->enable_fbc = true;
crtc_chosen = true;
break;
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index ea96682568e8..da48af11eb6b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -115,7 +115,6 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
int size, ret;
@@ -139,7 +138,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
* important and we should probably use that space with FBC or other
* features. */
obj = NULL;
- if (size * 2 < ggtt->stolen_usable_size)
+ if (size * 2 < dev_priv->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (obj == NULL)
obj = i915_gem_object_create(dev_priv, size);
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 10037c0fdf95..3c6bf5a34c3c 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -23,6 +23,7 @@
*/
#include "intel_guc.h"
+#include "intel_guc_submission.h"
#include "i915_drv.h"
static void gen8_guc_raise_irq(struct intel_guc *guc)
@@ -60,6 +61,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
void intel_guc_init_early(struct intel_guc *guc)
{
+ intel_guc_fw_init_early(guc);
intel_guc_ct_init_early(&guc->ct);
mutex_init(&guc->send_mutex);
@@ -67,6 +69,114 @@ void intel_guc_init_early(struct intel_guc *guc)
guc->notify = gen8_guc_raise_irq;
}
+int intel_guc_init_wq(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ /*
+ * GuC log buffer flush work item has to do register access to
+ * send the ack to GuC and this work item, if not synced before
+ * suspend, can potentially get executed after the GFX device is
+ * suspended.
+ * By marking the WQ as freezable, we don't have to worry about
+ * flushing this work item from the suspend hooks; the pending work
+ * item, if any, will either be executed before the suspend or
+ * scheduled later on resume. This way the handling of the work
+ * item is kept the same between system suspend and runtime suspend.
+ */
+ guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
+ WQ_HIGHPRI | WQ_FREEZABLE);
+ if (!guc->log.runtime.flush_wq)
+ return -ENOMEM;
+
+ /*
+ * Even though both sending a GuC action and adding a new work item to
+ * a GuC workqueue are serialized (each with its own locking), since
+ * we're using multiple engines, it's possible that we're going to
+ * issue a preempt request with two (or more - one for each different
+ * engine) work items in the GuC queue. In this situation, the GuC may
+ * submit all of them, which will make us very confused.
+ * Our preemption contexts may even already be complete - before we
+ * even had the chance to send the preempt action to the GuC! Rather
+ * than introducing yet another lock, we can just use an ordered
+ * workqueue to make sure we're always sending a single preemption
+ * request with a single work item.
+ */
+ if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
+ USES_GUC_SUBMISSION(dev_priv)) {
+ guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
+ WQ_HIGHPRI);
+ if (!guc->preempt_wq) {
+ destroy_workqueue(guc->log.runtime.flush_wq);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
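
Because preempt_wq is ordered, at most one preemption work item executes at a
time no matter which engine queued it, so the submission side reduces to a
single call (a sketch of the intended call site):

	queue_work(guc->preempt_wq, &guc->preempt_work[engine->id].work);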
+
+void intel_guc_fini_wq(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
+ USES_GUC_SUBMISSION(dev_priv))
+ destroy_workqueue(guc->preempt_wq);
+
+ destroy_workqueue(guc->log.runtime.flush_wq);
+}
+
+static int guc_shared_data_create(struct intel_guc *guc)
+{
+ struct i915_vma *vma;
+ void *vaddr;
+
+ vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma);
+ return PTR_ERR(vaddr);
+ }
+
+ guc->shared_data = vma;
+ guc->shared_data_vaddr = vaddr;
+
+ return 0;
+}
+
+static void guc_shared_data_destroy(struct intel_guc *guc)
+{
+ i915_gem_object_unpin_map(guc->shared_data->obj);
+ i915_vma_unpin_and_release(&guc->shared_data);
+}
+
+int intel_guc_init(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ int ret;
+
+ ret = guc_shared_data_create(guc);
+ if (ret)
+ return ret;
+ GEM_BUG_ON(!guc->shared_data);
+
+ /* We need to notify the guc whenever we change the GGTT */
+ i915_ggtt_enable_guc(dev_priv);
+
+ return 0;
+}
+
+void intel_guc_fini(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ i915_ggtt_disable_guc(dev_priv);
+ guc_shared_data_destroy(guc);
+}
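
For orientation, the expected pairing across driver load and unload (a sketch
of the call order as driven by the uC layer; error handling elided):

	intel_guc_init_early(guc);
	err = intel_guc_init_wq(guc);
	err = intel_guc_init(guc);
	...
	intel_guc_fini(guc);
	intel_guc_fini_wq(guc);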
+
static u32 get_gt_type(struct drm_i915_private *dev_priv)
{
/* XXX: GT type based on PCI device ID? field seems unused by fw */
@@ -127,7 +237,7 @@ void intel_guc_init_params(struct intel_guc *guc)
}
/* If GuC submission is enabled, set up additional parameters here */
- if (i915_modparams.enable_guc_submission) {
+ if (USES_GUC_SUBMISSION(dev_priv)) {
u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
@@ -230,8 +340,7 @@ int intel_guc_sample_forcewake(struct intel_guc *guc)
action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */
- if (!intel_rc6_enabled() ||
- NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
+ if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
action[1] = 0;
else
/* bit 0 and 1 are for Render and Media domain separately */
@@ -268,7 +377,6 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
int intel_guc_suspend(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
- struct i915_gem_context *ctx;
u32 data[3];
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
@@ -276,14 +384,33 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv)
gen9_disable_guc_interrupts(dev_priv);
- ctx = dev_priv->kernel_context;
-
data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
/* any value greater than GUC_POWER_D0 */
data[1] = GUC_POWER_D1;
- /* first page is shared data with GuC */
- data[2] = guc_ggtt_offset(ctx->engine[RCS].state) +
- LRC_GUCSHR_PN * PAGE_SIZE;
+ data[2] = guc_ggtt_offset(guc->shared_data);
+
+ return intel_guc_send(guc, data, ARRAY_SIZE(data));
+}
+
+/**
+ * intel_guc_reset_engine() - ask GuC to reset an engine
+ * @guc: intel_guc structure
+ * @engine: engine to be reset
+ */
+int intel_guc_reset_engine(struct intel_guc *guc,
+ struct intel_engine_cs *engine)
+{
+ u32 data[7];
+
+ GEM_BUG_ON(!guc->execbuf_client);
+
+ data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
+ data[1] = engine->guc_id;
+ data[2] = 0;
+ data[3] = 0;
+ data[4] = 0;
+ data[5] = guc->execbuf_client->stage_id;
+ data[6] = guc_ggtt_offset(guc->shared_data);
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
@@ -295,7 +422,6 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv)
int intel_guc_resume(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
- struct i915_gem_context *ctx;
u32 data[3];
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
@@ -304,13 +430,9 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
if (i915_modparams.guc_log_level >= 0)
gen9_enable_guc_interrupts(dev_priv);
- ctx = dev_priv->kernel_context;
-
data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
data[1] = GUC_POWER_D0;
- /* first page is shared data with GuC */
- data[2] = guc_ggtt_offset(ctx->engine[RCS].state) +
- LRC_GUCSHR_PN * PAGE_SIZE;
+ data[2] = guc_ggtt_offset(guc->shared_data);
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 418450b1ae27..52856a97477d 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -30,13 +30,18 @@
#include "intel_guc_fwif.h"
#include "intel_guc_ct.h"
#include "intel_guc_log.h"
+#include "intel_guc_reg.h"
#include "intel_uc_fw.h"
-#include "i915_guc_reg.h"
#include "i915_vma.h"
+struct guc_preempt_work {
+ struct work_struct work;
+ struct intel_engine_cs *engine;
+};
+
/*
* Top level structure of GuC. It handles firmware loading and manages client
- * pool and doorbells. intel_guc owns a i915_guc_client to replace the legacy
+ * pool and doorbells. intel_guc owns an intel_guc_client to replace the legacy
* ExecList submission.
*/
struct intel_guc {
@@ -54,8 +59,14 @@ struct intel_guc {
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
struct ida stage_ids;
+ struct i915_vma *shared_data;
+ void *shared_data_vaddr;
+
+ struct intel_guc_client *execbuf_client;
+ struct intel_guc_client *preempt_client;
- struct i915_guc_client *execbuf_client;
+ struct guc_preempt_work preempt_work[I915_NUM_ENGINES];
+ struct workqueue_struct *preempt_wq;
DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
/* Cyclic counter mod pagesize */
@@ -108,6 +119,10 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma)
void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_init_params(struct intel_guc *guc);
+int intel_guc_init_wq(struct intel_guc *guc);
+void intel_guc_fini_wq(struct intel_guc *guc);
+int intel_guc_init(struct intel_guc *guc);
+void intel_guc_fini(struct intel_guc *guc);
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index c4cbec140101..24ad55752396 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -198,6 +198,7 @@ static int ctch_open(struct intel_guc *guc,
err = ctch_init(guc, ctch);
if (unlikely(err))
goto err_out;
+ GEM_BUG_ON(!ctch->vma);
}
/* vma should be already allocated and map'ed */
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index ef67a36354c5..cbc51c960425 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -30,14 +30,14 @@
#include "intel_guc_fw.h"
#include "i915_drv.h"
-#define SKL_FW_MAJOR 6
-#define SKL_FW_MINOR 1
+#define SKL_FW_MAJOR 9
+#define SKL_FW_MINOR 33
-#define BXT_FW_MAJOR 8
-#define BXT_FW_MINOR 7
+#define BXT_FW_MAJOR 9
+#define BXT_FW_MINOR 29
#define KBL_FW_MAJOR 9
-#define KBL_FW_MINOR 14
+#define KBL_FW_MINOR 39
#define GLK_FW_MAJOR 10
#define GLK_FW_MINOR 56
@@ -56,64 +56,100 @@ MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
#define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR)
-/**
- * intel_guc_fw_select() - selects GuC firmware for uploading
- *
- * @guc: intel_guc struct
- *
- * Return: zero when we know firmware, non-zero in other case
- */
-int intel_guc_fw_select(struct intel_guc *guc)
+static void guc_fw_select(struct intel_uc_fw *guc_fw)
{
+ struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- intel_uc_fw_init(&guc->fw, INTEL_UC_FW_TYPE_GUC);
+ GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
+
+ if (!HAS_GUC(dev_priv))
+ return;
if (i915_modparams.guc_firmware_path) {
- guc->fw.path = i915_modparams.guc_firmware_path;
- guc->fw.major_ver_wanted = 0;
- guc->fw.minor_ver_wanted = 0;
+ guc_fw->path = i915_modparams.guc_firmware_path;
+ guc_fw->major_ver_wanted = 0;
+ guc_fw->minor_ver_wanted = 0;
} else if (IS_SKYLAKE(dev_priv)) {
- guc->fw.path = I915_SKL_GUC_UCODE;
- guc->fw.major_ver_wanted = SKL_FW_MAJOR;
- guc->fw.minor_ver_wanted = SKL_FW_MINOR;
+ guc_fw->path = I915_SKL_GUC_UCODE;
+ guc_fw->major_ver_wanted = SKL_FW_MAJOR;
+ guc_fw->minor_ver_wanted = SKL_FW_MINOR;
} else if (IS_BROXTON(dev_priv)) {
- guc->fw.path = I915_BXT_GUC_UCODE;
- guc->fw.major_ver_wanted = BXT_FW_MAJOR;
- guc->fw.minor_ver_wanted = BXT_FW_MINOR;
+ guc_fw->path = I915_BXT_GUC_UCODE;
+ guc_fw->major_ver_wanted = BXT_FW_MAJOR;
+ guc_fw->minor_ver_wanted = BXT_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- guc->fw.path = I915_KBL_GUC_UCODE;
- guc->fw.major_ver_wanted = KBL_FW_MAJOR;
- guc->fw.minor_ver_wanted = KBL_FW_MINOR;
+ guc_fw->path = I915_KBL_GUC_UCODE;
+ guc_fw->major_ver_wanted = KBL_FW_MAJOR;
+ guc_fw->minor_ver_wanted = KBL_FW_MINOR;
} else if (IS_GEMINILAKE(dev_priv)) {
- guc->fw.path = I915_GLK_GUC_UCODE;
- guc->fw.major_ver_wanted = GLK_FW_MAJOR;
- guc->fw.minor_ver_wanted = GLK_FW_MINOR;
+ guc_fw->path = I915_GLK_GUC_UCODE;
+ guc_fw->major_ver_wanted = GLK_FW_MAJOR;
+ guc_fw->minor_ver_wanted = GLK_FW_MINOR;
} else {
- DRM_ERROR("No GuC firmware known for platform with GuC!\n");
- return -ENOENT;
+ DRM_WARN("%s: No firmware known for this platform!\n",
+ intel_uc_fw_type_repr(guc_fw->type));
}
-
- return 0;
}
-/*
- * Read the GuC status register (GUC_STATUS) and store it in the
- * specified location; then return a boolean indicating whether
- * the value matches either of two values representing completion
- * of the GuC boot process.
+/**
+ * intel_guc_fw_init_early() - initializes GuC firmware struct
+ * @guc: intel_guc struct
*
- * This is used for polling the GuC status in a wait_for()
- * loop below.
+ * On platforms with GuC, this selects the firmware to be uploaded.
*/
-static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
- u32 *status)
+void intel_guc_fw_init_early(struct intel_guc *guc)
{
- u32 val = I915_READ(GUC_STATUS);
- u32 uk_val = val & GS_UKERNEL_MASK;
- *status = val;
- return (uk_val == GS_UKERNEL_READY ||
- ((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
+ struct intel_uc_fw *guc_fw = &guc->fw;
+
+ intel_uc_fw_init(guc_fw, INTEL_UC_FW_TYPE_GUC);
+ guc_fw_select(guc_fw);
+}
+
+static void guc_prepare_xfer(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ /* Must program this register before loading the ucode with DMA */
+ I915_WRITE(GUC_SHIM_CONTROL, GUC_DISABLE_SRAM_INIT_TO_ZEROES |
+ GUC_ENABLE_READ_CACHE_LOGIC |
+ GUC_ENABLE_MIA_CACHING |
+ GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
+ GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
+ GUC_ENABLE_MIA_CLOCK_GATING);
+
+ if (IS_GEN9_LP(dev_priv))
+ I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+ else
+ I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+
+ if (IS_GEN9(dev_priv)) {
+ /* DOP Clock Gating Enable for GuC clocks */
+ I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
+ I915_READ(GEN7_MISCCPCTL)));
+
+ /* allows for 5us (in 10ns units) before GT can go to RC6 */
+ I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
+ }
+}
+
+/* Copy RSA signature from the fw image to HW for verification */
+static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uc_fw *guc_fw = &guc->fw;
+ struct sg_table *sg = vma->pages;
+ u32 rsa[UOS_RSA_SCRATCH_COUNT];
+ int i;
+
+ if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa),
+ guc_fw->rsa_offset) != sizeof(rsa))
+ return -EINVAL;
+
+ for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
+ I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
+
+ return 0;
}
/*
@@ -122,29 +158,19 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
* Architecturally, the DMA engine is bidirectional, and can potentially even
* transfer between GTT locations. This functionality is left out of the API
* for now as there is no need for it.
- *
- * Note that GuC needs the CSS header plus uKernel code to be copied by the
- * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
*/
-static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
- struct i915_vma *vma)
+static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uc_fw *guc_fw = &guc->fw;
unsigned long offset;
- struct sg_table *sg = vma->pages;
- u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
- int i, ret = 0;
-
- /* where RSA signature starts */
- offset = guc_fw->rsa_offset;
-
- /* Copy RSA signature from the fw image to HW for verification */
- sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
- for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
- I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
+ u32 status;
+ int ret;
- /* The header plus uCode will be copied to WOPCM via DMA, excluding any
- * other components */
+ /*
+ * The header plus uCode will be copied to WOPCM via DMA, excluding any
+ * other components
+ */
I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
/* Set the source address for the new blob */
@@ -162,33 +188,62 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
/* Finally start the DMA */
I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
+ /* Wait for DMA to finish */
+ ret = __intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0,
+ 2, 100, &status);
+ DRM_DEBUG_DRIVER("GuC DMA status %#x\n", status);
+
+ return ret;
+}
+
+/*
+ * Read the GuC status register (GUC_STATUS) and store it in the
+ * specified location; then return a boolean indicating whether
+ * the value matches either of two values representing completion
+ * of the GuC boot process.
+ *
+ * This is used for polling the GuC status in a wait_for()
+ * loop below.
+ */
+static inline bool guc_ready(struct intel_guc *guc, u32 *status)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ u32 val = I915_READ(GUC_STATUS);
+ u32 uk_val = val & GS_UKERNEL_MASK;
+
+ *status = val;
+ return (uk_val == GS_UKERNEL_READY) ||
+ ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE));
+}
+
+static int guc_wait_ucode(struct intel_guc *guc)
+{
+ u32 status;
+ int ret;
+
/*
- * Wait for the DMA to complete & the GuC to start up.
+ * Wait for the GuC to start up.
* NB: Docs recommend not using the interrupt for completion.
* Measurements indicate this should take no more than 20ms, so a
* timeout here indicates that the GuC has failed and is unusable.
* (Higher levels of the driver will attempt to fall back to
* execlist mode if this happens.)
*/
- ret = wait_for(guc_ucode_response(dev_priv, &status), 100);
-
- DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
- I915_READ(DMA_CTRL), status);
+ ret = wait_for(guc_ready(guc, &status), 100);
+ DRM_DEBUG_DRIVER("GuC status %#x\n", status);
if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
DRM_ERROR("GuC firmware signature verification failed\n");
ret = -ENOEXEC;
}
- DRM_DEBUG_DRIVER("returning %d\n", ret);
-
return ret;
}
/*
* Load the GuC firmware blob into the MinuteIA.
*/
-static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
+static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
{
struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -198,34 +253,24 @@ static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- /* Enable MIA caching. GuC clock gating is disabled. */
- I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
-
- /* WaDisableMinuteIaClockGating:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
- ~GUC_ENABLE_MIA_CLOCK_GATING));
- }
-
- /* WaC6DisallowByGfxPause:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
- I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
+ guc_prepare_xfer(guc);
- if (IS_GEN9_LP(dev_priv))
- I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
- else
- I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
-
- if (IS_GEN9(dev_priv)) {
- /* DOP Clock Gating Enable for GuC clocks */
- I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
- I915_READ(GEN7_MISCCPCTL)));
+ /*
+ * Note that GuC needs the CSS header plus uKernel code to be copied
+ * by the DMA engine in one operation, whereas the RSA signature is
+ * loaded via MMIO.
+ */
+ ret = guc_xfer_rsa(guc, vma);
+ if (ret)
+ DRM_WARN("GuC firmware signature xfer error %d\n", ret);
- /* allows for 5us (in 10ns units) before GT can go to RC6 */
- I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
- }
+ ret = guc_xfer_ucode(guc, vma);
+ if (ret)
+ DRM_WARN("GuC firmware code xfer error %d\n", ret);
- ret = guc_ucode_xfer_dma(dev_priv, vma);
+ ret = guc_wait_ucode(guc);
+ if (ret)
+ DRM_ERROR("GuC firmware xfer error %d\n", ret);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -247,5 +292,5 @@ static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
*/
int intel_guc_fw_upload(struct intel_guc *guc)
{
- return intel_uc_fw_upload(&guc->fw, guc_ucode_xfer);
+ return intel_uc_fw_upload(&guc->fw, guc_fw_xfer);
}
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.h b/drivers/gpu/drm/i915/intel_guc_fw.h
index 023f5baa9dd6..4ec5d3d9e2b0 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.h
+++ b/drivers/gpu/drm/i915/intel_guc_fw.h
@@ -27,7 +27,7 @@
struct intel_guc;
-int intel_guc_fw_select(struct intel_guc *guc);
+void intel_guc_fw_init_early(struct intel_guc *guc);
int intel_guc_fw_upload(struct intel_guc *guc);
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 80c507435458..6a10aa6f04d3 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -544,9 +544,37 @@ union guc_log_control {
u32 value;
} __packed;
+struct guc_ctx_report {
+ u32 report_return_status;
+ u32 reserved1[64];
+ u32 affected_count;
+ u32 reserved2[2];
+} __packed;
+
+/* GuC Shared Context Data Struct */
+struct guc_shared_ctx_data {
+ u32 addr_of_last_preempted_data_low;
+ u32 addr_of_last_preempted_data_high;
+ u32 addr_of_last_preempted_data_high_tmp;
+ u32 padding;
+ u32 is_mapped_to_proxy;
+ u32 proxy_ctx_id;
+ u32 engine_reset_ctx_id;
+ u32 media_reset_count;
+ u32 reserved1[8];
+ u32 uk_last_ctx_switch_reason;
+ u32 was_reset;
+ u32 lrca_gpu_addr;
+ u64 execlist_ctx;
+ u32 reserved2[66];
+ struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM];
+} __packed;
+
/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
enum intel_guc_action {
INTEL_GUC_ACTION_DEFAULT = 0x0,
+ INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2,
+ INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3,
INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
@@ -562,6 +590,18 @@ enum intel_guc_action {
INTEL_GUC_ACTION_LIMIT
};
+enum intel_guc_preempt_options {
+ INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4,
+ INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8,
+};
+
+enum intel_guc_report_status {
+ INTEL_GUC_REPORT_STATUS_UNKNOWN = 0x0,
+ INTEL_GUC_REPORT_STATUS_ACKED = 0x1,
+ INTEL_GUC_REPORT_STATUS_ERROR = 0x2,
+ INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
+};
+
/*
* The GuC sends its response to a command by overwriting the
* command in SS0. The response is distinguishable from a command
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 76d3eb1e4614..eaedd63e3819 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -411,30 +411,8 @@ static int guc_log_runtime_create(struct intel_guc *guc)
guc->log.runtime.relay_chan = guc_log_relay_chan;
INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
-
- /*
- * GuC log buffer flush work item has to do register access to
- * send the ack to GuC and this work item, if not synced before
- * suspend, can potentially get executed after the GFX device is
- * suspended.
- * By marking the WQ as freezable, we don't have to bother about
- * flushing of this work item from the suspend hooks, the pending
- * work item if any will be either executed before the suspend
- * or scheduled later on resume. This way the handling of work
- * item can be kept same between system suspend & rpm suspend.
- */
- guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
- WQ_HIGHPRI | WQ_FREEZABLE);
- if (!guc->log.runtime.flush_wq) {
- DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
- ret = -ENOMEM;
- goto err_relaychan;
- }
-
return 0;
-err_relaychan:
- relay_close(guc->log.runtime.relay_chan);
err_vaddr:
i915_gem_object_unpin_map(guc->log.vma->obj);
guc->log.runtime.buf_addr = NULL;
@@ -450,7 +428,6 @@ static void guc_log_runtime_destroy(struct intel_guc *guc)
if (!guc_log_has_runtime(guc))
return;
- destroy_workqueue(guc->log.runtime.flush_wq);
relay_close(guc->log.runtime.relay_chan);
i915_gem_object_unpin_map(guc->log.vma->obj);
guc->log.runtime.buf_addr = NULL;
@@ -505,7 +482,7 @@ static void guc_flush_logs(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- if (!i915_modparams.enable_guc_submission ||
+ if (!USES_GUC_SUBMISSION(dev_priv) ||
(i915_modparams.guc_log_level < 0))
return;
@@ -646,7 +623,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
void i915_guc_log_register(struct drm_i915_private *dev_priv)
{
- if (!i915_modparams.enable_guc_submission ||
+ if (!USES_GUC_SUBMISSION(dev_priv) ||
(i915_modparams.guc_log_level < 0))
return;
@@ -657,7 +634,7 @@ void i915_guc_log_register(struct drm_i915_private *dev_priv)
void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
{
- if (!i915_modparams.enable_guc_submission)
+ if (!USES_GUC_SUBMISSION(dev_priv))
return;
mutex_lock(&dev_priv->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h
index 35cf9918d09a..19a9247c5664 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/intel_guc_reg.h
@@ -21,8 +21,8 @@
* IN THE SOFTWARE.
*
*/
-#ifndef _I915_GUC_REG_H_
-#define _I915_GUC_REG_H_
+#ifndef _INTEL_GUC_REG_H_
+#define _INTEL_GUC_REG_H_
/* Definitions of GuC H/W registers, bits, etc */
@@ -52,7 +52,8 @@
#define SOFT_SCRATCH_COUNT 16
#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
-#define UOS_RSA_SCRATCH_MAX_COUNT 64
+#define UOS_RSA_SCRATCH_COUNT 64
+
#define DMA_ADDR_0_LOW _MMIO(0xc300)
#define DMA_ADDR_0_HIGH _MMIO(0xc304)
#define DMA_ADDR_1_LOW _MMIO(0xc308)
@@ -102,13 +103,6 @@
#define GUC_ENABLE_MIA_CLOCK_GATING (1<<15)
#define GUC_GEN10_SHIM_WC_ENABLE (1<<21)
-#define GUC_SHIM_CONTROL_VALUE (GUC_DISABLE_SRAM_INIT_TO_ZEROES | \
- GUC_ENABLE_READ_CACHE_LOGIC | \
- GUC_ENABLE_MIA_CACHING | \
- GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | \
- GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \
- GUC_ENABLE_MIA_CLOCK_GATING)
-
#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
#define GUC_SEND_TRIGGER (1<<0)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index f84c267728fd..4d2409466a3a 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -25,23 +25,24 @@
#include <linux/circ_buf.h>
#include <trace/events/dma_fence.h>
-#include "i915_guc_submission.h"
+#include "intel_guc_submission.h"
#include "i915_drv.h"
/**
* DOC: GuC-based command submission
*
* GuC client:
- * A i915_guc_client refers to a submission path through GuC. Currently, there
- * is only one of these (the execbuf_client) and this one is charged with all
- * submissions to the GuC. This struct is the owner of a doorbell, a process
- * descriptor and a workqueue (all of them inside a single gem object that
- * contains all required pages for these elements).
+ * An intel_guc_client refers to a submission path through GuC. Currently, there
+ * are two clients. One of them (the execbuf_client) is charged with all
+ * submissions to the GuC, the other one (preempt_client) is responsible for
+ * preempting the execbuf_client. This struct is the owner of a doorbell, a
+ * process descriptor and a workqueue (all of them inside a single gem object
+ * that contains all required pages for these elements).
*
* GuC stage descriptor:
* During initialization, the driver allocates a static pool of 1024 such
* descriptors, and shares them with the GuC.
- * Currently, there exists a 1:1 mapping between a i915_guc_client and a
+ * Currently, there exists a 1:1 mapping between an intel_guc_client and a
* guc_stage_desc (via the client's stage_id), so effectively only one
* gets used. This stage descriptor lets the GuC know about the doorbell,
* workqueue and process descriptor. Theoretically, it also lets the GuC
@@ -70,7 +71,7 @@
* WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
* represents in-order queue. The kernel driver packs ring tail pointer and an
* ELSP context descriptor dword into Work Item.
- * See guc_wq_item_append()
+ * See guc_add_request()
*
* ADS:
* The Additional Data Struct (ADS) has pointers for different buffers used by
@@ -81,12 +82,13 @@
*
*/
-static inline bool is_high_priority(struct i915_guc_client* client)
+static inline bool is_high_priority(struct intel_guc_client *client)
{
- return client->priority <= GUC_CLIENT_PRIORITY_HIGH;
+ return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
+ client->priority == GUC_CLIENT_PRIORITY_HIGH);
}
-static int __reserve_doorbell(struct i915_guc_client *client)
+static int reserve_doorbell(struct intel_guc_client *client)
{
unsigned long offset;
unsigned long end;
@@ -100,7 +102,7 @@ static int __reserve_doorbell(struct i915_guc_client *client)
* priority contexts, the second half for high-priority ones.
*/
offset = 0;
- end = GUC_NUM_DOORBELLS/2;
+ end = GUC_NUM_DOORBELLS / 2;
if (is_high_priority(client)) {
offset = end;
end += offset;
@@ -118,7 +120,7 @@ static int __reserve_doorbell(struct i915_guc_client *client)
return 0;
}
-static void __unreserve_doorbell(struct i915_guc_client *client)
+static void unreserve_doorbell(struct intel_guc_client *client)
{
GEM_BUG_ON(client->doorbell_id == GUC_DOORBELL_INVALID);
@@ -150,7 +152,7 @@ static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static struct guc_stage_desc *__get_stage_desc(struct i915_guc_client *client)
+static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client)
{
struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;
@@ -164,7 +166,7 @@ static struct guc_stage_desc *__get_stage_desc(struct i915_guc_client *client)
* client object which contains the page being used for the doorbell
*/
-static void __update_doorbell_desc(struct i915_guc_client *client, u16 new_id)
+static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id)
{
struct guc_stage_desc *desc;
@@ -173,12 +175,12 @@ static void __update_doorbell_desc(struct i915_guc_client *client, u16 new_id)
desc->db_id = new_id;
}
-static struct guc_doorbell_info *__get_doorbell(struct i915_guc_client *client)
+static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
{
return client->vaddr + client->doorbell_offset;
}
-static bool has_doorbell(struct i915_guc_client *client)
+static bool has_doorbell(struct intel_guc_client *client)
{
if (client->doorbell_id == GUC_DOORBELL_INVALID)
return false;
@@ -186,29 +188,21 @@ static bool has_doorbell(struct i915_guc_client *client)
return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
}
-static int __create_doorbell(struct i915_guc_client *client)
+static void __create_doorbell(struct intel_guc_client *client)
{
struct guc_doorbell_info *doorbell;
- int err;
doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_ENABLED;
doorbell->cookie = 0;
-
- err = __guc_allocate_doorbell(client->guc, client->stage_id);
- if (err)
- doorbell->db_status = GUC_DOORBELL_DISABLED;
-
- return err;
}
-static int __destroy_doorbell(struct i915_guc_client *client)
+static void __destroy_doorbell(struct intel_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
struct guc_doorbell_info *doorbell;
u16 db_id = client->doorbell_id;
- GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_DISABLED;
@@ -216,56 +210,49 @@ static int __destroy_doorbell(struct i915_guc_client *client)
/* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
* to go to zero after updating db_status before we call the GuC to
- * release the doorbell */
+ * release the doorbell
+ */
if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
WARN_ONCE(true, "Doorbell never became invalid after disable\n");
-
- return __guc_deallocate_doorbell(client->guc, client->stage_id);
}
-static int create_doorbell(struct i915_guc_client *client)
+static int create_doorbell(struct intel_guc_client *client)
{
int ret;
- ret = __reserve_doorbell(client);
- if (ret)
- return ret;
-
__update_doorbell_desc(client, client->doorbell_id);
-
- ret = __create_doorbell(client);
- if (ret)
- goto err;
+ __create_doorbell(client);
+
+ ret = __guc_allocate_doorbell(client->guc, client->stage_id);
+ if (ret) {
+ __destroy_doorbell(client);
+ __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
+ DRM_ERROR("Couldn't create client %u doorbell: %d\n",
+ client->stage_id, ret);
+ return ret;
+ }
return 0;
-
-err:
- __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
- __unreserve_doorbell(client);
- return ret;
}
-static int destroy_doorbell(struct i915_guc_client *client)
+static int destroy_doorbell(struct intel_guc_client *client)
{
- int err;
+ int ret;
GEM_BUG_ON(!has_doorbell(client));
- /* XXX: wait for any interrupts */
- /* XXX: wait for workqueue to drain */
-
- err = __destroy_doorbell(client);
- if (err)
- return err;
+ __destroy_doorbell(client);
+ ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
+ if (ret)
+ DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
+ client->stage_id, ret);
__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
- __unreserve_doorbell(client);
-
- return 0;
+ return ret;
}
-static unsigned long __select_cacheline(struct intel_guc* guc)
+static unsigned long __select_cacheline(struct intel_guc *guc)
{
unsigned long offset;
@@ -276,12 +263,12 @@ static unsigned long __select_cacheline(struct intel_guc* guc)
guc->db_cacheline += cache_line_size();
DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
- offset, guc->db_cacheline, cache_line_size());
+ offset, guc->db_cacheline, cache_line_size());
return offset;
}
static inline struct guc_process_desc *
-__get_process_desc(struct i915_guc_client *client)
+__get_process_desc(struct intel_guc_client *client)
{
return client->vaddr + client->proc_desc_offset;
}
@@ -290,7 +277,7 @@ __get_process_desc(struct i915_guc_client *client)
* Initialise the process descriptor shared with the GuC firmware.
*/
static void guc_proc_desc_init(struct intel_guc *guc,
- struct i915_guc_client *client)
+ struct intel_guc_client *client)
{
struct guc_process_desc *desc;
@@ -311,6 +298,37 @@ static void guc_proc_desc_init(struct intel_guc *guc,
desc->priority = client->priority;
}
+static int guc_stage_desc_pool_create(struct intel_guc *guc)
+{
+ struct i915_vma *vma;
+ void *vaddr;
+
+ vma = intel_guc_allocate_vma(guc,
+ PAGE_ALIGN(sizeof(struct guc_stage_desc) *
+ GUC_MAX_STAGE_DESCRIPTORS));
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma);
+ return PTR_ERR(vaddr);
+ }
+
+ guc->stage_desc_pool = vma;
+ guc->stage_desc_pool_vaddr = vaddr;
+ ida_init(&guc->stage_ids);
+
+ return 0;
+}
+
+static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
+{
+ ida_destroy(&guc->stage_ids);
+ i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
+ i915_vma_unpin_and_release(&guc->stage_desc_pool);
+}
+
/*
* Initialise/clear the stage descriptor shared with the GuC firmware.
*
@@ -319,7 +337,7 @@ static void guc_proc_desc_init(struct intel_guc *guc,
* write queue, etc).
*/
static void guc_stage_desc_init(struct intel_guc *guc,
- struct i915_guc_client *client)
+ struct intel_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
@@ -331,7 +349,10 @@ static void guc_stage_desc_init(struct intel_guc *guc,
desc = __get_stage_desc(client);
memset(desc, 0, sizeof(*desc));
- desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE | GUC_STAGE_DESC_ATTR_KERNEL;
+ desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
+ GUC_STAGE_DESC_ATTR_KERNEL;
+ if (is_high_priority(client))
+ desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT;
desc->stage_id = client->stage_id;
desc->priority = client->priority;
desc->db_id = client->doorbell_id;
@@ -356,7 +377,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
* submission or, in other words, not using a direct submission
* model) the KMD's LRCA is not used for any work submission.
* Instead, the GuC uses the LRCA of the user mode context (see
- * guc_wq_item_append below).
+ * guc_add_request below).
*/
lrc->context_desc = lower_32_bits(ce->lrc_desc);
@@ -365,7 +386,8 @@ static void guc_stage_desc_init(struct intel_guc *guc,
guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
/* XXX: In direct submission, the GuC wants the HW context id
- * here. In proxy submission, it wants the stage id */
+ * here. In proxy submission, it wants the stage id
+ */
lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
(guc_engine_id << GUC_ELC_ENGINE_OFFSET);
@@ -378,7 +400,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
}
DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
- client->engines, desc->engines_used);
+ client->engines, desc->engines_used);
WARN_ON(desc->engines_used == 0);
/*
@@ -398,7 +420,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
}
static void guc_stage_desc_fini(struct intel_guc *guc,
- struct i915_guc_client *client)
+ struct intel_guc_client *client)
{
struct guc_stage_desc *desc;
@@ -407,23 +429,19 @@ static void guc_stage_desc_fini(struct intel_guc *guc,
}
/* Construct a Work Item and append it to the GuC's Work Queue */
-static void guc_wq_item_append(struct i915_guc_client *client,
- struct drm_i915_gem_request *rq)
+static void guc_wq_item_append(struct intel_guc_client *client,
+ u32 target_engine, u32 context_desc,
+ u32 ring_tail, u32 fence_id)
{
/* wqi_len is in DWords, and does not include the one-word header */
const size_t wqi_size = sizeof(struct guc_wq_item);
const u32 wqi_len = wqi_size / sizeof(u32) - 1;
- struct intel_engine_cs *engine = rq->engine;
- struct i915_gem_context *ctx = rq->ctx;
struct guc_process_desc *desc = __get_process_desc(client);
struct guc_wq_item *wqi;
- u32 ring_tail, wq_off;
+ u32 wq_off;
lockdep_assert_held(&client->wq_lock);
- ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
- GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
-
/* For now a workqueue item is 4 DWs and the workqueue buffer is 2 pages,
 * so the wqi structure should neither cross a page boundary nor wrap to
 * the beginning. This simplifies the implementation below.
@@ -445,19 +463,18 @@ static void guc_wq_item_append(struct i915_guc_client *client,
/* Now fill in the 4-word work queue item */
wqi->header = WQ_TYPE_INORDER |
(wqi_len << WQ_LEN_SHIFT) |
- (engine->guc_id << WQ_TARGET_SHIFT) |
+ (target_engine << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;
-
- wqi->context_desc = lower_32_bits(intel_lr_context_descriptor(ctx, engine));
-
+ wqi->context_desc = context_desc;
wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
- wqi->fence_id = rq->global_seqno;
+ GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
+ wqi->fence_id = fence_id;
- /* Postincrement WQ tail for next time. */
+ /* Make the update visible to GuC */
WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}
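/*
 * Editor's sketch (not part of the patch): the tail update above relies on
 * GUC_WQ_SIZE being a power of two, so masking with (size - 1) wraps the
 * offset without a division. A minimal standalone illustration, with
 * WQ_SIZE_SKETCH as a hypothetical stand-in for the two-page GUC_WQ_SIZE:
 */
#include <assert.h>
#include <stdint.h>

#define WQ_SIZE_SKETCH 0x2000 /* 2 pages; assumed value, for illustration */

static uint32_t advance_wq_tail(uint32_t tail, uint32_t wqi_size)
{
	assert((WQ_SIZE_SKETCH & (WQ_SIZE_SKETCH - 1)) == 0); /* power of two */
	return (tail + wqi_size) & (WQ_SIZE_SKETCH - 1); /* wraps past the end */
}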
-static void guc_reset_wq(struct i915_guc_client *client)
+static void guc_reset_wq(struct intel_guc_client *client)
{
struct guc_process_desc *desc = __get_process_desc(client);
@@ -465,7 +482,7 @@ static void guc_reset_wq(struct i915_guc_client *client)
desc->tail = 0;
}
-static void guc_ring_doorbell(struct i915_guc_client *client)
+static void guc_ring_doorbell(struct intel_guc_client *client)
{
struct guc_doorbell_info *db;
u32 cookie;
@@ -475,29 +492,166 @@ static void guc_ring_doorbell(struct i915_guc_client *client)
/* pointer of current doorbell cacheline */
db = __get_doorbell(client);
- /* we're not expecting the doorbell cookie to change behind our back */
+ /*
+ * We're not expecting the doorbell cookie to change behind our back;
+ * we also need to treat 0 as a reserved value.
+ */
cookie = READ_ONCE(db->cookie);
- WARN_ON_ONCE(xchg(&db->cookie, cookie + 1) != cookie);
+ WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie);
/* XXX: doorbell was lost and need to acquire it again */
GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
}
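/*
 * Editor's sketch (not part of the patch): the GNU "a ?: b" expression in
 * the xchg() above yields cookie + 1 unless that wraps to the reserved
 * value 0, in which case it skips ahead to cookie + 2. A standalone
 * illustration with a hypothetical helper name:
 */
#include <stdint.h>

static uint32_t next_cookie(uint32_t cookie)
{
	uint32_t next = cookie + 1;

	return next ? next : cookie + 2; /* same as (cookie + 1 ?: cookie + 2) */
}
/* e.g. next_cookie(5) == 6, next_cookie(0xffffffff) == 1; never returns 0 */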
+static void guc_add_request(struct intel_guc *guc,
+ struct drm_i915_gem_request *rq)
+{
+ struct intel_guc_client *client = guc->execbuf_client;
+ struct intel_engine_cs *engine = rq->engine;
+ u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(rq->ctx,
+ engine));
+ u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
+
+ spin_lock(&client->wq_lock);
+
+ guc_wq_item_append(client, engine->guc_id, ctx_desc,
+ ring_tail, rq->global_seqno);
+ guc_ring_doorbell(client);
+
+ client->submissions[engine->id] += 1;
+
+ spin_unlock(&client->wq_lock);
+}
+
+/*
+ * When we're doing submissions using the regular execlists backend, writing
+ * to ELSP from the CPU side is enough to make sure that writes to ringbuffer
+ * pages pinned in the mappable aperture portion of the GGTT are visible to
+ * the command streamer. Writes done by the GuC on our behalf do not guarantee
+ * such ordering; therefore, to ensure the flush, we issue a POSTING READ.
+ */
+static void flush_ggtt_writes(struct i915_vma *vma)
+{
+ struct drm_i915_private *dev_priv = to_i915(vma->obj->base.dev);
+
+ if (i915_vma_is_map_and_fenceable(vma))
+ POSTING_READ_FW(GUC_STATUS);
+}
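/*
 * Editor's sketch (not part of the patch): a "posting read" is simply an
 * uncached MMIO read whose value is discarded; completing the read forces
 * earlier write-combined GGTT writes out before the GuC samples them. A
 * conceptual, hypothetical helper (assumes <linux/io.h> for readl()):
 */
static inline void posting_read_sketch(void __iomem *regs, unsigned int offset)
{
	(void)readl(regs + offset); /* value unused; the read itself is the flush */
}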
+
+#define GUC_PREEMPT_FINISHED 0x1
+#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8
+static void inject_preempt_context(struct work_struct *work)
+{
+ struct guc_preempt_work *preempt_work =
+ container_of(work, typeof(*preempt_work), work);
+ struct intel_engine_cs *engine = preempt_work->engine;
+ struct intel_guc *guc = container_of(preempt_work, typeof(*guc),
+ preempt_work[engine->id]);
+ struct intel_guc_client *client = guc->preempt_client;
+ struct guc_stage_desc *stage_desc = __get_stage_desc(client);
+ struct intel_ring *ring = client->owner->engine[engine->id].ring;
+ u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
+ engine));
+ u32 *cs = ring->vaddr + ring->tail;
+ u32 data[7];
+
+ if (engine->id == RCS) {
+ cs = gen8_emit_ggtt_write_rcs(cs, GUC_PREEMPT_FINISHED,
+ intel_hws_preempt_done_address(engine));
+ } else {
+ cs = gen8_emit_ggtt_write(cs, GUC_PREEMPT_FINISHED,
+ intel_hws_preempt_done_address(engine));
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ GEM_BUG_ON(!IS_ALIGNED(ring->size,
+ GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32)));
+ GEM_BUG_ON((void *)cs - (ring->vaddr + ring->tail) !=
+ GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32));
+
+ ring->tail += GUC_PREEMPT_BREADCRUMB_DWORDS * sizeof(u32);
+ ring->tail &= (ring->size - 1);
+
+ flush_ggtt_writes(ring->vma);
+
+ spin_lock_irq(&client->wq_lock);
+ guc_wq_item_append(client, engine->guc_id, ctx_desc,
+ ring->tail / sizeof(u64), 0);
+ spin_unlock_irq(&client->wq_lock);
+
+ /*
+ * If GuC firmware performs an engine reset while that engine had
+ * a preemption pending, it will set the terminated attribute bit
+ * on our preemption stage descriptor. GuC firmware retains all
+ * pending work items for a high-priority GuC client, unlike the
+ * normal-priority GuC client where work items are dropped. It
+ * wants to make sure the preempt-to-idle work doesn't run when
+ * scheduling resumes, and uses this bit to inform its scheduler
+ * and presumably us as well. Our job is to clear it for the next
+ * preemption after reset, otherwise that and future preemptions
+ * will never complete. We'll just clear it every time.
+ */
+ stage_desc->attribute &= ~GUC_STAGE_DESC_ATTR_TERMINATED;
+
+ data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION;
+ data[1] = client->stage_id;
+ data[2] = INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q |
+ INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q;
+ data[3] = engine->guc_id;
+ data[4] = guc->execbuf_client->priority;
+ data[5] = guc->execbuf_client->stage_id;
+ data[6] = guc_ggtt_offset(guc->shared_data);
+
+ if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
+ execlists_clear_active(&engine->execlists,
+ EXECLISTS_ACTIVE_PREEMPT);
+ tasklet_schedule(&engine->execlists.tasklet);
+ }
+}
+
+/*
+ * We're using the user interrupt and a HWSP value to mark that preemption
+ * has finished and the GPU is idle. Normally we could unwind and continue,
+ * much as on the execlists submission path. Unfortunately, with GuC we also
+ * need to wait for it to finish its own postprocessing before attempting to
+ * submit; otherwise GuC may silently ignore our submissions, and thus we
+ * risk losing a request at best, and executing out-of-order and causing a
+ * kernel panic at worst.
+ */
+#define GUC_PREEMPT_POSTPROCESS_DELAY_MS 10
+static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
+{
+ struct intel_guc *guc = &engine->i915->guc;
+ struct guc_shared_ctx_data *data = guc->shared_data_vaddr;
+ struct guc_ctx_report *report =
+ &data->preempt_ctx_report[engine->guc_id];
+
+ WARN_ON(wait_for_atomic(report->report_return_status ==
+ INTEL_GUC_REPORT_STATUS_COMPLETE,
+ GUC_PREEMPT_POSTPROCESS_DELAY_MS));
+ /*
+ * GuC is expecting that we're also going to clear the affected context
+ * counter; let's also reset the return status so that we don't depend
+ * on GuC resetting it after receiving another preempt action
+ */
+ report->affected_count = 0;
+ report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
+}
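/*
 * Editor's sketch (not part of the patch): wait_for_atomic() above is a
 * bounded busy-wait on a condition. A rough analogue using a simple spin
 * budget instead of a ktime-based deadline (hypothetical helper):
 */
#include <stdbool.h>

static bool poll_until(bool (*done)(void *), void *arg, unsigned long spins)
{
	while (spins--) {
		if (done(arg))
			return true;
	}
	return false; /* caller warns on timeout, as the WARN_ON() above does */
}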
+
/**
- * i915_guc_submit() - Submit commands through GuC
+ * guc_submit() - Submit commands through GuC
* @engine: engine associated with the commands
*
* The only error here arises if the doorbell hardware isn't functioning
* as expected, which really shouldn't happen.
*/
-static void i915_guc_submit(struct intel_engine_cs *engine)
+static void guc_submit(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_guc *guc = &dev_priv->guc;
- struct i915_guc_client *client = guc->execbuf_client;
+ struct intel_guc *guc = &engine->i915->guc;
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
- const unsigned int engine_id = engine->id;
unsigned int n;
for (n = 0; n < execlists_num_ports(execlists); n++) {
@@ -508,57 +662,22 @@ static void i915_guc_submit(struct intel_engine_cs *engine)
if (rq && count == 0) {
port_set(&port[n], port_pack(rq, ++count));
- if (i915_vma_is_map_and_fenceable(rq->ring->vma))
- POSTING_READ_FW(GUC_STATUS);
+ flush_ggtt_writes(rq->ring->vma);
- spin_lock(&client->wq_lock);
-
- guc_wq_item_append(client, rq);
- guc_ring_doorbell(client);
-
- client->submissions[engine_id] += 1;
-
- spin_unlock(&client->wq_lock);
+ guc_add_request(guc, rq);
}
}
}
-static void nested_enable_signaling(struct drm_i915_gem_request *rq)
-{
- /* If we use dma_fence_enable_sw_signaling() directly, lockdep
- * detects an ordering issue between the fence lockclass and the
- * global_timeline. This circular dependency can only occur via 2
- * different fences (but same fence lockclass), so we use the nesting
- * annotation here to prevent the warn, equivalent to the nesting
- * inside i915_gem_request_submit() for when we also enable the
- * signaler.
- */
-
- if (test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &rq->fence.flags))
- return;
-
- GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
- trace_dma_fence_enable_signal(&rq->fence);
-
- spin_lock_nested(&rq->lock, SINGLE_DEPTH_NESTING);
- intel_engine_enable_signaling(rq, true);
- spin_unlock(&rq->lock);
-}
-
static void port_assign(struct execlist_port *port,
struct drm_i915_gem_request *rq)
{
- GEM_BUG_ON(rq == port_request(port));
-
- if (port_isset(port))
- i915_gem_request_put(port_request(port));
+ GEM_BUG_ON(port_isset(port));
- port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
- nested_enable_signaling(rq);
+ port_set(port, i915_gem_request_get(rq));
}
-static void i915_guc_dequeue(struct intel_engine_cs *engine)
+static void guc_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
@@ -568,13 +687,35 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
bool submit = false;
struct rb_node *rb;
- if (port_isset(port))
- port++;
-
spin_lock_irq(&engine->timeline->lock);
rb = execlists->first;
GEM_BUG_ON(rb_first(&execlists->queue) != rb);
- while (rb) {
+
+ if (!rb)
+ goto unlock;
+
+ if (port_isset(port)) {
+ if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
+ struct guc_preempt_work *preempt_work =
+ &engine->i915->guc.preempt_work[engine->id];
+
+ if (rb_entry(rb, struct i915_priolist, node)->priority >
+ max(port_request(port)->priotree.priority, 0)) {
+ execlists_set_active(execlists,
+ EXECLISTS_ACTIVE_PREEMPT);
+ queue_work(engine->i915->guc.preempt_wq,
+ &preempt_work->work);
+ goto unlock;
+ }
+ }
+
+ port++;
+ if (port_isset(port))
+ goto unlock;
+ }
+ GEM_BUG_ON(port_isset(port));
+
+ do {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
struct drm_i915_gem_request *rq, *rn;
@@ -592,10 +733,10 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
}
INIT_LIST_HEAD(&rq->priotree.link);
- rq->priotree.priority = INT_MAX;
__i915_gem_request_submit(rq);
- trace_i915_gem_request_in(rq, port_index(port, execlists));
+ trace_i915_gem_request_in(rq,
+ port_index(port, execlists));
last = rq;
submit = true;
}
@@ -605,24 +746,23 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
- }
+ } while (rb);
done:
execlists->first = rb;
if (submit) {
port_assign(port, last);
execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
- i915_guc_submit(engine);
+ guc_submit(engine);
}
+unlock:
spin_unlock_irq(&engine->timeline->lock);
}
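/*
 * Editor's sketch (not part of the patch): the preemption check above fires
 * only when the best queued priority beats max(running priority, 0), i.e. a
 * queued request must be above normal (zero) priority to trigger preemption
 * of idle- or normal-priority work. A hypothetical restatement:
 */
static bool should_preempt_sketch(int queued_prio, int running_prio)
{
	int floor = running_prio > 0 ? running_prio : 0; /* max(running, 0) */

	return queued_prio > floor;
}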
-static void i915_guc_irq_handler(unsigned long data)
+static void guc_submission_tasklet(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
- const struct execlist_port * const last_port =
- &execlists->port[execlists->port_mask];
struct drm_i915_gem_request *rq;
rq = port_request(&port[0]);
@@ -637,14 +777,26 @@ static void i915_guc_irq_handler(unsigned long data)
if (!rq)
execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
- if (!port_isset(last_port))
- i915_guc_dequeue(engine);
+ if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
+ intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
+ GUC_PREEMPT_FINISHED) {
+ execlists_cancel_port_requests(&engine->execlists);
+ execlists_unwind_incomplete_requests(execlists);
+
+ wait_for_guc_preempt_report(engine);
+
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
+ intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
+ }
+
+ if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
+ guc_dequeue(engine);
}
/*
* Everything below here is concerned with setup & teardown, and is
* therefore not part of the somewhat time-critical batch-submission
- * path of i915_guc_submit() above.
+ * path of guc_submit() above.
*/
/* Check that a doorbell register is in the expected state */
@@ -668,101 +820,67 @@ static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
return false;
}
-/*
- * If the GuC thinks that the doorbell is unassigned (e.g. because we reset and
- * reloaded the GuC FW) we can use this function to tell the GuC to reassign the
- * doorbell to the rightful owner.
- */
-static int __reset_doorbell(struct i915_guc_client* client, u16 db_id)
+static bool guc_verify_doorbells(struct intel_guc *guc)
{
- int err;
+ u16 db_id;
- __update_doorbell_desc(client, db_id);
- err = __create_doorbell(client);
- if (!err)
- err = __destroy_doorbell(client);
+ for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
+ if (!doorbell_ok(guc, db_id))
+ return false;
- return err;
+ return true;
}
-/*
- * Set up & tear down each unused doorbell in turn, to ensure that all doorbell
- * HW is (re)initialised. For that end, we might have to borrow the first
- * client. Also, tell GuC about all the doorbells in use by all clients.
- * We do this because the KMD, the GuC and the doorbell HW can easily go out of
- * sync (e.g. we can reset the GuC, but not the doorbel HW).
- */
-static int guc_init_doorbell_hw(struct intel_guc *guc)
+static int guc_clients_doorbell_init(struct intel_guc *guc)
{
- struct i915_guc_client *client = guc->execbuf_client;
- bool recreate_first_client = false;
- u16 db_id;
int ret;
- /* For unused doorbells, make sure they are disabled */
- for_each_clear_bit(db_id, guc->doorbell_bitmap, GUC_NUM_DOORBELLS) {
- if (doorbell_ok(guc, db_id))
- continue;
-
- if (has_doorbell(client)) {
- /* Borrow execbuf_client (we will recreate it later) */
- destroy_doorbell(client);
- recreate_first_client = true;
- }
-
- ret = __reset_doorbell(client, db_id);
- WARN(ret, "Doorbell %u reset failed, err %d\n", db_id, ret);
- }
-
- if (recreate_first_client) {
- ret = __reserve_doorbell(client);
- if (unlikely(ret)) {
- DRM_ERROR("Couldn't re-reserve first client db: %d\n", ret);
- return ret;
- }
-
- __update_doorbell_desc(client, client->doorbell_id);
- }
+ ret = create_doorbell(guc->execbuf_client);
+ if (ret)
+ return ret;
- /* Now for every client (and not only execbuf_client) make sure their
- * doorbells are known by the GuC */
- //for (client = client_list; client != NULL; client = client->next)
- {
- ret = __create_doorbell(client);
- if (ret) {
- DRM_ERROR("Couldn't recreate client %u doorbell: %d\n",
- client->stage_id, ret);
- return ret;
- }
+ ret = create_doorbell(guc->preempt_client);
+ if (ret) {
+ destroy_doorbell(guc->execbuf_client);
+ return ret;
}
- /* Read back & verify all (used & unused) doorbell registers */
- for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
- WARN_ON(!doorbell_ok(guc, db_id));
-
return 0;
}
+static void guc_clients_doorbell_fini(struct intel_guc *guc)
+{
+ /*
+ * By the time we're here, GuC has already been reset.
+ * Instead of trying (in vain) to communicate with it, let's just
+ * clean up the doorbell HW and our internal state.
+ */
+ __destroy_doorbell(guc->preempt_client);
+ __update_doorbell_desc(guc->preempt_client, GUC_DOORBELL_INVALID);
+ __destroy_doorbell(guc->execbuf_client);
+ __update_doorbell_desc(guc->execbuf_client, GUC_DOORBELL_INVALID);
+}
+
/**
- * guc_client_alloc() - Allocate an i915_guc_client
+ * guc_client_alloc() - Allocate an intel_guc_client
* @dev_priv: driver private data structure
* @engines: The set of engines to enable for this client
* @priority: one of four priority levels: _CRITICAL, _HIGH, _NORMAL and _LOW
- * The kernel client to replace ExecList submission is created with
- * NORMAL priority. Priority of a client for scheduler can be HIGH,
- * while a preemption context can use CRITICAL.
+ * The kernel client to replace ExecList submission is created with
+ * NORMAL priority. Priority of a client for scheduler can be HIGH,
+ * while a preemption context can use CRITICAL.
* @ctx: the context that owns the client (we use the default render
- * context)
+ * context)
*
- * Return: An i915_guc_client object if success, else NULL.
+ * Return: An intel_guc_client object on success, else an error pointer.
*/
-static struct i915_guc_client *
+static struct intel_guc_client *
guc_client_alloc(struct drm_i915_private *dev_priv,
u32 engines,
u32 priority,
struct i915_gem_context *ctx)
{
- struct i915_guc_client *client;
+ struct intel_guc_client *client;
struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
void *vaddr;
@@ -780,7 +898,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
spin_lock_init(&client->wq_lock);
ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
- GFP_KERNEL);
+ GFP_KERNEL);
if (ret < 0)
goto err_client;
@@ -818,7 +936,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
guc_proc_desc_init(guc, client);
guc_stage_desc_init(guc, client);
- ret = create_doorbell(client);
+ ret = reserve_doorbell(client);
if (ret)
goto err_vaddr;
@@ -840,17 +958,9 @@ err_client:
return ERR_PTR(ret);
}
-static void guc_client_free(struct i915_guc_client *client)
+static void guc_client_free(struct intel_guc_client *client)
{
- /*
- * XXX: wait for any outstanding submissions before freeing memory.
- * Be sure to drop any locks
- */
-
- /* FIXME: in many cases, by the time we get here the GuC has been
- * reset, so we cannot destroy the doorbell properly. Ignore the
- * error message for now */
- destroy_doorbell(client);
+ unreserve_doorbell(client);
guc_stage_desc_fini(client->guc, client);
i915_gem_object_unpin_map(client->vma->obj);
i915_vma_unpin_and_release(&client->vma);
@@ -858,6 +968,50 @@ static void guc_client_free(struct i915_guc_client *client)
kfree(client);
}
+static int guc_clients_create(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_guc_client *client;
+
+ GEM_BUG_ON(guc->execbuf_client);
+ GEM_BUG_ON(guc->preempt_client);
+
+ client = guc_client_alloc(dev_priv,
+ INTEL_INFO(dev_priv)->ring_mask,
+ GUC_CLIENT_PRIORITY_KMD_NORMAL,
+ dev_priv->kernel_context);
+ if (IS_ERR(client)) {
+ DRM_ERROR("Failed to create GuC client for submission!\n");
+ return PTR_ERR(client);
+ }
+ guc->execbuf_client = client;
+
+ client = guc_client_alloc(dev_priv,
+ INTEL_INFO(dev_priv)->ring_mask,
+ GUC_CLIENT_PRIORITY_KMD_HIGH,
+ dev_priv->preempt_context);
+ if (IS_ERR(client)) {
+ DRM_ERROR("Failed to create GuC client for preemption!\n");
+ guc_client_free(guc->execbuf_client);
+ guc->execbuf_client = NULL;
+ return PTR_ERR(client);
+ }
+ guc->preempt_client = client;
+
+ return 0;
+}
+
+static void guc_clients_destroy(struct intel_guc *guc)
+{
+ struct intel_guc_client *client;
+
+ client = fetch_and_zero(&guc->execbuf_client);
+ guc_client_free(client);
+
+ client = fetch_and_zero(&guc->preempt_client);
+ guc_client_free(client);
+}
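/*
 * Editor's sketch (not part of the patch): fetch_and_zero() used above is
 * the driver's read-then-clear helper; conceptually it behaves like the
 * macro below (a sketch, not necessarily the exact i915 definition):
 */
#define fetch_and_zero_sketch(ptr) ({		\
	typeof(*(ptr)) __val = *(ptr);		\
	*(ptr) = (typeof(*(ptr)))0;		\
	__val;					\
})
/*
 * So "client = fetch_and_zero(&guc->execbuf_client)" hands back the pointer
 * while leaving NULL behind, which keeps a stale pointer from being freed
 * twice along this path.
 */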
+
static void guc_policy_init(struct guc_policy *policy)
{
policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US;
@@ -941,7 +1095,8 @@ static int guc_ads_create(struct intel_guc *guc)
* because our GuC shared data is there.
*/
blob->ads.golden_context_lrca =
- guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) + skipped_offset;
+ guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) +
+ skipped_offset;
/*
* The GuC expects us to exclude the portion of the context image that
@@ -950,7 +1105,8 @@ static int guc_ads_create(struct intel_guc *guc)
* dwords). Weird guc is weird.
*/
for_each_engine(engine, dev_priv, id)
- blob->ads.eng_state_size[engine->guc_id] = engine->context_size - skipped_size;
+ blob->ads.eng_state_size[engine->guc_id] =
+ engine->context_size - skipped_size;
base = guc_ggtt_offset(vma);
blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
@@ -971,62 +1127,68 @@ static void guc_ads_destroy(struct intel_guc *guc)
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
*/
-int i915_guc_submission_init(struct drm_i915_private *dev_priv)
+int intel_guc_submission_init(struct intel_guc *guc)
{
- struct intel_guc *guc = &dev_priv->guc;
- struct i915_vma *vma;
- void *vaddr;
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
if (guc->stage_desc_pool)
return 0;
- vma = intel_guc_allocate_vma(guc,
- PAGE_ALIGN(sizeof(struct guc_stage_desc) *
- GUC_MAX_STAGE_DESCRIPTORS));
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- guc->stage_desc_pool = vma;
-
- vaddr = i915_gem_object_pin_map(guc->stage_desc_pool->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto err_vma;
- }
-
- guc->stage_desc_pool_vaddr = vaddr;
+ ret = guc_stage_desc_pool_create(guc);
+ if (ret)
+ return ret;
+ /*
+ * Keep static analysers happy, let them know that we allocated the
+ * vma after testing that it didn't exist earlier.
+ */
+ GEM_BUG_ON(!guc->stage_desc_pool);
ret = intel_guc_log_create(guc);
if (ret < 0)
- goto err_vaddr;
+ goto err_stage_desc_pool;
ret = guc_ads_create(guc);
if (ret < 0)
goto err_log;
+ GEM_BUG_ON(!guc->ads_vma);
- ida_init(&guc->stage_ids);
+ WARN_ON(!guc_verify_doorbells(guc));
+ ret = guc_clients_create(guc);
+ if (ret)
+ return ret;
+
+ for_each_engine(engine, dev_priv, id) {
+ guc->preempt_work[id].engine = engine;
+ INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context);
+ }
return 0;
err_log:
intel_guc_log_destroy(guc);
-err_vaddr:
- i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
-err_vma:
- i915_vma_unpin_and_release(&guc->stage_desc_pool);
+err_stage_desc_pool:
+ guc_stage_desc_pool_destroy(guc);
return ret;
}
-void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
+void intel_guc_submission_fini(struct intel_guc *guc)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id)
+ cancel_work_sync(&guc->preempt_work[id].work);
+
+ guc_clients_destroy(guc);
+ WARN_ON(!guc_verify_doorbells(guc));
- ida_destroy(&guc->stage_ids);
guc_ads_destroy(guc);
intel_guc_log_destroy(guc);
- i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
- i915_vma_unpin_and_release(&guc->stage_desc_pool);
+ guc_stage_desc_pool_destroy(guc);
}
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
@@ -1036,7 +1198,9 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
enum intel_engine_id id;
int irqs;
- /* tell all command streamers to forward interrupts (but not vblank) to GuC */
+ /* tell all command streamers to forward interrupts (but not vblank)
+ * to GuC
+ */
irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
@@ -1097,10 +1261,19 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
}
-int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
+static void guc_submission_park(struct intel_engine_cs *engine)
{
- struct intel_guc *guc = &dev_priv->guc;
- struct i915_guc_client *client = guc->execbuf_client;
+ intel_engine_unpin_breadcrumbs_irq(engine);
+}
+
+static void guc_submission_unpark(struct intel_engine_cs *engine)
+{
+ intel_engine_pin_breadcrumbs_irq(engine);
+}
+
+int intel_guc_submission_enable(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
@@ -1118,61 +1291,49 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
sizeof(struct guc_wq_item) *
I915_NUM_ENGINES > GUC_WQ_SIZE);
- if (!client) {
- client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
- GUC_CLIENT_PRIORITY_KMD_NORMAL,
- dev_priv->kernel_context);
- if (IS_ERR(client)) {
- DRM_ERROR("Failed to create GuC client for execbuf!\n");
- return PTR_ERR(client);
- }
+ GEM_BUG_ON(!guc->execbuf_client);
- guc->execbuf_client = client;
- }
+ guc_reset_wq(guc->execbuf_client);
+ guc_reset_wq(guc->preempt_client);
err = intel_guc_sample_forcewake(guc);
if (err)
- goto err_execbuf_client;
-
- guc_reset_wq(client);
+ return err;
- err = guc_init_doorbell_hw(guc);
+ err = guc_clients_doorbell_init(guc);
if (err)
- goto err_execbuf_client;
+ return err;
/* Take over from manual control of ELSP (execlists) */
guc_interrupts_capture(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct intel_engine_execlists * const execlists = &engine->execlists;
- /* The tasklet was initialised by execlists, and may be in
- * a state of flux (across a reset) and so we just want to
- * take over the callback without changing any other state
- * in the tasklet.
- */
- execlists->irq_tasklet.func = i915_guc_irq_handler;
- clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
- tasklet_schedule(&execlists->irq_tasklet);
+ struct intel_engine_execlists * const execlists =
+ &engine->execlists;
+
+ execlists->tasklet.func = guc_submission_tasklet;
+ engine->park = guc_submission_park;
+ engine->unpark = guc_submission_unpark;
+
+ engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
}
return 0;
-
-err_execbuf_client:
- guc_client_free(guc->execbuf_client);
- guc->execbuf_client = NULL;
- return err;
}
-void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
+void intel_guc_submission_disable(struct intel_guc *guc)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
guc_interrupts_release(dev_priv);
+ guc_clients_doorbell_fini(guc);
/* Revert back to manual ELSP submission */
intel_engines_reset_default_submission(dev_priv);
-
- guc_client_free(guc->execbuf_client);
- guc->execbuf_client = NULL;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/intel_guc.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h
index cb4353b59059..fb081cefef93 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.h
+++ b/drivers/gpu/drm/i915/intel_guc_submission.h
@@ -22,8 +22,8 @@
*
*/
-#ifndef _I915_GUC_SUBMISSION_H_
-#define _I915_GUC_SUBMISSION_H_
+#ifndef _INTEL_GUC_SUBMISSION_H_
+#define _INTEL_GUC_SUBMISSION_H_
#include <linux/spinlock.h>
@@ -52,7 +52,7 @@ struct drm_i915_private;
* queue (a circular array of work items), again described in the process
* descriptor. Work queue pages are mapped momentarily as required.
*/
-struct i915_guc_client {
+struct intel_guc_client {
struct i915_vma *vma;
void *vaddr;
struct i915_gem_context *owner;
@@ -67,14 +67,17 @@ struct i915_guc_client {
u16 doorbell_id;
unsigned long doorbell_offset;
+ /* Protects GuC client's WQ access */
spinlock_t wq_lock;
/* Per-engine counts of GuC submissions */
u64 submissions[I915_NUM_ENGINES];
};
-int i915_guc_submission_init(struct drm_i915_private *dev_priv);
-int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
-void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
-void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
+int intel_guc_submission_init(struct intel_guc *guc);
+int intel_guc_submission_enable(struct intel_guc *guc);
+void intel_guc_submission_disable(struct intel_guc *guc);
+void intel_guc_submission_fini(struct intel_guc *guc);
+int intel_guc_preempt_work_create(struct intel_guc *guc);
+void intel_guc_preempt_work_destroy(struct intel_guc *guc);
#endif
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index b4a7f31f0214..a2fe7c8d4477 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -95,12 +95,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return 0;
}
- if (!i915_modparams.enable_execlists) {
- DRM_ERROR("i915 GVT-g loading failed due to disabled execlists mode\n");
- return -EIO;
- }
-
- if (i915_modparams.enable_guc_submission) {
+ if (USES_GUC_SUBMISSION(dev_priv)) {
DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
return -EIO;
}
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 12ac270a5f93..31f01d64c021 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -27,13 +27,9 @@
static bool
ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
{
- if (INTEL_GEN(engine->i915) >= 8) {
- return (ipehr >> 23) == 0x1c;
- } else {
- ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
- return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
- MI_SEMAPHORE_REGISTER);
- }
+ ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
+ return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
+ MI_SEMAPHORE_REGISTER);
}
static struct intel_engine_cs *
@@ -41,31 +37,20 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
u64 offset)
{
struct drm_i915_private *dev_priv = engine->i915;
+ u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
struct intel_engine_cs *signaller;
enum intel_engine_id id;
- if (INTEL_GEN(dev_priv) >= 8) {
- for_each_engine(signaller, dev_priv, id) {
- if (engine == signaller)
- continue;
-
- if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
- return signaller;
- }
- } else {
- u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
-
- for_each_engine(signaller, dev_priv, id) {
- if(engine == signaller)
- continue;
+ for_each_engine(signaller, dev_priv, id) {
+ if (engine == signaller)
+ continue;
- if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
- return signaller;
- }
+ if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
+ return signaller;
}
- DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
- engine->name, ipehr, offset);
+ DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x\n",
+ engine->name, ipehr);
return ERR_PTR(-ENODEV);
}
@@ -135,11 +120,6 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
return NULL;
*seqno = ioread32(vaddr + head + 4) + 1;
- if (INTEL_GEN(dev_priv) >= 8) {
- offset = ioread32(vaddr + head + 12);
- offset <<= 32;
- offset |= ioread32(vaddr + head + 8);
- }
return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
}
@@ -273,7 +253,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
return ENGINE_WAIT_KICK;
}
- if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+ if (IS_GEN(dev_priv, 6, 7) && tmp & RING_WAIT_SEMAPHORE) {
switch (semaphore_passed(engine)) {
default:
return ENGINE_DEAD;
@@ -369,13 +349,18 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
case ENGINE_ACTIVE_HEAD:
case ENGINE_ACTIVE_SUBUNITS:
- /* Seqno stuck with still active engine gets leeway,
+ /*
+ * A seqno stuck with a still-active engine gets leeway,
* in hopes that it is just a long shader.
*/
timeout = I915_SEQNO_DEAD_TIMEOUT;
break;
case ENGINE_DEAD:
+ if (drm_debug & DRM_UT_DRIVER) {
+ struct drm_printer p = drm_debug_printer("hangcheck");
+ intel_engine_dump(engine, &p, "%s", engine->name);
+ }
break;
default:
@@ -444,18 +429,18 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct intel_engine_hangcheck cur_state, *hc = &cur_state;
const bool busy = intel_engine_has_waiter(engine);
+ struct intel_engine_hangcheck hc;
semaphore_clear_deadlocks(dev_priv);
- hangcheck_load_sample(engine, hc);
- hangcheck_accumulate_sample(engine, hc);
- hangcheck_store_sample(engine, hc);
+ hangcheck_load_sample(engine, &hc);
+ hangcheck_accumulate_sample(engine, &hc);
+ hangcheck_store_sample(engine, &hc);
if (engine->hangcheck.stalled) {
hung |= intel_engine_flag(engine);
- if (hc->action != ENGINE_DEAD)
+ if (hc.action != ENGINE_DEAD)
stuck |= intel_engine_flag(engine);
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4dea833f9d1b..bced7b954d93 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -186,7 +186,7 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
@@ -245,7 +245,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
@@ -362,7 +362,7 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
@@ -513,12 +513,14 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
static void
intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
union hdmi_infoframe frame;
int ret;
ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+ conn_state->connector,
&crtc_state->base.adjusted_mode);
if (ret < 0)
return;
@@ -536,7 +538,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
+ u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -585,7 +587,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state)
@@ -687,7 +689,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
+ u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -726,7 +728,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
static void cpt_set_infoframes(struct drm_encoder *encoder,
@@ -769,7 +771,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
static void vlv_set_infoframes(struct drm_encoder *encoder,
@@ -783,7 +785,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
+ u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -822,7 +824,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
static void hsw_set_infoframes(struct drm_encoder *encoder,
@@ -855,7 +857,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
@@ -958,6 +960,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
u32 tmp, flags = 0;
int dotclock;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
+
tmp = I915_READ(intel_hdmi->hdmi_reg);
if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
@@ -1205,7 +1209,8 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder);
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
}
@@ -1215,7 +1220,8 @@ static void pch_disable_hdmi(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder);
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
}
static void pch_post_disable_hdmi(struct intel_encoder *encoder,
@@ -1225,24 +1231,34 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder,
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
}
-static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
+static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
{
- if (IS_G4X(dev_priv))
- return 165000;
- else if (IS_GEMINILAKE(dev_priv))
- return 594000;
- else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
- return 300000;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[encoder->port];
+ int max_tmds_clock;
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ max_tmds_clock = 594000;
+ else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
+ max_tmds_clock = 300000;
+ else if (INTEL_GEN(dev_priv) >= 5)
+ max_tmds_clock = 225000;
else
- return 225000;
+ max_tmds_clock = 165000;
+
+ if (info->max_tmds_clock)
+ max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock);
+
+ return max_tmds_clock;
}
static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
bool respect_downstream_limits,
bool force_dvi)
{
- struct drm_device *dev = intel_hdmi_to_dev(hdmi);
- int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev));
+ struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
+ int max_tmds_clock = intel_hdmi_source_max_tmds_clock(encoder);
if (respect_downstream_limits) {
struct intel_connector *connector = hdmi->attached_connector;
@@ -1337,6 +1353,12 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
if (HAS_GMCH_DISPLAY(dev_priv))
return false;
+ if (crtc_state->pipe_bpp <= 8*3)
+ return false;
+
+ if (!crtc_state->has_hdmi_sink)
+ return false;
+
/*
* HDMI 12bpc affects the clocks, so it's only possible
* when not cloning with other encoder types.
@@ -1361,7 +1383,7 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
}
}
- /* Display Wa #1139 */
+ /* Display WA #1139: glk */
if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
crtc_state->base.adjusted_mode.htotal > 5460)
return false;
@@ -1462,9 +1484,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
- if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && !force_dvi &&
- hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true, force_dvi) == MODE_OK &&
- hdmi_12bpc_possible(pipe_config)) {
+ if (hdmi_12bpc_possible(pipe_config) &&
+ hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true, force_dvi) == MODE_OK) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@@ -1493,7 +1514,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->lane_count = 4;
- if (scdc->scrambling.supported && IS_GEMINILAKE(dev_priv)) {
+ if (scdc->scrambling.supported && (INTEL_GEN(dev_priv) >= 10 ||
+ IS_GEMINILAKE(dev_priv))) {
if (scdc->scrambling.low_rates)
pipe_config->hdmi_scrambling = true;
@@ -1527,7 +1549,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
- enum port port = hdmi_to_dig_port(hdmi)->port;
+ enum port port = hdmi_to_dig_port(hdmi)->base.port;
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);
@@ -1611,12 +1633,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_hdmi_unset_edid(connector);
- if (intel_hdmi_set_edid(connector)) {
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-
- hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
+ if (intel_hdmi_set_edid(connector))
status = connector_status_connected;
- } else
+ else
status = connector_status_disconnected;
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
@@ -1627,8 +1646,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
static void
intel_hdmi_force(struct drm_connector *connector)
{
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -1638,7 +1655,6 @@ intel_hdmi_force(struct drm_connector *connector)
return;
intel_hdmi_set_edid(connector);
- hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
}
static int intel_hdmi_get_modes(struct drm_connector *connector)
@@ -1671,10 +1687,9 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- vlv_phy_pre_encoder_enable(encoder);
+ vlv_phy_pre_encoder_enable(encoder, pipe_config);
/* HDMI 1.0V-2dB */
vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
@@ -1695,7 +1710,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
{
intel_hdmi_prepare(encoder, pipe_config);
- vlv_phy_pre_pll_enable(encoder);
+ vlv_phy_pre_pll_enable(encoder, pipe_config);
}
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
@@ -1704,14 +1719,14 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
{
intel_hdmi_prepare(encoder, pipe_config);
- chv_phy_pre_pll_enable(encoder);
+ chv_phy_pre_pll_enable(encoder, pipe_config);
}
static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- chv_phy_post_pll_disable(encoder);
+ chv_phy_post_pll_disable(encoder, old_crtc_state);
}
static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
@@ -1719,7 +1734,7 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
- vlv_phy_reset_lanes(encoder);
+ vlv_phy_reset_lanes(encoder, old_crtc_state);
}
static void chv_hdmi_post_disable(struct intel_encoder *encoder,
@@ -1732,7 +1747,7 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder,
mutex_lock(&dev_priv->sb_lock);
/* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, true);
+ chv_data_lane_soft_reset(encoder, old_crtc_state, true);
mutex_unlock(&dev_priv->sb_lock);
}
@@ -1745,7 +1760,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- chv_phy_pre_encoder_enable(encoder);
+ chv_phy_pre_encoder_enable(encoder, pipe_config);
/* FIXME: Program the support xxx V-dB */
/* Use 800mV-0dB */
@@ -2004,7 +2019,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_encoder->port;
DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
port_name(port));
@@ -2022,7 +2037,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
connector->doublescan_allowed = 0;
connector->stereo_allowed = 1;
- if (IS_GEMINILAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
connector->ycbcr_420_allowed = true;
intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
@@ -2124,7 +2139,6 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
if (IS_G4X(dev_priv))
intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
- intel_dig_port->port = port;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = 4;
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index c8a48cbc2b7d..974be3defa70 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -77,43 +77,57 @@ MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
GLK_HUC_FW_MINOR, GLK_BLD_NUM)
-/**
- * intel_huc_select_fw() - selects HuC firmware for loading
- * @huc: intel_huc struct
- */
-void intel_huc_select_fw(struct intel_huc *huc)
+static void huc_fw_select(struct intel_uc_fw *huc_fw)
{
+ struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
struct drm_i915_private *dev_priv = huc_to_i915(huc);
- intel_uc_fw_init(&huc->fw, INTEL_UC_FW_TYPE_HUC);
+ GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
+
+ if (!HAS_HUC(dev_priv))
+ return;
if (i915_modparams.huc_firmware_path) {
- huc->fw.path = i915_modparams.huc_firmware_path;
- huc->fw.major_ver_wanted = 0;
- huc->fw.minor_ver_wanted = 0;
+ huc_fw->path = i915_modparams.huc_firmware_path;
+ huc_fw->major_ver_wanted = 0;
+ huc_fw->minor_ver_wanted = 0;
} else if (IS_SKYLAKE(dev_priv)) {
- huc->fw.path = I915_SKL_HUC_UCODE;
- huc->fw.major_ver_wanted = SKL_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = SKL_HUC_FW_MINOR;
+ huc_fw->path = I915_SKL_HUC_UCODE;
+ huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR;
} else if (IS_BROXTON(dev_priv)) {
- huc->fw.path = I915_BXT_HUC_UCODE;
- huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR;
+ huc_fw->path = I915_BXT_HUC_UCODE;
+ huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- huc->fw.path = I915_KBL_HUC_UCODE;
- huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;
+ huc_fw->path = I915_KBL_HUC_UCODE;
+ huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
} else if (IS_GEMINILAKE(dev_priv)) {
- huc->fw.path = I915_GLK_HUC_UCODE;
- huc->fw.major_ver_wanted = GLK_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = GLK_HUC_FW_MINOR;
+ huc_fw->path = I915_GLK_HUC_UCODE;
+ huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR;
} else {
- DRM_ERROR("No HuC firmware known for platform with HuC!\n");
- return;
+ DRM_WARN("%s: No firmware known for this platform!\n",
+ intel_uc_fw_type_repr(huc_fw->type));
}
}
/**
+ * intel_huc_init_early() - initializes HuC struct
+ * @huc: intel_huc struct
+ *
+ * On platforms with a HuC, selects the firmware to be uploaded
+ */
+void intel_huc_init_early(struct intel_huc *huc)
+{
+ struct intel_uc_fw *huc_fw = &huc->fw;
+
+ intel_uc_fw_init(huc_fw, INTEL_UC_FW_TYPE_HUC);
+ huc_fw_select(huc_fw);
+}
+
+/**
* huc_ucode_xfer() - DMA's the firmware
* @dev_priv: the drm_i915_private device
*
@@ -151,7 +165,7 @@ static int huc_ucode_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
/* Wait for DMA to finish */
- ret = wait_for((I915_READ(DMA_CTRL) & START_DMA) == 0, 100);
+ ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100);
DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
@@ -167,17 +181,17 @@ static int huc_ucode_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
* intel_huc_init_hw() - load HuC uCode to device
* @huc: intel_huc structure
*
- * Called from guc_setup() during driver loading and also after a GPU reset.
- * Be note that HuC loading must be done before GuC loading.
+ * Called from intel_uc_init_hw() during driver loading and also after a GPU
+ * reset. Note that HuC loading must be done before GuC loading.
*
* The firmware image should have already been fetched into memory by the
- * earlier call to intel_huc_init(), so here we need only check that
+ * earlier call to intel_uc_init_fw(), so here we need only check that
* it succeeded, and then transfer the image to the h/w.
*
*/
-void intel_huc_init_hw(struct intel_huc *huc)
+int intel_huc_init_hw(struct intel_huc *huc)
{
- intel_uc_fw_upload(&huc->fw, huc_ucode_xfer);
+ return intel_uc_fw_upload(&huc->fw, huc_ucode_xfer);
}
/**
@@ -191,7 +205,7 @@ void intel_huc_init_hw(struct intel_huc *huc)
* signature through intel_guc_auth_huc(). It then waits for 50ms for
* firmware verification ACK and unpins the object.
*/
-void intel_huc_auth(struct intel_huc *huc)
+int intel_huc_auth(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_i915(huc);
struct intel_guc *guc = &i915->guc;
@@ -199,14 +213,14 @@ void intel_huc_auth(struct intel_huc *huc)
int ret;
if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- return;
+ return -ENOEXEC;
vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (IS_ERR(vma)) {
- DRM_ERROR("failed to pin huc fw object %d\n",
- (int)PTR_ERR(vma));
- return;
+ ret = PTR_ERR(vma);
+ DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret);
+ return ret;
}
ret = intel_guc_auth_huc(guc,
@@ -229,4 +243,5 @@ void intel_huc_auth(struct intel_huc *huc)
out:
i915_vma_unpin(vma);
+ return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
index aaa38b9e5817..40039db59e04 100644
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ b/drivers/gpu/drm/i915/intel_huc.h
@@ -34,8 +34,8 @@ struct intel_huc {
/* HuC-specific additions */
};
-void intel_huc_select_fw(struct intel_huc *huc);
-void intel_huc_init_hw(struct intel_huc *huc);
-void intel_huc_auth(struct intel_huc *huc);
+void intel_huc_init_early(struct intel_huc *huc);
+int intel_huc_init_hw(struct intel_huc *huc);
+int intel_huc_auth(struct intel_huc *huc);
#endif
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 49fdf09f9919..ef9f91a0b0c9 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -128,22 +128,46 @@ intel_i2c_reset(struct drm_i915_private *dev_priv)
I915_WRITE(GMBUS4, 0);
}
-static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
+static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+ bool enable)
{
u32 val;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- if (!IS_PINEVIEW(dev_priv))
- return;
-
val = I915_READ(DSPCLK_GATE_D);
- if (enable)
- val |= DPCUNIT_CLOCK_GATE_DISABLE;
+ if (!enable)
+ val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
else
- val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+ val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
}
+static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ u32 val;
+
+ val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ if (!enable)
+ val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
+ else
+ val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+}
+
+static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ u32 val;
+
+ val = I915_READ(GEN9_CLKGATE_DIS_4);
+ if (!enable)
+ val |= BXT_GMBUS_GATING_DIS;
+ else
+ val &= ~BXT_GMBUS_GATING_DIS;
+ I915_WRITE(GEN9_CLKGATE_DIS_4, val);
+}
+
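All three helpers above share one read-modify-write shape: set a *_GATE_DISABLE / *_GATING_DIS bit to switch clock gating off, clear it to switch gating back on; note the inverted sense of the enable argument. A standalone sketch of that shape, where dspclk_gate_d merely models an i915 MMIO register and dis_bit stands in for the per-platform bit:

#include <stdint.h>

static uint32_t dspclk_gate_d;	/* models an i915 clock-gating register */

static void gmbus_clock_gating(uint32_t dis_bit, int enable)
{
	uint32_t val = dspclk_gate_d;		/* I915_READ(...) */

	if (!enable)
		val |= dis_bit;		/* gating off: set the DISABLE bit */
	else
		val &= ~dis_bit;	/* gating back on: clear it */

	dspclk_gate_d = val;			/* I915_WRITE(...) */
}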
static u32 get_reserved(struct intel_gmbus *bus)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
@@ -221,7 +245,10 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
struct drm_i915_private *dev_priv = bus->dev_priv;
intel_i2c_reset(dev_priv);
- intel_i2c_quirk_set(dev_priv, true);
+
+ if (IS_PINEVIEW(dev_priv))
+ pnv_gmbus_clock_gating(dev_priv, false);
+
set_data(bus, 1);
set_clock(bus, 1);
udelay(I2C_RISEFALL_TIME);
@@ -238,7 +265,9 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
set_data(bus, 1);
set_clock(bus, 1);
- intel_i2c_quirk_set(dev_priv, false);
+
+ if (IS_PINEVIEW(dev_priv))
+ pnv_gmbus_clock_gating(dev_priv, true);
}
static void
@@ -481,6 +510,13 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
int i = 0, inc, try = 0;
int ret = 0;
+ /* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
+ if (IS_GEN9_LP(dev_priv))
+ bxt_gmbus_clock_gating(dev_priv, false);
+ else if (HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv) || HAS_PCH_CNP(dev_priv))
+ pch_gmbus_clock_gating(dev_priv, false);
+
retry:
I915_WRITE_FW(GMBUS0, bus->reg0);
@@ -582,6 +618,13 @@ timeout:
ret = -EAGAIN;
out:
+ /* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
+ if (IS_GEN9_LP(dev_priv))
+ bxt_gmbus_clock_gating(dev_priv, true);
+ else if (HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv) || HAS_PCH_CNP(dev_priv))
+ pch_gmbus_clock_gating(dev_priv, true);
+
return ret;
}
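Display WA #0868 brackets the whole transfer: gating is switched off before the retry loop and switched back on at the common out: label, so success, -ENXIO, -ETIMEDOUT and -EAGAIN all restore it exactly once. The control flow, reduced to a hedged sketch in which clock_gating() and do_transfer() are stand-ins for the platform helper and the retry loop:

static void clock_gating(int enable) { (void)enable; }	/* stand-in */
static int do_transfer(void) { return 0; }		/* stand-in */

static int xfer_with_wa(void)
{
	int ret;

	clock_gating(0);	/* WA #0868: gating off for the transfer */
	ret = do_transfer();	/* every exit path falls through below */
	clock_gating(1);	/* restored exactly once */
	return ret;
}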
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e71a8cd50498..7ece2f061b9e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -136,6 +136,7 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_gem_render_state.h"
#include "intel_mocs.h"
#define RING_EXECLIST_QFULL (1 << 0x2)
@@ -153,9 +154,7 @@
#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
#define GEN8_CTX_STATUS_COMPLETED_MASK \
- (GEN8_CTX_STATUS_ACTIVE_IDLE | \
- GEN8_CTX_STATUS_PREEMPTED | \
- GEN8_CTX_STATUS_ELEMENT_SWITCH)
+ (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
#define CTX_LRI_HEADER_0 0x01
#define CTX_CONTEXT_CONTROL 0x02
@@ -220,37 +219,6 @@ static void execlists_init_reg_state(u32 *reg_state,
struct intel_ring *ring);
/**
- * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
- * @dev_priv: i915 device private
- * @enable_execlists: value of i915.enable_execlists module parameter.
- *
- * Only certain platforms support Execlists (the prerequisites being
- * support for Logical Ring Contexts and Aliasing PPGTT or better).
- *
- * Return: 1 if Execlists is supported and has to be enabled.
- */
-int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
-{
- /* On platforms with execlist available, vGPU will only
- * support execlist mode, no ring buffer mode.
- */
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
- return 1;
-
- if (INTEL_GEN(dev_priv) >= 9)
- return 1;
-
- if (enable_execlists == 0)
- return 0;
-
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
- USES_PPGTT(dev_priv))
- return 1;
-
- return 0;
-}
-
-/**
* intel_lr_context_descriptor_update() - calculate & cache the descriptor
* descriptor for a pinned context
* @ctx: Context to work on
@@ -354,7 +322,7 @@ static void unwind_wa_tail(struct drm_i915_gem_request *rq)
assert_ring_tail_valid(rq->ring, rq->tail);
}
-static void unwind_incomplete_requests(struct intel_engine_cs *engine)
+static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *rq, *rn;
struct i915_priolist *uninitialized_var(p);
@@ -385,6 +353,17 @@ static void unwind_incomplete_requests(struct intel_engine_cs *engine)
}
}
+void
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
+{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+
+ spin_lock_irq(&engine->timeline->lock);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irq(&engine->timeline->lock);
+}
+
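The rename to __unwind_incomplete_requests() follows the kernel convention that a double-underscore prefix marks the caller-holds-the-lock variant, with the plain-named wrapper taking engine->timeline->lock itself. The same pairing as a standalone sketch, with a pthread mutex standing in for the timeline spinlock:

#include <pthread.h>

static pthread_mutex_t timeline_lock = PTHREAD_MUTEX_INITIALIZER;

/* __ variant: timeline_lock must already be held by the caller. */
static void __unwind_incomplete(void)
{
	/* walk the request list and re-queue incomplete ones */
}

/* Plain variant: takes the lock, then delegates. */
static void unwind_incomplete(void)
{
	pthread_mutex_lock(&timeline_lock);
	__unwind_incomplete();
	pthread_mutex_unlock(&timeline_lock);
}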
static inline void
execlists_context_status_change(struct drm_i915_gem_request *rq,
unsigned long status)
@@ -400,6 +379,20 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
status, rq);
}
+static inline void
+execlists_context_schedule_in(struct drm_i915_gem_request *rq)
+{
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+ intel_engine_context_in(rq->engine);
+}
+
+static inline void
+execlists_context_schedule_out(struct drm_i915_gem_request *rq)
+{
+ intel_engine_context_out(rq->engine);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+}
+
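These wrappers pair the status notification with the intel_engine_context_in/out() accounting that feeds the new engine busy-stats (I915_ENGINE_SUPPORTS_STATS, set later in this series); note the mirrored ordering on the way in and out. At its core the accounting is just a nesting counter, sketched here with illustrative names:

/* Sketch of the nesting count behind intel_engine_context_in/out(). */
static int active_contexts;

static void context_in(void)  { active_contexts++; }	/* engine busy */
static void context_out(void) { active_contexts--; }	/* idle at zero */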
static void
execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
{
@@ -438,8 +431,6 @@ static inline void elsp_write(u64 desc, u32 __iomem *elsp)
static void execlists_submit_ports(struct intel_engine_cs *engine)
{
struct execlist_port *port = engine->execlists.port;
- u32 __iomem *elsp =
- engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
unsigned int n;
for (n = execlists_num_ports(&engine->execlists); n--; ) {
@@ -451,17 +442,23 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
if (rq) {
GEM_BUG_ON(count > !n);
if (!count++)
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+ execlists_context_schedule_in(rq);
port_set(&port[n], port_pack(rq, count));
desc = execlists_update_context(rq);
GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
+
+ GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%x\n",
+ engine->name, n,
+ port[n].context_id, count,
+ rq->global_seqno);
} else {
GEM_BUG_ON(!n);
desc = 0;
}
- elsp_write(desc, elsp);
+ elsp_write(desc, engine->execlists.elsp);
}
+ execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
}
static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
@@ -497,8 +494,6 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
{
struct intel_context *ce =
&engine->i915->preempt_context->engine[engine->id];
- u32 __iomem *elsp =
- engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
unsigned int n;
GEM_BUG_ON(engine->i915->preempt_context->hw_id != PREEMPT_ID);
@@ -509,15 +504,12 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
ce->ring->tail &= (ce->ring->size - 1);
ce->lrc_reg_state[CTX_RING_TAIL+1] = ce->ring->tail;
+ GEM_TRACE("%s\n", engine->name);
for (n = execlists_num_ports(&engine->execlists); --n; )
- elsp_write(0, elsp);
+ elsp_write(0, engine->execlists.elsp);
- elsp_write(ce->lrc_desc, elsp);
-}
-
-static bool can_preempt(struct intel_engine_cs *engine)
-{
- return INTEL_INFO(engine->i915)->has_logical_ring_preemption;
+ elsp_write(ce->lrc_desc, engine->execlists.elsp);
+ execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
}
static void execlists_dequeue(struct intel_engine_cs *engine)
@@ -564,10 +556,21 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* know the next preemption status we see corresponds
* to this ELSP update.
*/
+ GEM_BUG_ON(!port_count(&port[0]));
if (port_count(&port[0]) > 1)
goto unlock;
- if (can_preempt(engine) &&
+ /*
+ * If we write to ELSP a second time before the HW has had
+ * a chance to respond to the previous write, we can confuse
+ * the HW and hit "undefined behaviour". After writing to ELSP,
+ * we must then wait until we see a context-switch event from
+ * the HW to indicate that it has had a chance to respond.
+ */
+ if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
+ goto unlock;
+
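The EXECLISTS_ACTIVE_HWACK flag models that handshake in software: it is cleared on every ELSP write (see execlists_submit_ports() and inject_preempt_context() above) and set again once the CSB reports IDLE_ACTIVE or PREEMPTED. The same idea as a toy state machine, assuming a single hwack bit:

#include <stdbool.h>

static bool hwack = true;	/* HW has acked the last ELSP write */

static bool try_submit(void)
{
	if (!hwack)
		return false;	/* never write ELSP twice unacked */
	/* ... write ELSP here ... */
	hwack = false;		/* wait for a context-switch event */
	return true;
}

static void on_csb_event(bool idle_active_or_preempted)
{
	if (idle_active_or_preempted)
		hwack = true;	/* HW responded; next write is safe */
}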
+ if (HAS_LOGICAL_RING_PREEMPTION(engine->i915) &&
rb_entry(rb, struct i915_priolist, node)->priority >
max(last->priotree.priority, 0)) {
/*
@@ -690,8 +693,8 @@ unlock:
}
}
-static void
-execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
+void
+execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
{
struct execlist_port *port = execlists->port;
unsigned int num_ports = execlists_num_ports(execlists);
@@ -700,6 +703,7 @@ execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
struct drm_i915_gem_request *rq = port_request(port);
GEM_BUG_ON(!execlists->active);
+ intel_engine_context_out(rq->engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
i915_gem_request_put(rq);
@@ -718,7 +722,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
spin_lock_irqsave(&engine->timeline->lock, flags);
/* Cancel the requests on the HW and clear the ELSP tracker. */
- execlist_cancel_port_requests(execlists);
+ execlists_cancel_port_requests(execlists);
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline->requests, link) {
@@ -768,7 +772,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
*/
-static void intel_lrc_irq_handler(unsigned long data)
+static void execlists_submission_tasklet(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -826,6 +830,10 @@ static void intel_lrc_irq_handler(unsigned long data)
head = execlists->csb_head;
tail = READ_ONCE(buf[write_idx]);
}
+ GEM_TRACE("%s cs-irq head=%d [%d], tail=%d [%d]\n",
+ engine->name,
+ head, GEN8_CSB_READ_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))),
+ tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))));
while (head != tail) {
struct drm_i915_gem_request *rq;
@@ -853,16 +861,31 @@ static void intel_lrc_irq_handler(unsigned long data)
*/
status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
+ GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
+ engine->name, head,
+ status, buf[2*head + 1],
+ execlists->active);
+
+ if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
+ GEN8_CTX_STATUS_PREEMPTED))
+ execlists_set_active(execlists,
+ EXECLISTS_ACTIVE_HWACK);
+ if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
+ execlists_clear_active(execlists,
+ EXECLISTS_ACTIVE_HWACK);
+
if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
continue;
- if (status & GEN8_CTX_STATUS_ACTIVE_IDLE &&
+ /* We should never get a COMPLETED | IDLE_ACTIVE! */
+ GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
+
+ if (status & GEN8_CTX_STATUS_COMPLETE &&
buf[2*head + 1] == PREEMPT_ID) {
- execlist_cancel_port_requests(execlists);
+ GEM_TRACE("%s preempt-idle\n", engine->name);
- spin_lock_irq(&engine->timeline->lock);
- unwind_incomplete_requests(engine);
- spin_unlock_irq(&engine->timeline->lock);
+ execlists_cancel_port_requests(execlists);
+ execlists_unwind_incomplete_requests(execlists);
GEM_BUG_ON(!execlists_is_active(execlists,
EXECLISTS_ACTIVE_PREEMPT));
@@ -883,12 +906,17 @@ static void intel_lrc_irq_handler(unsigned long data)
GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
rq = port_unpack(port, &count);
+ GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x\n",
+ engine->name,
+ port->context_id, count,
+ rq ? rq->global_seqno : 0);
GEM_BUG_ON(count == 0);
if (--count == 0) {
GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
+ GEM_BUG_ON(port_isset(&port[1]) &&
+ !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
GEM_BUG_ON(!i915_gem_request_completed(rq));
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
-
+ execlists_context_schedule_out(rq);
trace_i915_gem_request_out(rq);
i915_gem_request_put(rq);
@@ -926,7 +954,7 @@ static void insert_request(struct intel_engine_cs *engine,
list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests);
if (ptr_unmask_bits(p, 1))
- tasklet_hi_schedule(&engine->execlists.irq_tasklet);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
}
static void execlists_submit_request(struct drm_i915_gem_request *request)
@@ -1060,12 +1088,34 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
spin_unlock_irq(&engine->timeline->lock);
}
+static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
+{
+ unsigned int flags;
+ int err;
+
+ /*
+ * Clear this page out of any CPU caches for coherent swap-in/out.
+ * We only want to do this on the first bind so that we do not stall
+ * on an active context (which by nature is already on the GPU).
+ */
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+ err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+ if (err)
+ return err;
+ }
+
+ flags = PIN_GLOBAL | PIN_HIGH;
+ if (ctx->ggtt_offset_bias)
+ flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
+
+ return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
+}
+
static struct intel_ring *
execlists_context_pin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
- unsigned int flags;
void *vaddr;
int ret;
@@ -1082,11 +1132,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
}
GEM_BUG_ON(!ce->state);
- flags = PIN_GLOBAL | PIN_HIGH;
- if (ctx->ggtt_offset_bias)
- flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
-
- ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN, flags);
+ ret = __context_pin(ctx, ce->state);
if (ret)
goto err;
@@ -1106,9 +1152,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
i915_ggtt_offset(ce->ring->vma);
- ce->state->obj->mm.dirty = true;
ce->state->obj->pin_global++;
-
i915_gem_context_get(ctx);
out:
return ce->ring;
@@ -1146,7 +1190,6 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct intel_context *ce = &request->ctx->engine[engine->id];
- u32 *cs;
int ret;
GEM_BUG_ON(!ce->pin_count);
@@ -1157,17 +1200,9 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
*/
request->reserved_space += EXECLISTS_REQUEST_SIZE;
- cs = intel_ring_begin(request, 0);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (!ce->initialised) {
- ret = engine->init_context(request);
- if (ret)
- return ret;
-
- ce->initialised = true;
- }
+ ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
+ if (ret)
+ return ret;
/* Note that after this point, we have committed to using
* this request as it is being used to both track the
@@ -1476,9 +1511,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
execlists->csb_head = -1;
execlists->active = 0;
+ execlists->elsp =
+ dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));
+
/* After a GPU reset, we may have requests to replay */
- if (!i915_modparams.enable_guc_submission && execlists->first)
- tasklet_schedule(&execlists->irq_tasklet);
+ if (execlists->first)
+ tasklet_schedule(&execlists->tasklet);
return 0;
}
@@ -1523,6 +1561,8 @@ static void reset_common_ring(struct intel_engine_cs *engine,
struct intel_context *ce;
unsigned long flags;
+ GEM_TRACE("%s seqno=%x\n",
+ engine->name, request ? request->global_seqno : 0);
spin_lock_irqsave(&engine->timeline->lock, flags);
/*
@@ -1534,10 +1574,10 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* guessing the missed context-switch events by looking at what
* requests were completed.
*/
- execlist_cancel_port_requests(execlists);
+ execlists_cancel_port_requests(execlists);
/* Push back any incomplete requests for replay after the reset. */
- unwind_incomplete_requests(engine);
+ __unwind_incomplete_requests(engine);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
@@ -1797,10 +1837,8 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
- *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
- *cs++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
- *cs++ = 0;
- *cs++ = request->global_seqno;
+ cs = gen8_emit_ggtt_write(cs, request->global_seqno,
+ intel_hws_seqno_address(request->engine));
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
request->tail = intel_ring_offset(request, cs);
@@ -1810,24 +1848,14 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
}
static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
-static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
+static void gen8_emit_breadcrumb_rcs(struct drm_i915_gem_request *request,
u32 *cs)
{
/* We're using qword write, seqno should be aligned to 8 bytes. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
- /* w/a for post sync ops following a GPGPU operation we
- * need a prior CS_STALL, which is emitted by the flush
- * following the batch.
- */
- *cs++ = GFX_OP_PIPE_CONTROL(6);
- *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE;
- *cs++ = intel_hws_seqno_address(request->engine);
- *cs++ = 0;
- *cs++ = request->global_seqno;
- /* We're thrashing one dword of HWS. */
- *cs++ = 0;
+ cs = gen8_emit_ggtt_write_rcs(cs, request->global_seqno,
+ intel_hws_seqno_address(request->engine));
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
request->tail = intel_ring_offset(request, cs);
@@ -1835,7 +1863,7 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
gen8_emit_wa_tail(request, cs);
}
-static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
+static const int gen8_emit_breadcrumb_rcs_sz = 8 + WA_TAIL_DWORDS;
static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
{
@@ -1868,8 +1896,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
* Tasklet cannot be active at this point due to intel_mark_active/idle
* so this is just for documentation.
*/
- if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->execlists.irq_tasklet.state)))
- tasklet_kill(&engine->execlists.irq_tasklet);
+ if (WARN_ON(test_bit(TASKLET_STATE_SCHED,
+ &engine->execlists.tasklet.state)))
+ tasklet_kill(&engine->execlists.tasklet);
dev_priv = engine->i915;
@@ -1893,7 +1922,12 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
engine->submit_request = execlists_submit_request;
engine->cancel_requests = execlists_cancel_requests;
engine->schedule = execlists_schedule;
- engine->execlists.irq_tasklet.func = intel_lrc_irq_handler;
+ engine->execlists.tasklet.func = execlists_submission_tasklet;
+
+ engine->park = NULL;
+ engine->unpark = NULL;
+
+ engine->flags |= I915_ENGINE_SUPPORTS_STATS;
}
static void
@@ -1952,8 +1986,8 @@ logical_ring_setup(struct intel_engine_cs *engine)
engine->execlists.fw_domains = fw_domains;
- tasklet_init(&engine->execlists.irq_tasklet,
- intel_lrc_irq_handler, (unsigned long)engine);
+ tasklet_init(&engine->execlists.tasklet,
+ execlists_submission_tasklet, (unsigned long)engine);
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
@@ -1991,8 +2025,8 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
engine->init_hw = gen8_init_render_ring;
engine->init_context = gen8_init_rcs_context;
engine->emit_flush = gen8_emit_flush_render;
- engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
- engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
+ engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs;
+ engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_rcs_sz;
ret = intel_engine_create_scratch(engine, PAGE_SIZE);
if (ret)
@@ -2109,7 +2143,6 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
- CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
(HAS_RESOURCE_STREAMER(dev_priv) ?
CTX_CTRL_RS_CTX_ENABLE : 0)));
CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
@@ -2186,6 +2219,7 @@ populate_lr_context(struct i915_gem_context *ctx,
struct intel_ring *ring)
{
void *vaddr;
+ u32 *regs;
int ret;
ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
@@ -2202,11 +2236,31 @@ populate_lr_context(struct i915_gem_context *ctx,
}
ctx_obj->mm.dirty = true;
+ if (engine->default_state) {
+ /*
+ * We only want to copy over the template context state;
+ * skipping over the headers reserved for GuC communication,
+ * leaving those as zero.
+ */
+ const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE;
+ void *defaults;
+
+ defaults = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (IS_ERR(defaults))
+ return PTR_ERR(defaults);
+
+ memcpy(vaddr + start, defaults + start, engine->context_size);
+ i915_gem_object_unpin_map(engine->default_state);
+ }
+
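The copy deliberately starts at LRC_HEADER_PAGES * PAGE_SIZE, so the header pages reserved for GuC communication stay zeroed while the remainder of the context image is cloned from the engine's default state. In miniature, with an illustrative page count:

#include <string.h>

#define PAGE_SIZE	4096
#define HEADER_PAGES	1	/* illustrative stand-in for LRC_HEADER_PAGES */

static void clone_context(char *dst, const char *src, size_t context_size)
{
	const size_t start = HEADER_PAGES * PAGE_SIZE;

	/* dst[0..start) stays zeroed for GuC; the template follows. */
	memcpy(dst + start, src + start, context_size);
}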
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
-
- execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
- ctx, engine, ring);
+ regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ execlists_init_reg_state(regs, ctx, engine, ring);
+ if (!engine->default_state)
+ regs[CTX_CONTEXT_CONTROL + 1] |=
+ _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
i915_gem_object_unpin_map(ctx_obj);
@@ -2259,7 +2313,6 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
ce->ring = ring;
ce->state = vma;
- ce->initialised |= engine->init_context == NULL;
return 0;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 689fde1a63a9..6d4f9b995a11 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -107,9 +107,4 @@ intel_lr_context_descriptor(struct i915_gem_context *ctx,
return ctx->engine[engine->id].lrc_desc;
}
-
-/* Execlists */
-int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
- int enable_execlists);
-
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 38572d65e46e..ef80499113ee 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -125,6 +125,8 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp, flags = 0;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS);
+
tmp = I915_READ(lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NHSYNC;
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 28a778b785ac..4e43f873c889 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -57,7 +57,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
intel_connector_update_eld_conn_type(connector);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 1d946240e55f..c58e5f53bab0 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -32,6 +32,8 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
+
+#include "intel_opregion.h"
#include "i915_drv.h"
#include "intel_drv.h"
@@ -367,7 +369,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
if (intel_encoder->type == INTEL_OUTPUT_DSI)
port = 0;
else
- port = intel_ddi_get_encoder_port(intel_encoder);
+ port = intel_encoder->port;
if (port == PORT_E) {
port = 0;
@@ -383,7 +385,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
case INTEL_OUTPUT_ANALOG:
type = DISPLAY_TYPE_CRT;
break;
- case INTEL_OUTPUT_UNKNOWN:
+ case INTEL_OUTPUT_DDI:
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_DP_MST:
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
new file mode 100644
index 000000000000..e0e437ba9e51
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright © 2008-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_OPREGION_H_
+#define _INTEL_OPREGION_H_
+
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+
+struct drm_i915_private;
+struct intel_encoder;
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct intel_opregion {
+ struct opregion_header *header;
+ struct opregion_acpi *acpi;
+ struct opregion_swsci *swsci;
+ u32 swsci_gbda_sub_functions;
+ u32 swsci_sbcb_sub_functions;
+ struct opregion_asle *asle;
+ void *rvda;
+ void *vbt_firmware;
+ const void *vbt;
+ u32 vbt_size;
+ u32 *lid_state;
+ struct work_struct asle_work;
+};
+
+#define OPREGION_SIZE (8 * 1024)
+
+#ifdef CONFIG_ACPI
+
+int intel_opregion_setup(struct drm_i915_private *dev_priv);
+void intel_opregion_register(struct drm_i915_private *dev_priv);
+void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
+int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+ bool enable);
+int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+ pci_power_t state);
+int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
+
+#else /* CONFIG_ACPI*/
+
+static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
+{
+ return 0;
+}
+
+static inline void intel_opregion_register(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+ return 0;
+}
+
+static inline int
+intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
+{
+ return 0;
+}
+
+static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_ACPI */
+
+#endif
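The new header follows the usual Kconfig stub idiom: with CONFIG_ACPI the real prototypes are declared, and without it static inline no-ops are substituted, so callers need no #ifdef of their own and the stubs compile away entirely. Stripped to its essentials:

/* Sketch of the stub idiom; opregion_setup() is an illustrative name. */
#ifdef CONFIG_ACPI
int opregion_setup(void);		/* real implementation elsewhere */
#else
static inline int opregion_setup(void)
{
	return 0;			/* no-op, optimized out */
}
#endif

/* Callers stay #ifdef-free either way: */
static inline int driver_init(void)
{
	return opregion_setup();
}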
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 1b397b41cb4f..41e9465d44a8 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -219,7 +219,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_wc(&dev_priv->ggtt.mappable,
+ regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
overlay->flip_addr,
PAGE_SIZE);
@@ -1508,7 +1508,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable,
+ regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
overlay->flip_addr);
return regs;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index adc51e452e3e..fa6831f8c004 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -543,25 +543,6 @@ static u32 pwm_get_backlight(struct intel_connector *connector)
return DIV_ROUND_UP(duty_ns * 100, CRC_PMIC_PWM_PERIOD_NS);
}
-static u32 intel_panel_get_backlight(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 val = 0;
-
- mutex_lock(&dev_priv->backlight_lock);
-
- if (panel->backlight.enabled) {
- val = panel->backlight.get(connector);
- val = intel_panel_compute_brightness(connector, val);
- }
-
- mutex_unlock(&dev_priv->backlight_lock);
-
- DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
- return val;
-}
-
static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
@@ -649,31 +630,6 @@ intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state,
panel->backlight.set(conn_state, level);
}
-/* set backlight brightness to level in range [0..max], scaling wrt hw min */
-static void intel_panel_set_backlight(const struct drm_connector_state *conn_state,
- u32 user_level, u32 user_max)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 hw_level;
-
- if (!panel->backlight.present)
- return;
-
- mutex_lock(&dev_priv->backlight_lock);
-
- WARN_ON(panel->backlight.max == 0);
-
- hw_level = scale_user_to_hw(connector, user_level, user_max);
- panel->backlight.level = hw_level;
-
- if (panel->backlight.enabled)
- intel_panel_actually_set_backlight(conn_state, hw_level);
-
- mutex_unlock(&dev_priv->backlight_lock);
-}
-
/* set backlight brightness to level in range [0..max], assuming hw min is
* respected.
*/
@@ -1182,6 +1138,50 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
}
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+static u32 intel_panel_get_backlight(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 val = 0;
+
+ mutex_lock(&dev_priv->backlight_lock);
+
+ if (panel->backlight.enabled) {
+ val = panel->backlight.get(connector);
+ val = intel_panel_compute_brightness(connector, val);
+ }
+
+ mutex_unlock(&dev_priv->backlight_lock);
+
+ DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+ return val;
+}
+
+/* set backlight brightness to level in range [0..max], scaling wrt hw min */
+static void intel_panel_set_backlight(const struct drm_connector_state *conn_state,
+ u32 user_level, u32 user_max)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 hw_level;
+
+ if (!panel->backlight.present)
+ return;
+
+ mutex_lock(&dev_priv->backlight_lock);
+
+ WARN_ON(panel->backlight.max == 0);
+
+ hw_level = scale_user_to_hw(connector, user_level, user_max);
+ panel->backlight.level = hw_level;
+
+ if (panel->backlight.enabled)
+ intel_panel_actually_set_backlight(conn_state, hw_level);
+
+ mutex_unlock(&dev_priv->backlight_lock);
+}
+
static int intel_backlight_device_update_status(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index 899839f2f7c6..1f5cd572a7ff 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -269,7 +269,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
dig_port = enc_to_dig_port(&encoder->base);
- switch (dig_port->port) {
+ switch (dig_port->base.port) {
case PORT_B:
*source = INTEL_PIPE_CRC_SOURCE_DP_B;
break;
@@ -281,7 +281,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
break;
default:
WARN(1, "nonexisting DP port %c\n",
- port_name(dig_port->port));
+ port_name(dig_port->base.port));
break;
}
break;
@@ -541,8 +541,6 @@ retry:
* completely disable it.
*/
pipe_config->ips_force_disable = enable;
- if (pipe_config->ips_enabled == enable)
- pipe_config->base.connectors_changed = true;
}
if (IS_HASWELL(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f0d0dbab4150..1db79a860b96 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -52,16 +52,13 @@
* which brings the most power savings; deeper states save more power, but
* require higher latency to switch to and wake up.
*/
-#define INTEL_RC6_ENABLE (1<<0)
-#define INTEL_RC6p_ENABLE (1<<1)
-#define INTEL_RC6pp_ENABLE (1<<2)
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (HAS_LLC(dev_priv)) {
/*
* WaCompressedResourceDisplayNewHashMode:skl,kbl
- * Display WA#0390: skl,kbl
+ * Display WA #0390: skl,kbl
*
* Must match Sampler, Pixel Back End, and Media. See
* WaCompressedResourceSamplerPbeMediaNewHashMode.
@@ -75,9 +72,6 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
- I915_WRITE(GEN8_CONFIG0,
- I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
-
/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
I915_WRITE(GEN8_CHICKEN_DCPR_1,
I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
@@ -515,38 +509,41 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
fifo_state->plane[PLANE_CURSOR] = 63;
}
-static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
+static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
{
uint32_t dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x7f;
- if (plane)
+ if (i9xx_plane == PLANE_B)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
return size;
}
-static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
+static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
{
uint32_t dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x1ff;
- if (plane)
+ if (i9xx_plane == PLANE_B)
size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
size >>= 1; /* Convert to cachelines */
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
return size;
}
-static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
+static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
{
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -554,9 +551,8 @@ static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A",
- size);
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
return size;
}
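All three helpers decode the shared DSPARB register: on i9xx the low bits give where plane B's allocation starts, which is also plane A's size since A occupies the FIFO from slot 0, and the CSTART field gives where the following allocation begins, so plane B's size is the difference between the two. A runnable worked example with an illustrative register value, assuming the 7-bit fields used above:

#include <stdint.h>
#include <stdio.h>

#define CSTART_SHIFT	7	/* illustrative stand-in for DSPARB_CSTART_SHIFT */

int main(void)
{
	uint32_t dsparb = (96u << CSTART_SHIFT) | 48u;	/* example value */
	int size_a = dsparb & 0x7f;				/* plane A: 48 */
	int size_b = ((dsparb >> CSTART_SHIFT) & 0x7f) - size_a; /* 96 - 48 */

	printf("FIFO size - A: %d, B: %d\n", size_a, size_b);
	return 0;
}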
@@ -922,7 +918,7 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
* and the size of 8 whole lines. This adjustment is always performed
* in the actual pixel depth regardless of whether FBC is enabled or not."
*/
-static int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
+static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
int tlb_miss = fifo_size * 64 - width * cpp * 8;
@@ -1099,8 +1095,8 @@ static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
- int clock, htotal, cpp, width, wm;
- int latency = dev_priv->wm.pri_latency[level] * 10;
+ unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
+ unsigned int clock, htotal, cpp, width, wm;
if (latency == 0)
return USHRT_MAX;
@@ -1139,7 +1135,7 @@ static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
level == G4X_WM_LEVEL_NORMAL) {
wm = intel_wm_method1(clock, cpp, latency);
} else {
- int small, large;
+ unsigned int small, large;
small = intel_wm_method1(clock, cpp, latency);
large = intel_wm_method2(clock, htotal, width, cpp, latency);
@@ -1152,7 +1148,7 @@ static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
wm = DIV_ROUND_UP(wm, 64) + 2;
- return min_t(int, wm, USHRT_MAX);
+ return min_t(unsigned int, wm, USHRT_MAX);
}
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
@@ -1403,17 +1399,29 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
static int g4x_compute_intermediate_wm(struct drm_device *dev,
struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *new_crtc_state)
{
- struct g4x_wm_state *intermediate = &crtc_state->wm.g4x.intermediate;
- const struct g4x_wm_state *optimal = &crtc_state->wm.g4x.optimal;
- const struct g4x_wm_state *active = &crtc->wm.active.g4x;
+ struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
+ const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(intel_state, crtc);
+ const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
enum plane_id plane_id;
+ if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
+ *intermediate = *optimal;
+
+ intermediate->cxsr = false;
+ intermediate->hpll_en = false;
+ goto out;
+ }
+
intermediate->cxsr = optimal->cxsr && active->cxsr &&
- !crtc_state->disable_cxsr;
+ !new_crtc_state->disable_cxsr;
intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
- !crtc_state->disable_cxsr;
+ !new_crtc_state->disable_cxsr;
intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
for_each_plane_id_on_crtc(crtc, plane_id) {
@@ -1455,12 +1463,13 @@ static int g4x_compute_intermediate_wm(struct drm_device *dev,
WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
intermediate->fbc_en && intermediate->hpll_en);
+out:
/*
* If our intermediate WM are identical to the final WM, then we can
* omit the post-vblank programming; only update if it's different.
*/
if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
- crtc_state->wm.need_postvbl_update = true;
+ new_crtc_state->wm.need_postvbl_update = true;
return 0;
}
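Both the g4x and vlv variants now take their notion of the active watermarks from the old CRTC state in the atomic transaction rather than from crtc->wm.active, and short-circuit to *intermediate = *optimal (with CxSR forced off) when the pipe is inactive or a full modeset is pending. The merge rule itself, as a sketch:

#include <stdbool.h>

struct wm { bool cxsr; };

/* Intermediate WMs must be safe under both the old and new config. */
static struct wm compute_intermediate(struct wm old_optimal,
				      struct wm new_optimal,
				      bool modeset, bool disable_cxsr)
{
	struct wm mid = new_optimal;

	if (modeset) {
		mid.cxsr = false;	/* no self-refresh across a modeset */
		return mid;
	}

	/* CxSR only if both states allow it and nothing vetoed it. */
	mid.cxsr = old_optimal.cxsr && new_optimal.cxsr && !disable_cxsr;
	return mid;
}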
@@ -1596,7 +1605,7 @@ static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
- int clock, htotal, cpp, width, wm;
+ unsigned int clock, htotal, cpp, width, wm;
if (dev_priv->wm.pri_latency[level] == 0)
return USHRT_MAX;
@@ -1622,7 +1631,7 @@ static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
dev_priv->wm.pri_latency[level] * 10);
}
- return min_t(int, wm, USHRT_MAX);
+ return min_t(unsigned int, wm, USHRT_MAX);
}
static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
@@ -2023,16 +2032,27 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
static int vlv_compute_intermediate_wm(struct drm_device *dev,
struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *new_crtc_state)
{
- struct vlv_wm_state *intermediate = &crtc_state->wm.vlv.intermediate;
- const struct vlv_wm_state *optimal = &crtc_state->wm.vlv.optimal;
- const struct vlv_wm_state *active = &crtc->wm.active.vlv;
+ struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
+ const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(intel_state, crtc);
+ const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
int level;
+ if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
+ *intermediate = *optimal;
+
+ intermediate->cxsr = false;
+ goto out;
+ }
+
intermediate->num_levels = min(optimal->num_levels, active->num_levels);
intermediate->cxsr = optimal->cxsr && active->cxsr &&
- !crtc_state->disable_cxsr;
+ !new_crtc_state->disable_cxsr;
for (level = 0; level < intermediate->num_levels; level++) {
enum plane_id plane_id;
@@ -2051,12 +2071,13 @@ static int vlv_compute_intermediate_wm(struct drm_device *dev,
vlv_invalidate_wms(crtc, intermediate, level);
+out:
/*
* If our intermediate WM are identical to the final WM, then we can
* omit the post-vblank programming; only update if it's different.
*/
if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
- crtc_state->wm.need_postvbl_update = true;
+ new_crtc_state->wm.need_postvbl_update = true;
return 0;
}
@@ -2255,8 +2276,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
else
wm_info = &i830_a_wm_info;
- fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
- crtc = intel_get_crtc_for_plane(dev_priv, 0);
+ fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A);
+ crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode =
&crtc->config->base.adjusted_mode;
@@ -2282,8 +2303,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
if (IS_GEN2(dev_priv))
wm_info = &i830_bc_wm_info;
- fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
- crtc = intel_get_crtc_for_plane(dev_priv, 1);
+ fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
+ crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode =
&crtc->config->base.adjusted_mode;
@@ -2395,7 +2416,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
adjusted_mode = &crtc->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
&i845_wm_info,
- dev_priv->display.get_fifo_size(dev_priv, 0),
+ dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
4, pessimal_latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
@@ -3924,6 +3945,7 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct drm_crtc_state *crtc_state = &cstate->base;
struct drm_atomic_state *state = crtc_state->state;
struct drm_plane *plane;
@@ -3966,7 +3988,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
crtc_clock = crtc_state->adjusted_mode.crtc_clock;
dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
- if (IS_GEMINILAKE(to_i915(intel_crtc->base.dev)))
+ if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
dotclk *= 2;
pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale);
@@ -6392,29 +6414,8 @@ static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_CONTROL, 0);
}
-static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
-{
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
- mode = GEN6_RC_CTL_RC6_ENABLE;
- else
- mode = 0;
- }
- if (HAS_RC6p(dev_priv))
- DRM_DEBUG_DRIVER("Enabling RC6 states: "
- "RC6 %s RC6p %s RC6pp %s\n",
- onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
- onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
- onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
-
- else
- DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
- onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
-}
-
static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool enable_rc6 = true;
unsigned long rc6_ctx_base;
u32 rc_ctl;
@@ -6439,9 +6440,8 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
* for this check.
*/
rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
- if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
- (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
- ggtt->stolen_reserved_size))) {
+ if (!((rc6_ctx_base >= dev_priv->dsm_reserved.start) &&
+ (rc6_ctx_base + PAGE_SIZE < dev_priv->dsm_reserved.end))) {
DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
enable_rc6 = false;
}
@@ -6474,42 +6474,30 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
return enable_rc6;
}
-int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
+static bool sanitize_rc6(struct drm_i915_private *i915)
{
- /* No RC6 before Ironlake and code is gone for ilk. */
- if (INTEL_INFO(dev_priv)->gen < 6)
- return 0;
+ struct intel_device_info *info = mkwrite_device_info(i915);
- if (!enable_rc6)
- return 0;
+ /* Powersaving is controlled by the host when inside a VM */
+ if (intel_vgpu_active(i915))
+ info->has_rc6 = 0;
- if (IS_GEN9_LP(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
+ if (info->has_rc6 &&
+ IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(i915)) {
DRM_INFO("RC6 disabled by BIOS\n");
- return 0;
- }
-
- /* Respect the kernel parameter if it is set */
- if (enable_rc6 >= 0) {
- int mask;
-
- if (HAS_RC6p(dev_priv))
- mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
- INTEL_RC6pp_ENABLE;
- else
- mask = INTEL_RC6_ENABLE;
-
- if ((enable_rc6 & mask) != enable_rc6)
- DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
- "(requested %d, valid %d)\n",
- enable_rc6 & mask, enable_rc6, mask);
-
- return enable_rc6 & mask;
+ info->has_rc6 = 0;
}
- if (IS_IVYBRIDGE(dev_priv))
- return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+ /*
+ * We assume that we do not have any deep rc6 levels if we don't
+ * have the previous rc6 level supported, i.e. we use HAS_RC6()
+ * as the initial coarse check for rc6 in general, moving on to
+ * progressively finer/deeper levels.
+ */
+ if (!info->has_rc6 && info->has_rc6p)
+ info->has_rc6p = 0;
- return INTEL_RC6_ENABLE;
+ return info->has_rc6;
}
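sanitize_rc6() now demotes capability bits in the device info instead of deriving a mask from a module parameter: running under a vGPU or with a bad BIOS setup clears has_rc6, and since the RC6 levels are cumulative, has_rc6p cannot survive without has_rc6. The degradation order as a standalone sketch:

#include <stdbool.h>

struct rc6_info { bool has_rc6, has_rc6p; };

static bool sanitize_rc6(struct rc6_info *info, bool vgpu, bool bios_ok)
{
	if (vgpu)
		info->has_rc6 = false;		/* host owns powersaving */

	if (info->has_rc6 && !bios_ok)
		info->has_rc6 = false;		/* RC6 disabled by BIOS */

	/* Deeper levels require the shallower one to be present. */
	if (!info->has_rc6)
		info->has_rc6p = false;

	return info->has_rc6;
}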
static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
@@ -6579,9 +6567,10 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- /* Program defaults and thresholds for RPS*/
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
+ /* Program defaults and thresholds for RPS */
+ if (IS_GEN9(dev_priv))
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
/* 1 second timeout */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
@@ -6601,7 +6590,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 rc6_mode, rc6_mask = 0;
+ u32 rc6_mode;
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
@@ -6614,12 +6603,19 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_CONTROL, 0);
/* 2b: Program RC6 thresholds.*/
-
- /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
- if (IS_SKYLAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10) {
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+ } else if (IS_SKYLAKE(dev_priv)) {
+ /*
+ * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
+ * when CPG is enabled
+ */
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
- else
+ } else {
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+ }
+
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_engine(engine, dev_priv, id)
@@ -6635,9 +6631,6 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
/* 3a: Enable RC6 */
- if (intel_rc6_enabled() & INTEL_RC6_ENABLE)
- rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
- DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
/* WaRsUseTimeoutMode:cnl (pre-prod) */
@@ -6647,7 +6640,9 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
rc6_mode = GEN6_RC_CTL_EI_MODE(1);
I915_WRITE(GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE | rc6_mode | rc6_mask);
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ rc6_mode);
/*
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
@@ -6656,8 +6651,8 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
I915_WRITE(GEN9_PG_ENABLE, 0);
else
- I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
- (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
+ I915_WRITE(GEN9_PG_ENABLE,
+ GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -6666,7 +6661,6 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- uint32_t rc6_mask = 0;
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
@@ -6688,13 +6682,11 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
/* 3: Enable RC6 */
- if (intel_rc6_enabled() & INTEL_RC6_ENABLE)
- rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
- intel_print_rc6_info(dev_priv, rc6_mask);
- I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
- GEN7_RC_CTL_TO_MODE |
- rc6_mask);
+ I915_WRITE(GEN6_RC_CONTROL,
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN7_RC_CTL_TO_MODE |
+ GEN6_RC_CTL_RC6_ENABLE);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -6743,9 +6735,8 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 rc6vids, rc6_mask = 0;
+ u32 rc6vids, rc6_mask;
u32 gtfifodbg;
- int rc6_mode;
int ret;
I915_WRITE(GEN6_RC_STATE, 0);
@@ -6780,22 +6771,12 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
- /* Check if we are enabling RC6 */
- rc6_mode = intel_rc6_enabled();
- if (rc6_mode & INTEL_RC6_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
-
/* We don't use those on Haswell */
- if (!IS_HASWELL(dev_priv)) {
- if (rc6_mode & INTEL_RC6p_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
-
- if (rc6_mode & INTEL_RC6pp_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
- }
-
- intel_print_rc6_info(dev_priv, rc6_mask);
-
+ rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+ if (HAS_RC6p(dev_priv))
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+ if (HAS_RC6pp(dev_priv))
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
GEN6_RC_CTL_EI_MODE(1) |
@@ -7037,7 +7018,7 @@ static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
- WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
+ WARN_ON(pctx_addr != dev_priv->dsm.start +
dev_priv->vlv_pctx->stolen->start);
}
@@ -7052,16 +7033,15 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned long pctx_paddr, paddr;
+ resource_size_t pctx_paddr, paddr;
+ resource_size_t pctx_size = 32*1024;
u32 pcbr;
- int pctx_size = 32*1024;
pcbr = I915_READ(VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
- paddr = (dev_priv->mm.stolen_base +
- (ggtt->stolen_size - pctx_size));
+ paddr = dev_priv->dsm.end + 1 - pctx_size;
+ GEM_BUG_ON(paddr > U32_MAX);
pctx_paddr = (paddr & (~4095));
I915_WRITE(VLV_PCBR, pctx_paddr);
@@ -7073,16 +7053,16 @@ static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *pctx;
- unsigned long pctx_paddr;
+ resource_size_t pctx_paddr;
+ resource_size_t pctx_size = 24*1024;
u32 pcbr;
- int pctx_size = 24*1024;
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
/* BIOS set it up already, grab the pre-alloc'd space */
- int pcbr_offset;
+ resource_size_t pcbr_offset;
- pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
+ pcbr_offset = (pcbr & (~4095)) - dev_priv->dsm.start;
pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
pcbr_offset,
I915_GTT_OFFSET_NONE,
@@ -7106,7 +7086,11 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
goto out;
}
- pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
+ GEM_BUG_ON(range_overflows_t(u64,
+ dev_priv->dsm.start,
+ pctx->stolen->start,
+ U32_MAX));
+ pctx_paddr = dev_priv->dsm.start + pctx->stolen->start;
I915_WRITE(VLV_PCBR, pctx_paddr);
out:
@@ -7238,7 +7222,7 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 gtfifodbg, rc6_mode = 0, pcbr;
+ u32 gtfifodbg, rc6_mode, pcbr;
gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
GT_FIFO_FREE_ENTRIES_CHV);
@@ -7279,10 +7263,9 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
pcbr = I915_READ(VLV_PCBR);
/* 3: Enable RC6 */
- if ((intel_rc6_enabled() & INTEL_RC6_ENABLE) &&
- (pcbr >> VLV_PCBR_ADDR_SHIFT))
+ rc6_mode = 0;
+ if (pcbr >> VLV_PCBR_ADDR_SHIFT)
rc6_mode = GEN7_RC_CTL_TO_MODE;
-
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -7334,7 +7317,7 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 gtfifodbg, rc6_mode = 0;
+ u32 gtfifodbg;
valleyview_check_pctx(dev_priv);
@@ -7367,12 +7350,8 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
- if (intel_rc6_enabled() & INTEL_RC6_ENABLE)
- rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
-
- intel_print_rc6_info(dev_priv, rc6_mode);
-
- I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+ I915_WRITE(GEN6_RC_CONTROL,
+ GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -7899,12 +7878,11 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
* RPM depends on RC6 to save restore the GT HW context, so make RC6 a
* requirement.
*/
- if (!i915_modparams.enable_rc6) {
+ if (!sanitize_rc6(dev_priv)) {
DRM_INFO("RC6 disabled, disabling runtime PM support\n");
intel_runtime_pm_get(dev_priv);
}
- mutex_lock(&dev_priv->drm.struct_mutex);
mutex_lock(&dev_priv->pcu_lock);
/* Initialize RPS limits (for userspace) */
@@ -7946,9 +7924,6 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
rps->boost_freq = rps->max_freq;
mutex_unlock(&dev_priv->pcu_lock);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- intel_autoenable_gt_powersave(dev_priv);
}
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
@@ -7956,7 +7931,7 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev_priv))
valleyview_cleanup_gt_powersave(dev_priv);
- if (!i915_modparams.enable_rc6)
+ if (!HAS_RC6(dev_priv))
intel_runtime_pm_put(dev_priv);
}
@@ -7973,9 +7948,6 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) < 6)
return;
- if (cancel_delayed_work_sync(&dev_priv->gt_pm.autoenable_work))
- intel_runtime_pm_put(dev_priv);
-
/* gen6_rps_idle() will be called later to disable interrupts */
}
@@ -8126,7 +8098,8 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->pcu_lock);
- intel_enable_rc6(dev_priv);
+ if (HAS_RC6(dev_priv))
+ intel_enable_rc6(dev_priv);
intel_enable_rps(dev_priv);
if (HAS_LLC(dev_priv))
intel_enable_llc_pstate(dev_priv);
@@ -8134,65 +8107,6 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->pcu_lock);
}
-static void __intel_autoenable_gt_powersave(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work,
- typeof(*dev_priv),
- gt_pm.autoenable_work.work);
- struct intel_engine_cs *rcs;
- struct drm_i915_gem_request *req;
-
- rcs = dev_priv->engine[RCS];
- if (rcs->last_retired_context)
- goto out;
-
- if (!rcs->init_context)
- goto out;
-
- mutex_lock(&dev_priv->drm.struct_mutex);
-
- req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
- if (IS_ERR(req))
- goto unlock;
-
- if (!i915_modparams.enable_execlists && i915_switch_context(req) == 0)
- rcs->init_context(req);
-
- /* Mark the device busy, calling intel_enable_gt_powersave() */
- i915_add_request(req);
-
-unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
-out:
- intel_runtime_pm_put(dev_priv);
-}
-
-void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
-{
- if (IS_IRONLAKE_M(dev_priv)) {
- ironlake_enable_drps(dev_priv);
- intel_init_emon(dev_priv);
- } else if (INTEL_INFO(dev_priv)->gen >= 6) {
- /*
- * PCU communication is slow and this doesn't need to be
- * done at any specific time, so do this out of our fast path
- * to make resume and init faster.
- *
- * We depend on the HW RC6 power context save/restore
- * mechanism when entering D3 through runtime PM suspend. So
- * disable RPM until RPS/RC6 is properly setup. We can only
- * get here via the driver load/system resume/runtime resume
- * paths, so the _noresume version is enough (and in case of
- * runtime resume it's necessary).
- */
- if (queue_delayed_work(dev_priv->wq,
- &dev_priv->gt_pm.autoenable_work,
- round_jiffies_up_relative(HZ)))
- intel_runtime_pm_get_noresume(dev_priv);
- }
-}
-
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
/*
@@ -8504,13 +8418,14 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
if (!HAS_PCH_CNP(dev_priv))
return;
- /* Wa #1181 */
+ /* Display WA #1181: cnp */
I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
CNP_PWM_CGE_GATING_DISABLE);
}
static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ u32 val;
cnp_init_clock_gating(dev_priv);
	/* This is not a WA. Enable it for better image quality */
@@ -8525,11 +8440,18 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_FBC_MEMORY_WAKE);
+ val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
+ /* ReadHitWriteOnlyDisable:cnl */
+ val |= RCCUNIT_CLKGATE_DIS;
/* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
- I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
- I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
- SARBUNIT_CLKGATE_DIS);
+ val |= SARBUNIT_CLKGATE_DIS;
+ I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
+
+ /* WaDisableVFclkgate:cnl */
+ val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
+ val |= VFUNIT_CLKGATE_DIS;
+ I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
}
static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9324,7 +9246,7 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
ret = 0;
goto out;
}
- ret = _wait_for(COND, timeout_base_ms * 1000, 10);
+ ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
if (!ret)
goto out;
@@ -9416,8 +9338,6 @@ void intel_pm_setup(struct drm_i915_private *dev_priv)
{
mutex_init(&dev_priv->pcu_lock);
- INIT_DELAYED_WORK(&dev_priv->gt_pm.autoenable_work,
- __intel_autoenable_gt_powersave);
atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
dev_priv->runtime_pm.suspended = false;
@@ -9428,12 +9348,13 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
const i915_reg_t reg)
{
u32 lower, upper, tmp;
+ unsigned long flags;
int loop = 2;
	/* The registers accessed do not need forcewake. We borrow the
	 * uncore lock to prevent concurrent access to the range of
	 * registers.
	 */
- spin_lock_irq(&dev_priv->uncore.lock);
+ spin_lock_irqsave(&dev_priv->uncore.lock, flags);
/* vlv and chv residency counters are 40 bits in width.
* With a control bit, we can choose between upper or lower
@@ -9464,39 +9385,51 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
* now.
*/
- spin_unlock_irq(&dev_priv->uncore.lock);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
return lower | (u64)upper << 8;
}
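
/*
 * Editorial sketch: the residency counter is 40 bits wide and read
 * through two overlapping 32-bit windows -- "lower" is assumed to
 * report bits [31:0] and "upper" bits [39:8], selected by the control
 * bit mentioned above. The read loop retries until the shared bits
 * agree, so OR-ing the windows back together is safe.
 */
static u64 vlv_recombine_sketch(u32 lower, u32 upper)
{
	return lower | (u64)upper << 8;
}
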
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
const i915_reg_t reg)
{
- u64 time_hw, units, div;
+ u64 time_hw;
+ u32 mul, div;
- if (!intel_rc6_enabled())
+ if (!HAS_RC6(dev_priv))
return 0;
- intel_runtime_pm_get(dev_priv);
-
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- units = 1000;
+ mul = 1000000;
div = dev_priv->czclk_freq;
-
time_hw = vlv_residency_raw(dev_priv, reg);
- } else if (IS_GEN9_LP(dev_priv)) {
- units = 1000;
- div = 1200; /* 833.33ns */
-
- time_hw = I915_READ(reg);
} else {
- units = 128000; /* 1.28us */
- div = 100000;
+ /* 833.33ns units on Gen9LP, 1.28us elsewhere. */
+ if (IS_GEN9_LP(dev_priv)) {
+ mul = 10000;
+ div = 12;
+ } else {
+ mul = 1280;
+ div = 1;
+ }
time_hw = I915_READ(reg);
}
- intel_runtime_pm_put(dev_priv);
- return DIV_ROUND_UP_ULL(time_hw * units, div);
+ return DIV_ROUND_UP_ULL(time_hw * mul, div);
+}
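
/*
 * Editorial sketch of the unit conversion above (czclk_freq is assumed
 * to be stored in kHz, so ticks * 1000000 / czclk_khz yields ns):
 *
 *   VLV/CHV:   ns = ticks * 1000000 / czclk_khz
 *   Gen9LP:    1 tick = 833.33 ns = 10000/12 ns -> mul = 10000, div = 12
 *   elsewhere: 1 tick = 1.28 us = 1280 ns       -> mul = 1280,  div = 1
 */
static u64 ticks_to_ns_sketch(u64 time_hw, u32 mul, u32 div)
{
	/* Open-coded DIV_ROUND_UP_ULL(time_hw * mul, div). */
	return (time_hw * mul + div - 1) / div;
}
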
+
+u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
+{
+ u32 cagf;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+ else
+ cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+
+ return cagf;
}
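
/*
 * Editorial sketch of the pattern in intel_get_cagf() above: each
 * platform reports the current actual GT frequency (CAGF) in a
 * different bitfield of RPSTAT, so the helper is a mask-then-shift
 * selected per platform.
 */
static u32 extract_cagf_sketch(u32 rpstat, u32 mask, u32 shift)
{
	return (rpstat & mask) >> shift;
}
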
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 55ea5eb3b7df..2e32615eeada 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -163,7 +163,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
[3] = 1 - 1,
[4] = DP_SET_POWER_D0,
};
- enum port port = dig_port->port;
+ enum port port = dig_port->base.port;
u32 aux_ctl;
int i;
@@ -376,7 +376,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
	 * ones. Since, by display design, the transcoder EDP is tied to
	 * port A, we can safely bail out based on port A.
*/
- if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
+ if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8da1bde442dd..e2085820b586 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -28,9 +28,12 @@
*/
#include <linux/log2.h>
+
#include <drm/drmP.h>
-#include "i915_drv.h"
#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "i915_gem_render_state.h"
#include "i915_trace.h"
#include "intel_drv.h"
@@ -337,50 +340,6 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
return 0;
}
-static int
-gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
-{
- u32 flags;
- u32 *cs;
-
- cs = intel_ring_begin(req, mode & EMIT_INVALIDATE ? 12 : 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- flags = PIPE_CONTROL_CS_STALL;
-
- if (mode & EMIT_FLUSH) {
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
- }
- if (mode & EMIT_INVALIDATE) {
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
-
- /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
- cs = gen8_emit_pipe_control(cs,
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_STALL_AT_SCOREBOARD,
- 0);
- }
-
- cs = gen8_emit_pipe_control(cs, flags,
- i915_ggtt_offset(req->engine->scratch) +
- 2 * CACHELINE_BYTES);
-
- intel_ring_advance(req, cs);
-
- return 0;
-}
-
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -424,7 +383,6 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
} else if (IS_GEN6(dev_priv)) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
- /* XXX: gen8 returns to sanity */
mmio = RING_HWS_PGA(engine->mmio_base);
}
@@ -434,13 +392,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
I915_WRITE(mmio, engine->status_page.ggtt_offset);
POSTING_READ(mmio);
- /*
- * Flush the TLB for this page
- *
- * FIXME: These two bits have disappeared on gen8, so a question
- * arises: do we still need this and if so how should we go about
- * invalidating the TLB?
- */
+ /* Flush the TLB for this page */
if (IS_GEN(dev_priv, 6, 7)) {
i915_reg_t reg = RING_INSTPM(engine->mmio_base);
@@ -480,10 +432,14 @@ static bool stop_ring(struct intel_engine_cs *engine)
}
}
- I915_WRITE_CTL(engine, 0);
+ I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));
+
I915_WRITE_HEAD(engine, 0);
I915_WRITE_TAIL(engine, 0);
+ /* The ring must be empty before it is disabled */
+ I915_WRITE_CTL(engine, 0);
+
return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}
@@ -604,8 +560,6 @@ static void reset_ring_common(struct intel_engine_cs *engine,
struct intel_context *ce = &request->ctx->engine[engine->id];
struct i915_hw_ppgtt *ppgtt;
- /* FIXME consider gen8 reset */
-
if (ce->state) {
I915_WRITE(CCID,
i915_ggtt_offset(ce->state) |
@@ -637,6 +591,7 @@ static void reset_ring_common(struct intel_engine_cs *engine,
request->ring->head = request->postfix;
} else {
engine->legacy_active_context = NULL;
+ engine->legacy_active_ppgtt = NULL;
}
}
@@ -706,62 +661,6 @@ static int init_render_ring(struct intel_engine_cs *engine)
return init_workarounds_ring(engine);
}
-static void render_ring_cleanup(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- i915_vma_unpin_and_release(&dev_priv->semaphore);
-}
-
-static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs)
-{
- struct drm_i915_private *dev_priv = req->i915;
- struct intel_engine_cs *waiter;
- enum intel_engine_id id;
-
- for_each_engine(waiter, dev_priv, id) {
- u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
- if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
- continue;
-
- *cs++ = GFX_OP_PIPE_CONTROL(6);
- *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_CS_STALL;
- *cs++ = lower_32_bits(gtt_offset);
- *cs++ = upper_32_bits(gtt_offset);
- *cs++ = req->global_seqno;
- *cs++ = 0;
- *cs++ = MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id);
- *cs++ = 0;
- }
-
- return cs;
-}
-
-static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs)
-{
- struct drm_i915_private *dev_priv = req->i915;
- struct intel_engine_cs *waiter;
- enum intel_engine_id id;
-
- for_each_engine(waiter, dev_priv, id) {
- u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
- if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
- continue;
-
- *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
- *cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
- *cs++ = upper_32_bits(gtt_offset);
- *cs++ = req->global_seqno;
- *cs++ = MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id);
- *cs++ = 0;
- }
-
- return cs;
-}
-
static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
{
struct drm_i915_private *dev_priv = req->i915;
@@ -844,70 +743,6 @@ static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
req->engine->semaphore.signal(req, cs));
}
-static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
- u32 *cs)
-{
- struct intel_engine_cs *engine = req->engine;
-
- if (engine->semaphore.signal)
- cs = engine->semaphore.signal(req, cs);
-
- *cs++ = GFX_OP_PIPE_CONTROL(6);
- *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE;
- *cs++ = intel_hws_seqno_address(engine);
- *cs++ = 0;
- *cs++ = req->global_seqno;
- /* We're thrashing one dword of HWS. */
- *cs++ = 0;
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
-
- req->tail = intel_ring_offset(req, cs);
- assert_ring_tail_valid(req->ring, req->tail);
-}
-
-static const int gen8_render_emit_breadcrumb_sz = 8;
-
-/**
- * intel_ring_sync - sync the waiter to the signaller on seqno
- *
- * @waiter - ring that is waiting
- * @signaller - ring which has, or will signal
- * @seqno - seqno which the waiter will block on
- */
-
-static int
-gen8_ring_sync_to(struct drm_i915_gem_request *req,
- struct drm_i915_gem_request *signal)
-{
- struct drm_i915_private *dev_priv = req->i915;
- u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
- struct i915_hw_ppgtt *ppgtt;
- u32 *cs;
-
- cs = intel_ring_begin(req, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_SAD_GTE_SDD;
- *cs++ = signal->global_seqno;
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
- intel_ring_advance(req, cs);
-
- /* When the !RCS engines idle waiting upon a semaphore, they lose their
- * pagetables and we must reload them before executing the batch.
- * We do this on the i915_switch_context() following the wait and
- * before the dispatch.
- */
- ppgtt = req->ctx->ppgtt;
- if (ppgtt && req->engine->id != RCS)
- ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
- return 0;
-}
-
static int
gen6_ring_sync_to(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal)
@@ -1083,25 +918,6 @@ hsw_vebox_irq_disable(struct intel_engine_cs *engine)
gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
}
-static void
-gen8_irq_enable(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask |
- engine->irq_keep_mask));
- POSTING_READ_FW(RING_IMR(engine->mmio_base));
-}
-
-static void
-gen8_irq_disable(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
-}
-
static int
i965_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, u32 length,
@@ -1359,12 +1175,13 @@ static int context_pin(struct i915_gem_context *ctx)
struct i915_vma *vma = ctx->engine[RCS].state;
int ret;
- /* Clear this page out of any CPU caches for coherent swap-in/out.
+ /*
+ * Clear this page out of any CPU caches for coherent swap-in/out.
* We only want to do this on the first bind so that we do not stall
* on an active context (which by nature is already on the GPU).
*/
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+ ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
if (ret)
return ret;
}
@@ -1379,11 +1196,34 @@ alloc_context_vma(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ int err;
obj = i915_gem_object_create(i915, engine->context_size);
if (IS_ERR(obj))
return ERR_CAST(obj);
+ if (engine->default_state) {
+ void *defaults, *vaddr;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ defaults = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (IS_ERR(defaults)) {
+ err = PTR_ERR(defaults);
+ goto err_map;
+ }
+
+ memcpy(vaddr, defaults, engine->context_size);
+
+ i915_gem_object_unpin_map(engine->default_state);
+ i915_gem_object_unpin_map(obj);
+ }
+
/*
* Try to make the context utilize L3 as well as LLC.
*
@@ -1405,10 +1245,18 @@ alloc_context_vma(struct intel_engine_cs *engine)
}
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
- if (IS_ERR(vma))
- i915_gem_object_put(obj);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
return vma;
+
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
}
static struct intel_ring *
@@ -1441,20 +1289,9 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
if (ret)
goto err;
- ce->state->obj->mm.dirty = true;
ce->state->obj->pin_global++;
}
- /* The kernel context is only used as a placeholder for flushing the
- * active context. It is never used for submitting user rendering and
- * as such never requires the golden render context, and so we can skip
- * emitting it when we switch to the kernel context. This is required
- * as during eviction we cannot allocate and pin the renderstate in
- * order to initialise the context.
- */
- if (i915_gem_context_is_kernel(ctx))
- ce->initialised = true;
-
i915_gem_context_get(ctx);
out:
@@ -1548,10 +1385,194 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
intel_ring_reset(engine->buffer, 0);
}
-static int ring_request_alloc(struct drm_i915_gem_request *request)
+static inline int mi_set_context(struct drm_i915_gem_request *rq, u32 flags)
{
+ struct drm_i915_private *i915 = rq->i915;
+ struct intel_engine_cs *engine = rq->engine;
+ enum intel_engine_id id;
+ const int num_rings =
+ /* Use an extended w/a on gen7 if signalling from other rings */
+ (HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
+ INTEL_INFO(i915)->num_rings - 1 :
+ 0;
+ int len;
u32 *cs;
+ flags |= MI_MM_SPACE_GTT;
+ if (IS_HASWELL(i915))
+ /* These flags are for resource streamer on HSW+ */
+ flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
+ else
+ flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
+
+ len = 4;
+ if (IS_GEN7(i915))
+ len += 2 + (num_rings ? 4*num_rings + 6 : 0);
+
+ cs = intel_ring_begin(rq, len);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
+ if (IS_GEN7(i915)) {
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ if (num_rings) {
+ struct intel_engine_cs *signaller;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
+ for_each_engine(signaller, i915, id) {
+ if (signaller == engine)
+ continue;
+
+ *cs++ = i915_mmio_reg_offset(
+ RING_PSMI_CTL(signaller->mmio_base));
+ *cs++ = _MASKED_BIT_ENABLE(
+ GEN6_PSMI_SLEEP_MSG_DISABLE);
+ }
+ }
+ }
+
+ *cs++ = MI_NOOP;
+ *cs++ = MI_SET_CONTEXT;
+ *cs++ = i915_ggtt_offset(rq->ctx->engine[RCS].state) | flags;
+ /*
+ * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
+ * WaMiSetContext_Hang:snb,ivb,vlv
+ */
+ *cs++ = MI_NOOP;
+
+ if (IS_GEN7(i915)) {
+ if (num_rings) {
+ struct intel_engine_cs *signaller;
+ i915_reg_t last_reg = {}; /* keep gcc quiet */
+
+ *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
+ for_each_engine(signaller, i915, id) {
+ if (signaller == engine)
+ continue;
+
+ last_reg = RING_PSMI_CTL(signaller->mmio_base);
+ *cs++ = i915_mmio_reg_offset(last_reg);
+ *cs++ = _MASKED_BIT_DISABLE(
+ GEN6_PSMI_SLEEP_MSG_DISABLE);
+ }
+
+ /* Insert a delay before the next switch! */
+ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ *cs++ = i915_mmio_reg_offset(last_reg);
+ *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = MI_NOOP;
+ }
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
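
/*
 * Editorial sketch of the dword accounting behind
 * "len += 2 + (num_rings ? 4*num_rings + 6 : 0)" in mi_set_context():
 *
 *   base: MI_NOOP + MI_SET_CONTEXT + (state | flags) + MI_NOOP  = 4
 *   gen7: MI_ARB_DISABLE + MI_ARB_ENABLE                        = 2
 *   w/a:  LRI header + 2 dwords per ring (sleep-msg disable)    = 1 + 2n
 *         LRI header + 2 dwords per ring (sleep-msg re-enable)  = 1 + 2n
 *         SRM delay: opcode + reg + GGTT addr + MI_NOOP          = 4
 *                                           extra for n rings    = 4n + 6
 */
static int mi_set_context_len_sketch(bool gen7, int num_rings)
{
	int len = 4;

	if (gen7)
		len += 2 + (num_rings ? 4 * num_rings + 6 : 0);
	return len;
}
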
+
+static int remap_l3(struct drm_i915_gem_request *rq, int slice)
+{
+ u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
+ int i;
+
+ if (!remap_info)
+ return 0;
+
+ cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Note: We do not worry about the concurrent register cacheline hang
+ * here because no other code should access these registers other than
+ * at initialization time.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
+ for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
+ *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
+ *cs++ = remap_info[i];
+ }
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
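
/*
 * Editorial sketch of the ring-space request in remap_l3() above: one
 * LRI header covering GEN7_L3LOG_SIZE/4 registers at 2 dwords each
 * (offset + value), plus a trailing MI_NOOP -- i.e. the
 * "GEN7_L3LOG_SIZE/4 * 2 + 2" passed to intel_ring_begin().
 */
static unsigned int remap_l3_dwords_sketch(unsigned int l3log_bytes)
{
	unsigned int nregs = l3log_bytes / 4;

	return 1 + 2 * nregs + 1; /* LRI header + (reg, value) pairs + NOOP */
}
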
+
+static int switch_context(struct drm_i915_gem_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ struct i915_gem_context *to_ctx = rq->ctx;
+ struct i915_hw_ppgtt *to_mm =
+ to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+ struct i915_gem_context *from_ctx = engine->legacy_active_context;
+ struct i915_hw_ppgtt *from_mm = engine->legacy_active_ppgtt;
+ u32 hw_flags = 0;
+ int ret, i;
+
+ lockdep_assert_held(&rq->i915->drm.struct_mutex);
+ GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
+
+ if (to_mm != from_mm ||
+ (to_mm && intel_engine_flag(engine) & to_mm->pd_dirty_rings)) {
+ trace_switch_mm(engine, to_ctx);
+ ret = to_mm->switch_mm(to_mm, rq);
+ if (ret)
+ goto err;
+
+ to_mm->pd_dirty_rings &= ~intel_engine_flag(engine);
+ engine->legacy_active_ppgtt = to_mm;
+ hw_flags = MI_FORCE_RESTORE;
+ }
+
+ if (to_ctx->engine[engine->id].state &&
+ (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
+ GEM_BUG_ON(engine->id != RCS);
+
+ /*
+		 * The kernel context(s) are treated as pure scratch and are
+		 * not expected to retain any state (we sacrifice them during
+		 * suspend, and on resume they may be corrupted). This is ok,
+ * as nothing actually executes using the kernel context; it
+ * is purely used for flushing user contexts.
+ */
+ if (i915_gem_context_is_kernel(to_ctx))
+ hw_flags = MI_RESTORE_INHIBIT;
+
+ ret = mi_set_context(rq, hw_flags);
+ if (ret)
+ goto err_mm;
+
+ engine->legacy_active_context = to_ctx;
+ }
+
+ if (to_ctx->remap_slice) {
+ for (i = 0; i < MAX_L3_SLICES; i++) {
+ if (!(to_ctx->remap_slice & BIT(i)))
+ continue;
+
+ ret = remap_l3(rq, i);
+ if (ret)
+ goto err_ctx;
+ }
+
+ to_ctx->remap_slice = 0;
+ }
+
+ return 0;
+
+err_ctx:
+ engine->legacy_active_context = from_ctx;
+err_mm:
+ engine->legacy_active_ppgtt = from_mm;
+err:
+ return ret;
+}
+
+static int ring_request_alloc(struct drm_i915_gem_request *request)
+{
+ int ret;
+
GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
/* Flush enough space to reduce the likelihood of waiting after
@@ -1560,37 +1581,28 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
- cs = intel_ring_begin(request, 0);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
+ ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
+ if (ret)
+ return ret;
+
+ ret = switch_context(request);
+ if (ret)
+ return ret;
request->reserved_space -= LEGACY_REQUEST_SIZE;
return 0;
}
-static noinline int wait_for_space(struct drm_i915_gem_request *req,
- unsigned int bytes)
+static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_gem_request *target;
long timeout;
- lockdep_assert_held(&req->i915->drm.struct_mutex);
+ lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
if (intel_ring_update_space(ring) >= bytes)
return 0;
- /*
- * Space is reserved in the ringbuffer for finalising the request,
- * as that cannot be allowed to fail. During request finalisation,
- * reserved_space is set to 0 to stop the overallocation and the
- * assumption is that then we never need to wait (which has the
- * risk of failing with EINTR).
- *
- * See also i915_gem_request_alloc() and i915_add_request().
- */
- GEM_BUG_ON(!req->reserved_space);
-
list_for_each_entry(target, &ring->request_list, ring_link) {
/* Would completion of this request free enough space? */
if (bytes <= __intel_ring_space(target->postfix,
@@ -1614,6 +1626,22 @@ static noinline int wait_for_space(struct drm_i915_gem_request *req,
return 0;
}
+int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
+{
+ GEM_BUG_ON(bytes > ring->effective_size);
+ if (unlikely(bytes > ring->effective_size - ring->emit))
+ bytes += ring->size - ring->emit;
+
+ if (unlikely(bytes > ring->space)) {
+ int ret = wait_for_space(ring, bytes);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ GEM_BUG_ON(ring->space < bytes);
+ return 0;
+}
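
/*
 * Editorial worked example for intel_ring_wait_for_space() above, with
 * made-up numbers: ring->size = 4096, ring->effective_size = 4064,
 * ring->emit = 4000 and a request of bytes = 200. Only 64 usable bytes
 * remain before the wrap point, so the tail padding (size - emit = 96)
 * is added and we must wait until 296 bytes are free, allowing the
 * writer to pad to the end of the ring and then wrap.
 */
static unsigned int bytes_with_wrap_sketch(unsigned int bytes,
					   unsigned int size,
					   unsigned int effective_size,
					   unsigned int emit)
{
	if (bytes > effective_size - emit)
		bytes += size - emit;
	return bytes;
}
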
+
u32 *intel_ring_begin(struct drm_i915_gem_request *req,
unsigned int num_dwords)
{
@@ -1653,7 +1681,20 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req,
}
if (unlikely(total_bytes > ring->space)) {
- int ret = wait_for_space(req, total_bytes);
+ int ret;
+
+ /*
+ * Space is reserved in the ringbuffer for finalising the
+ * request, as that cannot be allowed to fail. During request
+ * finalisation, reserved_space is set to 0 to stop the
+ * overallocation and the assumption is that then we never need
+ * to wait (which has the risk of failing with EINTR).
+ *
+ * See also i915_gem_request_alloc() and i915_add_request().
+ */
+ GEM_BUG_ON(!req->reserved_space);
+
+ ret = wait_for_space(ring, total_bytes);
if (unlikely(ret))
return ERR_PTR(ret);
}
@@ -1748,8 +1789,6 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
return PTR_ERR(cs);
cmd = MI_FLUSH_DW;
- if (INTEL_GEN(req->i915) >= 8)
- cmd += 1;
/* We always require a command barrier so that subsequent
* commands, such as breadcrumb interrupts, are strictly ordered
@@ -1769,38 +1808,9 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
*cs++ = cmd;
*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
- if (INTEL_GEN(req->i915) >= 8) {
- *cs++ = 0; /* upper addr */
- *cs++ = 0; /* value */
- } else {
- *cs++ = 0;
- *cs++ = MI_NOOP;
- }
- intel_ring_advance(req, cs);
- return 0;
-}
-
-static int
-gen8_emit_bb_start(struct drm_i915_gem_request *req,
- u64 offset, u32 len,
- unsigned int dispatch_flags)
-{
- bool ppgtt = USES_PPGTT(req->i915) &&
- !(dispatch_flags & I915_DISPATCH_SECURE);
- u32 *cs;
-
- cs = intel_ring_begin(req, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* FIXME(BDW): Address space and security selectors. */
- *cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
- I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
+ *cs++ = 0;
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
-
return 0;
}
@@ -1857,8 +1867,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
return PTR_ERR(cs);
cmd = MI_FLUSH_DW;
- if (INTEL_GEN(req->i915) >= 8)
- cmd += 1;
/* We always require a command barrier so that subsequent
* commands, such as breadcrumb interrupts, are strictly ordered
@@ -1877,13 +1885,8 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
cmd |= MI_INVALIDATE_TLB;
*cs++ = cmd;
*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
- if (INTEL_GEN(req->i915) >= 8) {
- *cs++ = 0; /* upper addr */
- *cs++ = 0; /* value */
- } else {
- *cs++ = 0;
- *cs++ = MI_NOOP;
- }
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return 0;
@@ -1892,110 +1895,61 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
- struct drm_i915_gem_object *obj;
- int ret, i;
+ int i;
- if (!i915_modparams.semaphores)
+ if (!HAS_LEGACY_SEMAPHORES(dev_priv))
return;
- if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
- struct i915_vma *vma;
-
- obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
- if (IS_ERR(obj))
- goto err;
-
- vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
- if (IS_ERR(vma))
- goto err_obj;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- if (ret)
- goto err_obj;
-
- ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (ret)
- goto err_obj;
-
- dev_priv->semaphore = vma;
- }
-
- if (INTEL_GEN(dev_priv) >= 8) {
- u32 offset = i915_ggtt_offset(dev_priv->semaphore);
-
- engine->semaphore.sync_to = gen8_ring_sync_to;
- engine->semaphore.signal = gen8_xcs_signal;
-
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- u32 ring_offset;
-
- if (i != engine->id)
- ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
- else
- ring_offset = MI_SEMAPHORE_SYNC_INVALID;
-
- engine->semaphore.signal_ggtt[i] = ring_offset;
- }
- } else if (INTEL_GEN(dev_priv) >= 6) {
- engine->semaphore.sync_to = gen6_ring_sync_to;
- engine->semaphore.signal = gen6_signal;
+ GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
+ engine->semaphore.sync_to = gen6_ring_sync_to;
+ engine->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on pre-gen8
- * platform. And there is no VCS2 ring on the pre-gen8
- * platform. So the semaphore between RCS and VCS2 is
- * initialized as INVALID. Gen8 will initialize the
- * sema between VCS2 and RCS later.
- */
- for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
- static const struct {
- u32 wait_mbox;
- i915_reg_t mbox_reg;
- } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
- [RCS_HW] = {
- [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
- [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
- [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
- },
- [VCS_HW] = {
- [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
- [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
- [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
- },
- [BCS_HW] = {
- [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
- [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
- [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
- },
- [VECS_HW] = {
- [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
- [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
- [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
- },
- };
+ /*
+	 * These semaphores are only used on pre-gen8 platforms, and
+	 * there is no VCS2 ring on those platforms, so the semaphore
+	 * between RCS and VCS2 is initialized as INVALID.
+ */
+ for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
+ static const struct {
u32 wait_mbox;
i915_reg_t mbox_reg;
+ } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
+ [RCS_HW] = {
+ [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
+ [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
+ [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
+ },
+ [VCS_HW] = {
+ [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
+ [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
+ [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
+ },
+ [BCS_HW] = {
+ [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
+ [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
+ [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
+ },
+ [VECS_HW] = {
+ [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
+ [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
+ [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
+ },
+ };
+ u32 wait_mbox;
+ i915_reg_t mbox_reg;
- if (i == engine->hw_id) {
- wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
- mbox_reg = GEN6_NOSYNC;
- } else {
- wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
- mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
- }
-
- engine->semaphore.mbox.wait[i] = wait_mbox;
- engine->semaphore.mbox.signal[i] = mbox_reg;
+ if (i == engine->hw_id) {
+ wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
+ mbox_reg = GEN6_NOSYNC;
+ } else {
+ wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
+ mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
}
- }
-
- return;
-err_obj:
- i915_gem_object_put(obj);
-err:
- DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
- i915_modparams.semaphores = 0;
+ engine->semaphore.mbox.wait[i] = wait_mbox;
+ engine->semaphore.mbox.signal[i] = mbox_reg;
+ }
}
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
@@ -2003,11 +1957,7 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
{
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
- if (INTEL_GEN(dev_priv) >= 8) {
- engine->irq_enable = gen8_irq_enable;
- engine->irq_disable = gen8_irq_disable;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
- } else if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
engine->irq_enable = gen6_irq_enable;
engine->irq_disable = gen6_irq_disable;
engine->irq_seqno_barrier = gen6_seqno_barrier;
@@ -2028,17 +1978,23 @@ static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i9xx_submit_request;
engine->cancel_requests = cancel_requests;
+
+ engine->park = NULL;
+ engine->unpark = NULL;
}
static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
+ i9xx_set_default_submission(engine);
engine->submit_request = gen6_bsd_submit_request;
- engine->cancel_requests = cancel_requests;
}
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
+ /* gen8+ are only supported with execlists */
+ GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);
+
intel_ring_init_irq(dev_priv, engine);
intel_ring_init_semaphores(dev_priv, engine);
@@ -2052,26 +2008,20 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->emit_breadcrumb = i9xx_emit_breadcrumb;
engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
- if (i915_modparams.semaphores) {
+ if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
int num_rings;
engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
- if (INTEL_GEN(dev_priv) >= 8) {
- engine->emit_breadcrumb_sz += num_rings * 6;
- } else {
- engine->emit_breadcrumb_sz += num_rings * 3;
- if (num_rings & 1)
- engine->emit_breadcrumb_sz++;
- }
+ engine->emit_breadcrumb_sz += num_rings * 3;
+ if (num_rings & 1)
+ engine->emit_breadcrumb_sz++;
}
engine->set_default_submission = i9xx_set_default_submission;
- if (INTEL_GEN(dev_priv) >= 8)
- engine->emit_bb_start = gen8_emit_bb_start;
- else if (INTEL_GEN(dev_priv) >= 6)
+ if (INTEL_GEN(dev_priv) >= 6)
engine->emit_bb_start = gen6_emit_bb_start;
else if (INTEL_GEN(dev_priv) >= 4)
engine->emit_bb_start = i965_emit_bb_start;
@@ -2091,20 +2041,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
if (HAS_L3_DPF(dev_priv))
engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- if (INTEL_GEN(dev_priv) >= 8) {
- engine->init_context = intel_rcs_ctx_init;
- engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
- engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
- engine->emit_flush = gen8_render_ring_flush;
- if (i915_modparams.semaphores) {
- int num_rings;
-
- engine->semaphore.signal = gen8_rcs_signal;
-
- num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
- engine->emit_breadcrumb_sz += num_rings * 8;
- }
- } else if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;
if (IS_GEN6(dev_priv))
@@ -2123,7 +2060,6 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
engine->emit_bb_start = hsw_emit_bb_start;
engine->init_hw = init_render_ring;
- engine->cleanup = render_ring_cleanup;
ret = intel_init_ring_buffer(engine);
if (ret)
@@ -2153,8 +2089,7 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
if (IS_GEN6(dev_priv))
engine->set_default_submission = gen6_bsd_set_default_submission;
engine->emit_flush = gen6_bsd_ring_flush;
- if (INTEL_GEN(dev_priv) < 8)
- engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+ engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
} else {
engine->mmio_base = BSD_RING_BASE;
engine->emit_flush = bsd_ring_flush;
@@ -2174,8 +2109,7 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
intel_ring_default_vfuncs(dev_priv, engine);
engine->emit_flush = gen6_ring_flush;
- if (INTEL_GEN(dev_priv) < 8)
- engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+ engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
return intel_init_ring_buffer(engine);
}
@@ -2187,12 +2121,9 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
intel_ring_default_vfuncs(dev_priv, engine);
engine->emit_flush = gen6_ring_flush;
-
- if (INTEL_GEN(dev_priv) < 8) {
- engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
- engine->irq_enable = hsw_vebox_irq_enable;
- engine->irq_disable = hsw_vebox_irq_disable;
- }
+ engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+ engine->irq_enable = hsw_vebox_irq_enable;
+ engine->irq_disable = hsw_vebox_irq_disable;
return intel_init_ring_buffer(engine);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2863d5a65187..c5ff203e42d6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -6,6 +6,7 @@
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
+#include "i915_pmu.h"
#include "i915_selftest.h"
struct drm_printer;
@@ -47,16 +48,6 @@ struct intel_hw_status_page {
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
*/
-#define gen8_semaphore_seqno_size sizeof(uint64_t)
-#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
- (((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
-#define GEN8_SIGNAL_OFFSET(__ring, to) \
- (dev_priv->semaphore->node.start + \
- GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
-#define GEN8_WAIT_OFFSET(__ring, from) \
- (dev_priv->semaphore->node.start + \
- GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
-
enum intel_engine_hangcheck_action {
ENGINE_IDLE = 0,
ENGINE_WAIT,
@@ -166,7 +157,6 @@ struct i915_ctx_workarounds {
};
struct drm_i915_gem_request;
-struct intel_render_state;
/*
* Engine IDs definitions.
@@ -195,9 +185,9 @@ struct i915_priolist {
*/
struct intel_engine_execlists {
/**
- * @irq_tasklet: softirq tasklet for bottom handler
+ * @tasklet: softirq tasklet for bottom handler
*/
- struct tasklet_struct irq_tasklet;
+ struct tasklet_struct tasklet;
/**
* @default_priolist: priority list for I915_PRIORITY_NORMAL
@@ -210,6 +200,11 @@ struct intel_engine_execlists {
bool no_priolist;
/**
+ * @elsp: the ExecList Submission Port register
+ */
+ u32 __iomem *elsp;
+
+ /**
* @port: execlist port states
*
* For each hardware ELSP (ExecList Submission Port) we keep
@@ -253,6 +248,7 @@ struct intel_engine_execlists {
unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
+#define EXECLISTS_ACTIVE_HWACK 2
/**
* @port_mask: number of execlist ports - 1
@@ -290,11 +286,14 @@ struct intel_engine_execlists {
struct intel_engine_cs {
struct drm_i915_private *i915;
char name[INTEL_ENGINE_CS_MAX_NAME];
+
enum intel_engine_id id;
- unsigned int uabi_id;
unsigned int hw_id;
unsigned int guc_id;
+ u8 uabi_id;
+ u8 uabi_class;
+
u8 class;
u8 instance;
u32 context_size;
@@ -304,7 +303,7 @@ struct intel_engine_cs {
struct intel_ring *buffer;
struct intel_timeline *timeline;
- struct intel_render_state *render_state;
+ struct drm_i915_gem_object *default_state;
atomic_t irq_count;
unsigned long irq_posted;
@@ -340,12 +339,49 @@ struct intel_engine_cs {
struct timer_list hangcheck; /* detect missed interrupts */
unsigned int hangcheck_interrupts;
+ unsigned int irq_enabled;
bool irq_armed : 1;
- bool irq_enabled : 1;
I915_SELFTEST_DECLARE(bool mock : 1);
} breadcrumbs;
+ struct {
+ /**
+ * @enable: Bitmask of enable sample events on this engine.
+ *
+ * Bits correspond to sample event types, for instance
+ * I915_SAMPLE_QUEUED is bit 0 etc.
+ */
+ u32 enable;
+ /**
+ * @enable_count: Reference count for the enabled samplers.
+ *
+ * Index number corresponds to the bit number from @enable.
+ */
+ unsigned int enable_count[I915_PMU_SAMPLE_BITS];
+ /**
+ * @sample: Counter values for sampling events.
+ *
+ * Our internal timer stores the current counters in this field.
+ */
+#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
+ struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
+ /**
+ * @busy_stats: Has enablement of engine stats tracking been
+ * requested.
+ */
+ bool busy_stats;
+ /**
+ * @disable_busy_stats: Work item for busy stats disabling.
+ *
+ * Same as with @enable_busy_stats action, with the difference
+ * that we delay it in case there are rapid enable-disable
+ * actions, which can happen during tool startup (like perf
+ * stat).
+ */
+ struct delayed_work disable_busy_stats;
+ } pmu;
+
/*
* A pool of objects to use as shadow copies of client batch buffers
* when the command parser is enabled. Prevents the client from
@@ -366,6 +402,9 @@ struct intel_engine_cs {
void (*reset_hw)(struct intel_engine_cs *engine,
struct drm_i915_gem_request *req);
+ void (*park)(struct intel_engine_cs *engine);
+ void (*unpark)(struct intel_engine_cs *engine);
+
void (*set_default_submission)(struct intel_engine_cs *engine);
struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
@@ -462,18 +501,15 @@ struct intel_engine_cs {
* ie. transpose of f(x, y)
*/
struct {
- union {
#define GEN6_SEMAPHORE_LAST VECS_HW
#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK GENMASK(GEN6_SEMAPHORE_LAST, 0)
- struct {
- /* our mbox written by others */
- u32 wait[GEN6_NUM_SEMAPHORES];
- /* mboxes this ring signals to */
- i915_reg_t signal[GEN6_NUM_SEMAPHORES];
- } mbox;
- u64 signal_ggtt[I915_NUM_ENGINES];
- };
+ struct {
+ /* our mbox written by others */
+ u32 wait[GEN6_NUM_SEMAPHORES];
+ /* mboxes this ring signals to */
+ i915_reg_t signal[GEN6_NUM_SEMAPHORES];
+ } mbox;
/* AKA wait() */
int (*sync_to)(struct drm_i915_gem_request *req,
@@ -501,13 +537,16 @@ struct intel_engine_cs {
* stream (ring).
*/
struct i915_gem_context *legacy_active_context;
+ struct i915_hw_ppgtt *legacy_active_ppgtt;
/* status_notifier: list of callbacks for context-switch changes */
struct atomic_notifier_head context_status_notifier;
struct intel_engine_hangcheck hangcheck;
- bool needs_cmd_parser;
+#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
+#define I915_ENGINE_SUPPORTS_STATS BIT(1)
+ unsigned int flags;
/*
* Table of commands the command parser needs to know about
@@ -532,8 +571,50 @@ struct intel_engine_cs {
* certain bits to encode the command length in the header).
*/
u32 (*get_cmd_length_mask)(u32 cmd_header);
+
+ struct {
+ /**
+ * @lock: Lock protecting the below fields.
+ */
+ spinlock_t lock;
+ /**
+ * @enabled: Reference count indicating number of listeners.
+ */
+ unsigned int enabled;
+ /**
+ * @active: Number of contexts currently scheduled in.
+ */
+ unsigned int active;
+ /**
+ * @enabled_at: Timestamp when busy stats were enabled.
+ */
+ ktime_t enabled_at;
+ /**
+ * @start: Timestamp of the last idle to active transition.
+ *
+ * Idle is defined as active == 0, active is active > 0.
+ */
+ ktime_t start;
+ /**
+ * @total: Total time this engine was busy.
+ *
+		 * Accumulated busy time, not counting the most recent block
+		 * in cases where the engine is currently busy (active > 0).
+ */
+ ktime_t total;
+ } stats;
};
+static inline bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
+}
+
+static inline bool intel_engine_supports_stats(struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_SUPPORTS_STATS;
+}
+
static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
unsigned int bit)
@@ -555,6 +636,12 @@ execlists_is_active(const struct intel_engine_execlists *execlists,
return test_bit(bit, (unsigned long *)&execlists->active);
}
+void
+execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
+
+void
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
+
static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
@@ -624,6 +711,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
*/
#define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+#define I915_GEM_HWS_PREEMPT_INDEX 0x32
+#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
@@ -648,6 +737,7 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
+int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
unsigned int n);
@@ -776,6 +866,11 @@ static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}
+static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
+{
+ return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
+}
+
/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
@@ -846,6 +941,9 @@ unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)
+void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
+void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
+
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
@@ -864,14 +962,123 @@ static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
return batch + 6;
}
+static inline u32 *
+gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
+{
+ /* We're using qword write, offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+	/* w/a: for post-sync ops following a GPGPU operation we
+ * need a prior CS_STALL, which is emitted by the flush
+ * following the batch.
+ */
+ *cs++ = GFX_OP_PIPE_CONTROL(6);
+ *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE;
+ *cs++ = gtt_offset;
+ *cs++ = 0;
+ *cs++ = value;
+ /* We're thrashing one dword of HWS. */
+ *cs++ = 0;
+
+ return cs;
+}
+
+static inline u32 *
+gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
+{
+ /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+ GEM_BUG_ON(gtt_offset & (1 << 5));
+ /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
+}
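
/*
 * Editorial usage sketch for the two helpers above. The real callers
 * live in intel_lrc.c and are not part of this diff, so the breadcrumb
 * shape below is an assumption, not the driver's actual emitter.
 */
static u32 *emit_breadcrumb_sketch(struct drm_i915_gem_request *req, u32 *cs)
{
	/* RCS wants the PIPE_CONTROL flavour; other engines MI_FLUSH_DW. */
	cs = gen8_emit_ggtt_write_rcs(cs, req->global_seqno,
				      intel_hws_seqno_address(req->engine));
	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;
	return cs;
}
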
+
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
-void intel_engines_mark_idle(struct drm_i915_private *i915);
+bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
+
+void intel_engines_park(struct drm_i915_private *i915);
+void intel_engines_unpark(struct drm_i915_private *i915);
+
void intel_engines_reset_default_submission(struct drm_i915_private *i915);
+unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
-void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);
+__printf(3, 4)
+void intel_engine_dump(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ const char *header, ...);
+
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
+
+static inline void intel_engine_context_in(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (READ_ONCE(engine->stats.enabled) == 0)
+ return;
+
+ spin_lock_irqsave(&engine->stats.lock, flags);
+
+ if (engine->stats.enabled > 0) {
+ if (engine->stats.active++ == 0)
+ engine->stats.start = ktime_get();
+ GEM_BUG_ON(engine->stats.active == 0);
+ }
+
+ spin_unlock_irqrestore(&engine->stats.lock, flags);
+}
+
+static inline void intel_engine_context_out(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (READ_ONCE(engine->stats.enabled) == 0)
+ return;
+
+ spin_lock_irqsave(&engine->stats.lock, flags);
+
+ if (engine->stats.enabled > 0) {
+ ktime_t last;
+
+ if (engine->stats.active && --engine->stats.active == 0) {
+ /*
+			 * The decrement above brought the active context
+			 * count to zero: the GPU is now idle, so add the
+			 * most recent busy period to the running total.
+ */
+ last = ktime_sub(ktime_get(), engine->stats.start);
+
+ engine->stats.total = ktime_add(engine->stats.total,
+ last);
+ } else if (engine->stats.active == 0) {
+ /*
+			 * After turning on engine stats, a context-out may be
+			 * the first event we see, in which case we account
+			 * busy time from the moment stats gathering was
+			 * enabled.
+ */
+ last = ktime_sub(ktime_get(), engine->stats.enabled_at);
+
+ engine->stats.total = ktime_add(engine->stats.total,
+ last);
+ }
+ }
+
+ spin_unlock_irqrestore(&engine->stats.lock, flags);
+}
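
/*
 * Editorial sketch (assumption): intel_engine_get_busy_time() is only
 * declared below, not defined in this diff. A plausible reader under
 * the same lock folds in the currently-running block when active > 0:
 */
static inline ktime_t engine_busy_time_sketch(struct intel_engine_cs *engine)
{
	unsigned long flags;
	ktime_t total;

	spin_lock_irqsave(&engine->stats.lock, flags);

	total = engine->stats.total;
	if (engine->stats.active)
		total = ktime_add(total,
				  ktime_sub(ktime_get(), engine->stats.start));

	spin_unlock_irqrestore(&engine->stats.lock, flags);

	return total;
}
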
+
+int intel_enable_engine_stats(struct intel_engine_cs *engine);
+void intel_disable_engine_stats(struct intel_engine_cs *engine);
+
+ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 7e115f3927f6..d758da6156a8 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -130,6 +130,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "INIT";
case POWER_DOMAIN_MODESET:
return "MODESET";
+ case POWER_DOMAIN_GT_IRQ:
+ return "GT_IRQ";
default:
MISSING_CASE(domain);
return "?";
@@ -715,7 +717,8 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
- WARN_ON(!intel_cdclk_state_compare(&dev_priv->cdclk.hw, &cdclk_state));
+ /* Can't read out voltage_level so can't use intel_cdclk_changed() */
+ WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
gen9_assert_dbuf_enabled(dev_priv);
@@ -1714,6 +1717,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
@@ -1732,12 +1736,13 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
@@ -1794,6 +1799,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_GMBUS) | \
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 7437944b388f..2b8764897d68 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1429,6 +1429,8 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
u8 val;
bool ret;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_SDVO);
+
sdvox = I915_READ(intel_sdvo->sdvo_reg);
ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
@@ -1510,7 +1512,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
u32 temp;
intel_sdvo_set_active_outputs(intel_sdvo, 0);
@@ -1569,7 +1571,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
u32 temp;
bool input1, input2;
int i;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 4a8a5d918a83..dd485f59eb1d 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -263,13 +263,9 @@ skl_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
- PLANE_COLOR_PIPE_GAMMA_ENABLE |
- PLANE_COLOR_PIPE_CSC_ENABLE |
- PLANE_COLOR_PLANE_GAMMA_DISABLE);
- }
-
+ plane_state->color_ctl);
if (key->flags) {
I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), key->max_value);
@@ -1056,6 +1052,9 @@ intel_check_sprite_plane(struct intel_plane *plane,
state->ctl = g4x_sprite_ctl(crtc_state, state);
}
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ state->color_ctl = glk_plane_color_ctl(crtc_state, state);
+
return 0;
}
@@ -1385,7 +1384,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
}
intel_plane->pipe = pipe;
- intel_plane->plane = plane;
+ intel_plane->i9xx_plane = plane;
intel_plane->id = PLANE_SPRITE0 + plane;
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
intel_plane->check_plane = intel_check_sprite_plane;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index a79a7591b2cf..b3dabc219e6a 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -822,7 +822,7 @@ intel_enable_tv(struct intel_encoder *encoder,
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_wait_for_vblank(dev_priv,
- to_intel_crtc(encoder->base.crtc)->pipe);
+ to_intel_crtc(pipe_config->base.crtc)->pipe);
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
}
@@ -868,6 +868,8 @@ static void
intel_tv_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_TVOUT);
+
pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
@@ -980,7 +982,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
u32 tv_ctl;
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 25bd162f38d2..907deac6e3fa 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -23,8 +23,9 @@
*/
#include "intel_uc.h"
+#include "intel_guc_submission.h"
+#include "intel_guc.h"
#include "i915_drv.h"
-#include "i915_guc_submission.h"
/* Reset GuC, providing us with fresh state for both GuC and HuC.
*/
@@ -33,9 +34,9 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
int ret;
u32 guc_status;
- ret = intel_guc_reset(dev_priv);
+ ret = intel_reset_guc(dev_priv);
if (ret) {
- DRM_ERROR("GuC reset failed, ret = %d\n", ret);
+ DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
return ret;
}
@@ -47,55 +48,93 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
return ret;
}
-void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
+static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
{
- if (!HAS_GUC(dev_priv)) {
- if (i915_modparams.enable_guc_loading > 0 ||
- i915_modparams.enable_guc_submission > 0)
- DRM_INFO("Ignoring GuC options, no hardware\n");
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+ struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ int enable_guc = 0;
- i915_modparams.enable_guc_loading = 0;
- i915_modparams.enable_guc_submission = 0;
- return;
- }
+ /* Default is to enable GuC/HuC if we know their firmwares */
+ if (intel_uc_fw_is_selected(guc_fw))
+ enable_guc |= ENABLE_GUC_SUBMISSION;
+ if (intel_uc_fw_is_selected(huc_fw))
+ enable_guc |= ENABLE_GUC_LOAD_HUC;
- /* A negative value means "use platform default" */
- if (i915_modparams.enable_guc_loading < 0)
- i915_modparams.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
+ /* Any platform specific fine-tuning can be done here */
+
+ return enable_guc;
+}
- /* Verify firmware version */
- if (i915_modparams.enable_guc_loading) {
- if (HAS_HUC_UCODE(dev_priv))
- intel_huc_select_fw(&dev_priv->huc);
+/**
+ * intel_uc_sanitize_options - sanitize uC related modparam options
+ * @dev_priv: device private
+ *
+ * This function will attempt to modify the "enable_guc" option only if
+ * it was initially set to "auto" (-1). The default value for this modparam
+ * varies between platforms and is hardcoded in the driver code. Any other
+ * modparam value is only checked against the availability of the related
+ * hardware or firmware definitions.
+ */
+void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
+{
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+ struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
- if (intel_guc_fw_select(&dev_priv->guc))
- i915_modparams.enable_guc_loading = 0;
+ /* A negative value means "use platform default" */
+ if (i915_modparams.enable_guc < 0)
+ i915_modparams.enable_guc = __get_platform_enable_guc(dev_priv);
+
+ DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
+ i915_modparams.enable_guc,
+ yesno(intel_uc_is_using_guc_submission()),
+ yesno(intel_uc_is_using_huc()));
+
+ /* Verify GuC firmware availability */
+ if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) {
+ DRM_WARN("Incompatible option detected: enable_guc=%d, %s!\n",
+ i915_modparams.enable_guc,
+ !HAS_GUC(dev_priv) ? "no GuC hardware" :
+ "no GuC firmware");
}
- /* Can't enable guc submission without guc loaded */
- if (!i915_modparams.enable_guc_loading)
- i915_modparams.enable_guc_submission = 0;
+ /* Verify HuC firmware availability */
+ if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) {
+ DRM_WARN("Incompatible option detected: enable_guc=%d, %s!\n",
+ i915_modparams.enable_guc,
+ !HAS_HUC(dev_priv) ? "no HuC hardware" :
+ "no HuC firmware");
+ }
- /* A negative value means "use platform default" */
- if (i915_modparams.enable_guc_submission < 0)
- i915_modparams.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
+ /* Make sure that sanitization was done */
+ GEM_BUG_ON(i915_modparams.enable_guc < 0);
}
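
The two old booleans (enable_guc_loading and enable_guc_submission) collapse into a single enable_guc bitmask. A standalone toy decode of that mask; the flag names come from the patch, but treating them as bits 0 and 1 is an assumption about i915_params.h:

#include <stdio.h>

#define ENABLE_GUC_SUBMISSION (1 << 0)	/* assumed bit position */
#define ENABLE_GUC_LOAD_HUC   (1 << 1)	/* assumed bit position */

static void decode_enable_guc(int enable_guc)
{
	if (enable_guc < 0) {
		printf("auto: platform default picked by sanitize\n");
		return;
	}
	printf("submission=%s huc=%s\n",
	       enable_guc & ENABLE_GUC_SUBMISSION ? "yes" : "no",
	       enable_guc & ENABLE_GUC_LOAD_HUC ? "yes" : "no");
}

int main(void)
{
	decode_enable_guc(-1);	/* auto */
	decode_enable_guc(3);	/* GuC submission + HuC load */
	return 0;
}
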
void intel_uc_init_early(struct drm_i915_private *dev_priv)
{
intel_guc_init_early(&dev_priv->guc);
+ intel_huc_init_early(&dev_priv->huc);
}
void intel_uc_init_fw(struct drm_i915_private *dev_priv)
{
- intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw);
+ if (!USES_GUC(dev_priv))
+ return;
+
+ if (USES_HUC(dev_priv))
+ intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw);
+
intel_uc_fw_fetch(dev_priv, &dev_priv->guc.fw);
}
void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
{
+ if (!USES_GUC(dev_priv))
+ return;
+
intel_uc_fw_fini(&dev_priv->guc.fw);
- intel_uc_fw_fini(&dev_priv->huc.fw);
+
+ if (USES_HUC(dev_priv))
+ intel_uc_fw_fini(&dev_priv->huc.fw);
}
/**
@@ -149,30 +188,91 @@ static void guc_disable_communication(struct intel_guc *guc)
guc->send = intel_guc_send_nop;
}
-int intel_uc_init_hw(struct drm_i915_private *dev_priv)
+int intel_uc_init_wq(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ if (!USES_GUC(dev_priv))
+ return 0;
+
+ ret = intel_guc_init_wq(&dev_priv->guc);
+ if (ret) {
+ DRM_ERROR("Couldn't allocate workqueues for GuC\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void intel_uc_fini_wq(struct drm_i915_private *dev_priv)
+{
+ if (!USES_GUC(dev_priv))
+ return;
+
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
+
+ intel_guc_fini_wq(&dev_priv->guc);
+}
+
+int intel_uc_init(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
- int ret, attempts;
+ int ret;
- if (!i915_modparams.enable_guc_loading)
+ if (!USES_GUC(dev_priv))
return 0;
- guc_disable_communication(guc);
- gen9_reset_guc_interrupts(dev_priv);
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
- /* We need to notify the guc whenever we change the GGTT */
- i915_ggtt_enable_guc(dev_priv);
+ ret = intel_guc_init(guc);
+ if (ret)
+ return ret;
- if (i915_modparams.enable_guc_submission) {
+ if (USES_GUC_SUBMISSION(dev_priv)) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
*/
- ret = i915_guc_submission_init(dev_priv);
- if (ret)
- goto err_guc;
+ ret = intel_guc_submission_init(guc);
+ if (ret) {
+ intel_guc_fini(guc);
+ return ret;
+ }
}
+ return 0;
+}
+
+void intel_uc_fini(struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+
+ if (!USES_GUC(dev_priv))
+ return;
+
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
+
+ if (USES_GUC_SUBMISSION(dev_priv))
+ intel_guc_submission_fini(guc);
+
+ intel_guc_fini(guc);
+}
+
+int intel_uc_init_hw(struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+ struct intel_huc *huc = &dev_priv->huc;
+ int ret, attempts;
+
+ if (!USES_GUC(dev_priv))
+ return 0;
+
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
+
+ guc_disable_communication(guc);
+ gen9_reset_guc_interrupts(dev_priv);
+
/* init WOPCM */
I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
I915_WRITE(DMA_GUC_WOPCM_OFFSET,
@@ -192,9 +292,14 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
*/
ret = __intel_uc_reset_hw(dev_priv);
if (ret)
- goto err_submission;
+ goto err_out;
+
+ if (USES_HUC(dev_priv)) {
+ ret = intel_huc_init_hw(huc);
+ if (ret)
+ goto err_out;
+ }
- intel_huc_init_hw(&dev_priv->huc);
intel_guc_init_params(guc);
ret = intel_guc_fw_upload(guc);
if (ret == 0 || ret != -EAGAIN)
@@ -212,79 +317,67 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
if (ret)
goto err_log_capture;
- intel_huc_auth(&dev_priv->huc);
- if (i915_modparams.enable_guc_submission) {
+ if (USES_HUC(dev_priv)) {
+ ret = intel_huc_auth(huc);
+ if (ret)
+ goto err_communication;
+ }
+
+ if (USES_GUC_SUBMISSION(dev_priv)) {
if (i915_modparams.guc_log_level >= 0)
gen9_enable_guc_interrupts(dev_priv);
- ret = i915_guc_submission_enable(dev_priv);
+ ret = intel_guc_submission_enable(guc);
if (ret)
goto err_interrupts;
}
- dev_info(dev_priv->drm.dev, "GuC %s (firmware %s [version %u.%u])\n",
- i915_modparams.enable_guc_submission ? "submission enabled" :
- "loaded",
- guc->fw.path,
+ dev_info(dev_priv->drm.dev, "GuC firmware version %u.%u\n",
guc->fw.major_ver_found, guc->fw.minor_ver_found);
+ dev_info(dev_priv->drm.dev, "GuC submission %s\n",
+ enableddisabled(USES_GUC_SUBMISSION(dev_priv)));
+ dev_info(dev_priv->drm.dev, "HuC %s\n",
+ enableddisabled(USES_HUC(dev_priv)));
return 0;
/*
* We've failed to load the firmware :(
- *
- * Decide whether to disable GuC submission and fall back to
- * execlist mode, and whether to hide the error by returning
- * zero or to return -EIO, which the caller will treat as a
- * nonfatal error (i.e. it doesn't prevent driver load, but
- * marks the GPU as wedged until reset).
*/
err_interrupts:
- guc_disable_communication(guc);
gen9_disable_guc_interrupts(dev_priv);
+err_communication:
+ guc_disable_communication(guc);
err_log_capture:
guc_capture_load_err_log(guc);
-err_submission:
- if (i915_modparams.enable_guc_submission)
- i915_guc_submission_fini(dev_priv);
-err_guc:
- i915_ggtt_disable_guc(dev_priv);
-
- if (i915_modparams.enable_guc_loading > 1 ||
- i915_modparams.enable_guc_submission > 1) {
- DRM_ERROR("GuC init failed. Firmware loading disabled.\n");
- ret = -EIO;
- } else {
- DRM_NOTE("GuC init failed. Firmware loading disabled.\n");
- ret = 0;
- }
-
- if (i915_modparams.enable_guc_submission) {
- i915_modparams.enable_guc_submission = 0;
- DRM_NOTE("Falling back from GuC submission to execlist mode\n");
- }
-
- i915_modparams.enable_guc_loading = 0;
+err_out:
+ /*
+ * Note that there is no fallback as either the user explicitly asked
+ * for the GuC or the driver default was to run with the GuC enabled.
+ */
+ if (GEM_WARN_ON(ret == -EIO))
+ ret = -EINVAL;
+ dev_err(dev_priv->drm.dev, "GuC initialization failed %d\n", ret);
return ret;
}
void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
{
- guc_free_load_err_log(&dev_priv->guc);
+ struct intel_guc *guc = &dev_priv->guc;
- if (!i915_modparams.enable_guc_loading)
+ guc_free_load_err_log(guc);
+
+ if (!USES_GUC(dev_priv))
return;
- if (i915_modparams.enable_guc_submission)
- i915_guc_submission_disable(dev_priv);
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
- guc_disable_communication(&dev_priv->guc);
+ if (USES_GUC_SUBMISSION(dev_priv))
+ intel_guc_submission_disable(guc);
- if (i915_modparams.enable_guc_submission) {
- gen9_disable_guc_interrupts(dev_priv);
- i915_guc_submission_fini(dev_priv);
- }
+ guc_disable_communication(guc);
- i915_ggtt_disable_guc(dev_priv);
+ if (USES_GUC_SUBMISSION(dev_priv))
+ gen9_disable_guc_interrupts(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index e18d3bb02088..8a7249722ef1 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -26,13 +26,36 @@
#include "intel_guc.h"
#include "intel_huc.h"
+#include "i915_params.h"
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
void intel_uc_init_early(struct drm_i915_private *dev_priv);
void intel_uc_init_mmio(struct drm_i915_private *dev_priv);
void intel_uc_init_fw(struct drm_i915_private *dev_priv);
void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
+int intel_uc_init_wq(struct drm_i915_private *dev_priv);
+void intel_uc_fini_wq(struct drm_i915_private *dev_priv);
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
+int intel_uc_init(struct drm_i915_private *dev_priv);
+void intel_uc_fini(struct drm_i915_private *dev_priv);
+
+static inline bool intel_uc_is_using_guc(void)
+{
+ GEM_BUG_ON(i915_modparams.enable_guc < 0);
+ return i915_modparams.enable_guc > 0;
+}
+
+static inline bool intel_uc_is_using_guc_submission(void)
+{
+ GEM_BUG_ON(i915_modparams.enable_guc < 0);
+ return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
+}
+
+static inline bool intel_uc_is_using_huc(void)
+{
+ GEM_BUG_ON(i915_modparams.enable_guc < 0);
+ return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC;
+}
#endif
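
The inline predicates above only consult the (already sanitized) modparam; the USES_GUC()/USES_GUC_SUBMISSION()/USES_HUC() checks used throughout intel_uc.c presumably just wrap them, roughly as in this sketch (the real macros live in i915_drv.h):

#define USES_GUC(dev_priv)		intel_uc_is_using_guc()
#define USES_GUC_SUBMISSION(dev_priv)	intel_uc_is_using_guc_submission()
#define USES_HUC(dev_priv)		intel_uc_is_using_huc()
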
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index 973888e94cba..784eff9cdfc8 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -105,7 +105,7 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
}
/* now RSA */
- if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
+ if (css->key_size_dw != UOS_RSA_SCRATCH_COUNT) {
DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n",
intel_uc_fw_type_repr(uc_fw->type), css->key_size_dw);
err = -ENOEXEC;
@@ -214,7 +214,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return -EIO;
+ return -ENOEXEC;
uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("%s fw load %s\n",
@@ -299,7 +299,7 @@ void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
*
* Pretty printer for uC firmware.
*/
-void intel_uc_fw_dump(struct intel_uc_fw *uc_fw, struct drm_printer *p)
+void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
drm_printf(p, "%s firmware: %s\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index 132903669391..d5fd4609c785 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -110,12 +110,17 @@ void intel_uc_fw_init(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
uc_fw->type = type;
}
+static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
+{
+ return uc_fw->path != NULL;
+}
+
void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
struct intel_uc_fw *uc_fw);
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
int (*xfer)(struct intel_uc_fw *uc_fw,
struct i915_vma *vma));
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
-void intel_uc_fw_dump(struct intel_uc_fw *uc_fw, struct drm_printer *p);
+void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8c2ce81f01c2..89547b614aa6 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -69,17 +69,104 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
HRTIMER_MODE_REL);
}
+static inline int
+__wait_for_ack(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d,
+ const u32 ack,
+ const u32 value)
+{
+ return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value,
+ FORCEWAKE_ACK_TIMEOUT_MS);
+}
+
+static inline int
+wait_ack_clear(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d,
+ const u32 ack)
+{
+ return __wait_for_ack(i915, d, ack, 0);
+}
+
+static inline int
+wait_ack_set(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d,
+ const u32 ack)
+{
+ return __wait_for_ack(i915, d, ack, ack);
+}
+
static inline void
fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
const struct intel_uncore_forcewake_domain *d)
{
- if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
+ if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL))
DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
intel_uncore_forcewake_domain_to_str(d->id));
}
+enum ack_type {
+ ACK_CLEAR = 0,
+ ACK_SET
+};
+
+static int
+fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d,
+ const enum ack_type type)
+{
+ const u32 ack_bit = FORCEWAKE_KERNEL;
+ const u32 value = type == ACK_SET ? ack_bit : 0;
+ unsigned int pass;
+ bool ack_detected;
+
+ /*
+ * The driver's wake request may collide with the hardware's own
+ * wake requests, and that can cause the hardware to not deliver
+ * the driver's ack message.
+ *
+ * Use a fallback bit toggle to kick the gpu state machine
+ * in the hope that the original ack will be delivered along with
+ * the fallback ack.
+ *
+ * This workaround is described in HSDES #1604254524
+ */
+
+ pass = 1;
+ do {
+ wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK);
+
+ __raw_i915_write32(i915, d->reg_set,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
+ /* Give gt some time to relax before the polling frenzy */
+ udelay(10 * pass);
+ wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK);
+
+ ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value;
+
+ __raw_i915_write32(i915, d->reg_set,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
+ } while (!ack_detected && pass++ < 10);
+
+ DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
+ intel_uncore_forcewake_domain_to_str(d->id),
+ type == ACK_SET ? "set" : "clear",
+ __raw_i915_read32(i915, d->reg_ack),
+ pass);
+
+ return ack_detected ? 0 : -ETIMEDOUT;
+}
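
Back-of-the-envelope for the retry loop above: udelay(10 * pass) grows linearly with the pass number, so across the cap of 10 passes the fixed busy-waits alone sum to 10 + 20 + ... + 100 = 550 us, on top of the FORCEWAKE_ACK_TIMEOUT_MS-bounded polls on the fallback bit in each pass. That is a deliberately generous budget for what should be a rare hardware collision.
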
+
+static inline void
+fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
+{
+ if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL)))
+ return;
+
+ if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR))
+ fw_domain_wait_ack_clear(i915, d);
+}
+
static inline void
fw_domain_get(struct drm_i915_private *i915,
const struct intel_uncore_forcewake_domain *d)
@@ -88,17 +175,26 @@ fw_domain_get(struct drm_i915_private *i915,
}
static inline void
-fw_domain_wait_ack(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_set(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
+ if (wait_ack_set(i915, d, FORCEWAKE_KERNEL))
DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
intel_uncore_forcewake_domain_to_str(d->id));
}
static inline void
+fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
+{
+ if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL)))
+ return;
+
+ if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET))
+ fw_domain_wait_ack_set(i915, d);
+}
+
+static inline void
fw_domain_put(const struct drm_i915_private *i915,
const struct intel_uncore_forcewake_domain *d)
{
@@ -119,7 +215,27 @@ fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
}
for_each_fw_domain_masked(d, fw_domains, i915, tmp)
- fw_domain_wait_ack(i915, d);
+ fw_domain_wait_ack_set(i915, d);
+
+ i915->uncore.fw_domains_active |= fw_domains;
+}
+
+static void
+fw_domains_get_with_fallback(struct drm_i915_private *i915,
+ enum forcewake_domains fw_domains)
+{
+ struct intel_uncore_forcewake_domain *d;
+ unsigned int tmp;
+
+ GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
+ fw_domain_wait_ack_clear_fallback(i915, d);
+ fw_domain_get(i915, d);
+ }
+
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp)
+ fw_domain_wait_ack_set_fallback(i915, d);
i915->uncore.fw_domains_active |= fw_domains;
}
@@ -229,6 +345,7 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
+/* Note callers must have acquired the PUNIT->PMIC bus before calling this. */
static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
bool restore)
{
@@ -237,6 +354,8 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
int retry_count = 100;
enum forcewake_domains fw, active_domains;
+ iosf_mbi_assert_punit_acquired();
+
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly. Wait until all pending
* timers are run before holding.
@@ -416,14 +535,18 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
GT_FIFO_CTL_RC6_POLICY_STALL);
}
+ iosf_mbi_punit_acquire();
intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
+ iosf_mbi_punit_release();
}
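
With the new iosf_mbi_assert_punit_acquired() check, every path into intel_uncore_forcewake_reset() has to follow the same bracketing convention, as this hunk and the suspend/fini hunks below do. A minimal sketch of the pattern:

	iosf_mbi_punit_acquire();
	/* forcewake reset and/or _unlocked PMIC notifier (un)registration */
	intel_uncore_forcewake_reset(dev_priv, false);
	iosf_mbi_punit_release();
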
void intel_uncore_suspend(struct drm_i915_private *dev_priv)
{
- iosf_mbi_unregister_pmic_bus_access_notifier(
+ iosf_mbi_punit_acquire();
+ iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
&dev_priv->uncore.pmic_bus_access_nb);
intel_uncore_forcewake_reset(dev_priv, false);
+ iosf_mbi_punit_release();
}
void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
@@ -442,9 +565,6 @@ void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
- i915_modparams.enable_rc6 =
- sanitize_rc6_option(dev_priv, i915_modparams.enable_rc6);
-
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_sanitize_gt_powersave(dev_priv);
}
@@ -1148,7 +1268,8 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
}
if (INTEL_GEN(dev_priv) >= 9) {
- dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
+ dev_priv->uncore.funcs.force_wake_get =
+ fw_domains_get_with_fallback;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
@@ -1309,18 +1430,18 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
iosf_mbi_register_pmic_bus_access_notifier(
&dev_priv->uncore.pmic_bus_access_nb);
-
- i915_check_and_clear_faults(dev_priv);
}
void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
- iosf_mbi_unregister_pmic_bus_access_notifier(
- &dev_priv->uncore.pmic_bus_access_nb);
-
/* Paranoia: make sure we have disabled everything before we exit. */
intel_uncore_sanitize(dev_priv);
+
+ iosf_mbi_punit_acquire();
+ iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
+ &dev_priv->uncore.pmic_bus_access_nb);
intel_uncore_forcewake_reset(dev_priv, false);
+ iosf_mbi_punit_release();
}
static const struct reg_whitelist {
@@ -1400,10 +1521,14 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
engine->name);
- I915_WRITE_FW(RING_CTL(base), 0);
+ I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
+
I915_WRITE_FW(RING_HEAD(base), 0);
I915_WRITE_FW(RING_TAIL(base), 0);
+ /* The ring must be empty before it is disabled */
+ I915_WRITE_FW(RING_CTL(base), 0);
+
/* Check acts as a post */
if (I915_READ_FW(RING_HEAD(base)) != 0)
DRM_DEBUG_DRIVER("%s: ring head not parked\n",
@@ -1801,18 +1926,13 @@ bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
return intel_get_gpu_reset(dev_priv) != NULL;
}
-/*
- * When GuC submission is enabled, GuC manages ELSP and can initiate the
- * engine reset too. For now, fall back to full GPU reset if it is enabled.
- */
bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
{
return (dev_priv->info.has_reset_engine &&
- !dev_priv->guc.execbuf_client &&
i915_modparams.reset >= 2);
}
-int intel_guc_reset(struct drm_i915_private *dev_priv)
+int intel_reset_guc(struct drm_i915_private *dev_priv)
{
int ret;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index f225c288a121..e3d7745a9151 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -304,6 +304,10 @@ struct bdb_general_features {
#define DVO_PORT_MIPIC 23 /* 171 */
#define DVO_PORT_MIPID 24 /* 171 */
+#define HDMI_MAX_DATA_RATE_PLATFORM 0 /* 204 */
+#define HDMI_MAX_DATA_RATE_297 1 /* 204 */
+#define HDMI_MAX_DATA_RATE_165 2 /* 204 */
+
#define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33
/* DDC Bus DDI Type 155+ */
@@ -342,8 +346,8 @@ struct child_device_config {
u8 i2c_speed;
u8 dp_onboard_redriver; /* 158 */
u8 dp_ondock_redriver; /* 158 */
- u8 hdmi_level_shifter_value:4; /* 169 */
- u8 hdmi_max_data_rate:4; /* 204 */
+ u8 hdmi_level_shifter_value:5; /* 169 */
+ u8 hdmi_max_data_rate:3; /* 204 */
u16 dtd_buf_ptr; /* 161 */
u8 edidless_efp:1; /* 161 */
u8 compression_enable:1; /* 198 */
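
With hdmi_max_data_rate narrowed to the 3 bits that block version 204 actually defines, the new HDMI_MAX_DATA_RATE_* values map naturally to a TMDS clock limit. A hypothetical consumer (the helper name and return convention are illustrative, not part of this patch):

static int vbt_max_tmds_clock_khz(const struct child_device_config *child)
{
	switch (child->hdmi_max_data_rate) {
	case HDMI_MAX_DATA_RATE_297:
		return 297000;
	case HDMI_MAX_DATA_RATE_165:
		return 165000;
	case HDMI_MAX_DATA_RATE_PLATFORM:
	default:
		return 0;	/* no VBT limit; fall back to platform max */
	}
}
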
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 5cc8101bb2b1..2ea69394f428 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -27,6 +27,7 @@
#include <linux/prime_numbers.h>
#include "mock_drm.h"
+#include "i915_random.h"
static const unsigned int page_sizes[] = {
I915_GTT_PAGE_SIZE_2M,
@@ -989,17 +990,9 @@ static int gpu_write(struct i915_vma *vma,
i915_vma_unpin(batch);
i915_vma_close(batch);
- err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- goto err_request;
-
- err = i915_switch_context(rq);
- if (err)
- goto err_request;
-
- err = rq->engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- flags);
+ err = engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ flags);
if (err)
goto err_request;
@@ -1047,19 +1040,78 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
return err;
}
+static int __igt_write_huge(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *obj,
+ u64 size, u64 offset,
+ u32 dword, u32 val)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ struct i915_vma *vma;
+ int err;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_unbind(vma);
+ if (err)
+ goto out_vma_close;
+
+ err = i915_vma_pin(vma, size, 0, flags | offset);
+ if (err) {
+ /*
+ * The ggtt may have some pages reserved so
+ * refrain from erroring out.
+ */
+ if (err == -ENOSPC && i915_is_ggtt(vm))
+ err = 0;
+
+ goto out_vma_close;
+ }
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_vma_unpin;
+
+ err = gpu_write(vma, ctx, engine, dword, val);
+ if (err) {
+ pr_err("gpu-write failed at offset=%llx\n", offset);
+ goto out_vma_unpin;
+ }
+
+ err = cpu_check(obj, dword, val);
+ if (err) {
+ pr_err("cpu-check failed at offset=%llx\n", offset);
+ goto out_vma_unpin;
+ }
+
+out_vma_unpin:
+ i915_vma_unpin(vma);
+out_vma_close:
+ i915_vma_close(vma);
+
+ return err;
+}
+
static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ static struct intel_engine_cs *engines[I915_NUM_ENGINES];
struct intel_engine_cs *engine;
- struct i915_vma *vma;
- unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
unsigned int max_page_size;
unsigned int id;
u64 max;
u64 num;
u64 size;
+ int *order;
+ int i, n;
int err = 0;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -1071,78 +1123,56 @@ static int igt_write_huge(struct i915_gem_context *ctx,
max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
max = div_u64((vm->total - size), max_page_size);
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
+ n = 0;
for_each_engine(engine, i915, id) {
- IGT_TIMEOUT(end_time);
-
if (!intel_engine_can_store_dword(engine)) {
- pr_info("store-dword-imm not supported on engine=%u\n",
- id);
+ pr_info("store-dword-imm not supported on engine=%u\n", id);
continue;
}
+ engines[n++] = engine;
+ }
- /*
- * Try various offsets until we timeout -- we want to avoid
- * issues hidden by effectively always using offset = 0.
- */
- for_each_prime_number_from(num, 0, max) {
- u64 offset = num * max_page_size;
- u32 dword;
-
- err = i915_vma_unbind(vma);
- if (err)
- goto out_vma_close;
-
- err = i915_vma_pin(vma, size, max_page_size, flags | offset);
- if (err) {
- /*
- * The ggtt may have some pages reserved so
- * refrain from erroring out.
- */
- if (err == -ENOSPC && i915_is_ggtt(vm)) {
- err = 0;
- continue;
- }
-
- goto out_vma_close;
- }
+ if (!n)
+ return 0;
- err = igt_check_page_sizes(vma);
- if (err)
- goto out_vma_unpin;
+ /*
+ * To keep things interesting when alternating between engines in our
+ * randomized order, let's also make it possible to feed the same engine
+ * a few times in succession by enlarging the permutation array.
+ */
+ order = i915_random_order(n * I915_NUM_ENGINES, &prng);
+ if (!order)
+ return -ENOMEM;
- dword = offset_in_page(num) / 4;
+ /*
+ * Try various offsets in an ascending/descending fashion until we time
+ * out -- we want to avoid issues hidden by effectively always using
+ * offset = 0.
+ */
+ i = 0;
+ for_each_prime_number_from(num, 0, max) {
+ u64 offset_low = num * max_page_size;
+ u64 offset_high = (max - num) * max_page_size;
+ u32 dword = offset_in_page(num) / 4;
- err = gpu_write(vma, ctx, engine, dword, num + 1);
- if (err) {
- pr_err("gpu-write failed at offset=%llx", offset);
- goto out_vma_unpin;
- }
+ engine = engines[order[i] % n];
+ i = (i + 1) % (n * I915_NUM_ENGINES);
- err = cpu_check(obj, dword, num + 1);
- if (err) {
- pr_err("cpu-check failed at offset=%llx", offset);
- goto out_vma_unpin;
- }
+ err = __igt_write_huge(ctx, engine, obj, size, offset_low, dword, num + 1);
+ if (err)
+ break;
- i915_vma_unpin(vma);
+ err = __igt_write_huge(ctx, engine, obj, size, offset_high, dword, num + 1);
+ if (err)
+ break;
- if (num > 0 &&
- igt_timeout(end_time,
- "%s timed out on engine=%u at offset=%llx, max_page_size=%x\n",
- __func__, id, offset, max_page_size))
- break;
- }
+ if (igt_timeout(end_time,
+ "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
+ __func__, engine->id, offset_low, offset_high, max_page_size))
+ break;
}
-out_vma_unpin:
- if (i915_vma_is_pinned(vma))
- i915_vma_unpin(vma);
-out_vma_close:
- i915_vma_close(vma);
+ kfree(order);
return err;
}
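
A toy illustration of the enlarged-permutation trick above: with n usable engines, order[] holds n * I915_NUM_ENGINES shuffled indices, and taking them modulo n can legitimately pick the same engine several times in a row. Standalone sketch, where a plain Fisher-Yates shuffle stands in for i915_random_order():

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	enum { N = 2, TOTAL = N * 5 };	/* e.g. 2 engines, I915_NUM_ENGINES == 5 */
	int order[TOTAL], i;

	for (i = 0; i < TOTAL; i++)
		order[i] = i;
	for (i = TOTAL - 1; i > 0; i--) {	/* shuffle */
		int j = rand() % (i + 1), t = order[i];
		order[i] = order[j];
		order[j] = t;
	}
	for (i = 0; i < TOTAL; i++)
		printf("pick engine %d\n", order[i] % N);
	return 0;
}
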
@@ -1159,6 +1189,9 @@ static int igt_ppgtt_exhaust_huge(void *arg)
int n, i;
int err = -ENODEV;
+ if (supported == I915_GTT_PAGE_SIZE_4K)
+ return 0;
+
/*
* Sanity check creating objects with a varying mix of page sizes --
* ensuring that our writes lands in the right place.
@@ -1604,7 +1637,7 @@ static int igt_shrink_thp(void *arg)
* shmem to truncate our pages.
*/
i915_gem_shrink_all(i915);
- if (!IS_ERR_OR_NULL(obj->mm.pages)) {
+ if (i915_gem_object_has_pages(obj)) {
pr_err("shrink-all didn't truncate the pages\n");
err = -EINVAL;
goto out_close;
@@ -1716,6 +1749,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_get(dev_priv);
ctx = live_context(dev_priv, file);
if (IS_ERR(ctx)) {
@@ -1726,6 +1760,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
err = i915_subtests(tests, ctx);
out_unlock:
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
mock_file_free(dev_priv, file);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index 35d778d70626..7a0d1e17c1ad 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -33,7 +33,7 @@ static int cpu_set(struct drm_i915_gem_object *obj,
{
unsigned int needs_clflush;
struct page *page;
- typeof(v) *map;
+ u32 *map;
int err;
err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
@@ -59,7 +59,7 @@ static int cpu_get(struct drm_i915_gem_object *obj,
{
unsigned int needs_clflush;
struct page *page;
- typeof(v) map;
+ u32 *map;
int err;
err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
@@ -82,7 +82,7 @@ static int gtt_set(struct drm_i915_gem_object *obj,
u32 v)
{
struct i915_vma *vma;
- typeof(v) *map;
+ u32 __iomem *map;
int err;
err = i915_gem_object_set_to_gtt_domain(obj, true);
@@ -98,7 +98,7 @@ static int gtt_set(struct drm_i915_gem_object *obj,
if (IS_ERR(map))
return PTR_ERR(map);
- map[offset / sizeof(*map)] = v;
+ iowrite32(v, &map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
return 0;
@@ -109,7 +109,7 @@ static int gtt_get(struct drm_i915_gem_object *obj,
u32 *v)
{
struct i915_vma *vma;
- typeof(v) map;
+ u32 __iomem *map;
int err;
err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -125,7 +125,7 @@ static int gtt_get(struct drm_i915_gem_object *obj,
if (IS_ERR(map))
return PTR_ERR(map);
- *v = map[offset / sizeof(*map)];
+ *v = ioread32(&map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
return 0;
@@ -135,7 +135,7 @@ static int wc_set(struct drm_i915_gem_object *obj,
unsigned long offset,
u32 v)
{
- typeof(v) *map;
+ u32 *map;
int err;
err = i915_gem_object_set_to_wc_domain(obj, true);
@@ -156,7 +156,7 @@ static int wc_get(struct drm_i915_gem_object *obj,
unsigned long offset,
u32 *v)
{
- typeof(v) map;
+ u32 *map;
int err;
err = i915_gem_object_set_to_wc_domain(obj, false);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index def5052862ae..56a803d11916 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -158,14 +158,6 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
goto err_batch;
}
- err = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- goto err_request;
-
- err = i915_switch_context(rq);
- if (err)
- goto err_request;
-
flags = 0;
if (INTEL_GEN(vm->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
@@ -272,6 +264,23 @@ out_unmap:
return err;
}
+static int file_add_object(struct drm_file *file,
+ struct drm_i915_gem_object *obj)
+{
+ int err;
+
+ GEM_BUG_ON(obj->base.handle_count);
+
+ /* tie the object to the drm_file for easy reaping */
+ err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ i915_gem_object_get(obj);
+ obj->base.handle_count++;
+ return 0;
+}
+
static struct drm_i915_gem_object *
create_test_object(struct i915_gem_context *ctx,
struct drm_file *file,
@@ -281,7 +290,6 @@ create_test_object(struct i915_gem_context *ctx,
struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
u64 size;
- u32 handle;
int err;
size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
@@ -291,8 +299,7 @@ create_test_object(struct i915_gem_context *ctx,
if (IS_ERR(obj))
return obj;
- /* tie the handle to the drm_file for easy reaping */
- err = drm_gem_handle_create(file, &obj->base, &handle);
+ err = file_add_object(file, obj);
i915_gem_object_put(obj);
if (err)
return ERR_PTR(err);
@@ -325,7 +332,7 @@ static int igt_ctx_exec(void *arg)
LIST_HEAD(objects);
unsigned long ncontexts, ndwords, dw;
bool first_shared_gtt = true;
- int err;
+ int err = -ENODEV;
/* Create a few different contexts (with different mm) and write
* through each ctx/mm using the GPU making sure those writes end
@@ -369,7 +376,9 @@ static int igt_ctx_exec(void *arg)
}
}
+ intel_runtime_pm_get(i915);
err = gpu_fill(obj, ctx, engine, dw);
+ intel_runtime_pm_put(i915);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index f463105ff48d..e1ddad635d73 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -355,6 +355,7 @@ static int igt_evict_contexts(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
@@ -463,6 +464,7 @@ out_locked:
}
if (drm_mm_node_allocated(&hole))
drm_mm_remove_node(&hole);
+ intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 9da0c9f99916..4a28d713a7d8 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -216,13 +216,21 @@ static int lowlevel_hole(struct drm_i915_private *i915,
hole_size = (hole_end - hole_start) >> size;
if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
- count = hole_size;
+ count = hole_size >> 1;
+ if (!count) {
+ pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
+ __func__, hole_start, hole_end, size, hole_size);
+ break;
+ }
+
do {
- count >>= 1;
order = i915_random_order(count, &prng);
- } while (!order && count);
- if (!order)
- break;
+ if (order)
+ break;
+ } while (count >>= 1);
+ if (!count)
+ return -ENOMEM;
+ GEM_BUG_ON(!order);
GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
@@ -267,7 +275,9 @@ static int lowlevel_hole(struct drm_i915_private *i915,
mock_vma.node.size = BIT_ULL(size);
mock_vma.node.start = addr;
+ intel_runtime_pm_get(i915);
vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
+ intel_runtime_pm_put(i915);
}
count = n;
@@ -697,18 +707,26 @@ static int drunk_hole(struct drm_i915_private *i915,
unsigned int *order, count, n;
struct i915_vma *vma;
u64 hole_size;
- int err;
+ int err = -ENODEV;
hole_size = (hole_end - hole_start) >> size;
if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
- count = hole_size;
+ count = hole_size >> 1;
+ if (!count) {
+ pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
+ __func__, hole_start, hole_end, size, hole_size);
+ break;
+ }
+
do {
- count >>= 1;
order = i915_random_order(count, &prng);
- } while (!order && count);
- if (!order)
- break;
+ if (order)
+ break;
+ } while (count >>= 1);
+ if (!count)
+ return -ENOMEM;
+ GEM_BUG_ON(!order);
/* Ignore allocation failures (i.e. don't report them as
* a test failure) as we are purposefully allocating very
@@ -956,7 +974,7 @@ static int exercise_ggtt(struct drm_i915_private *i915,
u64 hole_start, hole_end, last = 0;
struct drm_mm_node *node;
IGT_TIMEOUT(end_time);
- int err;
+ int err = 0;
mutex_lock(&i915->drm.struct_mutex);
restart:
@@ -1047,6 +1065,7 @@ static int igt_ggtt_page(void *arg)
goto out_remove;
}
+ intel_runtime_pm_get(i915);
for (n = 0; n < count; n++) {
u64 offset = tmp.start + order[n] * PAGE_SIZE;
u32 __iomem *vaddr;
@@ -1055,7 +1074,7 @@ static int igt_ggtt_page(void *arg)
i915_gem_object_get_dma_address(obj, 0),
offset, I915_CACHE_NONE, 0);
- vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+ vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
iowrite32(n, vaddr + n);
io_mapping_unmap_atomic(vaddr);
@@ -1073,7 +1092,7 @@ static int igt_ggtt_page(void *arg)
i915_gem_object_get_dma_address(obj, 0),
offset, I915_CACHE_NONE, 0);
- vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+ vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
val = ioread32(vaddr + n);
io_mapping_unmap_atomic(vaddr);
@@ -1086,6 +1105,7 @@ static int igt_ggtt_page(void *arg)
break;
}
}
+ intel_runtime_pm_put(i915);
kfree(order);
out_remove:
@@ -1160,7 +1180,7 @@ static int igt_gtt_reserve(void *arg)
struct drm_i915_gem_object *obj, *on;
LIST_HEAD(objects);
u64 total;
- int err;
+ int err = -ENODEV;
/* i915_gem_gtt_reserve() tries to reserve the precise range
* for the node, and evicts if it has to. So our test checks that
@@ -1351,7 +1371,7 @@ static int igt_gtt_insert(void *arg)
}, *ii;
LIST_HEAD(objects);
u64 total;
- int err;
+ int err = -ENODEV;
/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
* to the node, evicting if required.
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 1b8774a42e48..f32aa6bb79e2 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -317,6 +317,7 @@ static int igt_partial_tiling(void *arg)
}
mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
if (1) {
IGT_TIMEOUT(end);
@@ -418,6 +419,7 @@ next_tiling: ;
}
out_unlock:
+ intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
out:
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
index a999161e8db1..647bf2bbd799 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
@@ -332,7 +332,7 @@ static int live_nop_request(void *arg)
struct intel_engine_cs *engine;
struct live_test t;
unsigned int id;
- int err;
+ int err = -ENODEV;
/* Submit various sized batches of empty requests, to each engine
* (individually), and wait for the batch to complete. We can check
@@ -459,14 +459,6 @@ empty_request(struct intel_engine_cs *engine,
if (IS_ERR(request))
return request;
- err = engine->emit_flush(request, EMIT_INVALIDATE);
- if (err)
- goto out_request;
-
- err = i915_switch_context(request);
- if (err)
- goto out_request;
-
err = engine->emit_bb_start(request,
batch->node.start,
batch->node.size,
@@ -675,12 +667,6 @@ static int live_all_engines(void *arg)
goto out_request;
}
- err = engine->emit_flush(request[id], EMIT_INVALIDATE);
- GEM_BUG_ON(err);
-
- err = i915_switch_context(request[id]);
- GEM_BUG_ON(err);
-
err = engine->emit_bb_start(request[id],
batch->node.start,
batch->node.size,
@@ -797,12 +783,6 @@ static int live_sequential_engines(void *arg)
}
}
- err = engine->emit_flush(request[id], EMIT_INVALIDATE);
- GEM_BUG_ON(err);
-
- err = i915_switch_context(request[id]);
- GEM_BUG_ON(err);
-
err = engine->emit_bb_start(request[id],
batch->node.start,
batch->node.size,
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
index 4795877abe56..3000e6a7d82d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
@@ -79,7 +79,7 @@ static int igt_sync(void *arg)
}, *p;
struct intel_timeline *tl;
int order, offset;
- int ret;
+ int ret = -ENODEV;
tl = mock_timeline(0);
if (!tl)
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index d7dd98a6acad..088f45bc6199 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -20,3 +20,4 @@ selftest(evict, i915_gem_evict_live_selftests)
selftest(hugepages, i915_gem_huge_page_live_selftests)
selftest(contexts, i915_gem_context_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
+selftest(guc, intel_guc_live_selftest)
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c
index b85872cc7fbe..2088ae57aa89 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.c
+++ b/drivers/gpu/drm/i915/selftests/i915_random.c
@@ -57,7 +57,7 @@ unsigned int *i915_random_order(unsigned int count, struct rnd_state *state)
{
unsigned int *order, i;
- order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
+ order = kmalloc_array(count, sizeof(*order), GFP_KERNEL | __GFP_NOWARN);
if (!order)
return order;
diff --git a/drivers/gpu/drm/i915/selftests/i915_syncmap.c b/drivers/gpu/drm/i915/selftests/i915_syncmap.c
index bcab3d00a785..47f4ae18a1ef 100644
--- a/drivers/gpu/drm/i915/selftests/i915_syncmap.c
+++ b/drivers/gpu/drm/i915/selftests/i915_syncmap.c
@@ -333,7 +333,7 @@ static int igt_syncmap_join_below(void *arg)
{
struct i915_syncmap *sync;
unsigned int step, order, idx;
- int err;
+ int err = -ENODEV;
i915_syncmap_init(&sync);
@@ -402,7 +402,7 @@ static int igt_syncmap_neighbours(void *arg)
I915_RND_STATE(prng);
IGT_TIMEOUT(end_time);
struct i915_syncmap *sync;
- int err;
+ int err = -ENODEV;
/*
* Each leaf holds KSYNCMAP seqno. Check that when we create KSYNCMAP
@@ -447,7 +447,7 @@ static int igt_syncmap_compact(void *arg)
{
struct i915_syncmap *sync;
unsigned int idx, order;
- int err;
+ int err = -ENODEV;
i915_syncmap_init(&sync);
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 2e86ec136b35..eb89e301b602 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -150,7 +150,7 @@ static int igt_vma_create(void *arg)
IGT_TIMEOUT(end_time);
LIST_HEAD(contexts);
LIST_HEAD(objects);
- int err;
+ int err = -ENOMEM;
/* Exercise creating many vma amongst many objects, checking the
* vma creation and lookup routines.
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
new file mode 100644
index 000000000000..3f9016466dea
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+/* max doorbell number + negative test for each client type */
+#define ATTEMPTS (GUC_NUM_DOORBELLS + GUC_CLIENT_PRIORITY_NUM)
+
+static struct intel_guc_client *clients[ATTEMPTS];
+
+static bool available_dbs(struct intel_guc *guc, u32 priority)
+{
+ unsigned long offset;
+ unsigned long end;
+ u16 id;
+
+ /* first half is used for normal priority, second half for high */
+ offset = 0;
+ end = GUC_NUM_DOORBELLS / 2;
+ if (priority <= GUC_CLIENT_PRIORITY_HIGH) {
+ offset = end;
+ end += offset;
+ }
+
+ id = find_next_zero_bit(guc->doorbell_bitmap, end, offset);
+ if (id < end)
+ return true;
+
+ return false;
+}
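
For example, assuming GUC_NUM_DOORBELLS is 256 (the value is not shown in this patch), a normal-priority client scans ids [0, 128) while a client at GUC_CLIENT_PRIORITY_HIGH or better scans [128, 256); any zero bit left in the relevant half of doorbell_bitmap means a doorbell of that class is still available.
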
+
+static int check_all_doorbells(struct intel_guc *guc)
+{
+ u16 db_id;
+
+ pr_info_once("Max number of doorbells: %d", GUC_NUM_DOORBELLS);
+ for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) {
+ if (!doorbell_ok(guc, db_id)) {
+ pr_err("doorbell %d, not ok\n", db_id);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Basic client sanity check, handy to validate create_clients.
+ */
+static int validate_client(struct intel_guc_client *client,
+ int client_priority,
+ bool is_preempt_client)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
+ struct i915_gem_context *ctx_owner = is_preempt_client ?
+ dev_priv->preempt_context : dev_priv->kernel_context;
+
+ if (client->owner != ctx_owner ||
+ client->engines != INTEL_INFO(dev_priv)->ring_mask ||
+ client->priority != client_priority ||
+ client->doorbell_id == GUC_DOORBELL_INVALID)
+ return -EINVAL;
+ else
+ return 0;
+}
+
+static bool client_doorbell_in_sync(struct intel_guc_client *client)
+{
+ return doorbell_ok(client->guc, client->doorbell_id);
+}
+
+/*
+ * Check that we're able to synchronize guc_clients with their doorbells
+ *
+ * We're creating clients and reserving doorbells once, at module load. During
+ * module lifetime, GuC, doorbell HW, and i915 state may go out of sync due to
+ * GuC being reset. In other words - GuC clients are still around, but the
+ * status of their doorbells may be incorrect. That is why we validate
+ * that the doorbell state expected by the driver matches what the
+ * GuC/HW actually have.
+ */
+static int igt_guc_clients(void *args)
+{
+ struct drm_i915_private *dev_priv = args;
+ struct intel_guc *guc;
+ int err = 0;
+
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ guc = &dev_priv->guc;
+ if (!guc) {
+ pr_err("No guc object!\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ err = check_all_doorbells(guc);
+ if (err)
+ goto unlock;
+
+ /*
+ * Get rid of clients created during driver load because the test will
+ * recreate them.
+ */
+ guc_clients_destroy(guc);
+ if (guc->execbuf_client || guc->preempt_client) {
+ pr_err("guc_clients_destroy lied!\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ err = guc_clients_create(guc);
+ if (err) {
+ pr_err("Failed to create clients\n");
+ goto unlock;
+ }
+ GEM_BUG_ON(!guc->execbuf_client);
+ GEM_BUG_ON(!guc->preempt_client);
+
+ err = validate_client(guc->execbuf_client,
+ GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
+ if (err) {
+ pr_err("execbug client validation failed\n");
+ goto out;
+ }
+
+ err = validate_client(guc->preempt_client,
+ GUC_CLIENT_PRIORITY_KMD_HIGH, true);
+ if (err) {
+ pr_err("preempt client validation failed\n");
+ goto out;
+ }
+
+ /* each client should now have reserved a doorbell */
+ if (!has_doorbell(guc->execbuf_client) ||
+ !has_doorbell(guc->preempt_client)) {
+ pr_err("guc_clients_create didn't reserve doorbells\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Now create the doorbells */
+ guc_clients_doorbell_init(guc);
+
+ /* each client should now have received a doorbell */
+ if (!client_doorbell_in_sync(guc->execbuf_client) ||
+ !client_doorbell_in_sync(guc->preempt_client)) {
+ pr_err("failed to initialize the doorbells\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Basic test - an attempt to reallocate a valid doorbell to the
+ * client it is currently assigned should not cause a failure.
+ */
+ err = guc_clients_doorbell_init(guc);
+ if (err)
+ goto out;
+
+ /*
+ * Negative test - a client with no doorbell (invalid db id).
+ * After destroying the doorbell, the db id is changed to
+ * GUC_DOORBELL_INVALID and the firmware will reject any attempt to
+ * allocate a doorbell with an invalid id (db has to be reserved before
+ * allocation).
+ */
+ destroy_doorbell(guc->execbuf_client);
+ if (client_doorbell_in_sync(guc->execbuf_client)) {
+ pr_err("destroy db did not work\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ unreserve_doorbell(guc->execbuf_client);
+ err = guc_clients_doorbell_init(guc);
+ if (err != -EIO) {
+ pr_err("unexpected (err = %d)", err);
+ goto out;
+ }
+
+ if (!available_dbs(guc, guc->execbuf_client->priority)) {
+ pr_err("doorbell not available when it should\n");
+ err = -EIO;
+ goto out;
+ }
+
+ /* clean after test */
+ err = reserve_doorbell(guc->execbuf_client);
+ if (err) {
+ pr_err("failed to reserve back the doorbell back\n");
+ }
+ err = create_doorbell(guc->execbuf_client);
+ if (err) {
+ pr_err("recreate doorbell failed\n");
+ goto out;
+ }
+
+out:
+ /*
+ * Leave a clean state for the other tests; besides, the driver always
+ * destroys the clients during unload.
+ */
+ destroy_doorbell(guc->execbuf_client);
+ destroy_doorbell(guc->preempt_client);
+ guc_clients_destroy(guc);
+ guc_clients_create(guc);
+ guc_clients_doorbell_init(guc);
+unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return err;
+}
+
+/*
+ * Create as many clients as the number of doorbells. Note that some
+ * clients/doorbells are already created during driver load, but this test
+ * creates its own and does not interact with the existing ones.
+ */
+static int igt_guc_doorbells(void *arg)
+{
+ struct drm_i915_private *dev_priv = arg;
+ struct intel_guc *guc;
+ int i, err = 0;
+ u16 db_id;
+
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ guc = &dev_priv->guc;
+ if (!guc) {
+ pr_err("No guc object!\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ err = check_all_doorbells(guc);
+ if (err)
+ goto unlock;
+
+ for (i = 0; i < ATTEMPTS; i++) {
+ clients[i] = guc_client_alloc(dev_priv,
+ INTEL_INFO(dev_priv)->ring_mask,
+ i % GUC_CLIENT_PRIORITY_NUM,
+ dev_priv->kernel_context);
+
+ if (!clients[i]) {
+ pr_err("[%d] No guc client\n", i);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (IS_ERR(clients[i])) {
+ if (PTR_ERR(clients[i]) != -ENOSPC) {
+ pr_err("[%d] unexpected error\n", i);
+ err = PTR_ERR(clients[i]);
+ goto out;
+ }
+
+ if (available_dbs(guc, i % GUC_CLIENT_PRIORITY_NUM)) {
+ pr_err("[%d] non-db related alloc fail\n", i);
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* expected, ran out of dbs for this client type */
+ continue;
+ }
+
+ /*
+ * The check below is only valid because we keep a doorbell
+ * assigned during the whole life of the client.
+ */
+ if (clients[i]->stage_id >= GUC_NUM_DOORBELLS) {
+ pr_err("[%d] more clients than doorbells (%d >= %d)\n",
+ i, clients[i]->stage_id, GUC_NUM_DOORBELLS);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = validate_client(clients[i],
+ i % GUC_CLIENT_PRIORITY_NUM, false);
+ if (err) {
+ pr_err("[%d] client_alloc sanity check failed!\n", i);
+ err = -EINVAL;
+ goto out;
+ }
+
+ db_id = clients[i]->doorbell_id;
+
+ err = create_doorbell(clients[i]);
+ if (err) {
+ pr_err("[%d] Failed to create a doorbell\n", i);
+ goto out;
+ }
+
+ /* doorbell id shouldn't change, we are holding the mutex */
+ if (db_id != clients[i]->doorbell_id) {
+ pr_err("[%d] doorbell id changed (%d != %d)\n",
+ i, db_id, clients[i]->doorbell_id);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = check_all_doorbells(guc);
+ if (err)
+ goto out;
+ }
+
+out:
+ for (i = 0; i < ATTEMPTS; i++)
+ if (!IS_ERR_OR_NULL(clients[i])) {
+ destroy_doorbell(clients[i]);
+ guc_client_free(clients[i]);
+ }
+unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return err;
+}
+
+int intel_guc_live_selftest(struct drm_i915_private *dev_priv)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_guc_clients),
+ SUBTEST(igt_guc_doorbells),
+ };
+
+ if (!USES_GUC_SUBMISSION(dev_priv))
+ return 0;
+
+ return i915_subtests(tests, dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 71ce06680d66..d1f91a533afa 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -114,14 +114,6 @@ static int emit_recurse_batch(struct hang *h,
if (err)
goto unpin_vma;
- err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- goto unpin_hws;
-
- err = i915_switch_context(rq);
- if (err)
- goto unpin_hws;
-
i915_vma_move_to_active(vma, rq, 0);
if (!i915_gem_object_has_active_reference(vma->obj)) {
i915_gem_object_get(vma->obj);
@@ -140,6 +132,12 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
+ *batch++ = MI_ARB_CHECK;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
*batch++ = lower_32_bits(vma->node.start);
*batch++ = upper_32_bits(vma->node.start);
@@ -148,6 +146,12 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
+ *batch++ = MI_ARB_CHECK;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
*batch++ = lower_32_bits(vma->node.start);
} else if (INTEL_GEN(i915) >= 4) {
@@ -155,12 +159,24 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
+ *batch++ = MI_ARB_CHECK;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(vma->node.start);
} else {
*batch++ = MI_STORE_DWORD_IMM;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
+ *batch++ = MI_ARB_CHECK;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1;
*batch++ = lower_32_bits(vma->node.start);
}
@@ -173,7 +189,6 @@ static int emit_recurse_batch(struct hang *h,
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
-unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
@@ -243,6 +258,16 @@ static void hang_fini(struct hang *h)
i915_gem_wait_for_idle(h->i915, I915_WAIT_LOCKED);
}
+static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq)
+{
+ return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
+ rq->fence.seqno),
+ 10) &&
+ wait_for(i915_seqno_passed(hws_seqno(h, rq),
+ rq->fence.seqno),
+ 1000));
+}
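
The inverted double wait above reads as follows: wait_for_us() busy-spins for up to 10 us, and only if that times out does wait_for() sleep-poll for up to 1000 ms; both return 0 on success, so the function yields true as soon as either poll observes the breadcrumb seqno, i.e. once the hanging batch has genuinely started executing on the engine.
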
+
static int igt_hang_sanitycheck(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -305,6 +330,9 @@ static void global_reset_lock(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ pr_debug("%s: current gpu_error=%08lx\n",
+ __func__, i915->gpu_error.flags);
+
while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
wait_event(i915->gpu_error.reset_queue,
!test_bit(I915_RESET_BACKOFF,
@@ -362,54 +390,128 @@ static int igt_global_reset(void *arg)
return err;
}
-static int igt_reset_engine(void *arg)
+static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
{
- struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- unsigned int reset_count, reset_engine_count;
+ struct hang h;
int err = 0;
- /* Check that we can issue a global GPU and engine reset */
+ /* Check that we can issue an engine reset on an idle engine (no-op) */
if (!intel_has_reset_engine(i915))
return 0;
+ if (active) {
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err)
+ return err;
+ }
+
for_each_engine(engine, i915, id) {
- set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags);
+ unsigned int reset_count, reset_engine_count;
+ IGT_TIMEOUT(end_time);
+
+ if (active && !intel_engine_can_store_dword(engine))
+ continue;
+
reset_count = i915_reset_count(&i915->gpu_error);
reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
engine);
- err = i915_reset_engine(engine, I915_RESET_QUIET);
- if (err) {
- pr_err("i915_reset_engine failed\n");
- break;
- }
+ set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ do {
+ if (active) {
+ struct drm_i915_gem_request *rq;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ rq = hang_create_request(&h, engine,
+ i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ mutex_unlock(&i915->drm.struct_mutex);
+ break;
+ }
+
+ i915_gem_request_get(rq);
+ __i915_add_request(rq, true);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (!wait_for_hang(&h, rq)) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ i915_gem_request_put(rq);
+ err = -EIO;
+ break;
+ }
- if (i915_reset_count(&i915->gpu_error) != reset_count) {
- pr_err("Full GPU reset recorded! (engine reset expected)\n");
- err = -EINVAL;
- break;
- }
+ i915_gem_request_put(rq);
+ }
+
+ engine->hangcheck.stalled = true;
+ engine->hangcheck.seqno =
+ intel_engine_get_seqno(engine);
+
+ err = i915_reset_engine(engine, I915_RESET_QUIET);
+ if (err) {
+ pr_err("i915_reset_engine failed\n");
+ break;
+ }
+
+ if (i915_reset_count(&i915->gpu_error) != reset_count) {
+ pr_err("Full GPU reset recorded! (engine reset expected)\n");
+ err = -EINVAL;
+ break;
+ }
+
+ reset_engine_count += active;
+ if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+ reset_engine_count) {
+ pr_err("%s engine reset %srecorded!\n",
+ engine->name, active ? "not " : "");
+ err = -EINVAL;
+ break;
+ }
+
+ engine->hangcheck.stalled = false;
+ } while (time_before(jiffies, end_time));
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
- if (i915_reset_engine_count(&i915->gpu_error, engine) ==
- reset_engine_count) {
- pr_err("No %s engine reset recorded!\n", engine->name);
- err = -EINVAL;
+ if (err)
break;
- }
- clear_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags);
+ cond_resched();
}
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
+ if (active) {
+ mutex_lock(&i915->drm.struct_mutex);
+ hang_fini(&h);
+ mutex_unlock(&i915->drm.struct_mutex);
+ }
+
return err;
}
+static int igt_reset_idle_engine(void *arg)
+{
+ return __igt_reset_engine(arg, false);
+}
+
+static int igt_reset_active_engine(void *arg)
+{
+ return __igt_reset_engine(arg, true);
+}
+
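
Because the harness disables the periodic hangcheck for the duration of the run (see the saved_hangcheck handling at the end of this file), the active-reset loops above declare the hang by hand. The sequence, lifted out of the loop for readability (our annotation of existing code, not an addition):

	/* Pretend hangcheck has already sampled a stuck engine... */
	engine->hangcheck.stalled = true;
	engine->hangcheck.seqno = intel_engine_get_seqno(engine);

	/* ...so the per-engine reset proceeds as for a real hang... */
	err = i915_reset_engine(engine, I915_RESET_QUIET);

	/* ...then drop the fake stall once the counters are checked. */
	engine->hangcheck.stalled = false;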
static int active_engine(void *data)
{
struct intel_engine_cs *engine = data;
@@ -471,11 +573,12 @@ err_file:
return err;
}
-static int igt_reset_active_engines(void *arg)
+static int __igt_reset_engine_others(struct drm_i915_private *i915,
+ bool active)
{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine, *active;
+ struct intel_engine_cs *engine, *other;
enum intel_engine_id id, tmp;
+ struct hang h;
int err = 0;
/* Check that issuing a reset on one engine does not interfere
@@ -485,24 +588,36 @@ static int igt_reset_active_engines(void *arg)
if (!intel_has_reset_engine(i915))
return 0;
+ if (active) {
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err)
+ return err;
+ }
+
for_each_engine(engine, i915, id) {
- struct task_struct *threads[I915_NUM_ENGINES];
+ struct task_struct *threads[I915_NUM_ENGINES] = {};
unsigned long resets[I915_NUM_ENGINES];
unsigned long global = i915_reset_count(&i915->gpu_error);
+ unsigned long count = 0;
IGT_TIMEOUT(end_time);
+ if (active && !intel_engine_can_store_dword(engine))
+ continue;
+
memset(threads, 0, sizeof(threads));
- for_each_engine(active, i915, tmp) {
+ for_each_engine(other, i915, tmp) {
struct task_struct *tsk;
- if (active == engine)
- continue;
-
resets[tmp] = i915_reset_engine_count(&i915->gpu_error,
- active);
+ other);
+
+ if (other == engine)
+ continue;
- tsk = kthread_run(active_engine, active,
- "igt/%s", active->name);
+ tsk = kthread_run(active_engine, other,
+ "igt/%s", other->name);
if (IS_ERR(tsk)) {
err = PTR_ERR(tsk);
goto unwind;
@@ -512,20 +627,70 @@ static int igt_reset_active_engines(void *arg)
get_task_struct(tsk);
}
- set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags);
+ set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
do {
+ if (active) {
+ struct drm_i915_gem_request *rq;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ rq = hang_create_request(&h, engine,
+ i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ mutex_unlock(&i915->drm.struct_mutex);
+ break;
+ }
+
+ i915_gem_request_get(rq);
+ __i915_add_request(rq, true);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (!wait_for_hang(&h, rq)) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ i915_gem_request_put(rq);
+ err = -EIO;
+ break;
+ }
+
+ i915_gem_request_put(rq);
+ }
+
+ engine->hangcheck.stalled = true;
+ engine->hangcheck.seqno =
+ intel_engine_get_seqno(engine);
+
err = i915_reset_engine(engine, I915_RESET_QUIET);
if (err) {
- pr_err("i915_reset_engine(%s) failed, err=%d\n",
- engine->name, err);
+ pr_err("i915_reset_engine(%s:%s) failed, err=%d\n",
+ engine->name, active ? "active" : "idle", err);
break;
}
+
+ engine->hangcheck.stalled = false;
+ count++;
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags);
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ pr_info("i915_reset_engine(%s:%s): %lu resets\n",
+ engine->name, active ? "active" : "idle", count);
+
+ if (i915_reset_engine_count(&i915->gpu_error, engine) -
+ resets[engine->id] != (active ? count : 0)) {
+ pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
+ engine->name, active ? "active" : "idle", count,
+ i915_reset_engine_count(&i915->gpu_error,
+ engine) - resets[engine->id]);
+ if (!err)
+ err = -EINVAL;
+ }
unwind:
- for_each_engine(active, i915, tmp) {
+ for_each_engine(other, i915, tmp) {
int ret;
if (!threads[tmp])
@@ -533,27 +698,29 @@ unwind:
ret = kthread_stop(threads[tmp]);
if (ret) {
- pr_err("kthread for active engine %s failed, err=%d\n",
- active->name, ret);
+ pr_err("kthread for other engine %s failed, err=%d\n",
+ other->name, ret);
if (!err)
err = ret;
}
put_task_struct(threads[tmp]);
if (resets[tmp] != i915_reset_engine_count(&i915->gpu_error,
- active)) {
+ other)) {
pr_err("Innocent engine %s was reset (count=%ld)\n",
- active->name,
+ other->name,
i915_reset_engine_count(&i915->gpu_error,
- active) - resets[tmp]);
- err = -EIO;
+ other) - resets[tmp]);
+ if (!err)
+ err = -EINVAL;
}
}
if (global != i915_reset_count(&i915->gpu_error)) {
pr_err("Global reset (count=%ld)!\n",
i915_reset_count(&i915->gpu_error) - global);
- err = -EIO;
+ if (!err)
+ err = -EINVAL;
}
if (err)
@@ -565,9 +732,25 @@ unwind:
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
+ if (active) {
+ mutex_lock(&i915->drm.struct_mutex);
+ hang_fini(&h);
+ mutex_unlock(&i915->drm.struct_mutex);
+ }
+
return err;
}
+static int igt_reset_idle_engine_others(void *arg)
+{
+ return __igt_reset_engine_others(arg, false);
+}
+
+static int igt_reset_active_engine_others(void *arg)
+{
+ return __igt_reset_engine_others(arg, true);
+}
+
static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
{
u32 reset_count;
@@ -583,16 +766,6 @@ static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
return reset_count;
}
-static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq)
-{
- return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
- rq->fence.seqno),
- 10) &&
- wait_for(i915_seqno_passed(hws_seqno(h, rq),
- rq->fence.seqno),
- 1000));
-}
-
static int igt_wait_reset(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -626,9 +799,9 @@ static int igt_wait_reset(void *arg)
if (!wait_for_hang(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("Failed to start request %x, at %x\n",
- rq->fence.seqno, hws_seqno(&h, rq));
- intel_engine_dump(rq->engine, &p);
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
i915_reset(i915, 0);
i915_gem_set_wedged(i915);
@@ -721,9 +894,10 @@ static int igt_reset_queue(void *arg)
if (!wait_for_hang(&h, prev)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("Failed to start request %x, at %x\n",
- prev->fence.seqno, hws_seqno(&h, prev));
- intel_engine_dump(rq->engine, &p);
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, prev->fence.seqno, hws_seqno(&h, prev));
+ intel_engine_dump(prev->engine, &p,
+ "%s\n", prev->engine->name);
i915_gem_request_put(rq);
i915_gem_request_put(prev);
@@ -827,9 +1001,9 @@ static int igt_handle_error(void *arg)
if (!wait_for_hang(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("Failed to start request %x, at %x\n",
- rq->fence.seqno, hws_seqno(&h, rq));
- intel_engine_dump(rq->engine, &p);
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
i915_reset(i915, 0);
i915_gem_set_wedged(i915);
@@ -872,21 +1046,26 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(igt_global_reset), /* attempt to recover GPU first */
SUBTEST(igt_hang_sanitycheck),
- SUBTEST(igt_reset_engine),
- SUBTEST(igt_reset_active_engines),
+ SUBTEST(igt_reset_idle_engine),
+ SUBTEST(igt_reset_active_engine),
+ SUBTEST(igt_reset_idle_engine_others),
+ SUBTEST(igt_reset_active_engine_others),
SUBTEST(igt_wait_reset),
SUBTEST(igt_reset_queue),
SUBTEST(igt_handle_error),
};
+ bool saved_hangcheck;
int err;
if (!intel_has_gpu_reset(i915))
return 0;
intel_runtime_pm_get(i915);
+ saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
err = i915_subtests(tests, i915);
+ i915_modparams.enable_hangcheck = saved_hangcheck;
intel_runtime_pm_put(i915);
return err;
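
fetch_and_zero() is the i915 utility macro that returns the old value of its argument and writes zero back, which is what lets the harness park the enable_hangcheck modparam for the run and restore it on the way out. Its semantics, roughly (a sketch from memory, not the verbatim definition):

#define fetch_and_zero(ptr) ({			\
	typeof(*(ptr)) __val = *(ptr);		\
	*(ptr) = (typeof(*(ptr)))0;		\
	__val;					\
})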
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 3cac22eb47ce..2f6367643171 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -120,10 +120,10 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
!IS_CHERRYVIEW(dev_priv))
return 0;
- if (IS_VALLEYVIEW(dev_priv)) /* XXX system lockup! */
- return 0;
-
- if (IS_BROADWELL(dev_priv)) /* XXX random GPU hang afterwards! */
+ /*
+ * This test may lock up the machine or cause GPU hangs afterwards.
+ */
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
return 0;
valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid),
@@ -148,7 +148,10 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
for_each_set_bit(offset, valid, FW_RANGE) {
i915_reg_t reg = { offset };
+ iosf_mbi_punit_acquire();
intel_uncore_forcewake_reset(dev_priv, false);
+ iosf_mbi_punit_release();
+
check_for_unclaimed_mmio(dev_priv);
(void)I915_READ(reg);
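
Why the new bracketing around intel_uncore_forcewake_reset() (a hedged note; the rationale is recalled from the corresponding uncore change, not stated in this hunk):

/*
 * iosf_mbi_punit_acquire()/iosf_mbi_punit_release() take the mutex
 * that serialises P-Unit/PMIC accesses over the IOSF sideband, so
 * the forcewake reset cannot race a sideband transaction in flight,
 * the failure mode relevant on Valleyview/Cherryview.
 */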
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 331c2b09869e..55c0e2c15782 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -32,6 +32,13 @@ static struct mock_request *first_request(struct mock_engine *engine)
link);
}
+static void advance(struct mock_engine *engine,
+ struct mock_request *request)
+{
+ list_del_init(&request->link);
+ mock_seqno_advance(&engine->base, request->base.global_seqno);
+}
+
static void hw_delay_complete(struct timer_list *t)
{
struct mock_engine *engine = from_timer(engine, t, hw_delay);
@@ -39,15 +46,23 @@ static void hw_delay_complete(struct timer_list *t)
spin_lock(&engine->hw_lock);
- request = first_request(engine);
- if (request) {
- list_del_init(&request->link);
- mock_seqno_advance(&engine->base, request->base.global_seqno);
- }
-
+ /* The timer fired: the first request on the queue is now complete */
request = first_request(engine);
if (request)
- mod_timer(&engine->hw_delay, jiffies + request->delay);
+ advance(engine, request);
+
+ /*
+ * Also immediately signal any subsequent 0-delay requests, but
+ * requeue the timer for the next delayed request.
+ */
+ while ((request = first_request(engine))) {
+ if (request->delay) {
+ mod_timer(&engine->hw_delay, jiffies + request->delay);
+ break;
+ }
+
+ advance(engine, request);
+ }
spin_unlock(&engine->hw_lock);
}
@@ -98,16 +113,22 @@ static void mock_submit_request(struct drm_i915_gem_request *request)
spin_lock_irq(&engine->hw_lock);
list_add_tail(&mock->link, &engine->hw_queue);
- if (mock->link.prev == &engine->hw_queue)
- mod_timer(&engine->hw_delay, jiffies + mock->delay);
+ if (mock->link.prev == &engine->hw_queue) {
+ if (mock->delay)
+ mod_timer(&engine->hw_delay, jiffies + mock->delay);
+ else
+ advance(engine, mock);
+ }
spin_unlock_irq(&engine->hw_lock);
}
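
Taken together, the two mock_engine hunks above give zero-delay requests synchronous completion. A summary of the resulting timeline rules (our annotation):

/*
 * Submission: if the queue was empty, a delayed request arms the
 * hw_delay timer, while a 0-delay request is advanced (completed)
 * immediately.
 *
 * Timer expiry: advance the expired head, then keep advancing any
 * 0-delay followers, re-arming the timer when the next delayed
 * request reaches the head of the queue.
 */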
static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
{
- const unsigned long sz = roundup_pow_of_two(sizeof(struct intel_ring));
+ const unsigned long sz = PAGE_SIZE / 2;
struct intel_ring *ring;
+ BUILD_BUG_ON(MIN_SPACE_FOR_ADD_REQUEST > sz);
+
ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
if (!ring)
return NULL;
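
A sizing note (our inference from the assert, not stated in the patch): the previous roundup_pow_of_two(sizeof(struct intel_ring)) tied the ring size to an unrelated struct size, whereas the new BUILD_BUG_ON documents the actual constraint, namely that the ring must leave at least MIN_SPACE_FOR_ADD_REQUEST bytes free for emitting a request, which PAGE_SIZE / 2 comfortably satisfies.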
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 04eb9362f4f8..1bc61f3f76fc 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -85,6 +85,8 @@ static void mock_device_release(struct drm_device *dev)
i915_gemfs_fini(i915);
+ drm_mode_config_cleanup(&i915->drm);
+
drm_dev_fini(&i915->drm);
put_device(&i915->drm.pdev->dev);
}
@@ -179,20 +181,15 @@ struct drm_i915_private *mock_gem_device(void)
I915_GTT_PAGE_SIZE_64K |
I915_GTT_PAGE_SIZE_2M;
- spin_lock_init(&i915->mm.object_stat_lock);
mock_uncore_init(i915);
+ i915_gem_init__mm(i915);
init_waitqueue_head(&i915->gpu_error.wait_queue);
init_waitqueue_head(&i915->gpu_error.reset_queue);
i915->wq = alloc_ordered_workqueue("mock", 0);
if (!i915->wq)
- goto put_device;
-
- INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
- init_llist_head(&i915->mm.free_list);
- INIT_LIST_HEAD(&i915->mm.unbound_list);
- INIT_LIST_HEAD(&i915->mm.bound_list);
+ goto err_drv;
mock_init_contexts(i915);
@@ -271,6 +268,9 @@ err_objects:
kmem_cache_destroy(i915->objects);
err_wq:
destroy_workqueue(i915->wq);
+err_drv:
+ drm_mode_config_cleanup(&i915->drm);
+ drm_dev_fini(&i915->drm);
put_device:
put_device(&pdev->dev);
err:
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 336e1afb250f..e96873f96116 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -110,8 +110,8 @@ void mock_init_ggtt(struct drm_i915_private *i915)
ggtt->base.i915 = i915;
- ggtt->mappable_base = 0;
- ggtt->mappable_end = 2048 * PAGE_SIZE;
+ ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
ggtt->base.total = 4096 * PAGE_SIZE;
ggtt->base.clear_range = nop_clear_range;
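
DEFINE_RES_MEM(start, size) from <linux/ioport.h> builds a struct resource spanning [start, start + size - 1] with IORESOURCE_MEM set, and resource_size() recovers the original size, so the mock keeps the same 2048-page mappable aperture as before, now expressed as a resource. A minimal illustration (values as above):

	struct resource gmadr = DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);

	/* resource_size() == end - start + 1 == 2048 * PAGE_SIZE */
	WARN_ON(resource_size(&gmadr) != 2048 * PAGE_SIZE);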