author    Linus Torvalds <torvalds@linux-foundation.org>    2021-09-01 11:26:46 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2021-09-01 11:26:46 -0700
commit    477f70cd2a67904e04c2c2b9bd0fa2e95222f2f6 (patch)
tree      1897dd1de49e1ea24897163533e2d8ead5dad0ad /drivers/gpu/drm/msm
parent    835d31d319d9c8c4eb6cac074643360ba0ecab10 (diff)
parent    8f0284f190e6a0aa09015090568c03f18288231a (diff)
Merge tag 'drm-next-2021-08-31-1' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "Highlights:

   - i915 has seen a lot of refactoring and uAPI cleanups due to a
     change in the upstream direction going forward

     This has all been audited with known userspace, but there may be
     some pitfalls that were missed.

   - i915 now uses common TTM to enable discrete memory on DG1/2 GPUs

   - i915 enables Jasper and Elkhart Lake by default and has
     preliminary XeHP/DG2 support

   - amdgpu adds support for Cyan Skillfish

   - lots of implicit fencing rules documented and fixed up in drivers

   - msm now uses the core scheduler

   - the irq midlayer has been removed for non-legacy drivers

   - the sysfb code now works on more than x86.

  Otherwise the usual smattering of stuff everywhere, panels, bridges,
  refactorings.

  Detailed summary:

  core:
   - extract i915 eDP backlight into core
   - DP aux bus support
   - drm_device.irq_enabled removed
   - port drivers to native irq interfaces
   - export gem shadow plane handling for vgem
   - print proper driver name in framebuffer registration
   - driver fixes for implicit fencing rules
   - ARM fixed rate compression modifier added
   - updated fb damage handling
   - rmfb ioctl logging/docs
   - drop drm_gem_object_put_locked
   - define DRM_FORMAT_MAX_PLANES
   - add gem fb vmap/vunmap helpers
   - add lockdep_assert(once) helpers
   - mark drm irq midlayer as legacy
   - use offset adjusted bo mapping conversion

  vgaarb:
   - cleanups

  fbdev:
   - extend efifb handling to all arches
   - div by 0 fixes for multiple drivers

  udmabuf:
   - add hugepage mapping support

  dma-buf:
   - non-dynamic exporter fixups
   - document implicit fencing rules

  amdgpu:
   - Initial Cyan Skillfish support
   - switch virtual DCE over to vkms based atomic
   - VCN/JPEG power down fixes
   - NAVI PCIE link handling fixes
   - AMD HDMI freesync fixes
   - Yellow Carp + Beige Goby fixes
   - Clockgating/S0ix/SMU/EEPROM fixes
   - embed hw fence in job
   - rework dma-resv handling
   - ensure eviction to system ram

  amdkfd:
   - uapi: SVM address range query added
   - sysfs leak fix
   - GPUVM TLB optimizations
   - vmfault/migration counters

  i915:
   - Enable JSL and EHL by default
   - preliminary XeHP/DG2 support
   - remove all CNL support (never shipped)
   - move to TTM for discrete memory support
   - allow mixed object mmap handling
   - GEM uAPI spring cleaning
   - add I915_MMAP_OBJECT_FIXED
   - reinstate ADL-P mmap ioctls
   - drop a bunch of unused by userspace features
   - disable and remove GPU relocations
   - revert some i915 misfeatures
   - major refactoring of GuC for Gen11+
   - execbuffer object locking separate step
   - reject caching/set-domain on discrete
   - Enable pipe DMC loading on XE-LPD and ADL-P
   - add PSF GV point support
   - Refactor and fix DDI buffer translations
   - Clean up FBC CFB allocation code
   - Finish INTEL_GEN() and friends macro conversions

  nouveau:
   - add eDP backlight support
   - implicit fence fix

  msm:
   - a680/7c3 support
   - drm/scheduler conversion

  panfrost:
   - rework GPU reset

  virtio:
   - fix fencing for planes

  ast:
   - add detect support

  bochs:
   - move to tiny GPU driver

  vc4:
   - use hotplug irqs
   - HDMI codec support

  vmwgfx:
   - use internal vmware device headers

  ingenic:
   - demidlayering irq

  rcar-du:
   - shutdown fixes
   - convert to bridge connector helpers

  zynqmp-dsub:
   - misc fixes

  mgag200:
   - convert PLL handling to atomic

  mediatek:
   - MT8133 AAL support
   - gem mmap object support
   - MT8167 support

  etnaviv:
   - NXP Layerscape LS1028A SoC support
   - GEM mmap cleanups

  tegra:
   - new user API

  exynos:
   - missing unlock fix
   - build warning fix
   - use refcount_t"

* tag 'drm-next-2021-08-31-1' of git://anongit.freedesktop.org/drm/drm: (1318 commits)
  drm/amd/display: Move AllowDRAMSelfRefreshOrDRAMClockChangeInVblank to bounding box
  drm/amd/display: Remove duplicate dml init
  drm/amd/display: Update bounding box states (v2)
  drm/amd/display: Update number of DCN3 clock states
  drm/amdgpu: disable GFX CGCG in aldebaran
  drm/amdgpu: Clear RAS interrupt status on aldebaran
  drm/amdgpu: Add support for RAS XGMI err query
  drm/amdkfd: Account for SH/SE count when setting up cu masks.
  drm/amdgpu: rename amdgpu_bo_get_preferred_pin_domain
  drm/amdgpu: drop redundant cancel_delayed_work_sync call
  drm/amdgpu: add missing cleanups for more ASICs on UVD/VCE suspend
  drm/amdgpu: add missing cleanups for Polaris12 UVD/VCE on suspend
  drm/amdkfd: map SVM range with correct access permission
  drm/amdkfd: check access permisson to restore retry fault
  drm/amdgpu: Update RAS XGMI Error Query
  drm/amdgpu: Add driver infrastructure for MCA RAS
  drm/amd/display: Add Logging for HDMI color depth information
  drm/amd/amdgpu: consolidate PSP TA init shared buf functions
  drm/amd/amdgpu: add name field back to ras_common_if
  drm/amdgpu: Fix build with missing pm_suspend_target_state module export
  ...
Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 35
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 7
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 24
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 137
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 34
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 40
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 22
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 43
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 12
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 102
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 85
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 11
-rw-r--r--  drivers/gpu/drm/msm/disp/msm_disp_snapshot.h | 1
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_aux.c | 3
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_catalog.c | 8
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_ctrl.c | 139
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 76
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_link.c | 50
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_panel.c | 9
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_power.c | 3
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c | 15
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h | 18
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 74
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.c | 33
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.h | 3
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 169
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 146
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 83
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 5
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c | 8
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 190
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 158
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 18
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 14
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.c | 53
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.h | 44
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 134
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 50
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 11
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 350
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 220
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 139
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu_devfreq.c | 203
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 69
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.h | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_submitqueue.c | 53
60 files changed, 2028 insertions(+), 1150 deletions(-)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 52536e7adb95..e9c6af78b1d7 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -14,6 +14,7 @@ config DRM_MSM
select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
+ select DRM_SCHED
select SHMEM
select TMPFS
select QCOM_SCM if ARCH_QCOM
@@ -115,9 +116,9 @@ config DRM_MSM_DSI_10NM_PHY
Choose this option if DSI PHY on SDM845 is used on the platform.
config DRM_MSM_DSI_7NM_PHY
- bool "Enable DSI 7nm PHY driver in MSM DRM (used by SM8150/SM8250)"
+ bool "Enable DSI 7nm PHY driver in MSM DRM"
depends on DRM_MSM_DSI
default y
help
- Choose this option if DSI PHY on SM8150/SM8250 is used on the
- platform.
+ Choose this option if DSI PHY on SM8150/SM8250/SC7280 is used on
+ the platform.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 2c00aa70b708..904535eda0c4 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -90,6 +90,7 @@ msm-y := \
msm_gem_submit.o \
msm_gem_vma.o \
msm_gpu.o \
+ msm_gpu_devfreq.o \
msm_iommu.o \
msm_perf.o \
msm_rd.o \
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index fc2c905b6c9e..c9d11d57aed6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -117,13 +117,13 @@ reset_set(void *data, u64 val)
if (a5xx_gpu->pm4_bo) {
msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
- drm_gem_object_put_locked(a5xx_gpu->pm4_bo);
+ drm_gem_object_put(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
}
if (a5xx_gpu->pfp_bo) {
msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
- drm_gem_object_put_locked(a5xx_gpu->pfp_bo);
+ drm_gem_object_put(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 7a271de9a212..5e2750eb3810 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -18,6 +18,18 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
+static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (a5xx_gpu->has_whereami) {
+ OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+ OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
+ OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
+ }
+}
+
void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
bool sync)
{
@@ -30,11 +42,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
* Most flush operations need to issue a WHERE_AM_I opcode to sync up
* the rptr shadow
*/
- if (a5xx_gpu->has_whereami && sync) {
- OUT_PKT7(ring, CP_WHERE_AM_I, 2);
- OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
- OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
- }
+ if (sync)
+ update_shadow_rptr(gpu, ring);
spin_lock_irqsave(&ring->preempt_lock, flags);
@@ -168,6 +177,16 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
ibs++;
break;
}
+
+ /*
+ * Periodically update shadow-wptr if needed, so that we
+ * can see partial progress of submits with large # of
+ * cmds.. otherwise we could needlessly stall waiting for
+ * ringbuffer state, simply due to looking at a shadow
+ * rptr value that has not been updated
+ */
+ if ((ibs % 32) == 0)
+ update_shadow_rptr(gpu, ring);
}
/*
@@ -1415,7 +1434,7 @@ struct a5xx_gpu_state {
static int a5xx_crashdumper_init(struct msm_gpu *gpu,
struct a5xx_crashdumper *dumper)
{
- dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
+ dumper->ptr = msm_gem_kernel_new(gpu->dev,
SZ_1M, MSM_BO_WC, gpu->aspace,
&dumper->bo, &dumper->iova);
@@ -1517,7 +1536,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
if (a5xx_crashdumper_run(gpu, &dumper)) {
kfree(a5xx_state->hlsqregs);
- msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
+ msm_gem_kernel_put(dumper.bo, gpu->aspace);
return;
}
@@ -1525,7 +1544,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
count * sizeof(u32));
- msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
+ msm_gem_kernel_put(dumper.bo, gpu->aspace);
}
static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index cdb165236a88..0e63a1429189 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -362,7 +362,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
*/
bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
- ptr = msm_gem_kernel_new_locked(drm, bosize,
+ ptr = msm_gem_kernel_new(drm, bosize,
MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace,
&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
if (IS_ERR(ptr))
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index ee72510ff8ce..8abc9a2b114a 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -240,7 +240,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
A5XX_PREEMPT_COUNTER_SIZE,
MSM_BO_WC, gpu->aspace, &counters_bo, &counters_iova);
if (IS_ERR(counters)) {
- msm_gem_kernel_put(bo, gpu->aspace, true);
+ msm_gem_kernel_put(bo, gpu->aspace);
return PTR_ERR(counters);
}
@@ -272,9 +272,8 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
int i;
for (i = 0; i < gpu->nr_rings; i++) {
- msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true);
- msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i],
- gpu->aspace, true);
+ msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace);
+ msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->aspace);
}
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index b349692219b7..a7c58018959f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -519,9 +519,9 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
if (!pdcptr)
goto err;
- if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
+ if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
pdc_in_aop = true;
- else if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu))
+ else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
pdc_address_offset = 0x30090;
else
pdc_address_offset = 0x30080;
@@ -933,6 +933,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
/* Use a known rate to bring up the GMU */
clk_set_rate(gmu->core_clk, 200000000);
+ clk_set_rate(gmu->hub_clk, 150000000);
ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
if (ret) {
pm_runtime_put(gmu->gxpd);
@@ -1129,12 +1130,12 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
- msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false);
- msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false);
- msm_gem_kernel_put(gmu->icache.obj, gmu->aspace, false);
- msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace, false);
- msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace, false);
- msm_gem_kernel_put(gmu->log.obj, gmu->aspace, false);
+ msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
+ msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
+ msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
+ msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
+ msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
+ msm_gem_kernel_put(gmu->log.obj, gmu->aspace);
gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
msm_gem_address_space_put(gmu->aspace);
@@ -1393,6 +1394,9 @@ static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
gmu->nr_clocks, "gmu");
+ gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
+ gmu->nr_clocks, "hub");
+
return 0;
}
@@ -1504,7 +1508,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
* are otherwise unused by a660.
*/
gmu->dummy.size = SZ_4K;
- if (adreno_is_a660(adreno_gpu)) {
+ if (adreno_is_a660_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000);
if (ret)
goto err_memory;
@@ -1522,7 +1526,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
SZ_16M - SZ_16K, 0x04000);
if (ret)
goto err_memory;
- } else if (adreno_is_a640(adreno_gpu)) {
+ } else if (adreno_is_a640_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
SZ_256K - SZ_16K, 0x04000);
if (ret)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 71dfa60070cc..3c74f64e3126 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -66,6 +66,7 @@ struct a6xx_gmu {
int nr_clocks;
struct clk_bulk_data *clocks;
struct clk *core_clk;
+ struct clk *hub_clk;
/* current performance index set externally */
int current_perf_index;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 9c5e4618aa0a..40c9fef457a4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -52,21 +52,25 @@ static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return true;
}
-static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- uint32_t wptr;
- unsigned long flags;
/* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
- struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
-
OUT_PKT7(ring, CP_WHERE_AM_I, 2);
OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
}
+}
+
+static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ uint32_t wptr;
+ unsigned long flags;
+
+ update_shadow_rptr(gpu, ring);
spin_lock_irqsave(&ring->preempt_lock, flags);
@@ -145,7 +149,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
- unsigned int i;
+ unsigned int i, ibs = 0;
a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
@@ -181,8 +185,19 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
break;
}
+
+ /*
+ * Periodically update shadow-wptr if needed, so that we
+ * can see partial progress of submits with large # of
+ * cmds.. otherwise we could needlessly stall waiting for
+ * ringbuffer state, simply due to looking at a shadow
+ * rptr value that has not been updated
+ */
+ if ((ibs % 32) == 0)
+ update_shadow_rptr(gpu, ring);
}
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
@@ -652,7 +667,7 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
regs = a650_protect;
count = ARRAY_SIZE(a650_protect);
count_max = 48;
- } else if (adreno_is_a660(adreno_gpu)) {
+ } else if (adreno_is_a660_family(adreno_gpu)) {
regs = a660_protect;
count = ARRAY_SIZE(a660_protect);
count_max = 48;
@@ -683,7 +698,7 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
if (adreno_is_a618(adreno_gpu))
return;
- if (adreno_is_a640(adreno_gpu))
+ if (adreno_is_a640_family(adreno_gpu))
amsbc = 1;
if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) {
@@ -694,6 +709,13 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
uavflagprd_inv = 2;
}
+ if (adreno_is_7c3(adreno_gpu)) {
+ lower_bit = 1;
+ amsbc = 1;
+ rgb565_predicator = 1;
+ uavflagprd_inv = 2;
+ }
+
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
@@ -740,6 +762,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
+ const char *sqe_name = adreno_gpu->info->fw[ADRENO_FW_SQE];
u32 *buf = msm_gem_get_vaddr(obj);
bool ret = false;
@@ -756,8 +779,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
*
* a660 targets have all the critical security fixes from the start
*/
- if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) ||
- adreno_is_a640(adreno_gpu)) {
+ if (!strcmp(sqe_name, "a630_sqe.fw")) {
/*
* If the lowest nibble is 0xa that is an indication that this
* microcode has been patched. The actual version is in dword
@@ -778,7 +800,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
DRM_DEV_ERROR(&gpu->pdev->dev,
"a630 SQE ucode is too old. Have version %x need at least %x\n",
buf[0] & 0xfff, 0x190);
- } else if (adreno_is_a650(adreno_gpu)) {
+ } else if (!strcmp(sqe_name, "a650_sqe.fw")) {
if ((buf[0] & 0xfff) >= 0x095) {
ret = true;
goto out;
@@ -787,7 +809,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
DRM_DEV_ERROR(&gpu->pdev->dev,
"a650 SQE ucode is too old. Have version %x need at least %x\n",
buf[0] & 0xfff, 0x095);
- } else if (adreno_is_a660(adreno_gpu)) {
+ } else if (!strcmp(sqe_name, "a660_sqe.fw")) {
ret = true;
} else {
DRM_DEV_ERROR(&gpu->pdev->dev,
@@ -897,7 +919,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
a6xx_set_hwcg(gpu, true);
/* VBIF/GBIF start*/
- if (adreno_is_a640(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
+ if (adreno_is_a640_family(adreno_gpu) ||
+ adreno_is_a650_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
@@ -935,13 +958,14 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
- if (adreno_is_a640(adreno_gpu) || adreno_is_a650_family(adreno_gpu))
+ if (adreno_is_a640_family(adreno_gpu) ||
+ adreno_is_a650_family(adreno_gpu))
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
else
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
- if (adreno_is_a660(adreno_gpu))
+ if (adreno_is_a660_family(adreno_gpu))
gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
/* Setting the mem pool size */
@@ -952,8 +976,10 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
*/
if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
- else if (adreno_is_a640(adreno_gpu))
+ else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
+ else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
else
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
@@ -990,13 +1016,15 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
/* Protect registers from the CP */
a6xx_set_cp_protect(gpu);
- if (adreno_is_a660(adreno_gpu)) {
+ if (adreno_is_a660_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
- /* Set dualQ + disable afull for A660 GPU but not for A635 */
- gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
}
+ /* Set dualQ + disable afull for A660 GPU */
+ if (adreno_is_a660(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
+
/* Enable expanded apriv for targets that support it */
if (gpu->hw_apriv) {
gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
@@ -1035,7 +1063,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) {
if (!a6xx_gpu->shadow_bo) {
- a6xx_gpu->shadow = msm_gem_kernel_new_locked(gpu->dev,
+ a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
sizeof(u32) * gpu->nr_rings,
MSM_BO_WC | MSM_BO_MAP_PRIV,
gpu->aspace, &a6xx_gpu->shadow_bo,
@@ -1383,13 +1411,13 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
- u32 cntl1_regval = 0;
+ u32 gpu_scid, cntl1_regval = 0;
if (IS_ERR(a6xx_gpu->llc_mmio))
return;
if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
- u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+ gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
gpu_scid &= 0x1f;
cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
@@ -1409,26 +1437,34 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
}
}
- if (cntl1_regval) {
+ if (!cntl1_regval)
+ return;
+
+ /*
+ * Program the slice IDs for the various GPU blocks and GPU MMU
+ * pagetables
+ */
+ if (!a6xx_gpu->have_mmu500) {
+ a6xx_llc_write(a6xx_gpu,
+ REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);
+
/*
- * Program the slice IDs for the various GPU blocks and GPU MMU
- * pagetables
+ * Program cacheability overrides to not allocate cache
+ * lines on a write miss
*/
- if (a6xx_gpu->have_mmu500)
- gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0),
- cntl1_regval);
- else {
- a6xx_llc_write(a6xx_gpu,
- REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);
-
- /*
- * Program cacheability overrides to not allocate cache
- * lines on a write miss
- */
- a6xx_llc_rmw(a6xx_gpu,
- REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
- }
+ a6xx_llc_rmw(a6xx_gpu,
+ REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
+ return;
}
+
+ gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
+
+ /* On A660, the SCID programming for UCHE traffic is done in
+ * A6XX_GBIF_SCACHE_CNTL0[14:10]
+ */
+ if (adreno_is_a660_family(adreno_gpu))
+ gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
+ (1 << 8), (gpu_scid << 10) | (1 << 8));
}
static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
@@ -1477,7 +1513,7 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
- msm_gpu_resume_devfreq(gpu);
+ msm_devfreq_resume(gpu);
a6xx_llc_activate(a6xx_gpu);
@@ -1494,7 +1530,7 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
a6xx_llc_deactivate(a6xx_gpu);
- devfreq_suspend_device(gpu->devfreq.devfreq);
+ msm_devfreq_suspend(gpu);
ret = a6xx_gmu_stop(a6xx_gpu);
if (ret)
@@ -1667,11 +1703,11 @@ static u32 a618_get_speed_bin(u32 fuse)
return UINT_MAX;
}
-static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse)
+static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse)
{
u32 val = UINT_MAX;
- if (revn == 618)
+ if (adreno_cmp_rev(ADRENO_REV(6, 1, 8, ANY_ID), rev))
val = a618_get_speed_bin(fuse);
if (val == UINT_MAX) {
@@ -1684,14 +1720,13 @@ static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse)
return (1 << val);
}
-static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
- u32 revn)
+static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev)
{
u32 supp_hw = UINT_MAX;
- u16 speedbin;
+ u32 speedbin;
int ret;
- ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin);
+ ret = nvmem_cell_read_variable_le_u32(dev, "speed_bin", &speedbin);
/*
* -ENOENT means that the platform doesn't support speedbin which is
* fine
@@ -1704,9 +1739,8 @@ static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
ret);
goto done;
}
- speedbin = le16_to_cpu(speedbin);
- supp_hw = fuse_to_supp_hw(dev, revn, speedbin);
+ supp_hw = fuse_to_supp_hw(dev, rev, speedbin);
done:
ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
@@ -1772,12 +1806,13 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
*/
info = adreno_info(config->rev);
- if (info && (info->revn == 650 || info->revn == 660))
+ if (info && (info->revn == 650 || info->revn == 660 ||
+ adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
adreno_gpu->base.hw_apriv = true;
a6xx_llc_slices_init(pdev, a6xx_gpu);
- ret = a6xx_set_supported_hw(&pdev->dev, a6xx_gpu, info->revn);
+ ret = a6xx_set_supported_hw(&pdev->dev, config->rev);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index ad4ea0ed5d99..e8f65cd8eca6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -112,7 +112,7 @@ static void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src,
static int a6xx_crashdumper_init(struct msm_gpu *gpu,
struct a6xx_crashdumper *dumper)
{
- dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
+ dumper->ptr = msm_gem_kernel_new(gpu->dev,
SZ_1M, MSM_BO_WC, gpu->aspace,
&dumper->bo, &dumper->iova);
@@ -961,7 +961,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
a6xx_get_clusters(gpu, a6xx_state, dumper);
a6xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
- msm_gem_kernel_put(dumper->bo, gpu->aspace, true);
+ msm_gem_kernel_put(dumper->bo, gpu->aspace);
}
if (snapshot_debugbus)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 919433732b43..d4c65bf0a1b7 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -382,6 +382,36 @@ static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
+static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /*
+ * Send a single "off" entry just to get things running
+ * TODO: bus scaling
+ */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x07;
+
+ msg->ddr_cmds_addrs[0] = 0x50004;
+ msg->ddr_cmds_addrs[1] = 0x50000;
+ msg->ddr_cmds_addrs[2] = 0x50088;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x5006c;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
@@ -428,10 +458,12 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
if (adreno_is_a618(adreno_gpu))
a618_build_bw_table(&msg);
- else if (adreno_is_a640(adreno_gpu))
+ else if (adreno_is_a640_family(adreno_gpu))
a640_build_bw_table(&msg);
else if (adreno_is_a650(adreno_gpu))
a650_build_bw_table(&msg);
+ else if (adreno_is_7c3(adreno_gpu))
+ adreno_7c3_build_bw_table(&msg);
else if (adreno_is_a660(adreno_gpu))
a660_build_bw_table(&msg);
else
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 6dad8015c9a1..2a6ce76656aa 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -8,8 +8,6 @@
#include "adreno_gpu.h"
-#define ANY_ID 0xff
-
bool hang_debug = false;
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600);
@@ -300,6 +298,30 @@ static const struct adreno_info gpulist[] = {
.init = a6xx_gpu_init,
.zapfw = "a660_zap.mdt",
.hwcg = a660_hwcg,
+ }, {
+ .rev = ADRENO_REV(6, 3, 5, ANY_ID),
+ .name = "Adreno 7c Gen 3",
+ .fw = {
+ [ADRENO_FW_SQE] = "a660_sqe.fw",
+ [ADRENO_FW_GMU] = "a660_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .hwcg = a660_hwcg,
+ }, {
+ .rev = ADRENO_REV(6, 8, 0, ANY_ID),
+ .revn = 680,
+ .name = "A680",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a640_gmu.bin",
+ },
+ .gmem = SZ_2M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a640_zap.mdt",
+ .hwcg = a640_hwcg,
},
};
@@ -325,6 +347,15 @@ static inline bool _rev_match(uint8_t entry, uint8_t id)
return (entry == ANY_ID) || (entry == id);
}
+bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2)
+{
+
+ return _rev_match(rev1.core, rev2.core) &&
+ _rev_match(rev1.major, rev2.major) &&
+ _rev_match(rev1.minor, rev2.minor) &&
+ _rev_match(rev1.patchid, rev2.patchid);
+}
+
const struct adreno_info *adreno_info(struct adreno_rev rev)
{
int i;
@@ -332,10 +363,7 @@ const struct adreno_info *adreno_info(struct adreno_rev rev)
/* identify gpu: */
for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
const struct adreno_info *info = &gpulist[i];
- if (_rev_match(info->rev.core, rev.core) &&
- _rev_match(info->rev.major, rev.major) &&
- _rev_match(info->rev.minor, rev.minor) &&
- _rev_match(info->rev.patchid, rev.patchid))
+ if (adreno_cmp_rev(info->rev, rev))
return info;
}
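
For illustration, a stand-alone sketch (plain C, not part of the diff above) of how the ANY_ID wildcard comparison introduced by adreno_cmp_rev() behaves; the struct and macro here simply mirror the definitions added to adreno_gpu.h further down:

#include <stdint.h>
#include <stdio.h>

#define ANY_ID 0xff

struct adreno_rev { uint8_t core, major, minor, patchid; };
#define ADRENO_REV(core, major, minor, patchid) \
	((struct adreno_rev){ core, major, minor, patchid })

static int rev_match(uint8_t entry, uint8_t id)
{
	return (entry == ANY_ID) || (entry == id);
}

static int cmp_rev(struct adreno_rev entry, struct adreno_rev id)
{
	return rev_match(entry.core, id.core) &&
	       rev_match(entry.major, id.major) &&
	       rev_match(entry.minor, id.minor) &&
	       rev_match(entry.patchid, id.patchid);
}

int main(void)
{
	/* The "Adreno 7c Gen 3" catalog entry uses a wildcard patch id,
	 * so any 6.3.5.x revision read from the hardware matches it. */
	struct adreno_rev entry = ADRENO_REV(6, 3, 5, ANY_ID);

	printf("%d\n", cmp_rev(entry, ADRENO_REV(6, 3, 5, 1)));  /* 1: match */
	printf("%d\n", cmp_rev(entry, ADRENO_REV(6, 5, 0, 0)));  /* 0: no match */
	return 0;
}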
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 9f5a30234b33..748665232d29 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -261,8 +261,8 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
return ret;
}
return -EINVAL;
- case MSM_PARAM_NR_RINGS:
- *value = gpu->nr_rings;
+ case MSM_PARAM_PRIORITIES:
+ *value = gpu->nr_rings * NR_SCHED_PRIORITIES;
return 0;
case MSM_PARAM_PP_PGTABLE:
*value = 0;
@@ -390,7 +390,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
struct drm_gem_object *bo;
void *ptr;
- ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
+ ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
if (IS_ERR(ptr))
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 8dbe0d157520..225c277a6223 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -42,6 +42,8 @@ struct adreno_rev {
uint8_t patchid;
};
+#define ANY_ID 0xff
+
#define ADRENO_REV(core, major, minor, patchid) \
((struct adreno_rev){ core, major, minor, patchid })
@@ -141,6 +143,8 @@ struct adreno_platform_config {
__ret; \
})
+bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2);
+
static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
{
return (gpu->revn < 300);
@@ -237,9 +241,9 @@ static inline int adreno_is_a630(struct adreno_gpu *gpu)
return gpu->revn == 630;
}
-static inline int adreno_is_a640(struct adreno_gpu *gpu)
+static inline int adreno_is_a640_family(struct adreno_gpu *gpu)
{
- return gpu->revn == 640;
+ return (gpu->revn == 640) || (gpu->revn == 680);
}
static inline int adreno_is_a650(struct adreno_gpu *gpu)
@@ -247,15 +251,27 @@ static inline int adreno_is_a650(struct adreno_gpu *gpu)
return gpu->revn == 650;
}
+static inline int adreno_is_7c3(struct adreno_gpu *gpu)
+{
+ /* The order of args is important here to handle ANY_ID correctly */
+ return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev);
+}
+
static inline int adreno_is_a660(struct adreno_gpu *gpu)
{
return gpu->revn == 660;
}
+static inline int adreno_is_a660_family(struct adreno_gpu *gpu)
+{
+ return adreno_is_a660(gpu) || adreno_is_7c3(gpu);
+}
+
/* check for a650, a660, or any derivatives */
static inline int adreno_is_a650_family(struct adreno_gpu *gpu)
{
- return gpu->revn == 650 || gpu->revn == 620 || gpu->revn == 660;
+ return gpu->revn == 650 || gpu->revn == 620 ||
+ adreno_is_a660_family(gpu);
}
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 9a5c70c87cc8..768012243b44 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -30,12 +30,6 @@
#include "dpu_core_perf.h"
#include "dpu_trace.h"
-#define DPU_DRM_BLEND_OP_NOT_DEFINED 0
-#define DPU_DRM_BLEND_OP_OPAQUE 1
-#define DPU_DRM_BLEND_OP_PREMULTIPLIED 2
-#define DPU_DRM_BLEND_OP_COVERAGE 3
-#define DPU_DRM_BLEND_OP_MAX 4
-
/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1
@@ -146,20 +140,43 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
{
struct dpu_hw_mixer *lm = mixer->hw_lm;
uint32_t blend_op;
+ uint32_t fg_alpha, bg_alpha;
- /* default to opaque blending */
- blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
- DPU_BLEND_BG_ALPHA_BG_CONST;
+ fg_alpha = pstate->base.alpha >> 8;
+ bg_alpha = 0xff - fg_alpha;
- if (format->alpha_enable) {
+ /* default to opaque blending */
+ if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
+ !format->alpha_enable) {
+ blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
+ DPU_BLEND_BG_ALPHA_BG_CONST;
+ } else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
+ blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
+ DPU_BLEND_BG_ALPHA_FG_PIXEL;
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |= DPU_BLEND_BG_MOD_ALPHA |
+ DPU_BLEND_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= DPU_BLEND_BG_INV_ALPHA;
+ }
+ } else {
/* coverage blending */
blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
- DPU_BLEND_BG_ALPHA_FG_PIXEL |
- DPU_BLEND_BG_INV_ALPHA;
+ DPU_BLEND_BG_ALPHA_FG_PIXEL;
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |= DPU_BLEND_FG_MOD_ALPHA |
+ DPU_BLEND_FG_INV_MOD_ALPHA |
+ DPU_BLEND_BG_MOD_ALPHA |
+ DPU_BLEND_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= DPU_BLEND_BG_INV_ALPHA;
+ }
}
lm->ops.setup_blend_config(lm, pstate->stage,
- 0xFF, 0, blend_op);
+ fg_alpha, bg_alpha, blend_op);
DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
&format->base.pixel_format, format->alpha_enable, blend_op);
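
As a quick sanity check of the alpha arithmetic in the _dpu_crtc_setup_blend_cfg() hunk above, a minimal user-space C sketch (hypothetical values, not kernel code): the per-plane DRM alpha property is 16-bit, the mixer constants are 8-bit, and the background constant defaults to the complement of the foreground.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t plane_alpha = 0x8000;        /* ~50% alpha set by userspace */
	uint8_t fg_alpha = plane_alpha >> 8;  /* 0x80, as in pstate->base.alpha >> 8 */
	uint8_t bg_alpha = 0xff - fg_alpha;   /* 0x7f, default background constant */

	printf("fg=0x%02x bg=0x%02x\n", fg_alpha, bg_alpha);

	/* In premultiplied or coverage mode with fg_alpha != 0xff, the hunk
	 * above additionally sets bg_alpha = fg_alpha and enables the
	 * DPU_BLEND_*_MOD_ALPHA ops instead of the plain inverse-alpha path. */
	return 0;
}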
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 1c04b7cce43e..0e9d3fa1544b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -274,20 +274,20 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
/* return EWOULDBLOCK since we know the wait isn't necessary */
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
- DRM_ERROR("encoder is disabled id=%u, intr=%d, irq=%d",
+ DRM_ERROR("encoder is disabled id=%u, intr=%d, irq=%d\n",
DRMID(phys_enc->parent), intr_idx,
irq->irq_idx);
return -EWOULDBLOCK;
}
if (irq->irq_idx < 0) {
- DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, irq=%s",
+ DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, irq=%s\n",
DRMID(phys_enc->parent), intr_idx,
irq->name);
return 0;
}
- DRM_DEBUG_KMS("id=%u, intr=%d, irq=%d, pp=%d, pending_cnt=%d",
+ DRM_DEBUG_KMS("id=%u, intr=%d, irq=%d, pp=%d, pending_cnt=%d\n",
DRMID(phys_enc->parent), intr_idx,
irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
@@ -303,8 +303,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
if (irq_status) {
unsigned long flags;
- DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
- "irq=%d, pp=%d, atomic_cnt=%d",
+ DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n",
DRMID(phys_enc->parent), intr_idx,
irq->irq_idx,
phys_enc->hw_pp->idx - PINGPONG_0,
@@ -315,8 +314,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
ret = 0;
} else {
ret = -ETIMEDOUT;
- DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
- "irq=%d, pp=%d, atomic_cnt=%d",
+ DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n",
DRMID(phys_enc->parent), intr_idx,
irq->irq_idx,
phys_enc->hw_pp->idx - PINGPONG_0,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 704dace895cb..b131fd376192 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -974,6 +974,7 @@ static const struct dpu_perf_cfg sdm845_perf_data = {
.amortizable_threshold = 25,
.min_prefill_lines = 24,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sdm845_qos_linear),
.entries = sdm845_qos_linear
@@ -1001,6 +1002,7 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
.min_dram_ib = 1600000,
.min_prefill_lines = 24,
.danger_lut_tbl = {0xff, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc7180_qos_linear),
.entries = sc7180_qos_linear
@@ -1028,6 +1030,7 @@ static const struct dpu_perf_cfg sm8150_perf_data = {
.min_dram_ib = 800000,
.min_prefill_lines = 24,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff8, 0xf000, 0xffff},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sm8150_qos_linear),
.entries = sm8150_qos_linear
@@ -1056,6 +1059,7 @@ static const struct dpu_perf_cfg sm8250_perf_data = {
.min_dram_ib = 800000,
.min_prefill_lines = 35,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc7180_qos_linear),
.entries = sc7180_qos_linear
@@ -1084,6 +1088,7 @@ static const struct dpu_perf_cfg sc7280_perf_data = {
.min_dram_ib = 1600000,
.min_prefill_lines = 24,
.danger_lut_tbl = {0xffff, 0xffff, 0x0},
+ .safe_lut_tbl = {0xff00, 0xff00, 0xffff},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
.entries = sc7180_qos_macrotile
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index f8a74f6cdc4c..64740ddb983e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -345,10 +345,12 @@ static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
int i;
for (i = 0; i < ctx->mixer_count; i++) {
- DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
- DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
- DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
- DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
+ enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
+
+ DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
}
DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 4fd913522931..ae48f41821cf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -471,30 +471,68 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
struct dpu_kms *dpu_kms)
{
struct drm_encoder *encoder = NULL;
+ struct msm_display_info info;
int i, rc = 0;
if (!(priv->dsi[0] || priv->dsi[1]))
return rc;
- /*TODO: Support two independent DSI connectors */
- encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
- if (IS_ERR(encoder)) {
- DPU_ERROR("encoder init failed for dsi display\n");
- return PTR_ERR(encoder);
- }
-
- priv->encoders[priv->num_encoders++] = encoder;
-
+ /*
+	 * We support the following configurations:
+ * - Single DSI host (dsi0 or dsi1)
+ * - Two independent DSI hosts
+ * - Bonded DSI0 and DSI1 hosts
+ *
+ * TODO: Support swapping DSI0 and DSI1 in the bonded setup.
+ */
for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ int other = (i + 1) % 2;
+
if (!priv->dsi[i])
continue;
+ if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
+ !msm_dsi_is_master_dsi(priv->dsi[i]))
+ continue;
+
+ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
+ if (IS_ERR(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return PTR_ERR(encoder);
+ }
+
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ memset(&info, 0, sizeof(info));
+ info.intf_type = encoder->encoder_type;
+
rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
i, rc);
break;
}
+
+ info.h_tile_instance[info.num_of_h_tiles++] = i;
+ info.capabilities = msm_dsi_is_cmd_mode(priv->dsi[i]) ?
+ MSM_DISPLAY_CAP_CMD_MODE :
+ MSM_DISPLAY_CAP_VID_MODE;
+
+ if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
+ rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+ other, rc);
+ break;
+ }
+
+ info.h_tile_instance[info.num_of_h_tiles++] = other;
+ }
+
+ rc = dpu_encoder_setup(dev, encoder, &info);
+ if (rc)
+ DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+ encoder->base.id, rc);
}
return rc;
@@ -505,6 +543,7 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
struct dpu_kms *dpu_kms)
{
struct drm_encoder *encoder = NULL;
+ struct msm_display_info info;
int rc = 0;
if (!priv->dp)
@@ -516,6 +555,7 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
return PTR_ERR(encoder);
}
+ memset(&info, 0, sizeof(info));
rc = msm_dp_modeset_init(priv->dp, dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
@@ -524,6 +564,14 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
}
priv->encoders[priv->num_encoders++] = encoder;
+
+ info.num_of_h_tiles = 1;
+ info.capabilities = MSM_DISPLAY_CAP_VID_MODE;
+ info.intf_type = encoder->encoder_type;
+ rc = dpu_encoder_setup(dev, encoder, &info);
+ if (rc)
+ DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+ encoder->base.id, rc);
return rc;
}
@@ -726,41 +774,6 @@ static void dpu_kms_destroy(struct msm_kms *kms)
msm_kms_destroy(&dpu_kms->base);
}
-static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
- struct drm_encoder *encoder,
- bool cmd_mode)
-{
- struct msm_display_info info;
- struct msm_drm_private *priv = encoder->dev->dev_private;
- int i, rc = 0;
-
- memset(&info, 0, sizeof(info));
-
- info.intf_type = encoder->encoder_type;
- info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
- MSM_DISPLAY_CAP_VID_MODE;
-
- switch (info.intf_type) {
- case DRM_MODE_ENCODER_DSI:
- /* TODO: No support for DSI swap */
- for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
- if (priv->dsi[i]) {
- info.h_tile_instance[info.num_of_h_tiles] = i;
- info.num_of_h_tiles++;
- }
- }
- break;
- case DRM_MODE_ENCODER_TMDS:
- info.num_of_h_tiles = 1;
- break;
- }
-
- rc = dpu_encoder_setup(encoder->dev, encoder, &info);
- if (rc)
- DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
- encoder->base.id, rc);
-}
-
static irqreturn_t dpu_irq(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@@ -863,7 +876,6 @@ static const struct msm_kms_funcs kms_funcs = {
.get_format = dpu_get_msm_format,
.round_pixclk = dpu_kms_round_pixclk,
.destroy = dpu_kms_destroy,
- .set_encoder_mode = _dpu_kms_set_encoder_mode,
.snapshot = dpu_kms_mdp_snapshot,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = dpu_kms_debugfs_init,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index ec4a6f04394a..c989621209aa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1339,9 +1339,7 @@ static void dpu_plane_reset(struct drm_plane *plane)
return;
}
- pstate->base.plane = plane;
-
- plane->state = &pstate->base;
+ __drm_atomic_helper_plane_reset(plane, &pstate->base);
}
#ifdef CONFIG_DEBUG_FS
@@ -1647,6 +1645,12 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
if (ret)
DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
+ drm_plane_create_alpha_property(plane);
+ drm_plane_create_blend_mode_property(plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
drm_plane_create_rotation_property(plane,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 4a5b518288b0..cdcaf470f148 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -19,30 +19,12 @@ static int mdp4_hw_init(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp4_kms->dev;
- uint32_t version, major, minor, dmap_cfg, vg_cfg;
+ u32 dmap_cfg, vg_cfg;
unsigned long clk;
int ret = 0;
pm_runtime_get_sync(dev->dev);
- mdp4_enable(mdp4_kms);
- version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
- mdp4_disable(mdp4_kms);
-
- major = FIELD(version, MDP4_VERSION_MAJOR);
- minor = FIELD(version, MDP4_VERSION_MINOR);
-
- DBG("found MDP4 version v%d.%d", major, minor);
-
- if (major != 4) {
- DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
- major, minor);
- ret = -ENXIO;
- goto out;
- }
-
- mdp4_kms->rev = minor;
-
if (mdp4_kms->rev > 1) {
mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
@@ -88,7 +70,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
if (mdp4_kms->rev > 1)
mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
-out:
pm_runtime_put_sync(dev->dev);
return ret;
@@ -108,13 +89,6 @@ static void mdp4_disable_commit(struct msm_kms *kms)
static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
- int i;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
-
- /* see 119ecb7fd */
- for_each_new_crtc_in_state(state, crtc, crtc_state, i)
- drm_crtc_vblank_get(crtc);
}
static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
@@ -133,12 +107,6 @@ static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
- struct drm_crtc *crtc;
-
- /* see 119ecb7fd */
- for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
- drm_crtc_vblank_put(crtc);
}
static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
@@ -411,14 +379,32 @@ fail:
return ret;
}
+static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
+ u32 *major, u32 *minor)
+{
+ struct drm_device *dev = mdp4_kms->dev;
+ u32 version;
+
+ mdp4_enable(mdp4_kms);
+ version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
+ mdp4_disable(mdp4_kms);
+
+ *major = FIELD(version, MDP4_VERSION_MAJOR);
+ *minor = FIELD(version, MDP4_VERSION_MINOR);
+
+ DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d", *major, *minor);
+}
+
struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct mdp4_platform_config *config = mdp4_get_config(pdev);
+ struct msm_drm_private *priv = dev->dev_private;
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
struct msm_gem_address_space *aspace;
int irq, ret;
+ u32 major, minor;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) {
@@ -433,7 +419,8 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- kms = &mdp4_kms->base.base;
+ priv->kms = &mdp4_kms->base.base;
+ kms = priv->kms;
mdp4_kms->dev = dev;
@@ -479,15 +466,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (IS_ERR(mdp4_kms->pclk))
mdp4_kms->pclk = NULL;
- if (mdp4_kms->rev >= 2) {
- mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
- if (IS_ERR(mdp4_kms->lut_clk)) {
- DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
- ret = PTR_ERR(mdp4_kms->lut_clk);
- goto fail;
- }
- }
-
mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(mdp4_kms->axi_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
@@ -496,8 +474,27 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
}
clk_set_rate(mdp4_kms->clk, config->max_clk);
- if (mdp4_kms->lut_clk)
+
+ read_mdp_hw_revision(mdp4_kms, &major, &minor);
+
+ if (major != 4) {
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ mdp4_kms->rev = minor;
+
+ if (mdp4_kms->rev >= 2) {
+ mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
+ if (IS_ERR(mdp4_kms->lut_clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
+ ret = PTR_ERR(mdp4_kms->lut_clk);
+ goto fail;
+ }
clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+ }
pm_runtime_enable(dev->dev);
mdp4_kms->rpm_enabled = true;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index 81b0c7cf954e..1220f2b20e05 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -737,7 +737,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
}
/*
- * In Dual DSI case, CTL0 and CTL1 are always assigned to two DSI
+ * In bonded DSI case, CTL0 and CTL1 are always assigned to two DSI
* interfaces to support single FLUSH feature (Flush CTL0 and CTL1 when
* only write into CTL0's FLUSH register) to keep two DSI pipes in sync.
* Single FLUSH is supported from hw rev v3.0.
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 15aed45022bc..b3b42672b2d4 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -209,13 +209,6 @@ static int mdp5_set_split_display(struct msm_kms *kms,
slave_encoder);
}
-static void mdp5_set_encoder_mode(struct msm_kms *kms,
- struct drm_encoder *encoder,
- bool cmd_mode)
-{
- mdp5_encoder_set_intf_mode(encoder, cmd_mode);
-}
-
static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -287,7 +280,6 @@ static const struct mdp_kms_funcs kms_funcs = {
.get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk,
.set_split_display = mdp5_set_split_display,
- .set_encoder_mode = mdp5_set_encoder_mode,
.destroy = mdp5_kms_destroy,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = mdp5_kms_debugfs_init,
@@ -448,6 +440,9 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
}
ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
+ if (!ret)
+ mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id]));
+
break;
}
default:
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
index c92a9508c8d3..c22b07f68670 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
@@ -16,7 +16,6 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
-#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 4a3293b590b0..eb40d8413bca 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -353,6 +353,9 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
if (!(aux->retry_cnt % MAX_AUX_RETRIES))
dp_catalog_aux_update_cfg(aux->catalog);
}
+ /* reset aux if link is in connected state */
+ if (dp_catalog_link_is_connected(aux->catalog))
+ dp_catalog_aux_reset(aux->catalog);
} else {
aux->retry_cnt = 0;
switch (aux->aux_error_num) {
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index c0423e76eed7..cc2bb8295329 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -372,6 +372,7 @@ void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog,
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
+ DRM_DEBUG_DP("enable=%d\n", enable);
if (enable) {
/*
* To make sure link reg writes happens before other operation,
@@ -580,6 +581,7 @@ void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
config = (en ? config | intr_mask : config & ~intr_mask);
+ DRM_DEBUG_DP("intr_mask=%#x config=%#x\n", intr_mask, config);
dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
config & DP_DP_HPD_INT_MASK);
}
@@ -610,6 +612,7 @@ u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog)
u32 status;
status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
+ DRM_DEBUG_DP("aux status: %#x\n", status);
status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
@@ -685,6 +688,7 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
/* Make sure to clear the current pattern before starting a new one */
dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
+ DRM_DEBUG_DP("pattern: %#x\n", pattern);
switch (pattern) {
case DP_PHY_TEST_PATTERN_D10_2:
dp_write_link(catalog, REG_DP_STATE_CTRL,
@@ -745,7 +749,7 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
break;
default:
- DRM_DEBUG_DP("No valid test pattern requested:0x%x\n", pattern);
+ DRM_DEBUG_DP("No valid test pattern requested: %#x\n", pattern);
break;
}
}
@@ -929,7 +933,7 @@ void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog)
select = dp_catalog->audio_data;
acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
- DRM_DEBUG_DP("select = 0x%x, acr_ctrl = 0x%x\n", select, acr_ctrl);
+ DRM_DEBUG_DP("select: %#x, acr_ctrl: %#x\n", select, acr_ctrl);
dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index eaddfd739885..62e75dc8afc6 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -81,13 +81,6 @@ struct dp_ctrl_private {
struct completion video_comp;
};
-struct dp_cr_status {
- u8 lane_0_1;
- u8 lane_2_3;
-};
-
-#define DP_LANE0_1_CR_DONE 0x11
-
static int dp_aux_link_configure(struct drm_dp_aux *aux,
struct dp_link_info *link)
{
@@ -120,7 +113,7 @@ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES))
pr_warn("PUSH_IDLE pattern timedout\n");
- pr_debug("mainlink off done\n");
+ DRM_DEBUG_DP("mainlink off done\n");
}
static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
@@ -1011,6 +1004,8 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
u32 voltage_swing_level = link->phy_params.v_level;
u32 pre_emphasis_level = link->phy_params.p_level;
+ DRM_DEBUG_DP("voltage level: %d emphasis level: %d\n", voltage_swing_level,
+ pre_emphasis_level);
ret = dp_catalog_ctrl_update_vx_px(ctrl->catalog,
voltage_swing_level, pre_emphasis_level);
@@ -1078,7 +1073,7 @@ static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
}
static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
- struct dp_cr_status *cr, int *training_step)
+ int *training_step)
{
int tries, old_v_level, ret = 0;
u8 link_status[DP_LINK_STATUS_SIZE];
@@ -1107,9 +1102,6 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
if (ret)
return ret;
- cr->lane_0_1 = link_status[0];
- cr->lane_2_3 = link_status[1];
-
if (drm_dp_clock_recovery_ok(link_status,
ctrl->link->link_params.num_lanes)) {
return 0;
@@ -1186,7 +1178,7 @@ static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
}
static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
- struct dp_cr_status *cr, int *training_step)
+ int *training_step)
{
int tries = 0, ret = 0;
char pattern;
@@ -1202,10 +1194,6 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
else
pattern = DP_TRAINING_PATTERN_2;
- ret = dp_ctrl_update_vx_px(ctrl);
- if (ret)
- return ret;
-
ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, pattern);
if (ret)
return ret;
@@ -1218,8 +1206,6 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
ret = dp_ctrl_read_link_status(ctrl, link_status);
if (ret)
return ret;
- cr->lane_0_1 = link_status[0];
- cr->lane_2_3 = link_status[1];
if (drm_dp_channel_eq_ok(link_status,
ctrl->link->link_params.num_lanes)) {
@@ -1239,7 +1225,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl);
static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
- struct dp_cr_status *cr, int *training_step)
+ int *training_step)
{
int ret = 0;
u8 encoding = DP_SET_ANSI_8B10B;
@@ -1255,7 +1241,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
&encoding, 1);
- ret = dp_ctrl_link_train_1(ctrl, cr, training_step);
+ ret = dp_ctrl_link_train_1(ctrl, training_step);
if (ret) {
DRM_ERROR("link training #1 failed. ret=%d\n", ret);
goto end;
@@ -1264,7 +1250,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
/* print success info as this is a result of user initiated action */
DRM_DEBUG_DP("link training #1 successful\n");
- ret = dp_ctrl_link_train_2(ctrl, cr, training_step);
+ ret = dp_ctrl_link_train_2(ctrl, training_step);
if (ret) {
DRM_ERROR("link training #2 failed. ret=%d\n", ret);
goto end;
@@ -1280,7 +1266,7 @@ end:
}
static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
- struct dp_cr_status *cr, int *training_step)
+ int *training_step)
{
int ret = 0;
@@ -1295,7 +1281,7 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
* a link training pattern, we have to first do soft reset.
*/
- ret = dp_ctrl_link_train(ctrl, cr, training_step);
+ ret = dp_ctrl_link_train(ctrl, training_step);
return ret;
}
@@ -1382,6 +1368,7 @@ int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset)
if (reset)
dp_catalog_ctrl_reset(ctrl->catalog);
+ DRM_DEBUG_DP("flip=%d\n", flip);
dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_init(phy);
dp_catalog_ctrl_enable_irq(ctrl->catalog, true);
@@ -1492,14 +1479,16 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
{
int ret = 0;
- struct dp_cr_status cr;
int training_step = DP_TRAINING_NONE;
dp_ctrl_push_idle(&ctrl->dp_ctrl);
+ ctrl->link->phy_params.p_level = 0;
+ ctrl->link->phy_params.v_level = 0;
+
ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
- ret = dp_ctrl_setup_main_link(ctrl, &cr, &training_step);
+ ret = dp_ctrl_setup_main_link(ctrl, &training_step);
if (ret)
goto end;
@@ -1630,6 +1619,35 @@ void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
}
}
+static bool dp_ctrl_clock_recovery_any_ok(
+ const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ int reduced_cnt;
+
+ if (lane_count <= 1)
+ return false;
+
+ /*
+ * only check the lanes that remain after reducing the lane count:
+ * lane_count = 4: only 2 lanes are of interest
+ * lane_count = 2: only 1 lane is of interest
+ */
+ reduced_cnt = lane_count >> 1;
+
+ return drm_dp_clock_recovery_ok(link_status, reduced_cnt);
+}
+
+static bool dp_ctrl_channel_eq_ok(struct dp_ctrl_private *ctrl)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ int num_lanes = ctrl->link->link_params.num_lanes;
+
+ dp_ctrl_read_link_status(ctrl, link_status);
+
+ return drm_dp_channel_eq_ok(link_status, num_lanes);
+}
+
int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
{
int rc = 0;
@@ -1637,7 +1655,7 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
u32 rate = 0;
int link_train_max_retries = 5;
u32 const phy_cts_pixel_clk_khz = 148500;
- struct dp_cr_status cr;
+ u8 link_status[DP_LINK_STATUS_SIZE];
unsigned int training_step;
if (!dp_ctrl)
@@ -1664,6 +1682,9 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
ctrl->link->link_params.rate,
ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
+ ctrl->link->phy_params.p_level = 0;
+ ctrl->link->phy_params.v_level = 0;
+
rc = dp_ctrl_enable_mainlink_clocks(ctrl);
if (rc)
return rc;
@@ -1677,19 +1698,21 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
}
training_step = DP_TRAINING_NONE;
- rc = dp_ctrl_setup_main_link(ctrl, &cr, &training_step);
+ rc = dp_ctrl_setup_main_link(ctrl, &training_step);
if (rc == 0) {
/* training completed successfully */
break;
} else if (training_step == DP_TRAINING_1) {
/* link train_1 failed */
- if (!dp_catalog_link_is_connected(ctrl->catalog)) {
+ if (!dp_catalog_link_is_connected(ctrl->catalog))
break;
- }
+
+ dp_ctrl_read_link_status(ctrl, link_status);
rc = dp_ctrl_link_rate_down_shift(ctrl);
if (rc < 0) { /* already in RBR = 1.6G */
- if (cr.lane_0_1 & DP_LANE0_1_CR_DONE) {
+ if (dp_ctrl_clock_recovery_any_ok(link_status,
+ ctrl->link->link_params.num_lanes)) {
/*
* some lanes are ready,
* reduce lane number
@@ -1705,12 +1728,18 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
}
}
} else if (training_step == DP_TRAINING_2) {
- /* link train_2 failed, lower lane rate */
- if (!dp_catalog_link_is_connected(ctrl->catalog)) {
+ /* link train_2 failed */
+ if (!dp_catalog_link_is_connected(ctrl->catalog))
break;
- }
- rc = dp_ctrl_link_lane_down_shift(ctrl);
+ dp_ctrl_read_link_status(ctrl, link_status);
+
+ if (!drm_dp_clock_recovery_ok(link_status,
+ ctrl->link->link_params.num_lanes))
+ rc = dp_ctrl_link_rate_down_shift(ctrl);
+ else
+ rc = dp_ctrl_link_lane_down_shift(ctrl);
+
if (rc < 0) {
/* end with failure */
break; /* lane == 1 already */
@@ -1721,17 +1750,19 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
return rc;
- /* stop txing train pattern */
- dp_ctrl_clear_training_pattern(ctrl);
+ if (rc == 0) { /* link train successfully */
+ /*
+ * do not stop train pattern here
+ * stop link training at on_stream
+ * to pass compliance test
+ */
+ } else {
+ /*
+ * link training failed
+ * end txing train pattern here
+ */
+ dp_ctrl_clear_training_pattern(ctrl);
- /*
- * keep transmitting idle pattern until video ready
- * to avoid main link from loss of sync
- */
- if (rc == 0) /* link train successfully */
- dp_ctrl_push_idle(dp_ctrl);
- else {
- /* link training failed */
dp_ctrl_deinitialize_mainlink(ctrl);
rc = -ECONNRESET;
}
@@ -1739,9 +1770,15 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
return rc;
}
+static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
+{
+ int training_step = DP_TRAINING_NONE;
+
+ return dp_ctrl_setup_main_link(ctrl, &training_step);
+}
+
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
{
- u32 rate = 0;
int ret = 0;
bool mainlink_ready = false;
struct dp_ctrl_private *ctrl;
@@ -1751,10 +1788,6 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
- rate = ctrl->panel->link_info.rate;
-
- ctrl->link->link_params.rate = rate;
- ctrl->link->link_params.num_lanes = ctrl->panel->link_info.num_lanes;
ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n",
@@ -1769,6 +1802,12 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
}
}
+ if (!dp_ctrl_channel_eq_ok(ctrl))
+ dp_ctrl_link_retrain(ctrl);
+
+ /* stop txing train pattern to end link training */
+ dp_ctrl_clear_training_pattern(ctrl);
+
ret = dp_ctrl_enable_stream_clocks(ctrl);
if (ret) {
DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 867388a399ad..fbe4c2cd52a3 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -55,7 +55,6 @@ enum {
EV_HPD_INIT_SETUP,
EV_HPD_PLUG_INT,
EV_IRQ_HPD_INT,
- EV_HPD_REPLUG_INT,
EV_HPD_UNPLUG_INT,
EV_USER_NOTIFICATION,
EV_CONNECT_PENDING_TIMEOUT,
@@ -102,8 +101,6 @@ struct dp_display_private {
struct dp_display_mode dp_mode;
struct msm_dp dp_display;
- bool encoder_mode_set;
-
/* wait for audio signaling */
struct completion audio_comp;
@@ -268,6 +265,8 @@ static bool dp_display_is_ds_bridge(struct dp_panel *panel)
static bool dp_display_is_sink_count_zero(struct dp_display_private *dp)
{
+ DRM_DEBUG_DP("present=%#x sink_count=%d\n", dp->panel->dpcd[DP_DOWNSTREAMPORT_PRESENT],
+ dp->link->sink_count);
return dp_display_is_ds_bridge(dp->panel) &&
(dp->link->sink_count == 0);
}
@@ -284,20 +283,6 @@ static void dp_display_send_hpd_event(struct msm_dp *dp_display)
}
-static void dp_display_set_encoder_mode(struct dp_display_private *dp)
-{
- struct msm_drm_private *priv = dp->dp_display.drm_dev->dev_private;
- struct msm_kms *kms = priv->kms;
-
- if (!dp->encoder_mode_set && dp->dp_display.encoder &&
- kms->funcs->set_encoder_mode) {
- kms->funcs->set_encoder_mode(kms,
- dp->dp_display.encoder, false);
-
- dp->encoder_mode_set = true;
- }
-}
-
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
bool hpd)
{
@@ -313,6 +298,7 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
dp->dp_display.is_connected = hpd;
+ DRM_DEBUG_DP("hpd=%d\n", hpd);
dp_display_send_hpd_event(&dp->dp_display);
return 0;
@@ -362,6 +348,7 @@ static void dp_display_host_init(struct dp_display_private *dp, int reset)
{
bool flip = false;
+ DRM_DEBUG_DP("core_initialized=%d\n", dp->core_initialized);
if (dp->core_initialized) {
DRM_DEBUG_DP("DP core already initialized\n");
return;
@@ -370,8 +357,6 @@ static void dp_display_host_init(struct dp_display_private *dp, int reset)
if (dp->usbpd->orientation == ORIENTATION_CC2)
flip = true;
- dp_display_set_encoder_mode(dp);
-
dp_power_init(dp->power, flip);
dp_ctrl_host_init(dp->ctrl, flip, reset);
dp_aux_init(dp->aux);
@@ -466,8 +451,10 @@ static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
{
u32 sink_request = dp->link->sink_request;
+ DRM_DEBUG_DP("%d\n", sink_request);
if (dp->hpd_state == ST_DISCONNECTED) {
if (sink_request & DP_LINK_STATUS_UPDATED) {
+ DRM_DEBUG_DP("Disconnected sink_request: %d\n", sink_request);
DRM_ERROR("Disconnected, no DP_LINK_STATUS_UPDATED\n");
return -EINVAL;
}
@@ -499,6 +486,7 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
rc = dp_link_process_request(dp->link);
if (!rc) {
sink_request = dp->link->sink_request;
+ DRM_DEBUG_DP("hpd_state=%d sink_request=%d\n", dp->hpd_state, sink_request);
if (sink_request & DS_PORT_STATUS_CHANGED)
rc = dp_display_handle_port_ststus_changed(dp);
else
@@ -521,6 +509,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
+ DRM_DEBUG_DP("hpd_state=%d\n", state);
if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
mutex_unlock(&dp->event_mutex);
return 0;
@@ -656,6 +645,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
/* start sentinel checking in case of missing uevent */
dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
+ DRM_DEBUG_DP("hpd_state=%d\n", state);
/* signal the disconnect event early to ensure proper teardown */
dp_display_handle_plugged_change(g_dp_display, false);
@@ -714,6 +704,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
if (ret == -ECONNRESET) { /* cable unplugged */
dp->core_initialized = false;
}
+ DRM_DEBUG_DP("hpd_state=%d\n", state);
mutex_unlock(&dp->event_mutex);
@@ -855,6 +846,7 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data)
dp_display = g_dp_display;
+ DRM_DEBUG_DP("sink_count=%d\n", dp->link->sink_count);
if (dp_display->power_on) {
DRM_DEBUG_DP("Link already setup, return\n");
return 0;
@@ -916,6 +908,7 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
dp_display->power_on = false;
+ DRM_DEBUG_DP("sink count: %d\n", dp->link->sink_count);
return 0;
}
@@ -1015,10 +1008,8 @@ int dp_display_get_test_bpp(struct msm_dp *dp)
void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
{
struct dp_display_private *dp_display;
- struct drm_device *drm;
dp_display = container_of(dp, struct dp_display_private, dp_display);
- drm = dp->drm_dev;
/*
* if we are reading registers we need the link clocks to be on
@@ -1119,9 +1110,6 @@ static int hpd_event_thread(void *data)
case EV_IRQ_HPD_INT:
dp_irq_hpd_handle(dp_priv, todo->data);
break;
- case EV_HPD_REPLUG_INT:
- /* do nothing */
- break;
case EV_USER_NOTIFICATION:
dp_display_send_hpd_notification(dp_priv,
todo->data);
@@ -1163,12 +1151,11 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog);
+ DRM_DEBUG_DP("hpd isr status=%#x\n", hpd_isr_status);
if (hpd_isr_status & 0x0F) {
/* hpd related interrupts */
- if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK ||
- hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
+ if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK)
dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
- }
if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
/* stop sentinel connect pending checking */
@@ -1176,8 +1163,10 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
}
- if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK)
- dp_add_event(dp, EV_HPD_REPLUG_INT, 0, 0);
+ if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
+ dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
+ dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3);
+ }
if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK)
dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
@@ -1286,12 +1275,15 @@ static int dp_pm_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct msm_dp *dp_display = platform_get_drvdata(pdev);
struct dp_display_private *dp;
- u32 status;
+ int sink_count = 0;
dp = container_of(dp_display, struct dp_display_private, dp_display);
mutex_lock(&dp->event_mutex);
+ DRM_DEBUG_DP("Before, core_inited=%d power_on=%d\n",
+ dp->core_initialized, dp_display->power_on);
+
/* start from disconnected state */
dp->hpd_state = ST_DISCONNECTED;
@@ -1300,14 +1292,25 @@ static int dp_pm_resume(struct device *dev)
dp_catalog_ctrl_hpd_config(dp->catalog);
- status = dp_catalog_link_is_connected(dp->catalog);
+ /*
+ * set sink to normal operation mode -- D0
+ * before dpcd read
+ */
+ dp_link_psm_config(dp->link, &dp->panel->link_info, false);
+
+ if (dp_catalog_link_is_connected(dp->catalog)) {
+ sink_count = drm_dp_read_sink_count(dp->aux);
+ if (sink_count < 0)
+ sink_count = 0;
+ }
+ dp->link->sink_count = sink_count;
/*
* can not declare the display connected unless the
* HDMI cable is plugged in and the sink_count of the
* dongle becomes 1
*/
- if (status && dp->link->sink_count)
+ if (dp->link->sink_count)
dp->dp_display.is_connected = true;
else
dp->dp_display.is_connected = false;
@@ -1315,6 +1318,9 @@ static int dp_pm_resume(struct device *dev)
dp_display_handle_plugged_change(g_dp_display,
dp->dp_display.is_connected);
+ DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n",
+ dp->link->sink_count, dp->dp_display.is_connected,
+ dp->core_initialized, dp_display->power_on);
mutex_unlock(&dp->event_mutex);
@@ -1331,6 +1337,9 @@ static int dp_pm_suspend(struct device *dev)
mutex_lock(&dp->event_mutex);
+ DRM_DEBUG_DP("Before, core_inited=%d power_on=%d\n",
+ dp->core_initialized, dp_display->power_on);
+
if (dp->core_initialized == true) {
/* mainlink enabled */
if (dp_power_clk_status(dp->power, DP_CTRL_PM))
@@ -1344,6 +1353,9 @@ static int dp_pm_suspend(struct device *dev)
/* host_init will be called at pm_resume */
dp->core_initialized = false;
+ DRM_DEBUG_DP("After, core_inited=%d power_on=%d\n",
+ dp->core_initialized, dp_display->power_on);
+
mutex_unlock(&dp->event_mutex);
return 0;
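[Editorial sketch] dp_pm_resume() now re-derives the connection state instead of trusting a cached HPD status: it puts the sink back into D0, reads the sink count over AUX only if the HPD line reports a cable, and declares the display connected only when that count is non-zero. A hedged, self-contained sketch of that decision is below; hpd_connected() and read_sink_count() are hypothetical stand-ins for dp_catalog_link_is_connected() and drm_dp_read_sink_count().

#include <stdbool.h>
#include <stdio.h>

static bool hpd_connected(void)   { return true; }  /* pretend the cable is present */
static int  read_sink_count(void) { return 1; }     /* pretend one sink behind the dongle */

int main(void)
{
	int sink_count = 0;

	if (hpd_connected()) {
		sink_count = read_sink_count();
		if (sink_count < 0)
			sink_count = 0;   /* an AUX failure counts as no sink */
	}

	/* connected only when the cable is up and a downstream sink exists */
	printf("is_connected=%d\n", sink_count > 0);
	return 0;
}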
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 1195044a7a3b..a5bdfc5029de 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -1027,43 +1027,29 @@ int dp_link_process_request(struct dp_link *dp_link)
if (link->request.test_requested == DP_TEST_LINK_EDID_READ) {
dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
- return ret;
- }
-
- ret = dp_link_process_ds_port_status_change(link);
- if (!ret) {
+ } else if (!dp_link_process_ds_port_status_change(link)) {
dp_link->sink_request |= DS_PORT_STATUS_CHANGED;
- return ret;
- }
-
- ret = dp_link_process_link_training_request(link);
- if (!ret) {
+ } else if (!dp_link_process_link_training_request(link)) {
dp_link->sink_request |= DP_TEST_LINK_TRAINING;
- return ret;
- }
-
- ret = dp_link_process_phy_test_pattern_request(link);
- if (!ret) {
+ } else if (!dp_link_process_phy_test_pattern_request(link)) {
dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN;
- return ret;
- }
-
- ret = dp_link_process_link_status_update(link);
- if (!ret) {
- dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
- return ret;
- }
-
- if (dp_link_is_video_pattern_requested(link)) {
- ret = 0;
- dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
- }
-
- if (dp_link_is_audio_pattern_requested(link)) {
- dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
- return -EINVAL;
+ } else {
+ ret = dp_link_process_link_status_update(link);
+ if (!ret) {
+ dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
+ } else {
+ if (dp_link_is_video_pattern_requested(link)) {
+ ret = 0;
+ dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
+ }
+ if (dp_link_is_audio_pattern_requested(link)) {
+ dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
+ ret = -EINVAL;
+ }
+ }
}
+ DRM_DEBUG_DP("sink request=%#x", dp_link->sink_request);
return ret;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 440b32753430..2181b60e1d1d 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -271,7 +271,7 @@ static u8 dp_panel_get_edid_checksum(struct edid *edid)
{
struct edid *last_block;
u8 *raw_edid;
- bool is_edid_corrupt;
+ bool is_edid_corrupt = false;
if (!edid) {
DRM_ERROR("invalid edid input\n");
@@ -303,7 +303,12 @@ void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
- u8 checksum = dp_panel_get_edid_checksum(dp_panel->edid);
+ u8 checksum;
+
+ if (dp_panel->edid)
+ checksum = dp_panel_get_edid_checksum(dp_panel->edid);
+ else
+ checksum = dp_panel->connector->real_edid_checksum;
dp_link_send_edid_checksum(panel->link, checksum);
dp_link_send_test_response(panel->link);
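[Editorial sketch] The dp_panel.c hunk falls back to the connector's stored real_edid_checksum when no EDID was read. For reference, an EDID block checksum is simply the byte that makes all 128 bytes of the block sum to zero modulo 256; the snippet below is an illustrative computation, not the driver's helper.

#include <stdint.h>
#include <stdio.h>

/* Compute the byte that must sit at offset 127 of a 128-byte EDID block. */
static uint8_t edid_block_checksum(const uint8_t blk[128])
{
	uint8_t sum = 0;

	for (int i = 0; i < 127; i++)
		sum += blk[i];
	return (uint8_t)(0x100 - sum);
}

int main(void)
{
	uint8_t blk[128] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

	blk[127] = edid_block_checksum(blk);
	printf("checksum byte = 0x%02x\n", blk[127]);
	return 0;
}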
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
index 3961ba4efc3c..b48b45e92bfa 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.c
+++ b/drivers/gpu/drm/msm/dp/dp_power.c
@@ -208,6 +208,9 @@ static int dp_power_clk_set_rate(struct dp_power_private *power,
int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type)
{
+ DRM_DEBUG_DP("core_clk_on=%d link_clk_on=%d stream_clk_on=%d\n",
+ dp_power->core_clks_on, dp_power->link_clks_on, dp_power->stream_clks_on);
+
if (pm_type == DP_CORE_PM)
return dp_power->core_clks_on;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 75afc12a7b25..614dc7f26f2c 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -13,6 +13,13 @@ struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
return msm_dsi->encoder;
}
+bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
+{
+ unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host);
+
+ return !(host_flags & MIPI_DSI_MODE_VIDEO);
+}
+
static int dsi_get_phy(struct msm_dsi *msm_dsi)
{
struct platform_device *pdev = msm_dsi->pdev;
@@ -26,8 +33,10 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
}
phy_pdev = of_find_device_by_node(phy_node);
- if (phy_pdev)
+ if (phy_pdev) {
msm_dsi->phy = platform_get_drvdata(phy_pdev);
+ msm_dsi->phy_dev = &phy_pdev->dev;
+ }
of_node_put(phy_node);
@@ -36,8 +45,6 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
return -EPROBE_DEFER;
}
- msm_dsi->phy_dev = get_device(&phy_pdev->dev);
-
return 0;
}
@@ -244,8 +251,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
- msm_dsi_manager_setup_encoder(msm_dsi->id);
-
priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
priv->connectors[priv->num_connectors++] = msm_dsi->connector;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 9b8e9b07eced..b50db91cb8a7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -80,10 +80,10 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id);
struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
-void msm_dsi_manager_setup_encoder(int id);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
bool msm_dsi_manager_validate_current_config(u8 id);
+void msm_dsi_manager_tpg_enable(void);
/* msm dsi */
static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
@@ -109,7 +109,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
struct msm_dsi_phy_shared_timings *phy_shared_timings,
- bool is_dual_dsi);
+ bool is_bonded_dsi, struct msm_dsi_phy *phy);
int msm_dsi_host_power_off(struct mipi_dsi_host *host);
int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
const struct drm_display_mode *mode);
@@ -123,7 +123,7 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
struct msm_dsi_phy_clk_request *clk_req,
- bool is_dual_dsi);
+ bool is_bonded_dsi);
void msm_dsi_host_destroy(struct mipi_dsi_host *host);
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev);
@@ -145,9 +145,11 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host);
-int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi);
-int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host);
+void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host);
+
/* dsi phy */
struct msm_dsi_phy;
struct msm_dsi_phy_shared_timings {
@@ -164,10 +166,9 @@ struct msm_dsi_phy_clk_request {
void msm_dsi_phy_driver_register(void);
void msm_dsi_phy_driver_unregister(void);
int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
- struct msm_dsi_phy_clk_request *clk_req);
+ struct msm_dsi_phy_clk_request *clk_req,
+ struct msm_dsi_phy_shared_timings *shared_timings);
void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
-void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
- struct msm_dsi_phy_shared_timings *shared_timing);
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
enum msm_dsi_phy_usecase uc);
int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
@@ -175,6 +176,7 @@ int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy);
int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy);
void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy);
+bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable);
#endif /* __DSI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index eadbcc78fd72..49b551ad1bff 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -105,6 +105,32 @@ enum dsi_lane_swap {
LANE_SWAP_3210 = 7,
};
+enum video_config_bpp {
+ VIDEO_CONFIG_18BPP = 0,
+ VIDEO_CONFIG_24BPP = 1,
+};
+
+enum video_pattern_sel {
+ VID_PRBS = 0,
+ VID_INCREMENTAL = 1,
+ VID_FIXED = 2,
+ VID_MDSS_GENERAL_PATTERN = 3,
+};
+
+enum cmd_mdp_stream0_pattern_sel {
+ CMD_MDP_PRBS = 0,
+ CMD_MDP_INCREMENTAL = 1,
+ CMD_MDP_FIXED = 2,
+ CMD_MDP_MDSS_GENERAL_PATTERN = 3,
+};
+
+enum cmd_dma_pattern_sel {
+ CMD_DMA_PRBS = 0,
+ CMD_DMA_INCREMENTAL = 1,
+ CMD_DMA_FIXED = 2,
+ CMD_DMA_CUSTOM_PATTERN_DMA_FIFO = 3,
+};
+
#define DSI_IRQ_CMD_DMA_DONE 0x00000001
#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002
#define DSI_IRQ_CMD_MDP_DONE 0x00000100
@@ -518,6 +544,7 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
#define DSI_LANE_STATUS_DLN0_DIRECTION 0x00010000
#define REG_DSI_LANE_CTRL 0x000000a8
+#define DSI_LANE_CTRL_HS_REQ_SEL_PHY 0x01000000
#define DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST 0x10000000
#define REG_DSI_LANE_SWAP_CTRL 0x000000ac
@@ -564,6 +591,53 @@ static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
#define REG_DSI_PHY_RESET 0x00000128
#define DSI_PHY_RESET_RESET 0x00000001
+#define REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL 0x00000160
+
+#define REG_DSI_TPG_MAIN_CONTROL 0x00000198
+#define DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN 0x00000100
+
+#define REG_DSI_TPG_VIDEO_CONFIG 0x000001a0
+#define DSI_TPG_VIDEO_CONFIG_BPP__MASK 0x00000003
+#define DSI_TPG_VIDEO_CONFIG_BPP__SHIFT 0
+static inline uint32_t DSI_TPG_VIDEO_CONFIG_BPP(enum video_config_bpp val)
+{
+ return ((val) << DSI_TPG_VIDEO_CONFIG_BPP__SHIFT) & DSI_TPG_VIDEO_CONFIG_BPP__MASK;
+}
+#define DSI_TPG_VIDEO_CONFIG_RGB 0x00000004
+
+#define REG_DSI_TEST_PATTERN_GEN_CTRL 0x00000158
+#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK 0x00030000
+#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT 16
+static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL(enum cmd_dma_pattern_sel val)
+{
+ return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK;
+}
+#define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK 0x00000300
+#define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT 8
+static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(enum cmd_mdp_stream0_pattern_sel val)
+{
+ return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK;
+}
+#define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK 0x00000030
+#define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT 4
+static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(enum video_pattern_sel val)
+{
+ return ((val) << DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK;
+}
+#define DSI_TEST_PATTERN_GEN_CTRL_TPG_DMA_FIFO_MODE 0x00000004
+#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_TPG_EN 0x00000002
+#define DSI_TEST_PATTERN_GEN_CTRL_EN 0x00000001
+
+#define REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0 0x00000168
+
+#define REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER 0x00000180
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER 0x00000001
+
+#define REG_DSI_TPG_MAIN_CONTROL2 0x0000019c
+#define DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN 0x00000080
+#define DSI_TPG_MAIN_CONTROL2_CMD_MDP1_CHECKERED_RECTANGLE_PATTERN 0x00010000
+#define DSI_TPG_MAIN_CONTROL2_CMD_MDP2_CHECKERED_RECTANGLE_PATTERN 0x02000000
+
#define REG_DSI_T_CLK_PRE_EXTEND 0x0000017c
#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK 0x00000001
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index f3f1c03c7db9..96bbc8b6d009 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -32,9 +32,8 @@ static const char * const dsi_6g_bus_clk_names[] = {
static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
- .num = 4,
+ .num = 3,
.regs = {
- {"gdsc", -1, -1},
{"vdd", 150000, 100}, /* 3.0 V */
{"vdda", 100000, 100}, /* 1.2 V */
{"vddio", 100000, 100}, /* 1.8 V */
@@ -53,9 +52,8 @@ static const char * const dsi_8916_bus_clk_names[] = {
static const struct msm_dsi_config msm8916_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
- .num = 3,
+ .num = 2,
.regs = {
- {"gdsc", -1, -1},
{"vdda", 100000, 100}, /* 1.2 V */
{"vddio", 100000, 100}, /* 1.8 V */
},
@@ -73,9 +71,8 @@ static const char * const dsi_8976_bus_clk_names[] = {
static const struct msm_dsi_config msm8976_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
- .num = 3,
+ .num = 2,
.regs = {
- {"gdsc", -1, -1},
{"vdda", 100000, 100}, /* 1.2 V */
{"vddio", 100000, 100}, /* 1.8 V */
},
@@ -89,9 +86,8 @@ static const struct msm_dsi_config msm8976_dsi_cfg = {
static const struct msm_dsi_config msm8994_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
- .num = 7,
+ .num = 6,
.regs = {
- {"gdsc", -1, -1},
{"vdda", 100000, 100}, /* 1.25 V */
{"vddio", 100000, 100}, /* 1.8 V */
{"vcca", 10000, 100}, /* 1.0 V */
@@ -154,7 +150,6 @@ static const struct msm_dsi_config sdm660_dsi_cfg = {
.reg_cfg = {
.num = 2,
.regs = {
- {"vdd", 73400, 32 }, /* 0.9 V */
{"vdda", 12560, 4 }, /* 1.2 V */
},
},
@@ -200,6 +195,24 @@ static const struct msm_dsi_config sc7180_dsi_cfg = {
.num_dsi = 1,
};
+static const char * const dsi_sc7280_bus_clk_names[] = {
+ "iface", "bus",
+};
+
+static const struct msm_dsi_config sc7280_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vdda", 8350, 0 }, /* 1.2 V */
+ },
+ },
+ .bus_clk_names = dsi_sc7280_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_sc7280_bus_clk_names),
+ .io_start = { 0xae94000 },
+ .num_dsi = 1,
+};
+
static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
.link_clk_set_rate = dsi_link_clk_set_rate_v2,
.link_clk_enable = dsi_link_clk_enable_v2,
@@ -267,6 +280,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
&sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_0,
+ &sc7280_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index ade9b609c7d9..41e99a9fb5de 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -24,6 +24,7 @@
#define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000
#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000
#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
+#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000
#define MSM_DSI_V2_VER_MINOR_8064 0x0
@@ -47,7 +48,7 @@ struct msm_dsi_host_cfg_ops {
void* (*tx_buf_get)(struct msm_dsi_host *msm_host);
void (*tx_buf_put)(struct msm_dsi_host *msm_host);
int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova);
- int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+ int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
};
struct msm_dsi_cfg_handler {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index ed504fe5074f..e269df285136 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -27,6 +27,7 @@
#include "dsi_cfg.h"
#include "msm_kms.h"
#include "msm_gem.h"
+#include "phy/dsi_phy.h"
#define DSI_RESET_TOGGLE_DELAY_MS 20
@@ -167,6 +168,9 @@ struct msm_dsi_host {
int dlane_swap;
int num_data_lanes;
+ /* from phy DT */
+ bool cphy_mode;
+
u32 dma_cmd_ctrl_restore;
bool registered;
@@ -203,35 +207,22 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
{
const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
struct device *dev = &msm_host->pdev->dev;
- struct regulator *gdsc_reg;
struct clk *ahb_clk;
int ret;
u32 major = 0, minor = 0;
- gdsc_reg = regulator_get(dev, "gdsc");
- if (IS_ERR(gdsc_reg)) {
- pr_err("%s: cannot get gdsc\n", __func__);
- goto exit;
- }
-
ahb_clk = msm_clk_get(msm_host->pdev, "iface");
if (IS_ERR(ahb_clk)) {
pr_err("%s: cannot get interface clock\n", __func__);
- goto put_gdsc;
+ goto exit;
}
pm_runtime_get_sync(dev);
- ret = regulator_enable(gdsc_reg);
- if (ret) {
- pr_err("%s: unable to enable gdsc\n", __func__);
- goto put_gdsc;
- }
-
ret = clk_prepare_enable(ahb_clk);
if (ret) {
pr_err("%s: unable to enable ahb_clk\n", __func__);
- goto disable_gdsc;
+ goto runtime_put;
}
ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
@@ -246,11 +237,8 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
disable_clks:
clk_disable_unprepare(ahb_clk);
-disable_gdsc:
- regulator_disable(gdsc_reg);
+runtime_put:
pm_runtime_put_sync(dev);
-put_gdsc:
- regulator_put(gdsc_reg);
exit:
return cfg_hnd;
}
@@ -510,6 +498,7 @@ int msm_dsi_runtime_resume(struct device *dev)
int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
{
+ u32 byte_intf_rate;
int ret;
DBG("Set clk rates: pclk=%d, byteclk=%d",
@@ -529,8 +518,13 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
}
if (msm_host->byte_intf_clk) {
- ret = clk_set_rate(msm_host->byte_intf_clk,
- msm_host->byte_clk_rate / 2);
+ /* For CPHY, byte_intf_clk is same as byte_clk */
+ if (msm_host->cphy_mode)
+ byte_intf_rate = msm_host->byte_clk_rate;
+ else
+ byte_intf_rate = msm_host->byte_clk_rate / 2;
+
+ ret = clk_set_rate(msm_host->byte_intf_clk, byte_intf_rate);
if (ret) {
pr_err("%s: Failed to set rate byte intf clk, %d\n",
__func__, ret);
@@ -679,7 +673,7 @@ void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
clk_disable_unprepare(msm_host->byte_clk);
}
-static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
struct drm_display_mode *mode = msm_host->mode;
u32 pclk_rate;
@@ -687,22 +681,22 @@ static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
pclk_rate = mode->clock * 1000;
/*
- * For dual DSI mode, the current DRM mode has the complete width of the
+ * For bonded DSI mode, the current DRM mode has the complete width of the
* panel. Since, the complete panel is driven by two DSI controllers,
* the clock rates have to be split between the two dsi controllers.
* Adjust the byte and pixel clock rates for each dsi host accordingly.
*/
- if (is_dual_dsi)
+ if (is_bonded_dsi)
pclk_rate /= 2;
return pclk_rate;
}
-static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
u8 lanes = msm_host->lanes;
u32 bpp = dsi_get_bpp(msm_host->format);
- u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_dual_dsi);
+ u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_bonded_dsi);
u64 pclk_bpp = (u64)pclk_rate * bpp;
if (lanes == 0) {
@@ -710,7 +704,11 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
lanes = 1;
}
- do_div(pclk_bpp, (8 * lanes));
+ /* CPHY "byte_clk" is in units of 16 bits */
+ if (msm_host->cphy_mode)
+ do_div(pclk_bpp, (16 * lanes));
+ else
+ do_div(pclk_bpp, (8 * lanes));
msm_host->pixel_clk_rate = pclk_rate;
msm_host->byte_clk_rate = pclk_bpp;
@@ -720,28 +718,28 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
}
-int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
if (!msm_host->mode) {
pr_err("%s: mode not set\n", __func__);
return -EINVAL;
}
- dsi_calc_pclk(msm_host, is_dual_dsi);
+ dsi_calc_pclk(msm_host, is_bonded_dsi);
msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
return 0;
}
-int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
u32 bpp = dsi_get_bpp(msm_host->format);
u64 pclk_bpp;
unsigned int esc_mhz, esc_div;
unsigned long byte_mhz;
- dsi_calc_pclk(msm_host, is_dual_dsi);
+ dsi_calc_pclk(msm_host, is_bonded_dsi);
- pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_dual_dsi) * bpp;
+ pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_bonded_dsi) * bpp;
do_div(pclk_bpp, 8);
msm_host->src_clk_rate = pclk_bpp;
@@ -834,7 +832,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
}
static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
- struct msm_dsi_phy_shared_timings *phy_shared_timings)
+ struct msm_dsi_phy_shared_timings *phy_shared_timings, struct msm_dsi_phy *phy)
{
u32 flags = msm_host->mode_flags;
enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
@@ -849,11 +847,11 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
if (flags & MIPI_DSI_MODE_VIDEO) {
if (flags & MIPI_DSI_MODE_VIDEO_HSE)
data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
- if (flags & MIPI_DSI_MODE_VIDEO_HFP)
+ if (flags & MIPI_DSI_MODE_VIDEO_NO_HFP)
data |= DSI_VID_CFG0_HFP_POWER_STOP;
- if (flags & MIPI_DSI_MODE_VIDEO_HBP)
+ if (flags & MIPI_DSI_MODE_VIDEO_NO_HBP)
data |= DSI_VID_CFG0_HBP_POWER_STOP;
- if (flags & MIPI_DSI_MODE_VIDEO_HSA)
+ if (flags & MIPI_DSI_MODE_VIDEO_NO_HSA)
data |= DSI_VID_CFG0_HSA_POWER_STOP;
/* Always set low power stop mode for BLLP
* to let command engine send packets
@@ -908,7 +906,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK);
data = 0;
- if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
+ if (!(flags & MIPI_DSI_MODE_NO_EOT_PACKET))
data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
@@ -929,6 +927,10 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) {
lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL);
+
+ if (msm_dsi_phy_set_continuous_clock(phy, enable))
+ lane_ctrl &= ~DSI_LANE_CTRL_HS_REQ_SEL_PHY;
+
dsi_write(msm_host, REG_DSI_LANE_CTRL,
lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
}
@@ -936,9 +938,12 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
data |= DSI_CTRL_ENABLE;
dsi_write(msm_host, REG_DSI_CTRL, data);
+
+ if (msm_host->cphy_mode)
+ dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0));
}
-static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
struct drm_display_mode *mode = msm_host->mode;
u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
@@ -956,13 +961,13 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
DBG("");
/*
- * For dual DSI mode, the current DRM mode has
+ * For bonded DSI mode, the current DRM mode has
* the complete width of the panel. Since, the complete
* panel is driven by two DSI controllers, the horizontal
* timings have to be split between the two dsi controllers.
* Adjust the DSI host timing values accordingly.
*/
- if (is_dual_dsi) {
+ if (is_bonded_dsi) {
h_total /= 2;
hs_end /= 2;
ha_start /= 2;
@@ -2226,6 +2231,8 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct clk *byte_clk_provider, *pixel_clk_provider;
int ret;
+ msm_host->cphy_mode = src_phy->cphy_mode;
+
ret = msm_dsi_phy_get_clk_provider(src_phy,
&byte_clk_provider, &pixel_clk_provider);
if (ret) {
@@ -2285,19 +2292,26 @@ void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
struct msm_dsi_phy_clk_request *clk_req,
- bool is_dual_dsi)
+ bool is_bonded_dsi)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
- ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_dual_dsi);
+ ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_bonded_dsi);
if (ret) {
pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
return;
}
- clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
+ /* CPHY transmits 16 bits over 7 clock cycles
+ * "byte_clk" is in units of 16-bits (see dsi_calc_pclk),
+ * so multiply by 7 to get the "bitclk rate"
+ */
+ if (msm_host->cphy_mode)
+ clk_req->bitclk_rate = msm_host->byte_clk_rate * 7;
+ else
+ clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
clk_req->escclk_rate = msm_host->esc_clk_rate;
}
@@ -2354,7 +2368,7 @@ static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
struct msm_dsi_phy_shared_timings *phy_shared_timings,
- bool is_dual_dsi)
+ bool is_bonded_dsi, struct msm_dsi_phy *phy)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
@@ -2392,9 +2406,9 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
goto fail_disable_clk;
}
- dsi_timing_setup(msm_host, is_dual_dsi);
+ dsi_timing_setup(msm_host, is_bonded_dsi);
dsi_sw_reset(msm_host);
- dsi_ctrl_config(msm_host, true, phy_shared_timings);
+ dsi_ctrl_config(msm_host, true, phy_shared_timings, phy);
if (msm_host->disp_en_gpio)
gpiod_set_value(msm_host->disp_en_gpio, 1);
@@ -2425,7 +2439,7 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
goto unlock_ret;
}
- dsi_ctrl_config(msm_host, false, NULL);
+ dsi_ctrl_config(msm_host, false, NULL, NULL);
if (msm_host->disp_en_gpio)
gpiod_set_value(msm_host->disp_en_gpio, 0);
@@ -2495,3 +2509,64 @@ void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_ho
pm_runtime_put_sync(&msm_host->pdev->dev);
}
+
+static void msm_dsi_host_video_test_pattern_setup(struct msm_dsi_host *msm_host)
+{
+ u32 reg;
+
+ reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL);
+
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, 0xff);
+ /* draw checkered rectangle pattern */
+ dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL,
+ DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN);
+ /* use 24-bit RGB test pattern */
+ dsi_write(msm_host, REG_DSI_TPG_VIDEO_CONFIG,
+ DSI_TPG_VIDEO_CONFIG_BPP(VIDEO_CONFIG_24BPP) |
+ DSI_TPG_VIDEO_CONFIG_RGB);
+
+ reg |= DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(VID_MDSS_GENERAL_PATTERN);
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+ DBG("Video test pattern setup done\n");
+}
+
+static void msm_dsi_host_cmd_test_pattern_setup(struct msm_dsi_host *msm_host)
+{
+ u32 reg;
+
+ reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL);
+
+ /* initial value for test pattern */
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0, 0xff);
+
+ reg |= DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(CMD_MDP_MDSS_GENERAL_PATTERN);
+
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg);
+ /* draw checkered rectangle pattern */
+ dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL2,
+ DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN);
+
+ DBG("Cmd test pattern setup done\n");
+}
+
+void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ bool is_video_mode = !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO);
+ u32 reg;
+
+ if (is_video_mode)
+ msm_dsi_host_video_test_pattern_setup(msm_host);
+ else
+ msm_dsi_host_cmd_test_pattern_setup(msm_host);
+
+ reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL);
+ /* enable the test pattern generator */
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, (reg | DSI_TEST_PATTERN_GEN_CTRL_EN));
+
+ /* for command mode need to trigger one frame from tpg */
+ if (!is_video_mode)
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER,
+ DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER);
+}
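[Editorial sketch] The dsi_host.c changes above key several clock calculations off msm_host->cphy_mode: the per-lane "byte" clock counts 16-bit units for C-PHY (versus 8-bit units for D-PHY), byte_intf_clk is no longer halved, and the bit clock handed to the PHY is byte_clk * 7 because C-PHY carries 16 bits in 7 symbols. A standalone sketch of that arithmetic, with made-up example numbers, is below.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical mode: 148.5 MHz pixel clock, RGB888, 4 data lanes */
	uint64_t pclk = 148500000ULL;
	unsigned int bpp = 24, lanes = 4;

	uint64_t dphy_byte = pclk * bpp / (8 * lanes);   /* 8 bits per byte-clock tick */
	uint64_t cphy_byte = pclk * bpp / (16 * lanes);  /* 16 bits per byte-clock tick */

	printf("D-PHY byte_clk=%llu Hz bitclk=%llu Hz\n",
	       (unsigned long long)dphy_byte,
	       (unsigned long long)(dphy_byte * 8));
	printf("C-PHY byte_clk=%llu Hz bitclk=%llu Hz\n",
	       (unsigned long long)cphy_byte,
	       (unsigned long long)(cphy_byte * 7));
	return 0;
}

For bonded DSI the same pixel clock would additionally be halved per controller, as the dsi_get_pclk_rate() comment in the diff notes, since each host drives only half of the panel.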
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 4ebfedc4a9ac..c41d39f5b7cf 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -21,14 +21,14 @@
struct msm_dsi_manager {
struct msm_dsi *dsi[DSI_MAX];
- bool is_dual_dsi;
+ bool is_bonded_dsi;
bool is_sync_needed;
int master_dsi_link_id;
};
static struct msm_dsi_manager msm_dsim_glb;
-#define IS_DUAL_DSI() (msm_dsim_glb.is_dual_dsi)
+#define IS_BONDED_DSI() (msm_dsim_glb.is_bonded_dsi)
#define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed)
#define IS_MASTER_DSI_LINK(id) (msm_dsim_glb.master_dsi_link_id == id)
@@ -42,18 +42,17 @@ static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id)
return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
}
-static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id)
+static int dsi_mgr_parse_of(struct device_node *np, int id)
{
struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
- /* We assume 2 dsi nodes have the same information of dual-dsi and
- * sync-mode, and only one node specifies master in case of dual mode.
+ /* We assume 2 dsi nodes have the same information of bonded dsi and
+ * sync-mode, and only one node specifies master in case of bonded mode.
*/
- if (!msm_dsim->is_dual_dsi)
- msm_dsim->is_dual_dsi = of_property_read_bool(
- np, "qcom,dual-dsi-mode");
+ if (!msm_dsim->is_bonded_dsi)
+ msm_dsim->is_bonded_dsi = of_property_read_bool(np, "qcom,dual-dsi-mode");
- if (msm_dsim->is_dual_dsi) {
+ if (msm_dsim->is_bonded_dsi) {
if (of_property_read_bool(np, "qcom,master-dsi"))
msm_dsim->master_dsi_link_id = id;
if (!msm_dsim->is_sync_needed)
@@ -72,7 +71,7 @@ static int dsi_mgr_setup_components(int id)
struct msm_dsi *clk_slave_dsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
int ret;
- if (!IS_DUAL_DSI()) {
+ if (!IS_BONDED_DSI()) {
ret = msm_dsi_host_register(msm_dsi->host, true);
if (ret)
return ret;
@@ -100,7 +99,7 @@ static int dsi_mgr_setup_components(int id)
if (ret)
return ret;
- /* PLL0 is to drive both 2 DSI link clocks in Dual DSI mode. */
+ /* PLL0 is to drive both 2 DSI link clocks in bonded DSI mode. */
msm_dsi_phy_set_usecase(clk_master_dsi->phy,
MSM_DSI_PHY_MASTER);
msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
@@ -119,12 +118,11 @@ static int enable_phy(struct msm_dsi *msm_dsi,
{
struct msm_dsi_phy_clk_request clk_req;
int ret;
- bool is_dual_dsi = IS_DUAL_DSI();
+ bool is_bonded_dsi = IS_BONDED_DSI();
- msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_dual_dsi);
+ msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_bonded_dsi);
- ret = msm_dsi_phy_enable(msm_dsi->phy, &clk_req);
- msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings);
+ ret = msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings);
return ret;
}
@@ -138,12 +136,12 @@ dsi_mgr_phy_enable(int id,
struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
int ret;
- /* In case of dual DSI, some registers in PHY1 have been programmed
+ /* In case of bonded DSI, some registers in PHY1 have been programmed
* during PLL0 clock's set_rate. The PHY1 reset called by host1 here
* will silently reset those PHY1 registers. Therefore we need to reset
* and enable both PHYs before any PLL clock operation.
*/
- if (IS_DUAL_DSI() && mdsi && sdsi) {
+ if (IS_BONDED_DSI() && mdsi && sdsi) {
if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
msm_dsi_host_reset_phy(mdsi->host);
msm_dsi_host_reset_phy(sdsi->host);
@@ -178,11 +176,11 @@ static void dsi_mgr_phy_disable(int id)
struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
/* disable DSI phy
- * In dual-dsi configuration, the phy should be disabled for the
+ * In bonded dsi configuration, the phy should be disabled for the
* first controller only when the second controller is disabled.
*/
msm_dsi->phy_enabled = false;
- if (IS_DUAL_DSI() && mdsi && sdsi) {
+ if (IS_BONDED_DSI() && mdsi && sdsi) {
if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
msm_dsi_phy_disable(sdsi->phy);
msm_dsi_phy_disable(mdsi->phy);
@@ -217,24 +215,6 @@ static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
return dsi_bridge->id;
}
-static bool dsi_mgr_is_cmd_mode(struct msm_dsi *msm_dsi)
-{
- unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host);
- return !(host_flags & MIPI_DSI_MODE_VIDEO);
-}
-
-void msm_dsi_manager_setup_encoder(int id)
-{
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct msm_drm_private *priv = msm_dsi->dev->dev_private;
- struct msm_kms *kms = priv->kms;
- struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi);
-
- if (encoder && kms->funcs->set_encoder_mode)
- kms->funcs->set_encoder_mode(kms, encoder,
- dsi_mgr_is_cmd_mode(msm_dsi));
-}
-
static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
{
struct msm_drm_private *priv = conn->dev->dev_private;
@@ -244,7 +224,7 @@ static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
struct msm_dsi *master_dsi, *slave_dsi;
struct drm_panel *panel;
- if (IS_DUAL_DSI() && !IS_MASTER_DSI_LINK(id)) {
+ if (IS_BONDED_DSI() && !IS_MASTER_DSI_LINK(id)) {
master_dsi = other_dsi;
slave_dsi = msm_dsi;
} else {
@@ -253,7 +233,7 @@ static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
}
/*
- * There is only 1 panel in the global panel list for dual DSI mode.
+ * There is only 1 panel in the global panel list for bonded DSI mode.
* Therefore slave dsi should get the drm_panel instance from master
* dsi.
*/
@@ -264,20 +244,20 @@ static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
return PTR_ERR(panel);
}
- if (!panel || !IS_DUAL_DSI())
+ if (!panel || !IS_BONDED_DSI())
goto out;
drm_object_attach_property(&conn->base,
conn->dev->mode_config.tile_property, 0);
/*
- * Set split display info to kms once dual DSI panel is connected to
+ * Set split display info to kms once bonded DSI panel is connected to
* both hosts.
*/
if (other_dsi && other_dsi->panel && kms->funcs->set_split_display) {
kms->funcs->set_split_display(kms, master_dsi->encoder,
slave_dsi->encoder,
- dsi_mgr_is_cmd_mode(msm_dsi));
+ msm_dsi_is_cmd_mode(msm_dsi));
}
out:
@@ -317,7 +297,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
return 0;
/*
- * In dual DSI mode, we have one connector that can be
+ * In bonded DSI mode, we have one connector that can be
* attached to the drm_panel.
*/
num = drm_panel_get_modes(panel, connector);
@@ -366,30 +346,30 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX];
- bool is_dual_dsi = IS_DUAL_DSI();
+ bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
DBG("id=%d", id);
if (!msm_dsi_device_connected(msm_dsi))
return;
- /* Do nothing with the host if it is slave-DSI in case of dual DSI */
- if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+ /* Do nothing with the host if it is slave-DSI in case of bonded DSI */
+ if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
return;
ret = dsi_mgr_phy_enable(id, phy_shared_timings);
if (ret)
goto phy_en_fail;
- ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_dual_dsi);
+ ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_bonded_dsi, msm_dsi->phy);
if (ret) {
pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
goto host_on_fail;
}
- if (is_dual_dsi && msm_dsi1) {
+ if (is_bonded_dsi && msm_dsi1) {
ret = msm_dsi_host_power_on(msm_dsi1->host,
- &phy_shared_timings[DSI_1], is_dual_dsi);
+ &phy_shared_timings[DSI_1], is_bonded_dsi, msm_dsi1->phy);
if (ret) {
pr_err("%s: power on host1 failed, %d\n",
__func__, ret);
@@ -415,7 +395,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
goto host_en_fail;
}
- if (is_dual_dsi && msm_dsi1) {
+ if (is_bonded_dsi && msm_dsi1) {
ret = msm_dsi_host_enable(msm_dsi1->host);
if (ret) {
pr_err("%s: enable host1 failed, %d\n", __func__, ret);
@@ -431,7 +411,7 @@ host_en_fail:
if (panel)
drm_panel_unprepare(panel);
panel_prep_fail:
- if (is_dual_dsi && msm_dsi1)
+ if (is_bonded_dsi && msm_dsi1)
msm_dsi_host_power_off(msm_dsi1->host);
host1_on_fail:
msm_dsi_host_power_off(host);
@@ -441,20 +421,33 @@ phy_en_fail:
return;
}
+void msm_dsi_manager_tpg_enable(void)
+{
+ struct msm_dsi *m_dsi = dsi_mgr_get_dsi(DSI_0);
+ struct msm_dsi *s_dsi = dsi_mgr_get_dsi(DSI_1);
+
+ /* if dual dsi, trigger tpg on master first then slave */
+ if (m_dsi) {
+ msm_dsi_host_test_pattern_en(m_dsi->host);
+ if (IS_BONDED_DSI() && s_dsi)
+ msm_dsi_host_test_pattern_en(s_dsi->host);
+ }
+}
+
static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_panel *panel = msm_dsi->panel;
- bool is_dual_dsi = IS_DUAL_DSI();
+ bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
DBG("id=%d", id);
if (!msm_dsi_device_connected(msm_dsi))
return;
- /* Do nothing with the host if it is slave-DSI in case of dual DSI */
- if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+ /* Do nothing with the host if it is slave-DSI in case of bonded DSI */
+ if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
return;
if (panel) {
@@ -471,15 +464,15 @@ static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_panel *panel = msm_dsi->panel;
- bool is_dual_dsi = IS_DUAL_DSI();
+ bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
DBG("id=%d", id);
if (!msm_dsi_device_connected(msm_dsi))
return;
- /* Do nothing with the host if it is slave-DSI in case of dual DSI */
- if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+ /* Do nothing with the host if it is slave-DSI in case of bonded DSI */
+ if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
return;
if (panel) {
@@ -497,7 +490,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
- bool is_dual_dsi = IS_DUAL_DSI();
+ bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
DBG("id=%d", id);
@@ -506,18 +499,18 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
return;
/*
- * Do nothing with the host if it is slave-DSI in case of dual DSI.
+ * Do nothing with the host if it is slave-DSI in case of bonded DSI.
* It is safe to call dsi_mgr_phy_disable() here because a single PHY
* won't be disabled until both PHYs request disable.
*/
- if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+ if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
goto disable_phy;
ret = msm_dsi_host_disable(host);
if (ret)
pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
- if (is_dual_dsi && msm_dsi1) {
+ if (is_bonded_dsi && msm_dsi1) {
ret = msm_dsi_host_disable(msm_dsi1->host);
if (ret)
pr_err("%s: host1 disable failed, %d\n", __func__, ret);
@@ -537,7 +530,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
if (ret)
pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
- if (is_dual_dsi && msm_dsi1) {
+ if (is_bonded_dsi && msm_dsi1) {
ret = msm_dsi_host_power_off(msm_dsi1->host);
if (ret)
pr_err("%s: host1 power off failed, %d\n",
@@ -556,15 +549,15 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
struct mipi_dsi_host *host = msm_dsi->host;
- bool is_dual_dsi = IS_DUAL_DSI();
+ bool is_bonded_dsi = IS_BONDED_DSI();
DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
- if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+ if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
return;
msm_dsi_host_set_display_mode(host, adjusted_mode);
- if (is_dual_dsi && other_dsi)
+ if (is_bonded_dsi && other_dsi)
msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
}
@@ -640,15 +633,15 @@ fail:
bool msm_dsi_manager_validate_current_config(u8 id)
{
- bool is_dual_dsi = IS_DUAL_DSI();
+ bool is_bonded_dsi = IS_BONDED_DSI();
/*
- * For dual DSI, we only have one drm panel. For this
+ * For bonded DSI, we only have one drm panel. For this
* use case, we register only one bridge/connector.
* Skip bridge/connector initialisation if it is
- * slave-DSI for dual DSI configuration.
+ * slave-DSI for bonded DSI configuration.
*/
- if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) {
+ if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) {
DBG("Skip bridge registration for slave DSI->id: %d\n", id);
return false;
}
@@ -740,7 +733,7 @@ int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
if (!msg->tx_buf || !msg->tx_len)
return 0;
- /* In dual master case, panel requires the same commands sent to
+ /* In bonded master case, panel requires the same commands sent to
* both DSI links. Host issues the command trigger to both links
 * when DSI_1 calls the cmd transfer function, no matter whether it happens
* before or after DSI_0 cmd transfer.
@@ -809,9 +802,9 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
msm_dsim->dsi[id] = msm_dsi;
- ret = dsi_mgr_parse_dual_dsi(msm_dsi->pdev->dev.of_node, id);
+ ret = dsi_mgr_parse_of(msm_dsi->pdev->dev.of_node, id);
if (ret) {
- pr_err("%s: failed to parse dual DSI info\n", __func__);
+ pr_err("%s: failed to parse OF DSI info\n", __func__);
goto fail;
}
@@ -840,3 +833,12 @@ void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi)
msm_dsim->dsi[msm_dsi->id] = NULL;
}
+bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi)
+{
+ return IS_BONDED_DSI();
+}
+
+bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
+{
+ return IS_MASTER_DSI_LINK(msm_dsi->id);
+}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 6ca6bfd4809b..8c65ef6968ca 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -5,6 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
+#include <dt-bindings/phy/phy.h>
#include "dsi_phy.h"
@@ -461,6 +462,51 @@ int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
return 0;
}
+int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
+ s32 ui, ui_x7;
+ s32 tmax, tmin;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ ui_x7 = ui * 7;
+
+ temp = S_DIV_ROUND_UP(38 * coeff, ui_x7);
+ tmin = max_t(s32, temp, 0);
+ temp = (95 * coeff) / ui_x7;
+ tmax = max_t(s32, temp, 0);
+ timing->clk_prepare = linear_inter(tmax, tmin, 50, 0, false);
+
+ tmin = DIV_ROUND_UP(50 * coeff, ui_x7);
+ tmax = 255;
+ timing->hs_rqst = linear_inter(tmax, tmin, 1, 0, false);
+
+ tmin = DIV_ROUND_UP(100 * coeff, ui_x7) - 1;
+ tmax = 255;
+ timing->hs_exit = linear_inter(tmax, tmin, 10, 0, false);
+
+ tmin = 1;
+ tmax = 32;
+ timing->shared_timings.clk_post = linear_inter(tmax, tmin, 80, 0, false);
+
+ tmin = min_t(s32, 64, S_DIV_ROUND_UP(262 * coeff, ui_x7) - 1);
+ tmax = 64;
+ timing->shared_timings.clk_pre = linear_inter(tmax, tmin, 20, 0, false);
+
+ DBG("%d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->clk_prepare, timing->hs_exit, timing->hs_rqst);
+
+ return 0;
+}
+
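For reference, the arithmetic above boils down to a fixed-point unit-interval calculation. The standalone sketch below reproduces the clk_prepare computation for a 1.5 GHz bit clock; interp() is a simplified, truncating stand-in for the kernel's linear_inter() helper (no round-up or minimum clamping), so its result may differ by one step:

#include <stdio.h>
#include <stdint.h>

static int32_t interp(int32_t tmax, int32_t tmin, int percent)
{
	return tmin + (tmax - tmin) * percent / 100;
}

int main(void)
{
	const unsigned long bit_rate = 1500000000UL;	/* 1.5 GHz bit clock */
	const int32_t coeff = 1000;			/* fixed-point precision */

	/* ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000) */
	int32_t ui = (int32_t)(1000000ULL * coeff / (bit_rate / 1000));
	int32_t ui_x7 = ui * 7;				/* same x7 scaling as above */

	int32_t tmin = (38 * coeff + ui_x7 - 1) / ui_x7;	/* DIV_ROUND_UP */
	int32_t tmax = (95 * coeff) / ui_x7;

	printf("ui=%d ui_x7=%d clk_prepare~=%d\n", ui, ui_x7, interp(tmax, tmin, 50));
	return 0;
}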
static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
{
struct regulator_bulk_data *s = phy->supplies;
@@ -593,6 +639,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_7nm_cfgs },
{ .compatible = "qcom,dsi-phy-7nm-8150",
.data = &dsi_phy_7nm_8150_cfgs },
+ { .compatible = "qcom,sc7280-dsi-phy-7nm",
+ .data = &dsi_phy_7nm_7280_cfgs },
#endif
{}
};
@@ -625,17 +673,13 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
{
struct msm_dsi_phy *phy;
struct device *dev = &pdev->dev;
- const struct of_device_id *match;
+ u32 phy_type;
int ret;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- match = of_match_node(dsi_phy_dt_match, dev->of_node);
- if (!match)
- return -ENODEV;
-
phy->provided_clocks = devm_kzalloc(dev,
struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS),
GFP_KERNEL);
@@ -644,7 +688,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->provided_clocks->num = NUM_PROVIDED_CLKS;
- phy->cfg = match->data;
+ phy->cfg = of_device_get_match_data(&pdev->dev);
+ if (!phy->cfg)
+ return -ENODEV;
+
phy->pdev = pdev;
phy->id = dsi_phy_get_id(phy);
@@ -657,6 +704,8 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
"qcom,dsi-phy-regulator-ldo-mode");
+ if (!of_property_read_u32(dev->of_node, "phy-type", &phy_type))
+ phy->cphy_mode = (phy_type == PHY_TYPE_CPHY);
phy->base = msm_ioremap_size(pdev, "dsi_phy", "DSI_PHY", &phy->base_size);
if (IS_ERR(phy->base)) {
@@ -754,7 +803,8 @@ void __exit msm_dsi_phy_driver_unregister(void)
}
int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
- struct msm_dsi_phy_clk_request *clk_req)
+ struct msm_dsi_phy_clk_request *clk_req,
+ struct msm_dsi_phy_shared_timings *shared_timings)
{
struct device *dev = &phy->pdev->dev;
int ret;
@@ -782,6 +832,9 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
goto phy_en_fail;
}
+ memcpy(shared_timings, &phy->timing.shared_timings,
+ sizeof(*shared_timings));
+
/*
* Resetting DSI PHY silently changes its PLL registers to reset status,
* which will confuse clock driver and result in wrong output rate of
@@ -821,13 +874,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
dsi_phy_disable_resource(phy);
}
-void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
- struct msm_dsi_phy_shared_timings *shared_timings)
-{
- memcpy(shared_timings, &phy->timing.shared_timings,
- sizeof(*shared_timings));
-}
-
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
enum msm_dsi_phy_usecase uc)
{
@@ -835,6 +881,15 @@ void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
phy->usecase = uc;
}
+/* Returns true if we have to clear DSI_LANE_CTRL.HS_REQ_SEL_PHY */
+bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
+{
+ if (!phy || !phy->cfg->ops.set_continuous_clock)
+ return false;
+
+ return phy->cfg->ops.set_continuous_clock(phy, enable);
+}
+
int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
{
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 5b0feef87127..b91303ada74f 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -24,6 +24,7 @@ struct msm_dsi_phy_ops {
void (*disable)(struct msm_dsi_phy *phy);
void (*save_pll_state)(struct msm_dsi_phy *phy);
int (*restore_pll_state)(struct msm_dsi_phy *phy);
+ bool (*set_continuous_clock)(struct msm_dsi_phy *phy, bool enable);
};
struct msm_dsi_phy_cfg {
@@ -51,6 +52,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_zero;
@@ -99,6 +101,7 @@ struct msm_dsi_phy {
enum msm_dsi_phy_usecase usecase;
bool regulator_ldo_mode;
+ bool cphy_mode;
struct clk_hw *vco_hw;
bool pll_on;
@@ -119,5 +122,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req);
#endif /* __DSI_PHY_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index e46b10fc793a..d8128f50b0dd 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -84,7 +84,7 @@ struct dsi_pll_10nm {
#define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, clk_hw)
/*
- * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * Global list of private DSI PLL struct pointers. We need this for bonded DSI
 * mode, where the master PLL's clk_ops needs to access the slave's private data
*/
static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index a34cf151c517..d13552b2213b 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -86,7 +86,7 @@ struct dsi_pll_14nm {
/*
* Private struct for N1/N2 post-divider clocks. These clocks are similar to
* the generic clk_divider class of clocks. The only difference is that it
- * also sets the slave DSI PLL's post-dividers if in Dual DSI mode
+ * also sets the slave DSI PLL's post-dividers if in bonded DSI mode
*/
struct dsi_pll_14nm_postdiv {
struct clk_hw hw;
@@ -102,7 +102,7 @@ struct dsi_pll_14nm_postdiv {
#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw)
/*
- * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * Global list of private DSI PLL struct pointers. We need this for bonded DSI
 * mode, where the master PLL's clk_ops needs to access the slave's private data
*/
static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
@@ -658,7 +658,7 @@ static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
val |= value << shift;
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
- /* If we're master in dual DSI mode, then the slave PLL's post-dividers
+ /* If we're master in bonded DSI mode, then the slave PLL's post-dividers
* follow the master's post dividers
*/
if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) {
@@ -1050,7 +1050,7 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
.reg_cfg = {
.num = 1,
.regs = {
- {"vcca", 17000, 32},
+ {"vcca", 73400, 32},
},
},
.ops = {
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 7c23d4c47338..cb297b08458e 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -83,7 +83,7 @@ struct dsi_pll_7nm {
#define to_pll_7nm(x) container_of(x, struct dsi_pll_7nm, clk_hw)
/*
- * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * Global list of private DSI PLL struct pointers. We need this for bonded DSI
 * mode, where the master PLL's clk_ops needs to access the slave's private data
*/
static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
@@ -256,7 +256,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi
(config->frac_div_start & 0x30000) >> 16);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
- dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, pll->phy->cphy_mode ? 0x00 : 0x10);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, config->pll_clock_inverters);
}
@@ -642,7 +642,8 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- CLK_SET_RATE_PARENT, 1, 8);
+ CLK_SET_RATE_PARENT, 1,
+ pll_7nm->phy->cphy_mode ? 7 : 8);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -663,32 +664,47 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- 0, 1, 4);
+ if (pll_7nm->phy->cphy_mode)
+ hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 2, 7);
+ else
+ hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 4);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
- snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
- snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
- snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
-
- hw = devm_clk_hw_register_mux(dev, clk_name,
- ((const char *[]){
- parent, parent2, parent3, parent4
- }), 4, 0, pll_7nm->phy->base +
- REG_DSI_7nm_PHY_CMN_CLK_CFG1,
- 0, 2, 0, NULL);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
- goto fail;
+ /* In C-PHY mode, pclk_mux will always have post_out_div as its parent,
+ * so don't register a pclk_mux clock and just use post_out_div instead.
+ */
+ if (pll_7nm->phy->cphy_mode) {
+ u32 data;
+
+ data = dsi_phy_read(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data | 3);
+
+ snprintf(parent, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
+ } else {
+ snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
+ snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
+ snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
+ snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
+ snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
+
+ hw = devm_clk_hw_register_mux(dev, clk_name,
+ ((const char *[]){
+ parent, parent2, parent3, parent4
+ }), 4, 0, pll_7nm->phy->base +
+ REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+ 0, 2, 0, NULL);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto fail;
+ }
+
+ snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
}
snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
hw = devm_clk_hw_register_divider(dev, clk_name, parent,
@@ -813,15 +829,21 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
struct msm_dsi_dphy_timing *timing = &phy->timing;
void __iomem *base = phy->base;
bool less_than_1500_mhz;
- u32 vreg_ctrl_0, glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
+ u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;
+ u32 glbl_pemph_ctrl_0;
+ u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
u32 glbl_rescode_top_ctrl, glbl_rescode_bot_ctrl;
u32 data;
DBG("");
- if (msm_dsi_dphy_timing_calc_v4(timing, clk_req)) {
+ if (phy->cphy_mode)
+ ret = msm_dsi_cphy_timing_calc_v4(timing, clk_req);
+ else
+ ret = msm_dsi_dphy_timing_calc_v4(timing, clk_req);
+ if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev,
- "%s: D-PHY timing calculation failed\n", __func__);
+ "%s: PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@@ -842,6 +864,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* Alter PHY configurations if the data rate is less than 1.5 GHz */
less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);
+ /* For C-PHY, no low power settings for lower clk rate */
+ if (phy->cphy_mode)
+ less_than_1500_mhz = false;
+
if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00;
@@ -856,6 +882,17 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
glbl_rescode_bot_ctrl = 0x3c;
}
+ if (phy->cphy_mode) {
+ vreg_ctrl_0 = 0x51;
+ vreg_ctrl_1 = 0x55;
+ glbl_pemph_ctrl_0 = 0x11;
+ lane_ctrl0 = 0x17;
+ } else {
+ vreg_ctrl_1 = 0x5c;
+ glbl_pemph_ctrl_0 = 0x00;
+ lane_ctrl0 = 0x1f;
+ }
+
/* de-assert digital and pll power down */
data = BIT(6) | BIT(5);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
@@ -876,15 +913,22 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG0, 0x21);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG1, 0x84);
+ if (phy->cphy_mode)
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_CTRL, BIT(6));
+
/* Enable LDO */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, 0x5c);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, vreg_ctrl_1);
+
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
glbl_str_swi_cal_sel_ctrl);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0,
glbl_hstx_str_ctrl_0);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0,
+ glbl_pemph_ctrl_0);
+ if (phy->cphy_mode)
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1, 0x01);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL,
glbl_rescode_top_ctrl);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL,
@@ -894,10 +938,11 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* Remove power down from all blocks */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x7f);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0x1f);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, lane_ctrl0);
/* Select full-rate mode */
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40);
+ if (!phy->cphy_mode)
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40);
ret = dsi_7nm_set_usecase(phy);
if (ret) {
@@ -907,22 +952,36 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
}
/* DSI PHY timings */
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
- timing->shared_timings.clk_pre);
- dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
- timing->shared_timings.clk_post);
+ if (phy->cphy_mode) {
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5,
+ timing->shared_timings.clk_pre);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->clk_prepare);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7,
+ timing->shared_timings.clk_post);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
+ } else {
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
+ timing->shared_timings.clk_pre);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
+ timing->shared_timings.clk_post);
+ }
/* DSI lane settings */
dsi_phy_hw_v4_0_lane_settings(phy);
@@ -932,6 +991,21 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
return 0;
}
+static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *base = phy->base;
+ u32 data;
+
+ data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1);
+ if (enable)
+ data |= BIT(5) | BIT(6);
+ else
+ data &= ~(BIT(5) | BIT(6));
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1, data);
+
+ return enable;
+}
+
static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->base;
@@ -972,6 +1046,7 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
.pll_init = dsi_pll_7nm_init,
.save_pll_state = dsi_7nm_pll_save_state,
.restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
},
.min_pll_rate = 600000000UL,
#ifdef CONFIG_64BIT
@@ -998,9 +1073,36 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
.pll_init = dsi_pll_7nm_init,
.save_pll_state = dsi_7nm_pll_save_state,
.restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
},
.min_pll_rate = 1000000000UL,
.max_pll_rate = 3500000000UL,
.io_start = { 0xae94400, 0xae96400 },
.num_dsi_phy = 2,
};
+
+const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs = {
+ .has_phy_lane = true,
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vdds", 37550, 0},
+ },
+ },
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000ULL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400 },
+ .num_dsi_phy = 1,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_1,
+};
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9b8fa2ad0d84..2e6fc185e54d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -14,7 +14,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
-#include <drm/drm_irq.h>
#include <drm/drm_prime.h>
#include <drm/drm_of.h>
#include <drm/drm_vblank.h>
@@ -201,6 +200,71 @@ void msm_rmw(void __iomem *addr, u32 mask, u32 or)
msm_writel(val | or, addr);
}
+static irqreturn_t msm_irq(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ BUG_ON(!kms);
+
+ return kms->funcs->irq(kms);
+}
+
+static void msm_irq_preinstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ BUG_ON(!kms);
+
+ kms->funcs->irq_preinstall(kms);
+}
+
+static int msm_irq_postinstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ BUG_ON(!kms);
+
+ if (kms->funcs->irq_postinstall)
+ return kms->funcs->irq_postinstall(kms);
+
+ return 0;
+}
+
+static int msm_irq_install(struct drm_device *dev, unsigned int irq)
+{
+ int ret;
+
+ if (irq == IRQ_NOTCONNECTED)
+ return -ENOTCONN;
+
+ msm_irq_preinstall(dev);
+
+ ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
+ if (ret)
+ return ret;
+
+ ret = msm_irq_postinstall(dev);
+ if (ret) {
+ free_irq(irq, dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void msm_irq_uninstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ kms->funcs->irq_uninstall(kms);
+ free_irq(kms->irq, dev);
+}
+
struct msm_vblank_work {
struct work_struct work;
int crtc_id;
@@ -265,7 +329,7 @@ static int msm_drm_uninit(struct device *dev)
}
/* We must cancel and cleanup any pending vblank enable/disable
- * work before drm_irq_uninstall() to avoid work re-enabling an
+ * work before msm_irq_uninstall() to avoid work re-enabling an
* irq after uninstall has disabled it.
*/
@@ -294,7 +358,7 @@ static int msm_drm_uninit(struct device *dev)
drm_mode_config_cleanup(ddev);
pm_runtime_get_sync(dev);
- drm_irq_uninstall(ddev);
+ msm_irq_uninstall(ddev);
pm_runtime_put_sync(dev);
if (kms && kms->funcs)
@@ -539,6 +603,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (IS_ERR(priv->event_thread[i].worker)) {
ret = PTR_ERR(priv->event_thread[i].worker);
DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
goto err_msm_uninit;
}
@@ -553,7 +618,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (kms) {
pm_runtime_get_sync(dev);
- ret = drm_irq_install(ddev, kms->irq);
+ ret = msm_irq_install(ddev, kms->irq);
pm_runtime_put_sync(dev);
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
@@ -662,43 +727,6 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
context_close(ctx);
}
-static irqreturn_t msm_irq(int irq, void *arg)
-{
- struct drm_device *dev = arg;
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_kms *kms = priv->kms;
- BUG_ON(!kms);
- return kms->funcs->irq(kms);
-}
-
-static void msm_irq_preinstall(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_kms *kms = priv->kms;
- BUG_ON(!kms);
- kms->funcs->irq_preinstall(kms);
-}
-
-static int msm_irq_postinstall(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_kms *kms = priv->kms;
- BUG_ON(!kms);
-
- if (kms->funcs->irq_postinstall)
- return kms->funcs->irq_postinstall(kms);
-
- return 0;
-}
-
-static void msm_irq_uninstall(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_kms *kms = priv->kms;
- BUG_ON(!kms);
- kms->funcs->irq_uninstall(kms);
-}
-
int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -911,6 +939,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
ktime_t timeout = to_ktime(args->timeout);
struct msm_gpu_submitqueue *queue;
struct msm_gpu *gpu = priv->gpu;
+ struct dma_fence *fence;
int ret;
if (args->pad) {
@@ -925,10 +954,35 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
if (!queue)
return -ENOENT;
- ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
- true);
+ /*
+ * Map submitqueue scoped "seqno" (which is actually an idr key)
+ * back to underlying dma-fence
+ *
+ * The fence is removed from the fence_idr when the submit is
+ * retired, so if the fence is not found it means there is nothing
+ * to wait for
+ */
+ ret = mutex_lock_interruptible(&queue->lock);
+ if (ret)
+ return ret;
+ fence = idr_find(&queue->fence_idr, args->fence);
+ if (fence)
+ fence = dma_fence_get_rcu(fence);
+ mutex_unlock(&queue->lock);
+
+ if (!fence)
+ return 0;
+
+ ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout));
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ } else if (ret != -ERESTARTSYS) {
+ ret = 0;
+ }
+ dma_fence_put(fence);
msm_submitqueue_put(queue);
+
return ret;
}
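On the userspace side, the fence value passed to WAIT_FENCE is now the submitqueue-scoped id handed back by GEM_SUBMIT, paired with the queue id. A hedged sketch of the corresponding call (assumes libdrm; fd, queue_id and fence_id come from setup not shown; the timeout is an absolute CLOCK_MONOTONIC time):

#include <stdint.h>
#include <time.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>

static int wait_submit_fence(int fd, uint32_t queue_id, uint32_t fence_id)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);

	struct drm_msm_wait_fence req = {
		.fence = fence_id,		/* idr key returned by GEM_SUBMIT */
		.queueid = queue_id,
		/* absolute monotonic deadline, here ~1s from now */
		.timeout = { .tv_sec = now.tv_sec + 1, .tv_nsec = now.tv_nsec },
	};

	/* 0 once the fence has signalled (or was already retired);
	 * -1 with errno (ETIMEDOUT, EINTR, ...) otherwise */
	return drmIoctl(fd, DRM_IOCTL_MSM_WAIT_FENCE, &req);
}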
@@ -1004,17 +1058,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};
-static const struct file_operations fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .poll = drm_poll,
- .read = drm_read,
- .llseek = no_llseek,
- .mmap = msm_gem_mmap,
-};
+DEFINE_DRM_GEM_FOPS(fops);
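DEFINE_DRM_GEM_FOPS() supplies the standard GEM file_operations table; roughly, it expands to the sketch below (see include/drm/drm_gem.h). The practical difference from the removed hand-rolled table is that .mmap now goes through drm_gem_mmap(), which invokes the GEM object's .mmap callback rather than the deleted msm_gem_mmap():

static const struct file_operations fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_mmap,
};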
static const struct drm_driver msm_driver = {
.driver_features = DRIVER_GEM |
@@ -1025,16 +1069,12 @@ static const struct drm_driver msm_driver = {
.open = msm_open,
.postclose = msm_postclose,
.lastclose = drm_fb_helper_lastclose,
- .irq_handler = msm_irq,
- .irq_preinstall = msm_irq_preinstall,
- .irq_postinstall = msm_irq_postinstall,
- .irq_uninstall = msm_irq_uninstall,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
- .gem_prime_mmap = msm_gem_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 1a48a709ffb3..8b005d1ac899 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -309,7 +309,6 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
-int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -350,7 +349,9 @@ void __exit msm_dsi_unregister(void);
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder);
void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi);
-
+bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
+bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
+bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
#else
static inline void __init msm_dsi_register(void)
{
@@ -367,7 +368,18 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
static inline void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)
{
}
-
+static inline bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
+{
+ return false;
+}
+static inline bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi)
+{
+ return false;
+}
+static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
+{
+ return false;
+}
#endif
#ifdef CONFIG_DRM_MSM_DP
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 227404077e39..0daaeb54ff6f 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -8,13 +8,12 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_prime.h>
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
-extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
/*
@@ -48,15 +47,8 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
struct msm_fbdev *fbdev = to_msm_fbdev(helper);
struct drm_gem_object *bo = msm_framebuffer_bo(fbdev->fb, 0);
- int ret = 0;
- ret = drm_gem_mmap_obj(bo, bo->size, vma);
- if (ret) {
- pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
- return ret;
- }
-
- return msm_gem_mmap_obj(bo, vma);
+ return drm_gem_prime_mmap(bo, vma);
}
static int msm_fbdev_create(struct drm_fb_helper *helper,
@@ -169,7 +161,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
}
/* the fw fb could be anywhere in memory */
- ret = drm_aperture_remove_framebuffers(false, "msm");
+ ret = drm_aperture_remove_framebuffers(false, dev->driver);
if (ret)
goto fini;
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index cd59a5918038..f2cece542c3f 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -11,7 +11,8 @@
struct msm_fence_context *
-msm_fence_context_alloc(struct drm_device *dev, const char *name)
+msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
+ const char *name)
{
struct msm_fence_context *fctx;
@@ -22,7 +23,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name)
fctx->dev = dev;
strncpy(fctx->name, name, sizeof(fctx->name));
fctx->context = dma_fence_context_alloc(1);
- init_waitqueue_head(&fctx->event);
+ fctx->fenceptr = fenceptr;
spin_lock_init(&fctx->spinlock);
return fctx;
@@ -35,46 +36,12 @@ void msm_fence_context_free(struct msm_fence_context *fctx)
static inline bool fence_completed(struct msm_fence_context *fctx, uint32_t fence)
{
- return (int32_t)(fctx->completed_fence - fence) >= 0;
-}
-
-/* legacy path for WAIT_FENCE ioctl: */
-int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
- ktime_t *timeout, bool interruptible)
-{
- int ret;
-
- if (fence > fctx->last_fence) {
- DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n",
- fctx->name, fence, fctx->last_fence);
- return -EINVAL;
- }
-
- if (!timeout) {
- /* no-wait: */
- ret = fence_completed(fctx, fence) ? 0 : -EBUSY;
- } else {
- unsigned long remaining_jiffies = timeout_to_jiffies(timeout);
-
- if (interruptible)
- ret = wait_event_interruptible_timeout(fctx->event,
- fence_completed(fctx, fence),
- remaining_jiffies);
- else
- ret = wait_event_timeout(fctx->event,
- fence_completed(fctx, fence),
- remaining_jiffies);
-
- if (ret == 0) {
- DBG("timeout waiting for fence: %u (completed: %u)",
- fence, fctx->completed_fence);
- ret = -ETIMEDOUT;
- } else if (ret != -ERESTARTSYS) {
- ret = 0;
- }
- }
-
- return ret;
+ /*
+ * Note: Check completed_fence first, as fenceptr is in a write-combine
+ * mapping, so it will be more expensive to read.
+ */
+ return (int32_t)(fctx->completed_fence - fence) >= 0 ||
+ (int32_t)(*fctx->fenceptr - fence) >= 0;
}
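The signed 32-bit subtraction keeps this comparison correct across seqno wraparound. A tiny standalone check of that property:

#include <assert.h>
#include <stdint.h>

static int seqno_passed(uint32_t completed, uint32_t fence)
{
	/* same wrap-safe test as fence_completed() above */
	return (int32_t)(completed - fence) >= 0;
}

int main(void)
{
	assert(seqno_passed(100, 99));		/* ordinary case */
	assert(!seqno_passed(99, 100));		/* not yet signalled */
	assert(seqno_passed(5, 0xfffffffbu));	/* still correct across the wrap */
	return 0;
}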
/* called from workqueue */
@@ -83,8 +50,6 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
spin_lock(&fctx->spinlock);
fctx->completed_fence = max(fence, fctx->completed_fence);
spin_unlock(&fctx->spinlock);
-
- wake_up_all(&fctx->event);
}
struct msm_fence {
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index 2d9af66dcca5..4783db528bcc 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -9,23 +9,53 @@
#include "msm_drv.h"
+/**
+ * struct msm_fence_context - fence context for gpu
+ *
+ * Each ringbuffer has a single fence context, with the GPU writing an
+ * incrementing fence seqno at the end of each submit
+ */
struct msm_fence_context {
struct drm_device *dev;
+ /** name: human readable name for fence timeline */
char name[32];
+ /** context: see dma_fence_context_alloc() */
unsigned context;
- /* last_fence == completed_fence --> no pending work */
- uint32_t last_fence; /* last assigned fence */
- uint32_t completed_fence; /* last completed fence */
- wait_queue_head_t event;
+
+ /**
+ * last_fence:
+ *
+ * Last assigned fence, incremented each time a fence is created
+ * on this fence context. If last_fence == completed_fence,
+ * there is no remaining pending work
+ */
+ uint32_t last_fence;
+
+ /**
+ * completed_fence:
+ *
+ * The last completed fence, updated from the CPU after interrupt
+ * from GPU
+ */
+ uint32_t completed_fence;
+
+ /**
+ * fenceptr:
+ *
+ * The address that the GPU directly writes with completed fence
+ * seqno. This can be ahead of completed_fence. We can peek at
+ * this to see if a fence has already signaled but the CPU hasn't
+ * gotten around to handling the irq and updating completed_fence
+ */
+ volatile uint32_t *fenceptr;
+
spinlock_t spinlock;
};
struct msm_fence_context * msm_fence_context_alloc(struct drm_device *dev,
- const char *name);
+ volatile uint32_t *fenceptr, const char *name);
void msm_fence_context_free(struct msm_fence_context *fctx);
-int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
- ktime_t *timeout, bool interruptible);
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 1e8a971a86f2..22308a1b66fc 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -131,7 +131,6 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
sync_for_device(msm_obj);
- GEM_WARN_ON(msm_obj->active_count);
update_inactive(msm_obj);
}
@@ -218,31 +217,6 @@ static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
return prot;
}
-int msm_gem_mmap_obj(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
-
- return 0;
-}
-
-int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret) {
- DBG("mmap failed: %d", ret);
- return ret;
- }
-
- return msm_gem_mmap_obj(vma->vm_private_data, vma);
-}
-
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
@@ -804,41 +778,6 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
msm_obj->vaddr = NULL;
}
-/* must be called before _move_to_active().. */
-int msm_gem_sync_object(struct drm_gem_object *obj,
- struct msm_fence_context *fctx, bool exclusive)
-{
- struct dma_resv_list *fobj;
- struct dma_fence *fence;
- int i, ret;
-
- fobj = dma_resv_shared_list(obj->resv);
- if (!fobj || (fobj->shared_count == 0)) {
- fence = dma_resv_excl_fence(obj->resv);
- /* don't need to wait on our own fences, since ring is fifo */
- if (fence && (fence->context != fctx->context)) {
- ret = dma_fence_wait(fence, true);
- if (ret)
- return ret;
- }
- }
-
- if (!exclusive || !fobj)
- return 0;
-
- for (i = 0; i < fobj->shared_count; i++) {
- fence = rcu_dereference_protected(fobj->shared[i],
- dma_resv_held(obj->resv));
- if (fence->context != fctx->context) {
- ret = dma_fence_wait(fence, true);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -848,14 +787,12 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
GEM_WARN_ON(!msm_gem_is_locked(obj));
GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
GEM_WARN_ON(msm_obj->dontneed);
- GEM_WARN_ON(!msm_obj->sgt);
if (msm_obj->active_count++ == 0) {
mutex_lock(&priv->mm_lock);
if (msm_obj->evictable)
mark_unevictable(msm_obj);
- list_del(&msm_obj->mm_list);
- list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+ list_move_tail(&msm_obj->mm_list, &gpu->active_list);
mutex_unlock(&priv->mm_lock);
}
}
@@ -1062,7 +999,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
}
#endif
-/* don't call directly! Use drm_gem_object_put_locked() and friends */
+/* don't call directly! Use drm_gem_object_put() */
void msm_gem_free_object(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -1114,6 +1051,17 @@ void msm_gem_free_object(struct drm_gem_object *obj)
kfree(msm_obj);
}
+static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
+
+ return 0;
+}
+
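With the per-object .mmap callback in place, userspace maps a BO by asking for its fake mmap offset and then mmap()ing the DRM fd at that offset. A hedged sketch (assumes libdrm and the msm uapi header; fd, handle and size come from GEM_NEW / earlier setup not shown):

#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>

static void *map_bo(int fd, uint32_t handle, size_t size)
{
	struct drm_msm_gem_info req = {
		.handle = handle,
		.info = MSM_INFO_GET_OFFSET,	/* ask for the fake mmap offset */
	};

	if (drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req))
		return MAP_FAILED;

	/* the offset is only a lookup token; page protection and flags are
	 * applied by the driver's gem .mmap callback (msm_gem_object_mmap) */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, req.value);
}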
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle,
@@ -1151,6 +1099,7 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.get_sg_table = msm_gem_prime_get_sg_table,
.vmap = msm_gem_prime_vmap,
.vunmap = msm_gem_prime_vunmap,
+ .mmap = msm_gem_object_mmap,
.vm_ops = &vm_ops,
};
@@ -1183,7 +1132,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
- INIT_LIST_HEAD(&msm_obj->submit_entry);
INIT_LIST_HEAD(&msm_obj->vmas);
*obj = &msm_obj->base;
@@ -1192,8 +1140,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
return 0;
}
-static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
- uint32_t size, uint32_t flags, bool struct_mutex_locked)
+struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
@@ -1280,26 +1227,10 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
return obj;
fail:
- if (struct_mutex_locked) {
- drm_gem_object_put_locked(obj);
- } else {
- drm_gem_object_put(obj);
- }
+ drm_gem_object_put(obj);
return ERR_PTR(ret);
}
-struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
- uint32_t size, uint32_t flags)
-{
- return _msm_gem_new(dev, size, flags, true);
-}
-
-struct drm_gem_object *msm_gem_new(struct drm_device *dev,
- uint32_t size, uint32_t flags)
-{
- return _msm_gem_new(dev, size, flags, false);
-}
-
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt)
{
@@ -1358,12 +1289,12 @@ fail:
return ERR_PTR(ret);
}
-static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova, bool locked)
+ struct drm_gem_object **bo, uint64_t *iova)
{
void *vaddr;
- struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
+ struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
int ret;
if (IS_ERR(obj))
@@ -1387,42 +1318,21 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
return vaddr;
err:
- if (locked)
- drm_gem_object_put_locked(obj);
- else
- drm_gem_object_put(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(ret);
}
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova)
-{
- return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
-}
-
-void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova)
-{
- return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
-}
-
void msm_gem_kernel_put(struct drm_gem_object *bo,
- struct msm_gem_address_space *aspace, bool locked)
+ struct msm_gem_address_space *aspace)
{
if (IS_ERR_OR_NULL(bo))
return;
msm_gem_put_vaddr(bo);
msm_gem_unpin_iova(bo, aspace);
-
- if (locked)
- drm_gem_object_put_locked(bo);
- else
- drm_gem_object_put(bo);
+ drm_gem_object_put(bo);
}
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 405f8411e395..e39a8e7ad843 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -9,6 +9,7 @@
#include <linux/kref.h>
#include <linux/dma-resv.h>
+#include "drm/gpu_scheduler.h"
#include "msm_drv.h"
/* Make all GEM related WARN_ON()s ratelimited.. when things go wrong they
@@ -87,13 +88,6 @@ struct msm_gem_object {
*/
struct list_head mm_list;
- /* Transiently in the process of submit ioctl, objects associated
- * with the submit are on submit->bo_list.. this only lasts for
- * the duration of the ioctl, so one bo can never be on multiple
- * submit lists.
- */
- struct list_head submit_entry;
-
struct page **pages;
struct sg_table *sgt;
void *vaddr;
@@ -112,9 +106,6 @@ struct msm_gem_object {
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
-int msm_gem_mmap_obj(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
-int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
@@ -143,8 +134,6 @@ void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
-int msm_gem_sync_object(struct drm_gem_object *obj,
- struct msm_fence_context *fctx, bool exclusive);
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
void msm_gem_active_put(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
@@ -154,16 +143,11 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
-struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
- uint32_t size, uint32_t flags);
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
uint32_t flags, struct msm_gem_address_space *aspace,
struct drm_gem_object **bo, uint64_t *iova);
-void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova);
void msm_gem_kernel_put(struct drm_gem_object *bo,
- struct msm_gem_address_space *aspace, bool locked);
+ struct msm_gem_address_space *aspace);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
__printf(2, 3)
@@ -313,19 +297,34 @@ void msm_gem_vunmap(struct drm_gem_object *obj);
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and
- * make it easier to unwind when things go wrong, etc). This only
- * lasts for the duration of the submit-ioctl.
+ * make it easier to unwind when things go wrong, etc).
*/
struct msm_gem_submit {
+ struct drm_sched_job base;
struct kref ref;
struct drm_device *dev;
struct msm_gpu *gpu;
struct msm_gem_address_space *aspace;
struct list_head node; /* node in ring submit list */
- struct list_head bo_list;
struct ww_acquire_ctx ticket;
uint32_t seqno; /* Sequence number of the submit on the ring */
- struct dma_fence *fence;
+
+ /* Array of struct dma_fence * to block on before submitting this job.
+ */
+ struct xarray deps;
+ unsigned long last_dep;
+
+ /* Hw fence, which is created when the scheduler executes the job, and
+ * is signaled when the hw finishes (via seqno write from cmdstream)
+ */
+ struct dma_fence *hw_fence;
+
+ /* Userspace visible fence, which is signaled by the scheduler after
+ * the hw_fence is signaled.
+ */
+ struct dma_fence *user_fence;
+
+ int fence_id; /* key into queue->fence_idr */
struct msm_gpu_submitqueue *queue;
struct pid *pid; /* submitting process */
bool fault_dumped; /* Limit devcoredump dumping to one per submit */
@@ -355,6 +354,11 @@ struct msm_gem_submit {
} bos[];
};
+static inline struct msm_gem_submit *to_msm_submit(struct drm_sched_job *job)
+{
+ return container_of(job, struct msm_gem_submit, base);
+}
+
void __msm_gem_submit_destroy(struct kref *kref);
static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
@@ -367,6 +371,8 @@ static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
kref_put(&submit->ref, __msm_gem_submit_destroy);
}
+void msm_submit_retire(struct msm_gem_submit *submit);
+
/* helper to determine of a buffer in submit should be dumped, used for both
* devcoredump and debugfs cmdstream dumping:
*/
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 9880348a4dc7..fc94e061d6a7 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -39,17 +39,6 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
msm_gem_put_vaddr(obj);
}
-int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret < 0)
- return ret;
-
- return msm_gem_mmap_obj(vma->vm_private_data, vma);
-}
-
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 44f84bfd0c0e..fdc5367aecaa 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -23,8 +23,9 @@
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
-#define BO_LOCKED 0x4000
-#define BO_PINNED 0x2000
+#define BO_LOCKED 0x4000 /* obj lock is held */
+#define BO_ACTIVE 0x2000 /* active refcnt is held */
+#define BO_PINNED 0x1000 /* obj is pinned and on active list */
static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu,
@@ -32,32 +33,37 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
uint32_t nr_cmds)
{
struct msm_gem_submit *submit;
- uint64_t sz = struct_size(submit, bos, nr_bos) +
- ((u64)nr_cmds * sizeof(submit->cmd[0]));
+ uint64_t sz;
+ int ret;
+
+ sz = struct_size(submit, bos, nr_bos) +
+ ((u64)nr_cmds * sizeof(submit->cmd[0]));
if (sz > SIZE_MAX)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (!submit)
- return NULL;
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_sched_job_init(&submit->base, &queue->entity, queue);
+ if (ret) {
+ kfree(submit);
+ return ERR_PTR(ret);
+ }
+
+ xa_init_flags(&submit->deps, XA_FLAGS_ALLOC);
kref_init(&submit->ref);
submit->dev = dev;
submit->aspace = queue->ctx->aspace;
submit->gpu = gpu;
- submit->fence = NULL;
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
- submit->ring = gpu->rb[queue->prio];
+ submit->ring = gpu->rb[queue->ring_nr];
submit->fault_dumped = false;
- /* initially, until copy_from_user() and bo lookup succeeds: */
- submit->nr_bos = 0;
- submit->nr_cmds = 0;
-
INIT_LIST_HEAD(&submit->node);
- INIT_LIST_HEAD(&submit->bo_list);
return submit;
}
@@ -66,9 +72,25 @@ void __msm_gem_submit_destroy(struct kref *kref)
{
struct msm_gem_submit *submit =
container_of(kref, struct msm_gem_submit, ref);
+ unsigned long index;
+ struct dma_fence *fence;
unsigned i;
- dma_fence_put(submit->fence);
+ if (submit->fence_id) {
+ mutex_lock(&submit->queue->lock);
+ idr_remove(&submit->queue->fence_idr, submit->fence_id);
+ mutex_unlock(&submit->queue->lock);
+ }
+
+ xa_for_each (&submit->deps, index, fence) {
+ dma_fence_put(fence);
+ }
+
+ xa_destroy(&submit->deps);
+
+ dma_fence_put(submit->user_fence);
+ dma_fence_put(submit->hw_fence);
+
put_pid(submit->pid);
msm_submitqueue_put(submit->queue);
@@ -121,7 +143,6 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
for (i = 0; i < args->nr_bos; i++) {
struct drm_gem_object *obj;
- struct msm_gem_object *msm_obj;
/* normally use drm_gem_object_lookup(), but for bulk lookup
* all under single table_lock just hit object_idr directly:
@@ -133,20 +154,9 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
goto out_unlock;
}
- msm_obj = to_msm_bo(obj);
-
- if (!list_empty(&msm_obj->submit_entry)) {
- DRM_ERROR("handle %u at index %u already on submit list\n",
- submit->bos[i].handle, i);
- ret = -EINVAL;
- goto out_unlock;
- }
-
drm_gem_object_get(obj);
- submit->bos[i].obj = msm_obj;
-
- list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
+ submit->bos[i].obj = to_msm_bo(obj);
}
out_unlock:
@@ -220,21 +230,34 @@ out:
return ret;
}
-static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
- int i, bool backoff)
+/* Unwind bo state, according to cleanup_flags. In the success case, only
+ * the lock is dropped at the end of the submit (and active/pin ref is dropped
+ * later when the submit is retired).
+ */
+static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
+ unsigned cleanup_flags)
{
- struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
+ unsigned flags = submit->bos[i].flags & cleanup_flags;
- if (submit->bos[i].flags & BO_PINNED)
- msm_gem_unpin_iova_locked(&msm_obj->base, submit->aspace);
+ if (flags & BO_PINNED)
+ msm_gem_unpin_iova_locked(obj, submit->aspace);
- if (submit->bos[i].flags & BO_LOCKED)
- dma_resv_unlock(msm_obj->base.resv);
+ if (flags & BO_ACTIVE)
+ msm_gem_active_put(obj);
- if (backoff && !(submit->bos[i].flags & BO_VALID))
- submit->bos[i].iova = 0;
+ if (flags & BO_LOCKED)
+ dma_resv_unlock(obj->resv);
- submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
+ submit->bos[i].flags &= ~cleanup_flags;
+}
+
+static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
+{
+ submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE | BO_LOCKED);
+
+ if (!(submit->bos[i].flags & BO_VALID))
+ submit->bos[i].iova = 0;
}
/* This is where we make sure all the bo's are reserved and pin'd: */
@@ -265,11 +288,17 @@ retry:
return 0;
fail:
+ if (ret == -EALREADY) {
+ DRM_ERROR("handle %u at index %u already on submit list\n",
+ submit->bos[i].handle, i);
+ ret = -EINVAL;
+ }
+
for (; i >= 0; i--)
- submit_unlock_unpin_bo(submit, i, true);
+ submit_unlock_unpin_bo(submit, i);
if (slow_locked > 0)
- submit_unlock_unpin_bo(submit, slow_locked, true);
+ submit_unlock_unpin_bo(submit, slow_locked);
if (ret == -EDEADLK) {
struct msm_gem_object *msm_obj = submit->bos[contended].obj;
@@ -281,6 +310,12 @@ fail:
slow_locked = contended;
goto retry;
}
+
+ /* Not expecting -EALREADY here; if the bo was already
+ * locked, we should have gotten -EALREADY from
+ * the dma_resv_lock_interruptible() call.
+ */
+ WARN_ON_ONCE(ret == -EALREADY);
}
return ret;
@@ -291,7 +326,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
int i, ret = 0;
for (i = 0; i < submit->nr_bos; i++) {
- struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
if (!write) {
@@ -300,8 +335,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
* strange place to call it. OTOH this is a
* convenient can-fail point to hook it in.
*/
- ret = dma_resv_reserve_shared(msm_obj->base.resv,
- 1);
+ ret = dma_resv_reserve_shared(obj->resv, 1);
if (ret)
return ret;
}
@@ -309,7 +343,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
if (no_implicit)
continue;
- ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
+ ret = drm_gem_fence_array_add_implicit(&submit->deps, obj,
write);
if (ret)
break;
@@ -324,12 +358,24 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
submit->valid = true;
+ /*
+ * Increment active_count first, so if under memory pressure, we
+ * don't inadvertently evict a bo needed by the submit in order
+ * to pin an earlier bo in the same submit.
+ */
for (i = 0; i < submit->nr_bos; i++) {
- struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
+
+ msm_gem_active_get(obj, submit->gpu);
+ submit->bos[i].flags |= BO_ACTIVE;
+ }
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
uint64_t iova;
/* if locking succeeded, pin bo: */
- ret = msm_gem_get_and_pin_iova_locked(&msm_obj->base,
+ ret = msm_gem_get_and_pin_iova_locked(obj,
submit->aspace, &iova);
if (ret)
@@ -350,6 +396,20 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
return ret;
}
+static void submit_attach_object_fences(struct msm_gem_submit *submit)
+{
+ int i;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
+
+ if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+ dma_resv_add_excl_fence(obj->resv, submit->user_fence);
+ else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
+ dma_resv_add_shared_fence(obj->resv, submit->user_fence);
+ }
+}
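submit_attach_object_fences() implements the implicit-fencing convention documented elsewhere in this pull: write submits attach an exclusive fence, read submits a shared fence. The consumer side within this driver is submit_fence_sync() above, via drm_gem_fence_array_add_implicit(). For reference, a minimal sketch of what a generic dma-resv consumer does with these fences (standard dma-resv usage, not code from this patch; "write" and the timeout are placeholder assumptions):

	/* readers wait on the exclusive (writer) fence only; writers wait
	 * on all attached fences: */
	long ret = dma_resv_wait_timeout(obj->resv, write /* wait_all */,
					 true /* intr */,
					 msecs_to_jiffies(100));
	if (ret == 0)
		ret = -ETIMEDOUT;	/* no signal within the timeout */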
+
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
@@ -444,18 +504,39 @@ out:
return ret;
}
-static void submit_cleanup(struct msm_gem_submit *submit)
+/* Cleanup submit at end of ioctl. In the error case, this also drops
+ * references, unpins, and drops active refcnt. In the non-error case,
+ * this is done when the submit is retired.
+ */
+static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
+ unsigned cleanup_flags = BO_LOCKED;
unsigned i;
+ if (error)
+ cleanup_flags |= BO_PINNED | BO_ACTIVE;
+
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- submit_unlock_unpin_bo(submit, i, false);
- list_del_init(&msm_obj->submit_entry);
- drm_gem_object_put_locked(&msm_obj->base);
+ submit_cleanup_bo(submit, i, cleanup_flags);
+ if (error)
+ drm_gem_object_put(&msm_obj->base);
}
}
+void msm_submit_retire(struct msm_gem_submit *submit)
+{
+ int i;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
+
+ msm_gem_lock(obj);
+ submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE);
+ msm_gem_unlock(obj);
+ drm_gem_object_put(obj);
+ }
+}
struct msm_submit_post_dep {
struct drm_syncobj *syncobj;
@@ -463,12 +544,12 @@ struct msm_submit_post_dep {
struct dma_fence_chain *chain;
};
-static struct drm_syncobj **msm_wait_deps(struct drm_device *dev,
- struct drm_file *file,
- uint64_t in_syncobjs_addr,
- uint32_t nr_in_syncobjs,
- size_t syncobj_stride,
- struct msm_ringbuffer *ring)
+static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
+ struct drm_file *file,
+ uint64_t in_syncobjs_addr,
+ uint32_t nr_in_syncobjs,
+ size_t syncobj_stride,
+ struct msm_ringbuffer *ring)
{
struct drm_syncobj **syncobjs = NULL;
struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
@@ -492,7 +573,7 @@ static struct drm_syncobj **msm_wait_deps(struct drm_device *dev,
}
if (syncobj_desc.point &&
- !drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) {
+ !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
ret = -EOPNOTSUPP;
break;
}
@@ -507,10 +588,7 @@ static struct drm_syncobj **msm_wait_deps(struct drm_device *dev,
if (ret)
break;
- if (!dma_fence_match_context(fence, ring->fctx->context))
- ret = dma_fence_wait(fence, true);
-
- dma_fence_put(fence);
+ ret = drm_gem_fence_array_add(&submit->deps, fence);
if (ret)
break;
@@ -587,9 +665,7 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
break;
}
- post_deps[i].chain =
- kmalloc(sizeof(*post_deps[i].chain),
- GFP_KERNEL);
+ post_deps[i].chain = dma_fence_chain_alloc();
if (!post_deps[i].chain) {
ret = -ENOMEM;
break;
@@ -606,7 +682,7 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
if (ret) {
for (j = 0; j <= i; ++j) {
- kfree(post_deps[j].chain);
+ dma_fence_chain_free(post_deps[j].chain);
if (post_deps[j].syncobj)
drm_syncobj_put(post_deps[j].syncobj);
}
@@ -643,9 +719,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
struct msm_file_private *ctx = file->driver_priv;
- struct msm_gem_submit *submit;
+ struct msm_gem_submit *submit = NULL;
struct msm_gpu *gpu = priv->gpu;
- struct sync_file *sync_file = NULL;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
struct msm_submit_post_dep *post_deps = NULL;
@@ -655,6 +730,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
bool has_ww_ticket = false;
unsigned i;
int ret, submitid;
+
if (!gpu)
return -ENXIO;
@@ -683,38 +759,59 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
/* Get a unique identifier for the submission for logging purposes */
submitid = atomic_inc_return(&ident) - 1;
- ring = gpu->rb[queue->prio];
+ ring = gpu->rb[queue->ring_nr];
trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
args->nr_bos, args->nr_cmds);
+ ret = mutex_lock_interruptible(&queue->lock);
+ if (ret)
+ goto out_post_unlock;
+
+ if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0) {
+ ret = out_fence_fd;
+ goto out_unlock;
+ }
+ }
+
+ submit = submit_create(dev, gpu, queue, args->nr_bos,
+ args->nr_cmds);
+ if (IS_ERR(submit)) {
+ ret = PTR_ERR(submit);
+ goto out_unlock;
+ }
+
+ submit->pid = pid;
+ submit->ident = submitid;
+
+ if (args->flags & MSM_SUBMIT_SUDO)
+ submit->in_rb = true;
+
if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
struct dma_fence *in_fence;
in_fence = sync_file_get_fence(args->fence_fd);
- if (!in_fence)
- return -EINVAL;
-
- /*
- * Wait if the fence is from a foreign context, or if the fence
- * array contains any fence from a foreign context.
- */
- ret = 0;
- if (!dma_fence_match_context(in_fence, ring->fctx->context))
- ret = dma_fence_wait(in_fence, true);
+ if (!in_fence) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
- dma_fence_put(in_fence);
+ ret = drm_gem_fence_array_add(&submit->deps, in_fence);
if (ret)
- return ret;
+ goto out_unlock;
}
if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
- syncobjs_to_reset = msm_wait_deps(dev, file,
- args->in_syncobjs,
- args->nr_in_syncobjs,
- args->syncobj_stride, ring);
- if (IS_ERR(syncobjs_to_reset))
- return PTR_ERR(syncobjs_to_reset);
+ syncobjs_to_reset = msm_parse_deps(submit, file,
+ args->in_syncobjs,
+ args->nr_in_syncobjs,
+ args->syncobj_stride, ring);
+ if (IS_ERR(syncobjs_to_reset)) {
+ ret = PTR_ERR(syncobjs_to_reset);
+ goto out_unlock;
+ }
}
if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
@@ -724,51 +821,17 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
args->syncobj_stride);
if (IS_ERR(post_deps)) {
ret = PTR_ERR(post_deps);
- goto out_post_unlock;
- }
- }
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- goto out_post_unlock;
-
- if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
- out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
- if (out_fence_fd < 0) {
- ret = out_fence_fd;
goto out_unlock;
}
}
- submit = submit_create(dev, gpu, queue, args->nr_bos,
- args->nr_cmds);
- if (!submit) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- submit->pid = pid;
- submit->ident = submitid;
-
- if (args->flags & MSM_SUBMIT_SUDO)
- submit->in_rb = true;
-
ret = submit_lookup_objects(submit, args, file);
if (ret)
- goto out_pre_pm;
+ goto out;
ret = submit_lookup_cmds(submit, args, file);
if (ret)
- goto out_pre_pm;
-
- /*
- * Thanks to dev_pm_opp opp_table_lock interactions with mm->mmap_sem
- * in the resume path, we need to to rpm get before we lock objs.
- * Which unfortunately might involve powering up the GPU sooner than
- * is necessary. But at least in the explicit fencing case, we will
- * have already done all the fence waiting.
- */
- pm_runtime_get_sync(&gpu->pdev->dev);
+ goto out;
/* copy_*_user while holding a ww ticket upsets lockdep */
ww_acquire_init(&submit->ticket, &reservation_ww_class);
@@ -815,47 +878,54 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
- submit->fence = msm_fence_alloc(ring->fctx);
- if (IS_ERR(submit->fence)) {
- ret = PTR_ERR(submit->fence);
- submit->fence = NULL;
+ submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);
+
+ /*
+ * Allocate an id which can be used by WAIT_FENCE ioctl to map back
+ * to the underlying fence.
+ */
+ submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
+ submit->user_fence, 0, INT_MAX, GFP_KERNEL);
+ if (submit->fence_id < 0) {
+ ret = submit->fence_id;
+ submit->fence_id = 0;
goto out;
}
if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
- sync_file = sync_file_create(submit->fence);
+ struct sync_file *sync_file = sync_file_create(submit->user_fence);
if (!sync_file) {
ret = -ENOMEM;
goto out;
}
+ fd_install(out_fence_fd, sync_file->file);
+ args->fence_fd = out_fence_fd;
}
- msm_gpu_submit(gpu, submit);
+ submit_attach_object_fences(submit);
- args->fence = submit->fence->seqno;
+ /* The scheduler owns a ref now: */
+ msm_gem_submit_get(submit);
- if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
- fd_install(out_fence_fd, sync_file->file);
- args->fence_fd = out_fence_fd;
- }
+ drm_sched_entity_push_job(&submit->base, &queue->entity);
+
+ args->fence = submit->fence_id;
msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
msm_process_post_deps(post_deps, args->nr_out_syncobjs,
- submit->fence);
+ submit->user_fence);
out:
- pm_runtime_put(&gpu->pdev->dev);
-out_pre_pm:
- submit_cleanup(submit);
+ submit_cleanup(submit, !!ret);
if (has_ww_ticket)
ww_acquire_fini(&submit->ticket);
- msm_gem_submit_put(submit);
out_unlock:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);
- mutex_unlock(&dev->struct_mutex);
-
+ mutex_unlock(&queue->lock);
+ if (submit)
+ msm_gem_submit_put(submit);
out_post_unlock:
if (!IS_ERR_OR_NULL(post_deps)) {
for (i = 0; i < args->nr_out_syncobjs; ++i) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 0ebf7bc6ad09..8a3a592da3a4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -13,8 +13,6 @@
#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
-#include <linux/devfreq.h>
-#include <linux/devfreq_cooling.h>
#include <linux/devcoredump.h>
#include <linux/sched/task.h>
@@ -22,106 +20,6 @@
* Power Management:
*/
-static int msm_devfreq_target(struct device *dev, unsigned long *freq,
- u32 flags)
-{
- struct msm_gpu *gpu = dev_to_gpu(dev);
- struct dev_pm_opp *opp;
-
- opp = devfreq_recommended_opp(dev, freq, flags);
-
- if (IS_ERR(opp))
- return PTR_ERR(opp);
-
- trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
-
- if (gpu->funcs->gpu_set_freq)
- gpu->funcs->gpu_set_freq(gpu, opp);
- else
- clk_set_rate(gpu->core_clk, *freq);
-
- dev_pm_opp_put(opp);
-
- return 0;
-}
-
-static int msm_devfreq_get_dev_status(struct device *dev,
- struct devfreq_dev_status *status)
-{
- struct msm_gpu *gpu = dev_to_gpu(dev);
- ktime_t time;
-
- if (gpu->funcs->gpu_get_freq)
- status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
- else
- status->current_frequency = clk_get_rate(gpu->core_clk);
-
- status->busy_time = gpu->funcs->gpu_busy(gpu);
-
- time = ktime_get();
- status->total_time = ktime_us_delta(time, gpu->devfreq.time);
- gpu->devfreq.time = time;
-
- return 0;
-}
-
-static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
-{
- struct msm_gpu *gpu = dev_to_gpu(dev);
-
- if (gpu->funcs->gpu_get_freq)
- *freq = gpu->funcs->gpu_get_freq(gpu);
- else
- *freq = clk_get_rate(gpu->core_clk);
-
- return 0;
-}
-
-static struct devfreq_dev_profile msm_devfreq_profile = {
- .polling_ms = 10,
- .target = msm_devfreq_target,
- .get_dev_status = msm_devfreq_get_dev_status,
- .get_cur_freq = msm_devfreq_get_cur_freq,
-};
-
-static void msm_devfreq_init(struct msm_gpu *gpu)
-{
- /* We need target support to do devfreq */
- if (!gpu->funcs->gpu_busy)
- return;
-
- msm_devfreq_profile.initial_freq = gpu->fast_rate;
-
- /*
- * Don't set the freq_table or max_state and let devfreq build the table
- * from OPP
- * After a deferred probe, these may have be left to non-zero values,
- * so set them back to zero before creating the devfreq device
- */
- msm_devfreq_profile.freq_table = NULL;
- msm_devfreq_profile.max_state = 0;
-
- gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
- &msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
- NULL);
-
- if (IS_ERR(gpu->devfreq.devfreq)) {
- DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
- gpu->devfreq.devfreq = NULL;
- return;
- }
-
- devfreq_suspend_device(gpu->devfreq.devfreq);
-
- gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node,
- gpu->devfreq.devfreq);
- if (IS_ERR(gpu->cooling)) {
- DRM_DEV_ERROR(&gpu->pdev->dev,
- "Couldn't register GPU cooling device\n");
- gpu->cooling = NULL;
- }
-}
-
static int enable_pwrrail(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
@@ -196,14 +94,6 @@ static int disable_axi(struct msm_gpu *gpu)
return 0;
}
-void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
-{
- gpu->devfreq.busy_cycles = 0;
- gpu->devfreq.time = ktime_get();
-
- devfreq_resume_device(gpu->devfreq.devfreq);
-}
-
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
int ret;
@@ -223,7 +113,7 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
- msm_gpu_resume_devfreq(gpu);
+ msm_devfreq_resume(gpu);
gpu->needs_hw_init = true;
@@ -237,7 +127,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
DBG("%s", gpu->name);
trace_msm_gpu_suspend(0);
- devfreq_suspend_device(gpu->devfreq.devfreq);
+ msm_devfreq_suspend(gpu);
ret = disable_axi(gpu);
if (ret)
@@ -278,16 +168,18 @@ static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
uint32_t fence)
{
struct msm_gem_submit *submit;
+ unsigned long flags;
- spin_lock(&ring->submit_lock);
+ spin_lock_irqsave(&ring->submit_lock, flags);
list_for_each_entry(submit, &ring->submits, node) {
if (submit->seqno > fence)
break;
msm_update_fence(submit->ring->fctx,
- submit->fence->seqno);
+ submit->hw_fence->seqno);
+ dma_fence_signal(submit->hw_fence);
}
- spin_unlock(&ring->submit_lock);
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
}
#ifdef CONFIG_DEV_COREDUMP
@@ -443,15 +335,16 @@ static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
struct msm_gem_submit *submit;
+ unsigned long flags;
- spin_lock(&ring->submit_lock);
+ spin_lock_irqsave(&ring->submit_lock, flags);
list_for_each_entry(submit, &ring->submits, node) {
if (submit->seqno == fence) {
- spin_unlock(&ring->submit_lock);
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
return submit;
}
}
- spin_unlock(&ring->submit_lock);
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
return NULL;
}
@@ -487,10 +380,6 @@ static void recover_worker(struct kthread_work *work)
put_task_struct(task);
}
- /* msm_rd_dump_submit() needs bo locked to dump: */
- for (i = 0; i < submit->nr_bos; i++)
- msm_gem_lock(&submit->bos[i].obj->base);
-
if (comm && cmd) {
DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
gpu->name, comm, cmd);
@@ -500,9 +389,6 @@ static void recover_worker(struct kthread_work *work)
} else {
msm_rd_dump_submit(priv->hangrd, submit, NULL);
}
-
- for (i = 0; i < submit->nr_bos; i++)
- msm_gem_unlock(&submit->bos[i].obj->base);
}
/* Record the crash state */
@@ -547,11 +433,12 @@ static void recover_worker(struct kthread_work *work)
*/
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
+ unsigned long flags;
- spin_lock(&ring->submit_lock);
+ spin_lock_irqsave(&ring->submit_lock, flags);
list_for_each_entry(submit, &ring->submits, node)
gpu->funcs->submit(gpu, submit);
- spin_unlock(&ring->submit_lock);
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
}
}
@@ -641,7 +528,7 @@ static void hangcheck_handler(struct timer_list *t)
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
- kthread_queue_work(gpu->worker, &gpu->retire_work);
+ msm_gpu_retire(gpu);
}
/*
@@ -752,7 +639,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
volatile struct msm_gpu_submit_stats *stats;
u64 elapsed, clock = 0;
- int i;
+ unsigned long flags;
stats = &ring->memptrs->stats[index];
/* Convert 19.2Mhz alwayson ticks to nanoseconds for elapsed time */
@@ -768,22 +655,22 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
trace_msm_gpu_submit_retired(submit, elapsed, clock,
stats->alwayson_start, stats->alwayson_end);
- for (i = 0; i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = &submit->bos[i].obj->base;
-
- msm_gem_lock(obj);
- msm_gem_active_put(obj);
- msm_gem_unpin_iova_locked(obj, submit->aspace);
- msm_gem_unlock(obj);
- drm_gem_object_put(obj);
- }
+ msm_submit_retire(submit);
pm_runtime_mark_last_busy(&gpu->pdev->dev);
pm_runtime_put_autosuspend(&gpu->pdev->dev);
- spin_lock(&ring->submit_lock);
+ spin_lock_irqsave(&ring->submit_lock, flags);
list_del(&submit->node);
- spin_unlock(&ring->submit_lock);
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
+
+ /* Update devfreq on transition from active->idle: */
+ mutex_lock(&gpu->active_lock);
+ gpu->active_submits--;
+ WARN_ON(gpu->active_submits < 0);
+ if (!gpu->active_submits)
+ msm_devfreq_idle(gpu);
+ mutex_unlock(&gpu->active_lock);
msm_gem_submit_put(submit);
}
@@ -798,18 +685,19 @@ static void retire_submits(struct msm_gpu *gpu)
while (true) {
struct msm_gem_submit *submit = NULL;
+ unsigned long flags;
- spin_lock(&ring->submit_lock);
+ spin_lock_irqsave(&ring->submit_lock, flags);
submit = list_first_entry_or_null(&ring->submits,
struct msm_gem_submit, node);
- spin_unlock(&ring->submit_lock);
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
/*
* If no submit, we are done. If submit->fence hasn't
* been signalled, then later submits are not signalled
* either, so we are also done.
*/
- if (submit && dma_fence_is_signaled(submit->fence)) {
+ if (submit && dma_fence_is_signaled(submit->hw_fence)) {
retire_submit(gpu, ring, submit);
} else {
break;
@@ -821,10 +709,6 @@ static void retire_submits(struct msm_gpu *gpu)
static void retire_worker(struct kthread_work *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
- int i;
-
- for (i = 0; i < gpu->nr_rings; i++)
- update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
retire_submits(gpu);
}
@@ -832,6 +716,11 @@ static void retire_worker(struct kthread_work *work)
/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++)
+ update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
+
kthread_queue_work(gpu->worker, &gpu->retire_work);
update_sw_cntrs(gpu);
}
@@ -842,7 +731,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
- int i;
+ unsigned long flags;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -856,32 +745,22 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
update_sw_cntrs(gpu);
- for (i = 0; i < submit->nr_bos; i++) {
- struct msm_gem_object *msm_obj = submit->bos[i].obj;
- struct drm_gem_object *drm_obj = &msm_obj->base;
- uint64_t iova;
-
- /* submit takes a reference to the bo and iova until retired: */
- drm_gem_object_get(&msm_obj->base);
- msm_gem_get_and_pin_iova_locked(&msm_obj->base, submit->aspace, &iova);
-
- if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
- dma_resv_add_excl_fence(drm_obj->resv, submit->fence);
- else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
- dma_resv_add_shared_fence(drm_obj->resv, submit->fence);
-
- msm_gem_active_get(drm_obj, gpu);
- }
-
/*
* ring->submits holds a ref to the submit, to deal with the case
* that a submit completes before msm_ioctl_gem_submit() returns.
*/
msm_gem_submit_get(submit);
- spin_lock(&ring->submit_lock);
+ spin_lock_irqsave(&ring->submit_lock, flags);
list_add_tail(&submit->node, &ring->submits);
- spin_unlock(&ring->submit_lock);
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
+
+ /* Update devfreq on transition from idle->active: */
+ mutex_lock(&gpu->active_lock);
+ if (!gpu->active_submits)
+ msm_devfreq_active(gpu);
+ gpu->active_submits++;
+ mutex_unlock(&gpu->active_lock);
gpu->funcs->submit(gpu, submit);
priv->lastctx = submit->queue->ctx;
@@ -968,6 +847,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
sched_set_fifo_low(gpu->worker->task);
INIT_LIST_HEAD(&gpu->active_list);
+ mutex_init(&gpu->active_lock);
kthread_init_work(&gpu->retire_work, retire_worker);
kthread_init_work(&gpu->recover_work, recover_worker);
kthread_init_work(&gpu->fault_work, fault_worker);
@@ -1078,7 +958,7 @@ fail:
gpu->rb[i] = NULL;
}
- msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
platform_set_drvdata(pdev, NULL);
return ret;
@@ -1097,7 +977,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
gpu->rb[i] = NULL;
}
- msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
if (!IS_ERR_OR_NULL(gpu->aspace)) {
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
@@ -1108,5 +988,5 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
kthread_destroy_worker(gpu->worker);
}
- devfreq_cooling_unregister(gpu->cooling);
+ msm_devfreq_cleanup(gpu);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index ef41ec09f59c..0e4b45bff2e6 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -80,6 +80,40 @@ struct msm_gpu_fault_info {
const char *block;
};
+/**
+ * struct msm_gpu_devfreq - devfreq related state
+ */
+struct msm_gpu_devfreq {
+ /** devfreq: devfreq instance */
+ struct devfreq *devfreq;
+
+ /**
+ * busy_cycles:
+ *
+ * Used by implementation of gpu->gpu_busy() to track the last
+ * busy counter value, for calculating elapsed busy cycles since
+ * last sampling period.
+ */
+ u64 busy_cycles;
+
+ /** time: Time of last sampling period. */
+ ktime_t time;
+
+ /** idle_time: Time of last transition to idle */
+ ktime_t idle_time;
+
+ /**
+ * idle_freq:
+ *
+ * Shadow frequency used while the GPU is idle. From the PoV of
+ * the devfreq governor, we are continuing to sample busyness and
+ * adjust frequency while the GPU is idle, but we use this shadow
+ * value as the GPU is actually clamped to minimum frequency while
+ * it is inactive.
+ */
+ unsigned long idle_freq;
+};
+
struct msm_gpu {
const char *name;
struct drm_device *dev;
@@ -109,6 +143,19 @@ struct msm_gpu {
*/
struct list_head active_list;
+ /**
+ * active_submits:
+ *
+ * The number of submitted but not yet retired submits, used to
+ * determine transitions between active and idle.
+ *
+ * Protected by active_lock
+ */
+ int active_submits;
+
+ /** active_lock: protects active_submits and idle/active transitions */
+ struct mutex active_lock;
+
/* does gpu need hw_init? */
bool needs_hw_init;
@@ -151,11 +198,7 @@ struct msm_gpu {
struct drm_gem_object *memptrs_bo;
- struct {
- struct devfreq *devfreq;
- u64 busy_cycles;
- ktime_t time;
- } devfreq;
+ struct msm_gpu_devfreq devfreq;
uint32_t suspend_count;
@@ -207,14 +250,90 @@ struct msm_gpu_perfcntr {
const char *name;
};
+/*
+ * The number of priority levels provided by drm gpu scheduler. The
+ * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
+ * cases, so we don't use it (no need for kernel generated jobs).
+ */
+#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
+
+/**
+ * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
+ *
+ * @gpu: the gpu instance
+ * @prio: the userspace priority level
+ * @ring_nr: [out] the ringbuffer the userspace priority maps to
+ * @sched_prio: [out] the gpu scheduler priority level which the userspace
+ * priority maps to
+ *
+ * With drm/scheduler providing its own level of prioritization, our total
+ * number of available priority levels is (nr_rings * NR_SCHED_PRIORITIES).
+ * Each ring is associated with its own scheduler instance. However, our
+ * UABI is that lower numerical values are higher priority. So mapping the
+ * single userspace priority level into ring_nr and sched_prio takes some
+ * care. The userspace provided priority (when a submitqueue is created)
+ * is mapped to ring nr and scheduler priority as such:
+ *
+ * ring_nr = userspace_prio / NR_SCHED_PRIORITIES
+ * sched_prio = NR_SCHED_PRIORITIES -
+ * (userspace_prio % NR_SCHED_PRIORITIES) - 1
+ *
+ * This allows generations without preemption (nr_rings==1) to have some
+ * amount of prioritization, and provides more priority levels for gens
+ * that do have preemption.
+ */
+static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
+ unsigned *ring_nr, enum drm_sched_priority *sched_prio)
+{
+ unsigned rn, sp;
+
+ rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp);
+
+ /* invert sched priority to map to higher-numeric-is-higher-
+ * priority convention
+ */
+ sp = NR_SCHED_PRIORITIES - sp - 1;
+
+ if (rn >= gpu->nr_rings)
+ return -EINVAL;
+
+ *ring_nr = rn;
+ *sched_prio = sp;
+
+ return 0;
+}
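A small worked example of the mapping above, assuming NR_SCHED_PRIORITIES evaluates to 3 (DRM_SCHED_PRIORITY_MIN == 0 .. DRM_SCHED_PRIORITY_HIGH == 2) and a GPU with nr_rings == 2, so userspace sees six priority levels with 0 as the highest:

	userspace prio 0  ->  ring_nr 0, sched_prio 2 (HIGH)
	userspace prio 2  ->  ring_nr 0, sched_prio 0 (MIN)
	userspace prio 3  ->  ring_nr 1, sched_prio 2 (HIGH)
	userspace prio 5  ->  ring_nr 1, sched_prio 0 (MIN)
	userspace prio 6  ->  -EINVAL (ring 2 does not exist)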
+
+/**
+ * A submitqueue is associated with a gl context or vk queue (or equiv)
+ * in userspace.
+ *
+ * @id: userspace id for the submitqueue, unique within the drm_file
+ * @flags: userspace flags for the submitqueue, specified at creation
+ * (currently unused)
+ * @ring_nr: the ringbuffer used by this submitqueue, which is determined
+ * by the submitqueue's priority
+ * @faults: the number of GPU hangs associated with this submitqueue
+ * @ctx: the per-drm_file context associated with the submitqueue (ie.
+ * which set of pgtables do jobs submitted to the
+ * submitqueue use)
+ * @node: node in the context's list of submitqueues
+ * @fence_idr: maps fence-id to dma_fence for userspace visible fence
+ * seqno, protected by submitqueue lock
+ * @lock: submitqueue lock
+ * @ref: reference count
+ * @entity: the submit job-queue
+ */
struct msm_gpu_submitqueue {
int id;
u32 flags;
- u32 prio;
+ u32 ring_nr;
int faults;
struct msm_file_private *ctx;
struct list_head node;
+ struct idr fence_idr;
+ struct mutex lock;
struct kref ref;
+ struct drm_sched_entity entity;
};
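The fence_idr above is what lets the WAIT_FENCE ioctl resolve the userspace-visible fence id (args->fence, allocated with idr_alloc_cyclic() in msm_ioctl_gem_submit()) back to the dma_fence to wait on. A minimal lookup sketch, hypothetical and simplified rather than the driver's actual wait path (the locking and error handling here are assumptions):

	struct dma_fence *fence;

	mutex_lock(&queue->lock);
	fence = idr_find(&queue->fence_idr, fence_id);
	if (fence)
		fence = dma_fence_get_rcu(fence);  /* NULL if already released */
	mutex_unlock(&queue->lock);

	if (fence) {
		ret = dma_fence_wait_timeout(fence, true, timeout_jiffies);
		dma_fence_put(fence);
	}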
struct msm_gpu_state_bo {
@@ -301,7 +420,13 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
-void msm_gpu_resume_devfreq(struct msm_gpu *gpu);
+
+void msm_devfreq_init(struct msm_gpu *gpu);
+void msm_devfreq_cleanup(struct msm_gpu *gpu);
+void msm_devfreq_resume(struct msm_gpu *gpu);
+void msm_devfreq_suspend(struct msm_gpu *gpu);
+void msm_devfreq_active(struct msm_gpu *gpu);
+void msm_devfreq_idle(struct msm_gpu *gpu);
int msm_gpu_hw_init(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
new file mode 100644
index 000000000000..0a1ee20296a2
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include "msm_gpu.h"
+#include "msm_gpu_trace.h"
+
+#include <linux/devfreq.h>
+#include <linux/devfreq_cooling.h>
+
+/*
+ * Power Management:
+ */
+
+static int msm_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+ struct dev_pm_opp *opp;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+
+ /*
+ * If the GPU is idle, devfreq is not aware, so just ignore
+ * its requests
+ */
+ if (gpu->devfreq.idle_freq) {
+ gpu->devfreq.idle_freq = *freq;
+ return 0;
+ }
+
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
+
+ if (gpu->funcs->gpu_set_freq)
+ gpu->funcs->gpu_set_freq(gpu, opp);
+ else
+ clk_set_rate(gpu->core_clk, *freq);
+
+ dev_pm_opp_put(opp);
+
+ return 0;
+}
+
+static unsigned long get_freq(struct msm_gpu *gpu)
+{
+ if (gpu->devfreq.idle_freq)
+ return gpu->devfreq.idle_freq;
+
+ if (gpu->funcs->gpu_get_freq)
+ return gpu->funcs->gpu_get_freq(gpu);
+
+ return clk_get_rate(gpu->core_clk);
+}
+
+static int msm_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+ ktime_t time;
+
+ status->current_frequency = get_freq(gpu);
+ status->busy_time = gpu->funcs->gpu_busy(gpu);
+
+ time = ktime_get();
+ status->total_time = ktime_us_delta(time, gpu->devfreq.time);
+ gpu->devfreq.time = time;
+
+ return 0;
+}
+
+static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ *freq = get_freq(dev_to_gpu(dev));
+
+ return 0;
+}
+
+static struct devfreq_dev_profile msm_devfreq_profile = {
+ .timer = DEVFREQ_TIMER_DELAYED,
+ .polling_ms = 50,
+ .target = msm_devfreq_target,
+ .get_dev_status = msm_devfreq_get_dev_status,
+ .get_cur_freq = msm_devfreq_get_cur_freq,
+};
+
+void msm_devfreq_init(struct msm_gpu *gpu)
+{
+ /* We need target support to do devfreq */
+ if (!gpu->funcs->gpu_busy)
+ return;
+
+ msm_devfreq_profile.initial_freq = gpu->fast_rate;
+
+ /*
+ * Don't set the freq_table or max_state and let devfreq build the table
+ * from OPP
+ * After a deferred probe, these may have been left at non-zero values,
+ * so set them back to zero before creating the devfreq device
+ */
+ msm_devfreq_profile.freq_table = NULL;
+ msm_devfreq_profile.max_state = 0;
+
+ gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
+ &msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ NULL);
+
+ if (IS_ERR(gpu->devfreq.devfreq)) {
+ DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ gpu->devfreq.devfreq = NULL;
+ return;
+ }
+
+ devfreq_suspend_device(gpu->devfreq.devfreq);
+
+ gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node,
+ gpu->devfreq.devfreq);
+ if (IS_ERR(gpu->cooling)) {
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "Couldn't register GPU cooling device\n");
+ gpu->cooling = NULL;
+ }
+}
+
+void msm_devfreq_cleanup(struct msm_gpu *gpu)
+{
+ devfreq_cooling_unregister(gpu->cooling);
+}
+
+void msm_devfreq_resume(struct msm_gpu *gpu)
+{
+ gpu->devfreq.busy_cycles = 0;
+ gpu->devfreq.time = ktime_get();
+
+ devfreq_resume_device(gpu->devfreq.devfreq);
+}
+
+void msm_devfreq_suspend(struct msm_gpu *gpu)
+{
+ devfreq_suspend_device(gpu->devfreq.devfreq);
+}
+
+void msm_devfreq_active(struct msm_gpu *gpu)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+ struct devfreq_dev_status status;
+ unsigned int idle_time;
+ unsigned long target_freq = df->idle_freq;
+
+ /*
+ * Hold devfreq lock to synchronize with get_dev_status()/
+ * target() callbacks
+ */
+ mutex_lock(&df->devfreq->lock);
+
+ idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time));
+
+ /*
+ * If we've been idle for a significant fraction of a polling
+ * interval, then we won't meet the threshold of busyness for
+ * the governor to ramp up the freq.. so give some boost
+ */
+ if (idle_time > msm_devfreq_profile.polling_ms/2) {
+ target_freq *= 2;
+ }
+
+ df->idle_freq = 0;
+
+ msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
+
+ /*
+ * Reset the polling interval so we aren't inconsistent
+ * about freq vs busy/total cycles
+ */
+ msm_devfreq_get_dev_status(&gpu->pdev->dev, &status);
+
+ mutex_unlock(&df->devfreq->lock);
+}
+
+void msm_devfreq_idle(struct msm_gpu *gpu)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+ unsigned long idle_freq, target_freq = 0;
+
+ /*
+ * Hold devfreq lock to synchronize with get_dev_status()/
+ * target() callbacks
+ */
+ mutex_lock(&df->devfreq->lock);
+
+ idle_freq = get_freq(gpu);
+
+ msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
+
+ df->idle_time = ktime_get();
+ df->idle_freq = idle_freq;
+
+ mutex_unlock(&df->devfreq->lock);
+}
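Taken together, idle_freq acts as a shadow frequency so the governor keeps sampling and "retargeting" while the GPU is actually clamped. A rough sequence derived from the code above (not a literal trace):

	last submit retires -> msm_devfreq_idle():
		saved = get_freq(gpu); target(dev, &zero) clamps to the lowest
		OPP; df->idle_freq = saved
	governor polls while idle -> msm_devfreq_target():
		sees idle_freq != 0, records the requested freq in idle_freq
		and returns without touching the clocks
	new submit arrives -> msm_devfreq_active():
		target_freq = idle_freq (doubled if idle for more than
		polling_ms/2); idle_freq = 0; target() reapplies it and
		get_dev_status() resets the sampling window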
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 086a2d59b8c8..de2bc3467bb5 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -117,9 +117,6 @@ struct msm_kms_funcs {
struct drm_encoder *encoder,
struct drm_encoder *slave_encoder,
bool is_cmd_mode);
- void (*set_encoder_mode)(struct msm_kms *kms,
- struct drm_encoder *encoder,
- bool cmd_mode);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
@@ -150,7 +147,7 @@ struct msm_kms {
const struct msm_kms_funcs *funcs;
struct drm_device *dev;
- /* irq number to be passed on to drm_irq_install */
+ /* irq number to be passed on to msm_irq_install */
int irq;
/* mapper-id used to request GEM buffer mapped for scanout: */
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 659e5cc4b40a..b55398a34fa4 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -325,15 +325,19 @@ static void snapshot_buf(struct msm_rd_state *rd,
if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
return;
+ msm_gem_lock(&obj->base);
buf = msm_gem_get_vaddr_active(&obj->base);
if (IS_ERR(buf))
- return;
+ goto out_unlock;
buf += offset;
rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
msm_gem_put_vaddr_locked(&obj->base);
+
+out_unlock:
+ msm_gem_unlock(&obj->base);
}
/* called under struct_mutex */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 4d2a2a4abef8..bd54c1412649 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -7,10 +7,61 @@
#include "msm_ringbuffer.h"
#include "msm_gpu.h"
+static uint num_hw_submissions = 8;
+MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
+module_param(num_hw_submissions, uint, 0600);
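Being registered with mode 0600, the limit can also be adjusted by root at runtime via /sys/module/msm/parameters/num_hw_submissions (assuming the driver is built as the msm module), or set at load time, e.g. "modprobe msm num_hw_submissions=16"; the value is passed below to drm_sched_init() as the scheduler's hw_submission limit.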
+
+static struct dma_fence *msm_job_dependency(struct drm_sched_job *job,
+ struct drm_sched_entity *s_entity)
+{
+ struct msm_gem_submit *submit = to_msm_submit(job);
+
+ if (!xa_empty(&submit->deps))
+ return xa_erase(&submit->deps, submit->last_dep++);
+
+ return NULL;
+}
+
+static struct dma_fence *msm_job_run(struct drm_sched_job *job)
+{
+ struct msm_gem_submit *submit = to_msm_submit(job);
+ struct msm_gpu *gpu = submit->gpu;
+
+ submit->hw_fence = msm_fence_alloc(submit->ring->fctx);
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
+ /* TODO move submit path over to using a per-ring lock.. */
+ mutex_lock(&gpu->dev->struct_mutex);
+
+ msm_gpu_submit(gpu, submit);
+
+ mutex_unlock(&gpu->dev->struct_mutex);
+
+ pm_runtime_put(&gpu->pdev->dev);
+
+ return dma_fence_get(submit->hw_fence);
+}
+
+static void msm_job_free(struct drm_sched_job *job)
+{
+ struct msm_gem_submit *submit = to_msm_submit(job);
+
+ drm_sched_job_cleanup(job);
+ msm_gem_submit_put(submit);
+}
+
+const struct drm_sched_backend_ops msm_sched_ops = {
+ .dependency = msm_job_dependency,
+ .run_job = msm_job_run,
+ .free_job = msm_job_free
+};
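For orientation, drm/scheduler drives these hooks roughly as follows for each job pushed via drm_sched_entity_push_job() (a simplified view, not the scheduler's actual source):

	while ((fence = ops->dependency(job, entity)))
		;	/* scheduler waits for each dependency to signal */
	hw_fence = ops->run_job(job);	/* here: writes the submit into the ring */
	/* ... once hw_fence signals and the job is retired ... */
	ops->free_job(job);		/* here: drops the submit's reference */

msm_job_dependency() drains the submit->deps xarray one fence at a time, which is why it advances submit->last_dep on each call.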
+
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
void *memptrs, uint64_t memptrs_iova)
{
struct msm_ringbuffer *ring;
+ long sched_timeout;
char name[32];
int ret;
@@ -32,7 +83,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
- ring->start = 0;
+ ring->start = NULL;
goto fail;
}
@@ -45,13 +96,23 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->memptrs = memptrs;
ring->memptrs_iova = memptrs_iova;
+ /* currently managing hangcheck ourselves: */
+ sched_timeout = MAX_SCHEDULE_TIMEOUT;
+
+ ret = drm_sched_init(&ring->sched, &msm_sched_ops,
+ num_hw_submissions, 0, sched_timeout,
+ NULL, NULL, to_msm_bo(ring->bo)->name);
+ if (ret) {
+ goto fail;
+ }
+
INIT_LIST_HEAD(&ring->submits);
spin_lock_init(&ring->submit_lock);
spin_lock_init(&ring->preempt_lock);
snprintf(name, sizeof(name), "gpu-ring-%d", ring->id);
- ring->fctx = msm_fence_context_alloc(gpu->dev, name);
+ ring->fctx = msm_fence_context_alloc(gpu->dev, &ring->memptrs->fence, name);
return ring;
@@ -65,9 +126,11 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
if (IS_ERR_OR_NULL(ring))
return;
+ drm_sched_fini(&ring->sched);
+
msm_fence_context_free(ring->fctx);
- msm_gem_kernel_put(ring->bo, ring->gpu->aspace, false);
+ msm_gem_kernel_put(ring->bo, ring->gpu->aspace);
kfree(ring);
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index fe55d4a1aa16..d8c63df4e9ca 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -7,6 +7,7 @@
#ifndef __MSM_RINGBUFFER_H__
#define __MSM_RINGBUFFER_H__
+#include "drm/gpu_scheduler.h"
#include "msm_drv.h"
#define rbmemptr(ring, member) \
@@ -41,7 +42,18 @@ struct msm_ringbuffer {
uint32_t *start, *end, *cur, *next;
/*
+ * The job scheduler for this ring.
+ */
+ struct drm_gpu_scheduler sched;
+
+ /*
* List of in-flight submits on this ring. Protected by submit_lock.
+ *
+ * Currently just submits that are already written into the ring, not
+ * submits that are still in drm_gpu_scheduler's queues. At a later
+ * step we could probably move to letting drm_gpu_scheduler manage
+ * hangcheck detection and keep track of submit jobs that are in-
+ * flight.
*/
struct list_head submits;
spinlock_t submit_lock;
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index c3d206105d28..32a55d81b58b 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -12,6 +12,10 @@ void msm_submitqueue_destroy(struct kref *kref)
struct msm_gpu_submitqueue *queue = container_of(kref,
struct msm_gpu_submitqueue, ref);
+ idr_destroy(&queue->fence_idr);
+
+ drm_sched_entity_destroy(&queue->entity);
+
msm_file_private_put(queue->ctx);
kfree(queue);
@@ -62,10 +66,22 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
{
struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu_submitqueue *queue;
+ struct msm_ringbuffer *ring;
+ struct drm_gpu_scheduler *sched;
+ enum drm_sched_priority sched_prio;
+ unsigned ring_nr;
+ int ret;
if (!ctx)
return -ENODEV;
+ if (!priv->gpu)
+ return -ENODEV;
+
+ ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
+ if (ret)
+ return ret;
+
queue = kzalloc(sizeof(*queue), GFP_KERNEL);
if (!queue)
@@ -73,14 +89,16 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
kref_init(&queue->ref);
queue->flags = flags;
+ queue->ring_nr = ring_nr;
- if (priv->gpu) {
- if (prio >= priv->gpu->nr_rings) {
- kfree(queue);
- return -EINVAL;
- }
+ ring = priv->gpu->rb[ring_nr];
+ sched = &ring->sched;
- queue->prio = prio;
+ ret = drm_sched_entity_init(&queue->entity,
+ sched_prio, &sched, 1, NULL);
+ if (ret) {
+ kfree(queue);
+ return ret;
}
write_lock(&ctx->queuelock);
@@ -91,6 +109,9 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
if (id)
*id = queue->id;
+ idr_init(&queue->fence_idr);
+ mutex_init(&queue->lock);
+
list_add_tail(&queue->node, &ctx->submitqueues);
write_unlock(&ctx->queuelock);
@@ -98,20 +119,26 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
return 0;
}
+/*
+ * Create the default submit-queue (id==0), used for backwards compatibility
+ * for userspace that pre-dates the introduction of submitqueues.
+ */
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
struct msm_drm_private *priv = drm->dev_private;
- int default_prio;
+ int default_prio, max_priority;
- if (!ctx)
- return 0;
+ if (!priv->gpu)
+ return -ENODEV;
+
+ max_priority = (priv->gpu->nr_rings * NR_SCHED_PRIORITIES) - 1;
/*
- * Select priority 2 as the "default priority" unless nr_rings is less
- * than 2 and then pick the lowest pirority
+ * Pick a medium priority level as default. Lower numeric value is
+ * higher priority, so round-up to pick a priority that is not higher
+ * than the middle priority level.
*/
- default_prio = priv->gpu ?
- clamp_t(uint32_t, 2, 0, priv->gpu->nr_rings - 1) : 0;
+ default_prio = DIV_ROUND_UP(max_priority, 2);
INIT_LIST_HEAD(&ctx->submitqueues);