path: root/drivers/gpu/drm
author     David S. Miller <davem@davemloft.net>  2020-09-22 16:45:34 -0700
committer  David S. Miller <davem@davemloft.net>  2020-09-22 16:45:34 -0700
commit     3ab0a7a0c349a1d7beb2bb371a62669d1528269d (patch)
tree       d2ae17c3bfc829ce0c747ad97021cd4bc8fb11dc /drivers/gpu/drm
parent     92ec804f3dbf0d986f8e10850bfff14f316d7aaf (diff)
parent     805c6d3c19210c90c109107d189744e960eae025 (diff)
download   linux-3ab0a7a0c349a1d7beb2bb371a62669d1528269d.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Two minor conflicts:

1) net/ipv4/route.c, adding a new local variable while moving another
   local variable and removing its initial assignment.

2) drivers/net/dsa/microchip/ksz9477.c, overlapping changes. One pretty
   prints the port mode differently, whilst another changes the driver to
   try and obtain the port mode from the port node rather than the switch
   node.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c                          2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c                           2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c            4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c               32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c            4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h              2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c              2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c                      12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c                    10
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c                     48
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c                 314
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h                       4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c                       14
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c       21
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c                             15
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c                             25
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c                            10
-rw-r--r--  drivers/gpu/drm/ingenic/ingenic-drm-drv.c                       20
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c                         20
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c                      1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c                          19
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c                               9
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c                             26
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx_gpu.c                            5
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c                           10
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c                           10
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c                           14
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.h                            1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_preempt.c                       25
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c                           13
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c                         27
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c                                    2
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h                                   11
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c                             4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c                          2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.c                            4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c                               8
-rw-r--r--  drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c                           4
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_vi_layer.c                           2
-rw-r--r--  drivers/gpu/drm/tve200/tve200_display.c                         22
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c                        15
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h                             2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c                          8
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c                           6
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_gem.c                          9
-rw-r--r--  drivers/gpu/drm/xlnx/Kconfig                                     1
46 files changed, 636 insertions, 185 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index d8c6520ff74a..06757681b2ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -178,7 +178,7 @@ static int psp_sw_init(void *handle)
return ret;
}
- if (adev->asic_type == CHIP_NAVI10) {
+ if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
ret = psp_sysfs_init(adev);
if (ret) {
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index e16874f30d5d..6c5d9612abcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -58,7 +58,7 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_sos.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_ta.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_sos.bin");
-MODULE_FIRMWARE("amdgpu/navy_flounder_asd.bin");
+MODULE_FIRMWARE("amdgpu/navy_flounder_ta.bin");
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e0e60b0d0669..0f4508b4903e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1216,6 +1216,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
dqm->sched_running = false;
dqm_unlock(dqm);
+ pm_release_ib(&dqm->packets);
+
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
pm_uninit(&dqm->packets, hanging);
@@ -1326,7 +1328,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (q->properties.is_active) {
increment_queue_count(dqm, q->properties.type);
- retval = execute_queues_cpsch(dqm,
+ execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b51c527a3f0d..4ba8b54a2695 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5278,19 +5278,6 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
-static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
-{
- struct drm_device *dev = new_crtc_state->crtc->dev;
- struct drm_plane *plane;
-
- drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
- return true;
- }
-
- return false;
-}
-
static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
struct drm_atomic_state *state = new_crtc_state->state;
@@ -5354,19 +5341,20 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return ret;
}
- /* In some use cases, like reset, no stream is attached */
- if (!dm_crtc_state->stream)
- return 0;
-
/*
- * We want at least one hardware plane enabled to use
- * the stream with a cursor enabled.
+ * We require the primary plane to be enabled whenever the CRTC is, otherwise
+ * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
+ * planes are disabled, which is not supported by the hardware. And there is legacy
+ * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
*/
- if (state->enable && state->active &&
- does_crtc_have_active_cursor(state) &&
- dm_crtc_state->active_planes == 0)
+ if (state->enable &&
+ !(state->plane_mask & drm_plane_mask(crtc->primary)))
return -EINVAL;
+ /* In some use cases, like reset, no stream is attached */
+ if (!dm_crtc_state->stream)
+ return 0;
+
if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
return 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 9140b3fc767a..f31f48dd0da2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -409,8 +409,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
},
},
.num_states = 5,
- .sr_exit_time_us = 8.6,
- .sr_enter_plus_exit_time_us = 10.9,
+ .sr_exit_time_us = 11.6,
+ .sr_enter_plus_exit_time_us = 13.9,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index d3192b9d0c3d..47f8ee2832ff 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -27,7 +27,7 @@
#define MOD_HDCP_LOG_H_
#ifdef CONFIG_DRM_AMD_DC_HDCP
-#define HDCP_LOG_ERR(hdcp, ...) DRM_WARN(__VA_ARGS__)
+#define HDCP_LOG_ERR(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index fb1161dd7ea8..3a367a5968ae 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -88,7 +88,7 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!psp->dtm_context.dtm_initialized) {
- DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+ DRM_INFO("Failed to add display topology, DTM TA is not initialized.");
display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 0826625573dc..63f945f9f331 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -1126,7 +1126,7 @@ static int smu_disable_dpms(struct smu_context *smu)
*/
if (smu->uploading_custom_pp_table &&
(adev->asic_type >= CHIP_NAVI10) &&
- (adev->asic_type <= CHIP_NAVI12))
+ (adev->asic_type <= CHIP_NAVY_FLOUNDER))
return 0;
/*
@@ -1211,7 +1211,9 @@ static int smu_hw_fini(void *handle)
int smu_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int ret = 0;
+ int ret;
+
+ amdgpu_gfx_off_ctrl(smu->adev, false);
ret = smu_hw_fini(adev);
if (ret)
@@ -1222,8 +1224,12 @@ int smu_reset(struct smu_context *smu)
return ret;
ret = smu_late_init(adev);
+ if (ret)
+ return ret;
- return ret;
+ amdgpu_gfx_off_ctrl(smu->adev, true);
+
+ return 0;
}
static int smu_suspend(void *handle)
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 68325678f5ef..b18c5ac2934d 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -14956,12 +14956,6 @@ static int intel_atomic_check(struct drm_device *dev,
if (dev_priv->wm.distrust_bios_wm)
any_ms = true;
- if (any_ms) {
- ret = intel_modeset_checks(state);
- if (ret)
- goto fail;
- }
-
intel_fbc_choose_crtc(dev_priv, state);
ret = calc_watermark_data(state);
if (ret)
@@ -14976,6 +14970,10 @@ static int intel_atomic_check(struct drm_device *dev,
goto fail;
if (any_ms) {
+ ret = intel_modeset_checks(state);
+ if (ret)
+ goto fail;
+
ret = intel_modeset_calc_cdclk(state);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index d0bdb6d447ed..ef755dd5e68f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -439,29 +439,36 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
return __reset_engine(engine);
}
-static struct intel_engine_cs *__active_engine(struct i915_request *rq)
+static bool
+__active_engine(struct i915_request *rq, struct intel_engine_cs **active)
{
struct intel_engine_cs *engine, *locked;
+ bool ret = false;
/*
* Serialise with __i915_request_submit() so that it sees
* is-banned?, or we know the request is already inflight.
+ *
+ * Note that rq->engine is unstable, and so we double
+ * check that we have acquired the lock on the final engine.
*/
locked = READ_ONCE(rq->engine);
spin_lock_irq(&locked->active.lock);
while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
spin_unlock(&locked->active.lock);
- spin_lock(&engine->active.lock);
locked = engine;
+ spin_lock(&locked->active.lock);
}
- engine = NULL;
- if (i915_request_is_active(rq) && rq->fence.error != -EIO)
- engine = rq->engine;
+ if (!i915_request_completed(rq)) {
+ if (i915_request_is_active(rq) && rq->fence.error != -EIO)
+ *active = locked;
+ ret = true;
+ }
spin_unlock_irq(&locked->active.lock);
- return engine;
+ return ret;
}
static struct intel_engine_cs *active_engine(struct intel_context *ce)
@@ -472,17 +479,16 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
if (!ce->timeline)
return NULL;
- mutex_lock(&ce->timeline->mutex);
- list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
- if (i915_request_completed(rq))
- break;
+ rcu_read_lock();
+ list_for_each_entry_rcu(rq, &ce->timeline->requests, link) {
+ if (i915_request_is_active(rq) && i915_request_completed(rq))
+ continue;
/* Check with the backend if the request is inflight */
- engine = __active_engine(rq);
- if (engine)
+ if (__active_engine(rq, &engine))
break;
}
- mutex_unlock(&ce->timeline->mutex);
+ rcu_read_unlock();
return engine;
}
@@ -713,6 +719,7 @@ __create_context(struct drm_i915_private *i915)
ctx->i915 = i915;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
mutex_init(&ctx->mutex);
+ INIT_LIST_HEAD(&ctx->link);
spin_lock_init(&ctx->stale.lock);
INIT_LIST_HEAD(&ctx->stale.engines);
@@ -740,10 +747,6 @@ __create_context(struct drm_i915_private *i915)
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
- spin_lock(&i915->gem.contexts.lock);
- list_add_tail(&ctx->link, &i915->gem.contexts.list);
- spin_unlock(&i915->gem.contexts.lock);
-
return ctx;
err_free:
@@ -931,6 +934,7 @@ static int gem_context_register(struct i915_gem_context *ctx,
struct drm_i915_file_private *fpriv,
u32 *id)
{
+ struct drm_i915_private *i915 = ctx->i915;
struct i915_address_space *vm;
int ret;
@@ -949,8 +953,16 @@ static int gem_context_register(struct i915_gem_context *ctx,
/* And finally expose ourselves to userspace via the idr */
ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
if (ret)
- put_pid(fetch_and_zero(&ctx->pid));
+ goto err_pid;
+
+ spin_lock(&i915->gem.contexts.lock);
+ list_add_tail(&ctx->link, &i915->gem.contexts.list);
+ spin_unlock(&i915->gem.contexts.lock);
+
+ return 0;
+err_pid:
+ put_pid(fetch_and_zero(&ctx->pid));
return ret;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 6b4ec66cb558..446e76e95c38 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -45,6 +45,13 @@ struct eb_vma_array {
struct eb_vma vma[];
};
+enum {
+ FORCE_CPU_RELOC = 1,
+ FORCE_GTT_RELOC,
+ FORCE_GPU_RELOC,
+#define DBG_FORCE_RELOC 0 /* choose one of the above! */
+};
+
#define __EXEC_OBJECT_HAS_PIN BIT(31)
#define __EXEC_OBJECT_HAS_FENCE BIT(30)
#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
@@ -253,6 +260,8 @@ struct i915_execbuffer {
*/
struct reloc_cache {
struct drm_mm_node node; /** temporary GTT binding */
+ unsigned long vaddr; /** Current kmap address */
+ unsigned long page; /** Currently mapped page index */
unsigned int gen; /** Cached value of INTEL_GEN */
bool use_64bit_reloc : 1;
bool has_llc : 1;
@@ -596,6 +605,23 @@ eb_add_vma(struct i915_execbuffer *eb,
}
}
+static inline int use_cpu_reloc(const struct reloc_cache *cache,
+ const struct drm_i915_gem_object *obj)
+{
+ if (!i915_gem_object_has_struct_page(obj))
+ return false;
+
+ if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
+ return true;
+
+ if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
+ return false;
+
+ return (cache->has_llc ||
+ obj->cache_dirty ||
+ obj->cache_level != I915_CACHE_NONE);
+}
+
static int eb_reserve_vma(const struct i915_execbuffer *eb,
struct eb_vma *ev,
u64 pin_flags)
@@ -926,6 +952,8 @@ relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
static void reloc_cache_init(struct reloc_cache *cache,
struct drm_i915_private *i915)
{
+ cache->page = -1;
+ cache->vaddr = 0;
/* Must be a variable in the struct to allow GCC to unroll. */
cache->gen = INTEL_GEN(i915);
cache->has_llc = HAS_LLC(i915);
@@ -937,6 +965,25 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->target = NULL;
}
+static inline void *unmask_page(unsigned long p)
+{
+ return (void *)(uintptr_t)(p & PAGE_MASK);
+}
+
+static inline unsigned int unmask_flags(unsigned long p)
+{
+ return p & ~PAGE_MASK;
+}
+
+#define KMAP 0x4 /* after CLFLUSH_FLAGS */
+
+static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
+{
+ struct drm_i915_private *i915 =
+ container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
+ return &i915->ggtt;
+}
+
#define RELOC_TAIL 4
static int reloc_gpu_chain(struct reloc_cache *cache)
@@ -1049,6 +1096,181 @@ static int reloc_gpu_flush(struct reloc_cache *cache)
return err;
}
+static void reloc_cache_reset(struct reloc_cache *cache)
+{
+ void *vaddr;
+
+ if (!cache->vaddr)
+ return;
+
+ vaddr = unmask_page(cache->vaddr);
+ if (cache->vaddr & KMAP) {
+ if (cache->vaddr & CLFLUSH_AFTER)
+ mb();
+
+ kunmap_atomic(vaddr);
+ i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
+ } else {
+ struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+ io_mapping_unmap_atomic((void __iomem *)vaddr);
+
+ if (drm_mm_node_allocated(&cache->node)) {
+ ggtt->vm.clear_range(&ggtt->vm,
+ cache->node.start,
+ cache->node.size);
+ mutex_lock(&ggtt->vm.mutex);
+ drm_mm_remove_node(&cache->node);
+ mutex_unlock(&ggtt->vm.mutex);
+ } else {
+ i915_vma_unpin((struct i915_vma *)cache->node.mm);
+ }
+ }
+
+ cache->vaddr = 0;
+ cache->page = -1;
+}
+
+static void *reloc_kmap(struct drm_i915_gem_object *obj,
+ struct reloc_cache *cache,
+ unsigned long page)
+{
+ void *vaddr;
+
+ if (cache->vaddr) {
+ kunmap_atomic(unmask_page(cache->vaddr));
+ } else {
+ unsigned int flushes;
+ int err;
+
+ err = i915_gem_object_prepare_write(obj, &flushes);
+ if (err)
+ return ERR_PTR(err);
+
+ BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
+ BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
+
+ cache->vaddr = flushes | KMAP;
+ cache->node.mm = (void *)obj;
+ if (flushes)
+ mb();
+ }
+
+ vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
+ cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
+ cache->page = page;
+
+ return vaddr;
+}
+
+static void *reloc_iomap(struct drm_i915_gem_object *obj,
+ struct reloc_cache *cache,
+ unsigned long page)
+{
+ struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+ unsigned long offset;
+ void *vaddr;
+
+ if (cache->vaddr) {
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+ io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
+ } else {
+ struct i915_vma *vma;
+ int err;
+
+ if (i915_gem_object_is_tiled(obj))
+ return ERR_PTR(-EINVAL);
+
+ if (use_cpu_reloc(cache, obj))
+ return NULL;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err)
+ return ERR_PTR(err);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
+ if (IS_ERR(vma)) {
+ memset(&cache->node, 0, sizeof(cache->node));
+ mutex_lock(&ggtt->vm.mutex);
+ err = drm_mm_insert_node_in_range
+ (&ggtt->vm.mm, &cache->node,
+ PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+ mutex_unlock(&ggtt->vm.mutex);
+ if (err) /* no inactive aperture space, use cpu reloc */
+ return NULL;
+ } else {
+ cache->node.start = vma->node.start;
+ cache->node.mm = (void *)vma;
+ }
+ }
+
+ offset = cache->node.start;
+ if (drm_mm_node_allocated(&cache->node)) {
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, page),
+ offset, I915_CACHE_NONE, 0);
+ } else {
+ offset += page << PAGE_SHIFT;
+ }
+
+ vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
+ offset);
+ cache->page = page;
+ cache->vaddr = (unsigned long)vaddr;
+
+ return vaddr;
+}
+
+static void *reloc_vaddr(struct drm_i915_gem_object *obj,
+ struct reloc_cache *cache,
+ unsigned long page)
+{
+ void *vaddr;
+
+ if (cache->page == page) {
+ vaddr = unmask_page(cache->vaddr);
+ } else {
+ vaddr = NULL;
+ if ((cache->vaddr & KMAP) == 0)
+ vaddr = reloc_iomap(obj, cache, page);
+ if (!vaddr)
+ vaddr = reloc_kmap(obj, cache, page);
+ }
+
+ return vaddr;
+}
+
+static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
+{
+ if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
+ if (flushes & CLFLUSH_BEFORE) {
+ clflushopt(addr);
+ mb();
+ }
+
+ *addr = value;
+
+ /*
+ * Writes to the same cacheline are serialised by the CPU
+ * (including clflush). On the write path, we only require
+ * that it hits memory in an orderly fashion and place
+ * mb barriers at the start and end of the relocation phase
+ * to ensure ordering of clflush wrt to the system.
+ */
+ if (flushes & CLFLUSH_AFTER)
+ clflushopt(addr);
+ } else
+ *addr = value;
+}
+
static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
@@ -1214,6 +1436,17 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
return cmd;
}
+static inline bool use_reloc_gpu(struct i915_vma *vma)
+{
+ if (DBG_FORCE_RELOC == FORCE_GPU_RELOC)
+ return true;
+
+ if (DBG_FORCE_RELOC)
+ return false;
+
+ return !dma_resv_test_signaled_rcu(vma->resv, true);
+}
+
static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
{
struct page *page;
@@ -1228,10 +1461,10 @@ static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
return addr + offset_in_page(offset);
}
-static int __reloc_entry_gpu(struct i915_execbuffer *eb,
- struct i915_vma *vma,
- u64 offset,
- u64 target_addr)
+static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
+ struct i915_vma *vma,
+ u64 offset,
+ u64 target_addr)
{
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
@@ -1247,7 +1480,7 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb,
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
- return PTR_ERR(batch);
+ return false;
addr = gen8_canonical_addr(vma->node.start + offset);
if (gen >= 8) {
@@ -1296,21 +1529,55 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb,
*batch++ = target_addr;
}
- return 0;
+ return true;
+}
+
+static bool reloc_entry_gpu(struct i915_execbuffer *eb,
+ struct i915_vma *vma,
+ u64 offset,
+ u64 target_addr)
+{
+ if (eb->reloc_cache.vaddr)
+ return false;
+
+ if (!use_reloc_gpu(vma))
+ return false;
+
+ return __reloc_entry_gpu(eb, vma, offset, target_addr);
}
static u64
-relocate_entry(struct i915_execbuffer *eb,
- struct i915_vma *vma,
+relocate_entry(struct i915_vma *vma,
const struct drm_i915_gem_relocation_entry *reloc,
+ struct i915_execbuffer *eb,
const struct i915_vma *target)
{
u64 target_addr = relocation_target(reloc, target);
- int err;
-
- err = __reloc_entry_gpu(eb, vma, reloc->offset, target_addr);
- if (err)
- return err;
+ u64 offset = reloc->offset;
+
+ if (!reloc_entry_gpu(eb, vma, offset, target_addr)) {
+ bool wide = eb->reloc_cache.use_64bit_reloc;
+ void *vaddr;
+
+repeat:
+ vaddr = reloc_vaddr(vma->obj,
+ &eb->reloc_cache,
+ offset >> PAGE_SHIFT);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
+ clflush_write32(vaddr + offset_in_page(offset),
+ lower_32_bits(target_addr),
+ eb->reloc_cache.vaddr);
+
+ if (wide) {
+ offset += sizeof(u32);
+ target_addr >>= 32;
+ wide = false;
+ goto repeat;
+ }
+ }
return target->node.start | UPDATE;
}
@@ -1375,7 +1642,8 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* If the relocation already has the right value in it, no
* more work needs to be done.
*/
- if (gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
+ if (!DBG_FORCE_RELOC &&
+ gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
@@ -1407,7 +1675,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
ev->flags &= ~EXEC_OBJECT_ASYNC;
/* and update the user's relocation entry */
- return relocate_entry(eb, ev->vma, reloc, target->vma);
+ return relocate_entry(ev->vma, reloc, eb, target->vma);
}
static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
@@ -1445,8 +1713,10 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
* this is bad and so lockdep complains vehemently.
*/
copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
- if (unlikely(copied))
- return -EFAULT;
+ if (unlikely(copied)) {
+ remain = -EFAULT;
+ goto out;
+ }
remain -= count;
do {
@@ -1454,7 +1724,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
if (likely(offset == 0)) {
} else if ((s64)offset < 0) {
- return (int)offset;
+ remain = (int)offset;
+ goto out;
} else {
/*
* Note that reporting an error now
@@ -1484,8 +1755,9 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
} while (r++, --count);
urelocs += ARRAY_SIZE(stack);
} while (remain);
-
- return 0;
+out:
+ reloc_cache_reset(&eb->reloc_cache);
+ return remain;
}
static int eb_relocate(struct i915_execbuffer *eb)
@@ -2392,7 +2664,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.i915 = i915;
eb.file = file;
eb.args = args;
- if (!(args->flags & I915_EXEC_NO_RELOC))
+ if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index e5b9276d254c..9cf4ad78ece6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -258,6 +258,10 @@ struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
unsigned int n);
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
+
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index d15ff6748a50..e8a083743e09 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -548,6 +548,20 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
return nth_page(sg_page(sg), offset);
}
+/* Like i915_gem_object_get_page(), but mark the returned page dirty */
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n)
+{
+ struct page *page;
+
+ page = i915_gem_object_get_page(obj, n);
+ if (!obj->mm.dirty)
+ set_page_dirty(page);
+
+ return page;
+}
+
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
index 57c14d3340cd..a49016f8ee0d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
@@ -37,14 +37,20 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
return err;
/* 8-Byte aligned */
- err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
- if (err)
+ if (!__reloc_entry_gpu(eb, vma,
+ offsets[0] * sizeof(u32),
+ 0)) {
+ err = -EIO;
goto unpin_vma;
+ }
/* !8-Byte aligned */
- err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1);
- if (err)
+ if (!__reloc_entry_gpu(eb, vma,
+ offsets[1] * sizeof(u32),
+ 1)) {
+ err = -EIO;
goto unpin_vma;
+ }
/* Skip to the end of the cmd page */
i = PAGE_SIZE / sizeof(u32) - RELOC_TAIL - 1;
@@ -54,9 +60,12 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
eb->reloc_cache.rq_size += i;
/* Force batch chaining */
- err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2);
- if (err)
+ if (!__reloc_entry_gpu(eb, vma,
+ offsets[2] * sizeof(u32),
+ 2)) {
+ err = -EIO;
goto unpin_vma;
+ }
GEM_BUG_ON(!eb->reloc_cache.rq);
rq = i915_request_get(eb->reloc_cache.rq);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 24322ef08aa4..9eeaca957a7e 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2060,6 +2060,14 @@ static inline void clear_ports(struct i915_request **ports, int count)
memset_p((void **)ports, NULL, count);
}
+static inline void
+copy_ports(struct i915_request **dst, struct i915_request **src, int count)
+{
+ /* A memcpy_p() would be very useful here! */
+ while (count--)
+ WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
+}
+
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -2648,10 +2656,9 @@ static void process_csb(struct intel_engine_cs *engine)
/* switch pending to inflight */
GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
- memcpy(execlists->inflight,
- execlists->pending,
- execlists_num_ports(execlists) *
- sizeof(*execlists->pending));
+ copy_ports(execlists->inflight,
+ execlists->pending,
+ execlists_num_ports(execlists));
smp_wmb(); /* complete the seqlock */
WRITE_ONCE(execlists->active, execlists->inflight);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 0b2fe55e6194..781a6783affe 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -388,17 +388,38 @@ static bool __request_in_flight(const struct i915_request *signal)
* As we know that there are always preemption points between
* requests, we know that only the currently executing request
* may be still active even though we have cleared the flag.
- * However, we can't rely on our tracking of ELSP[0] to known
+ * However, we can't rely on our tracking of ELSP[0] to know
* which request is currently active and so maybe stuck, as
* the tracking maybe an event behind. Instead assume that
* if the context is still inflight, then it is still active
* even if the active flag has been cleared.
+ *
+ * To further complicate matters, if there a pending promotion, the HW
+ * may either perform a context switch to the second inflight execlists,
+ * or it may switch to the pending set of execlists. In the case of the
+ * latter, it may send the ACK and we process the event copying the
+ * pending[] over top of inflight[], _overwriting_ our *active. Since
+ * this implies the HW is arbitrating and not struck in *active, we do
+ * not worry about complete accuracy, but we do require no read/write
+ * tearing of the pointer [the read of the pointer must be valid, even
+ * as the array is being overwritten, for which we require the writes
+ * to avoid tearing.]
+ *
+ * Note that the read of *execlists->active may race with the promotion
+ * of execlists->pending[] to execlists->inflight[], overwritting
+ * the value at *execlists->active. This is fine. The promotion implies
+ * that we received an ACK from the HW, and so the context is not
+ * stuck -- if we do not see ourselves in *active, the inflight status
+ * is valid. If instead we see ourselves being copied into *active,
+ * we are inflight and may signal the callback.
*/
if (!intel_context_inflight(signal->context))
return false;
rcu_read_lock();
- for (port = __engine_active(signal->engine); (rq = *port); port++) {
+ for (port = __engine_active(signal->engine);
+ (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
+ port++) {
if (rq->context == signal->context) {
inflight = i915_seqno_passed(rq->fence.seqno,
signal->fence.seqno);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 295b9829e2da..4cd2038cbe35 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -164,9 +164,13 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
do {
list_for_each_entry_safe(pos, next, &x->head, entry) {
- pos->func(pos,
- TASK_NORMAL, fence->error,
- &extra);
+ int wake_flags;
+
+ wake_flags = fence->error;
+ if (pos->func == autoremove_wake_function)
+ wake_flags = 0;
+
+ pos->func(pos, TASK_NORMAL, wake_flags, &extra);
}
if (list_empty(&extra))
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index ada990a7f911..b7074161ccf0 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -673,7 +673,7 @@ static void ingenic_drm_unbind_all(void *d)
component_unbind_all(priv->dev, &priv->drm);
}
-static int ingenic_drm_bind(struct device *dev)
+static int ingenic_drm_bind(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
const struct jz_soc_info *soc_info;
@@ -808,7 +808,7 @@ static int ingenic_drm_bind(struct device *dev)
return ret;
}
- if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU)) {
+ if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && has_components) {
ret = component_bind_all(dev, drm);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -939,6 +939,11 @@ err_pixclk_disable:
return ret;
}
+static int ingenic_drm_bind_with_components(struct device *dev)
+{
+ return ingenic_drm_bind(dev, true);
+}
+
static int compare_of(struct device *dev, void *data)
{
return dev->of_node == data;
@@ -957,7 +962,7 @@ static void ingenic_drm_unbind(struct device *dev)
}
static const struct component_master_ops ingenic_master_ops = {
- .bind = ingenic_drm_bind,
+ .bind = ingenic_drm_bind_with_components,
.unbind = ingenic_drm_unbind,
};
@@ -968,16 +973,15 @@ static int ingenic_drm_probe(struct platform_device *pdev)
struct device_node *np;
if (!IS_ENABLED(CONFIG_DRM_INGENIC_IPU))
- return ingenic_drm_bind(dev);
+ return ingenic_drm_bind(dev, false);
/* IPU is at port address 8 */
np = of_graph_get_remote_node(dev->of_node, 8, 0);
- if (!np) {
- dev_err(dev, "Unable to get IPU node\n");
- return -EINVAL;
- }
+ if (!np)
+ return ingenic_drm_bind(dev, false);
drm_of_component_match_add(dev, &match, compare_of, np);
+ of_node_put(np);
return component_master_add_with_match(dev, &ingenic_master_ops, match);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 3fc5511330b9..4d29568be3f5 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -831,13 +831,19 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
drm_crtc_index(&mtk_crtc->base));
mtk_crtc->cmdq_client = NULL;
}
- ret = of_property_read_u32_index(priv->mutex_node,
- "mediatek,gce-events",
- drm_crtc_index(&mtk_crtc->base),
- &mtk_crtc->cmdq_event);
- if (ret)
- dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
- drm_crtc_index(&mtk_crtc->base));
+
+ if (mtk_crtc->cmdq_client) {
+ ret = of_property_read_u32_index(priv->mutex_node,
+ "mediatek,gce-events",
+ drm_crtc_index(&mtk_crtc->base),
+ &mtk_crtc->cmdq_event);
+ if (ret) {
+ dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
+ drm_crtc_index(&mtk_crtc->base));
+ cmdq_mbox_destroy(mtk_crtc->cmdq_client);
+ mtk_crtc->cmdq_client = NULL;
+ }
+ }
#endif
return 0;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 57c88de9a329..526648885b97 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -496,6 +496,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (of_address_to_resource(node, 0, &res) != 0) {
dev_err(dev, "Missing reg in %s node\n", node->full_name);
+ put_device(&larb_pdev->dev);
return -EINVAL;
}
comp->regs_pa = res.start;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 040a8f393fe2..2d982740b1a4 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -27,7 +27,6 @@
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp.h"
-#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"
@@ -165,7 +164,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
ret = drmm_mode_config_init(drm);
if (ret)
- return ret;
+ goto put_mutex_dev;
drm->mode_config.min_width = 64;
drm->mode_config.min_height = 64;
@@ -182,7 +181,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
ret = component_bind_all(drm->dev, drm);
if (ret)
- return ret;
+ goto put_mutex_dev;
/*
* We currently support two fixed data streams, each optional,
@@ -229,7 +228,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
}
if (!dma_dev->dma_parms) {
ret = -ENOMEM;
- goto err_component_unbind;
+ goto put_dma_dev;
}
ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
@@ -256,9 +255,12 @@ static int mtk_drm_kms_init(struct drm_device *drm)
err_unset_dma_parms:
if (private->dma_parms_allocated)
dma_dev->dma_parms = NULL;
+put_dma_dev:
+ put_device(private->dma_dev);
err_component_unbind:
component_unbind_all(drm->dev, drm);
-
+put_mutex_dev:
+ put_device(private->mutex_dev);
return ret;
}
@@ -544,8 +546,13 @@ err_pm:
pm_runtime_disable(dev);
err_node:
of_node_put(private->mutex_node);
- for (i = 0; i < DDP_COMPONENT_ID_MAX; i++)
+ for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) {
of_node_put(private->comp_node[i]);
+ if (private->ddp_comp[i]) {
+ put_device(private->ddp_comp[i]->larb_dev);
+ private->ddp_comp[i] = NULL;
+ }
+ }
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 16fd99dcdacf..80b7a082e874 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -466,14 +466,13 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- horizontal_backporch_byte =
- (vm->hback_porch * dsi_tmp_buf_bpp - 10);
+ horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp;
else
- horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
- dsi_tmp_buf_bpp - 10);
+ horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
+ dsi_tmp_buf_bpp;
data_phy_cycles = timing->lpx + timing->da_hs_prepare +
- timing->da_hs_zero + timing->da_hs_exit + 3;
+ timing->da_hs_zero + timing->da_hs_exit;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index f2e9b429960b..a97725680d4e 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1507,25 +1507,30 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
dev_err(dev,
"Failed to get system configuration registers: %d\n",
ret);
- return ret;
+ goto put_device;
}
hdmi->sys_regmap = regmap;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hdmi->regs = devm_ioremap_resource(dev, mem);
- if (IS_ERR(hdmi->regs))
- return PTR_ERR(hdmi->regs);
+ if (IS_ERR(hdmi->regs)) {
+ ret = PTR_ERR(hdmi->regs);
+ goto put_device;
+ }
remote = of_graph_get_remote_node(np, 1, 0);
- if (!remote)
- return -EINVAL;
+ if (!remote) {
+ ret = -EINVAL;
+ goto put_device;
+ }
if (!of_device_is_compatible(remote, "hdmi-connector")) {
hdmi->next_bridge = of_drm_find_bridge(remote);
if (!hdmi->next_bridge) {
dev_err(dev, "Waiting for external bridge\n");
of_node_put(remote);
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto put_device;
}
}
@@ -1534,7 +1539,8 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n",
remote);
of_node_put(remote);
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_device;
}
of_node_put(remote);
@@ -1542,10 +1548,14 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
of_node_put(i2c_np);
if (!hdmi->ddc_adpt) {
dev_err(dev, "Failed to get ddc i2c adapter by node\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_device;
}
return 0;
+put_device:
+ put_device(hdmi->cec_dev);
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 6021f8d9efd1..48fa49f69d6d 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -164,6 +164,11 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
+ gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
/* NOTE: PM4/micro-engine firmware registers look to be the same
* for a2xx and a3xx.. we could possibly push that part down to
* adreno_gpu base class. Or push both PM4 and PFP but
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 0a5ea9f56cb8..f6471145a7a6 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -211,6 +211,16 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
+ /*
+ * Use the default ringbuffer size and block size but disable the RPTR
+ * shadow
+ */
+ gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ /* Set the ringbuffer address */
+ gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
/* setup access protection: */
gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index b9b26b2bf9c5..954753600625 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -267,6 +267,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
+ /*
+ * Use the default ringbuffer size and block size but disable the RPTR
+ * shadow
+ */
+ gpu_write(gpu, REG_A4XX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ /* Set the ringbuffer address */
+ gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
/* Load PM4: */
ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 84a5d9c1f2a2..91726da82ed6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -703,8 +703,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
- a5xx_preempt_hw_init(gpu);
-
if (!adreno_is_a510(adreno_gpu))
a5xx_gpmu_ucode_init(gpu);
@@ -712,6 +710,15 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
+ /* Set the ringbuffer address */
+ gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
+ gpu->rb[0]->iova);
+
+ gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ a5xx_preempt_hw_init(gpu);
+
/* Disable the interrupts through the initial bringup stage */
gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
@@ -1511,7 +1518,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
check_speed_bin(&pdev->dev);
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
+ /* Restricting nr_rings to 1 to temporarily disable preemption */
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 54868d4e3958..1e5b1a15a70f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -31,6 +31,7 @@ struct a5xx_gpu {
struct msm_ringbuffer *next_ring;
struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
+ struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 9cf9353a7ff1..9f3fe177b00e 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -226,19 +226,31 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
struct a5xx_preempt_record *ptr;
- struct drm_gem_object *bo = NULL;
- u64 iova = 0;
+ void *counters;
+ struct drm_gem_object *bo = NULL, *counters_bo = NULL;
+ u64 iova = 0, counters_iova = 0;
ptr = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
- MSM_BO_UNCACHED, gpu->aspace, &bo, &iova);
+ MSM_BO_UNCACHED | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
+ /* The buffer to store counters needs to be unprivileged */
+ counters = msm_gem_kernel_new(gpu->dev,
+ A5XX_PREEMPT_COUNTER_SIZE,
+ MSM_BO_UNCACHED, gpu->aspace, &counters_bo, &counters_iova);
+ if (IS_ERR(counters)) {
+ msm_gem_kernel_put(bo, gpu->aspace, true);
+ return PTR_ERR(counters);
+ }
+
msm_gem_object_set_name(bo, "preempt");
+ msm_gem_object_set_name(counters_bo, "preempt_counters");
a5xx_gpu->preempt_bo[ring->id] = bo;
+ a5xx_gpu->preempt_counters_bo[ring->id] = counters_bo;
a5xx_gpu->preempt_iova[ring->id] = iova;
a5xx_gpu->preempt[ring->id] = ptr;
@@ -249,7 +261,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
ptr->data = 0;
ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
ptr->rptr_addr = rbmemptr(ring, rptr);
- ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
+ ptr->counter = counters_iova;
return 0;
}
@@ -260,8 +272,11 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
- for (i = 0; i < gpu->nr_rings; i++)
+ for (i = 0; i < gpu->nr_rings; i++) {
msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true);
+ msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i],
+ gpu->aspace, true);
+ }
}
void a5xx_preempt_init(struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 3966abd523cc..66a95e22b7b3 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -678,7 +678,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
A6XX_PROTECT_RDONLY(0x980, 0x4));
gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
- if (adreno_is_a650(adreno_gpu)) {
+ /* Enable expanded apriv for targets that support it */
+ if (gpu->hw_apriv) {
gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
(1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
}
@@ -694,6 +695,13 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
if (ret)
goto out;
+ /* Set the ringbuffer address */
+ gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
+ gpu->rb[0]->iova);
+
+ gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
@@ -1056,6 +1064,9 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = NULL;
adreno_gpu->reg_offsets = a6xx_register_offsets;
+ if (adreno_is_a650(adreno_gpu))
+ adreno_gpu->base.hw_apriv = true;
+
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 288141fe4c58..862dd35b27d3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -400,26 +400,6 @@ int adreno_hw_init(struct msm_gpu *gpu)
ring->memptrs->rptr = 0;
}
- /*
- * Setup REG_CP_RB_CNTL. The same value is used across targets (with
- * the excpetion of A430 that disables the RPTR shadow) - the cacluation
- * for the ringbuffer size and block size is moved to msm_gpu.h for the
- * pre-processor to deal with and the A430 variant is ORed in here
- */
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
- MSM_GPU_RB_CNTL_DEFAULT |
- (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
-
- /* Setup ringbuffer address - use ringbuffer[0] for GPU init */
- adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
- REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
-
- if (!adreno_is_a430(adreno_gpu)) {
- adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
- REG_ADRENO_CP_RB_RPTR_ADDR_HI,
- rbmemptr(gpu->rb[0], rptr));
- }
-
return 0;
}
@@ -427,11 +407,8 @@ int adreno_hw_init(struct msm_gpu *gpu)
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
struct msm_ringbuffer *ring)
{
- if (adreno_is_a430(adreno_gpu))
- return ring->memptrs->rptr = adreno_gpu_read(
- adreno_gpu, REG_ADRENO_CP_RB_RPTR);
- else
- return ring->memptrs->rptr;
+ return ring->memptrs->rptr = adreno_gpu_read(
+ adreno_gpu, REG_ADRENO_CP_RB_RPTR);
}
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index d5645472b25d..57ddc9438351 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -908,7 +908,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
memptrs = msm_gem_kernel_new(drm,
sizeof(struct msm_rbmemptrs) * nr_rings,
- MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
+ check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 0db117a7339b..37cffac4cbe3 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -15,6 +15,7 @@
#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
+#include "msm_gem.h"
struct msm_gem_submit;
struct msm_gpu_perfcntr;
@@ -139,6 +140,8 @@ struct msm_gpu {
} devfreq;
struct msm_gpu_state *crashstate;
+ /* True if the hardware supports expanded apriv (a650 and newer) */
+ bool hw_apriv;
};
/* It turns out that all targets use the same ringbuffer size */
@@ -327,4 +330,12 @@ static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
mutex_unlock(&gpu->dev->struct_mutex);
}
+/*
+ * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
+ * support expanded privileges
+ */
+#define check_apriv(gpu, flags) \
+ (((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
+
+
#endif /* __MSM_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 39ecb5a18431..935bf9b1d941 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -27,8 +27,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->id = id;
ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
- MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &ring->bo,
- &ring->iova);
+ check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
+ gpu->aspace, &ring->bo, &ring->iova);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7b69d6dfe44a..e0ae911ef427 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -933,7 +933,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
/* get matching reference and feedback divider */
*ref_div = min(max(den/post_div, 1u), ref_div_max);
- *fb_div = max(nom * *ref_div * post_div / den, 1u);
+ *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
if (*fb_div > fb_div_max) {
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 072ea113e6be..ed5d86617802 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -589,8 +589,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
/* We can't have an alpha plane at the lowest position */
if (!backend->quirks->supports_lowest_plane_alpha &&
- (plane_states[0]->fb->format->has_alpha ||
- (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE)))
+ (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
return -EINVAL;
for (i = 1; i < num_planes; i++) {
@@ -995,7 +994,6 @@ static const struct sun4i_backend_quirks sun6i_backend_quirks = {
static const struct sun4i_backend_quirks sun7i_backend_quirks = {
.needs_output_muxing = true,
- .supports_lowest_plane_alpha = true,
};
static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index ced9a8287dd8..e40c542254f6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -1433,14 +1433,18 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon,
if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
encoder->encoder_type == DRM_MODE_ENCODER_TMDS) {
ret = sun8i_tcon_top_set_hdmi_src(&pdev->dev, id);
- if (ret)
+ if (ret) {
+ put_device(&pdev->dev);
return ret;
+ }
}
if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP)) {
ret = sun8i_tcon_top_de_config(&pdev->dev, tcon->id, id);
- if (ret)
+ if (ret) {
+ put_device(&pdev->dev);
return ret;
+ }
}
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 7f13f4d715bf..de8a11abd66a 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -889,7 +889,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi,
regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
- bounce = kzalloc(msg->tx_len + sizeof(crc), GFP_KERNEL);
+ bounce = kzalloc(ALIGN(msg->tx_len + sizeof(crc), 4), GFP_KERNEL);
if (!bounce)
return -ENOMEM;
@@ -900,7 +900,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi,
memcpy((u8 *)bounce + msg->tx_len, &crc, sizeof(crc));
len += sizeof(crc);
- regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, len);
+ regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, DIV_ROUND_UP(len, 4));
regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG, len + 4 - 1);
kfree(bounce);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 22c8c5375d0d..c0147af6a840 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -211,7 +211,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
return 0;
}
-static bool sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format)
+static u32 sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format)
{
if (!format->is_yuv)
return SUN8I_CSC_MODE_OFF;
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index d733bbc4ac0e..17ff24d999d1 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -14,6 +14,7 @@
#include <linux/version.h>
#include <linux/dma-buf.h>
#include <linux/of_graph.h>
+#include <linux/delay.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
@@ -130,9 +131,25 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe,
struct drm_connector *connector = priv->connector;
u32 format = fb->format->format;
u32 ctrl1 = 0;
+ int retries;
clk_prepare_enable(priv->clk);
+ /* Reset the TVE200 and wait for it to come back online */
+ writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4);
+ for (retries = 0; retries < 5; retries++) {
+ usleep_range(30000, 50000);
+ if (readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET)
+ continue;
+ else
+ break;
+ }
+ if (retries == 5 &&
+ readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) {
+ dev_err(drm->dev, "can't get hardware out of reset\n");
+ return;
+ }
+
/* Function 1 */
ctrl1 |= TVE200_CTRL_CSMODE;
/* Interlace mode for CCIR656: parameterize? */
@@ -230,8 +247,9 @@ static void tve200_display_disable(struct drm_simple_display_pipe *pipe)
drm_crtc_vblank_off(crtc);
- /* Disable and Power Down */
+ /* Disable put into reset and Power Down */
writel(0, priv->regs + TVE200_CTRL);
+ writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4);
clk_disable_unprepare(priv->clk);
}
@@ -279,6 +297,8 @@ static int tve200_display_enable_vblank(struct drm_simple_display_pipe *pipe)
struct drm_device *drm = crtc->dev;
struct tve200_drm_dev_private *priv = drm->dev_private;
+ /* Clear any IRQs and enable */
+ writel(0xFF, priv->regs + TVE200_INT_CLR);
writel(TVE200_INT_V_STATUS, priv->regs + TVE200_INT_EN);
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index af55b334be2f..afd0f9200f90 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -97,9 +97,6 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
-
- output->enabled = true;
}
static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -111,7 +108,6 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
virtio_gpu_notify(vgdev);
- output->enabled = false;
}
static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
@@ -123,6 +119,17 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+
+ /*
+ * virtio-gpu can't do modeset and plane update operations
+ * independent from each other. So the actual modeset happens
+ * in the plane update callback, and here we just check
+ * whenever we must force the modeset.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc->state)) {
+ output->needs_modeset = true;
+ }
}
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 9ff9f4ac0522..fbc04272db4f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -137,7 +137,7 @@ struct virtio_gpu_output {
struct edid *edid;
int cur_x;
int cur_y;
- bool enabled;
+ bool needs_modeset;
};
#define drm_crtc_to_virtio_gpu_output(x) \
container_of(x, struct virtio_gpu_output, crtc)
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index e83651b7747d..842f8b61aa89 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -151,7 +151,13 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
if (ret < 0)
return -EINVAL;
- shmem->pages = drm_gem_shmem_get_pages_sgt(&bo->base.base);
+ /*
+ * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
+ * drm_gem_shmem_get_pages_sgt because virtio has it's own set of
+ * dma-ops. This is discouraged for other drivers, but should be fine
+ * since virtio_gpu doesn't support dma-buf import from other devices.
+ */
+ shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
if (!shmem->pages) {
drm_gem_shmem_unpin(&bo->base.base);
return -EINVAL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 52d24179bcec..6a311cd93440 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -142,7 +142,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
if (WARN_ON(!output))
return;
- if (!plane->state->fb || !output->enabled) {
+ if (!plane->state->fb || !output->crtc.state->active) {
DRM_DEBUG("nofb\n");
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
plane->state->src_w >> 16,
@@ -163,7 +163,9 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
plane->state->src_w != old_state->src_w ||
plane->state->src_h != old_state->src_h ||
plane->state->src_x != old_state->src_x ||
- plane->state->src_y != old_state->src_y) {
+ plane->state->src_y != old_state->src_y ||
+ output->needs_modeset) {
+ output->needs_modeset = false;
DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
bo->hw_res_handle,
plane->state->crtc_w, plane->state->crtc_h,
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 39ff95b75357..534daf37c97e 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -18,6 +18,7 @@
#include <drm/drm_probe_helper.h>
#include <xen/balloon.h>
+#include <xen/xen.h>
#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"
@@ -99,8 +100,8 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
* allocate ballooned pages which will be used to map
* grant references provided by the backend
*/
- ret = alloc_xenballooned_pages(xen_obj->num_pages,
- xen_obj->pages);
+ ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
+ xen_obj->pages);
if (ret < 0) {
DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
xen_obj->num_pages, ret);
@@ -152,8 +153,8 @@ void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
} else {
if (xen_obj->pages) {
if (xen_obj->be_alloc) {
- free_xenballooned_pages(xen_obj->num_pages,
- xen_obj->pages);
+ xen_free_unpopulated_pages(xen_obj->num_pages,
+ xen_obj->pages);
gem_free_pages_array(xen_obj);
} else {
drm_gem_put_pages(&xen_obj->base,
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index aa6cd889bd11..b52c6cdfc0b8 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -2,6 +2,7 @@ config DRM_ZYNQMP_DPSUB
tristate "ZynqMP DisplayPort Controller Driver"
depends on ARCH_ZYNQMP || COMPILE_TEST
depends on COMMON_CLK && DRM && OF
+ depends on DMADEVICES
select DMA_ENGINE
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER