Diffstat (limited to 'drivers/gpu/drm')
 drivers/gpu/drm/i915/i915_dma.c            | 20
 drivers/gpu/drm/i915/i915_drv.c            |  3
 drivers/gpu/drm/i915/i915_drv.h            |  2
 drivers/gpu/drm/i915/i915_gem.c            |  7
 drivers/gpu/drm/i915/i915_gem_context.c    | 16
 drivers/gpu/drm/i915/i915_gem_dmabuf.c     | 13
 drivers/gpu/drm/i915/i915_gem_evict.c      | 14
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 60
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  6
 drivers/gpu/drm/i915/intel_ddi.c           |  5
 drivers/gpu/drm/i915/intel_display.c       | 21
 drivers/gpu/drm/i915/intel_dp.c            | 34
 drivers/gpu/drm/i915/intel_drv.h           |  3
 drivers/gpu/drm/i915/intel_pm.c            | 26
 14 files changed, 131 insertions(+), 99 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0817dd1bd2ff..89af75a6d9a9 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 
+	/*
+	 * The dri breadcrumb update races against the drm master disappearing.
+	 * Instead of trying to fix this (this is by far not the only ums issue)
+	 * just don't do the update in kms mode.
+	 */
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	if (dev->primary->master) {
 		master_priv = dev->primary->master->driver_priv;
 		if (master_priv->sarea_priv)
@@ -1490,16 +1498,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->uncore.lock);
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
 	mutex_init(&dev_priv->dpio_lock);
-	mutex_init(&dev_priv->rps.hw_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
-	mutex_init(&dev_priv->pc8.lock);
-	dev_priv->pc8.requirements_met = false;
-	dev_priv->pc8.gpu_idle = false;
-	dev_priv->pc8.irqs_disabled = false;
-	dev_priv->pc8.enabled = false;
-	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
-	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+	intel_pm_setup(dev);
 
 	intel_display_crc_init(dev);
@@ -1603,7 +1604,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	}
 
 	intel_irq_init(dev);
-	intel_pm_init(dev);
 	intel_uncore_sanitize(dev);
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1845,8 +1845,10 @@ void i915_driver_lastclose(struct drm_device * dev)
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
+	mutex_lock(&dev->struct_mutex);
 	i915_gem_context_close(dev, file_priv);
 	i915_gem_release(dev, file_priv);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 13076db65eb9..65b5c83df3bb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -535,8 +535,10 @@ static int i915_drm_freeze(struct drm_device *dev)
 		 * Disable CRTCs directly since we want to preserve sw state
 		 * for _thaw.
 		 */
+		mutex_lock(&dev->mode_config.mutex);
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
 			dev_priv->display.crtc_disable(crtc);
+		mutex_unlock(&dev->mode_config.mutex);
 
 		intel_modeset_suspend_hw(dev);
 	}
@@ -650,6 +652,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 		intel_modeset_init_hw(dev);
 
 		drm_modeset_lock_all(dev);
+		drm_mode_config_reset(dev);
 		intel_modeset_setup_hw_state(dev, true);
 		drm_modeset_unlock_all(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b10d466f5dcb..a70d9c8b5277 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1944,9 +1944,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 
 extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0572257ac6f6..a9cabff5aa37 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4421,10 +4421,9 @@ i915_gem_init_hw(struct drm_device *dev)
 	if (dev_priv->ellc_size)
 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
-	if (IS_HSW_GT3(dev))
-		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
-	else
-		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+	if (IS_HASWELL(dev))
+		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
+			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
 	if (HAS_PCH_NOP(dev)) {
 		u32 temp = I915_READ(GEN7_MSG_CTL);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d3a17ef78ba6..165a5c7d9424 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -520,11 +520,9 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 		return;
 	}
 
-	mutex_lock(&dev->struct_mutex);
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 	i915_gem_context_unreference(file_priv->private_default_ctx);
 	idr_destroy(&file_priv->context_idr);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 struct i915_hw_context *
@@ -611,11 +609,21 @@ static int do_switch(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;
 
-	/* Clear this page out of any CPU caches for coherent swap-in/out. Note
+	/*
+	 * Pin can switch back to the default context if we end up calling into
+	 * evict_everything - as a last ditch gtt defrag effort that also
+	 * switches to the default context. Hence we need to reload from here.
+	 */
+	from = ring->last_context;
+
+	/*
+	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
 	 * that thanks to write = false in this call and us not setting any gpu
 	 * write domains when putting a context object onto the active list
 	 * (when switching away from it), this won't block.
-	 * XXX: We need a real interface to do this instead of trickery. */
+	 *
+	 * XXX: We need a real interface to do this instead of trickery.
+	 */
 	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
 	if (ret) {
 		i915_gem_object_ggtt_unpin(to->obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 7d5752fda5f1..9bb533e0d762 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -125,13 +125,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 	ret = i915_gem_object_get_pages(obj);
 	if (ret)
-		goto error;
+		goto err;
+
+	i915_gem_object_pin_pages(obj);
 
 	ret = -ENOMEM;
 	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
 	if (pages == NULL)
-		goto error;
+		goto err_unpin;
 
 	i = 0;
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
@@ -141,15 +143,16 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 	drm_free_large(pages);
 
 	if (!obj->dma_buf_vmapping)
-		goto error;
+		goto err_unpin;
 
 	obj->vmapping_count = 1;
-	i915_gem_object_pin_pages(obj);
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return obj->dma_buf_vmapping;
 
-error:
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err:
 	mutex_unlock(&dev->struct_mutex);
 	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 5cb0aa4fadc7..54413ed46a46 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -89,6 +89,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
+search_again:
 	/* First see if there is a large enough contiguous idle region... */
 	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
 		if (mark_free(vma, &unwind_list))
@@ -116,10 +117,17 @@ none:
 		list_del_init(&vma->exec_list);
 	}
 
-	/* We expect the caller to unpin, evict all and try again, or give up.
-	 * So calling i915_gem_evict_vm() is unnecessary.
+	/* Can we unpin some objects such as idle hw contents,
+	 * or pending flips?
 	 */
-	return -ENOSPC;
+	ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
+	if (ret)
+		return ret;
+
+	/* Only idle the GPU and repeat the search once */
+	i915_gem_retire_requests(dev);
+	nonblocking = true;
+	goto search_again;
 
 found:
 	/* drm_mm doesn't allow any other other operations while
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index e78c5c0e33d8..8779d75bee1f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,6 +33,9 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
+
 struct eb_vmas {
 	struct list_head vmas;
 	int and;
@@ -197,7 +200,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 	}
 }
 
-static void eb_destroy(struct eb_vmas *eb) {
+static void
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (!drm_mm_node_allocated(&vma->node))
+		return;
+
+	entry = vma->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		vma->pin_count--;
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
+{
 	while (!list_empty(&eb->vmas)) {
 		struct i915_vma *vma;
@@ -205,6 +229,7 @@ static void eb_destroy(struct eb_vmas *eb) {
 				       struct i915_vma, exec_list);
 		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
 		drm_gem_object_unreference(&vma->obj->base);
 	}
 	kfree(eb);
@@ -486,9 +511,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 	return ret;
 }
 
-#define __EXEC_OBJECT_HAS_PIN (1<<31)
-#define __EXEC_OBJECT_HAS_FENCE (1<<30)
-
 static int
 need_reloc_mappable(struct i915_vma *vma)
 {
@@ -551,26 +573,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	return 0;
 }
 
-static void
-i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
-{
-	struct drm_i915_gem_exec_object2 *entry;
-	struct drm_i915_gem_object *obj = vma->obj;
-
-	if (!drm_mm_node_allocated(&vma->node))
-		return;
-
-	entry = vma->exec_entry;
-
-	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-		i915_gem_object_unpin_fence(obj);
-
-	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-		vma->pin_count--;
-
-	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct list_head *vmas,
@@ -669,13 +671,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			goto err;
 	}
 
-err:		/* Decrement pin count for bound objects */
-	list_for_each_entry(vma, vmas, exec_list)
-		i915_gem_execbuffer_unreserve_vma(vma);
-
+err:
 	if (ret != -ENOSPC || retry++)
 		return ret;
 
+	/* Decrement pin count for bound objects */
+	list_for_each_entry(vma, vmas, exec_list)
+		i915_gem_execbuffer_unreserve_vma(vma);
+
 	ret = i915_gem_evict_vm(vm, true);
 	if (ret)
 		return ret;
@@ -707,6 +710,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	while (!list_empty(&eb->vmas)) {
 		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
 		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
 		drm_gem_object_unreference(&vma->obj->base);
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a6211e079010..88e49b19baba 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -57,7 +57,9 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 #define HSW_WB_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x2)
 #define HSW_WB_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x3)
 #define HSW_WB_ELLC_LLC_AGE0	HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WB_ELLC_LLC_AGE3	HSW_CACHEABILITY_CONTROL(0x8)
 #define HSW_WT_ELLC_LLC_AGE0	HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_WT_ELLC_LLC_AGE3	HSW_CACHEABILITY_CONTROL(0x7)
 
 #define GEN8_PTES_PER_PAGE	(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
 #define GEN8_PDES_PER_PAGE	(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
@@ -191,10 +193,10 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 	case I915_CACHE_NONE:
 		break;
 	case I915_CACHE_WT:
-		pte |= HSW_WT_ELLC_LLC_AGE0;
+		pte |= HSW_WT_ELLC_LLC_AGE3;
 		break;
 	default:
-		pte |= HSW_WB_ELLC_LLC_AGE0;
+		pte |= HSW_WB_ELLC_LLC_AGE3;
 		break;
 	}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 86dc6ecd357f..c8a5fb73fb66 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -173,7 +173,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 		ddi_translations = ddi_translations_dp;
 		break;
 	case PORT_D:
-		if (intel_dpd_is_edp(dev))
+		if (intel_dp_is_edp(dev, PORT_D))
 			ddi_translations = ddi_translations_edp;
 		else
 			ddi_translations = ddi_translations_dp;
@@ -1165,9 +1165,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 	if (wait)
 		intel_wait_ddi_buf_idle(dev_priv, port);
 
-	if (type == INTEL_OUTPUT_EDP) {
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 		ironlake_edp_panel_vdd_on(intel_dp);
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 		ironlake_edp_panel_off(intel_dp);
 	}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 596ad09f0e51..af3717a8a463 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6002,7 +6002,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
 		uint16_t postoff = 0;
 
 		if (intel_crtc->config.limited_color_range)
-			postoff = (16 * (1 << 13) / 255) & 0x1fff;
+			postoff = (16 * (1 << 12) / 255) & 0x1fff;
 
 		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
 		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
@@ -6589,7 +6589,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 	/* Make sure we're not on PC8 state before disabling PC8, otherwise
 	 * we'll hang the machine!
 	 */
-	dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	if (val & LCPLL_POWER_DOWN_ALLOW) {
 		val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6623,7 +6623,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 			DRM_ERROR("Switching back to LCPLL failed\n");
 	}
 
-	dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 
 void hsw_enable_pc8_work(struct work_struct *__work)
@@ -8541,7 +8541,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
 					DERRMR_PIPEB_PRI_FLIP_DONE |
 					DERRMR_PIPEC_PRI_FLIP_DONE));
-		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
+		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
+				      MI_SRM_LRM_GLOBAL_GTT);
 		intel_ring_emit(ring, DERRMR);
 		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
 	}
@@ -9321,7 +9322,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
 		PIPE_CONF_CHECK_I(pipe_bpp);
 
-	if (!IS_HASWELL(dev)) {
+	if (!HAS_DDI(dev)) {
 		PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
 		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
 	}
@@ -10254,7 +10255,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 			intel_ddi_init(dev, PORT_D);
 	} else if (HAS_PCH_SPLIT(dev)) {
 		int found;
-		dpd_is_edp = intel_dpd_is_edp(dev);
+		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
 
 		if (has_edp_a(dev))
 			intel_dp_init(dev, DP_A, PORT_A);
@@ -10291,8 +10292,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
 					PORT_C);
 			if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
-				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
-					      PORT_C);
+				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
 		}
 
 		intel_dsi_init(dev);
@@ -11230,8 +11230,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 	}
 
 	intel_modeset_check_state(dev);
-
-	drm_mode_config_reset(dev);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -11240,7 +11238,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
 
 	intel_setup_overlay(dev);
 
+	drm_modeset_lock_all(dev);
+	drm_mode_config_reset(dev);
 	intel_modeset_setup_hw_state(dev, false);
+	drm_modeset_unlock_all(dev);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f3b17b11b270..bda2e1fd5364 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3316,11 +3316,19 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
 }
 
 /* check the VBT to see whether the eDP is on DP-D port */
-bool intel_dpd_is_edp(struct drm_device *dev)
+bool intel_dp_is_edp(struct drm_device *dev, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	union child_device_config *p_child;
 	int i;
+	static const short port_mapping[] = {
+		[PORT_B] = PORT_IDPB,
+		[PORT_C] = PORT_IDPC,
+		[PORT_D] = PORT_IDPD,
+	};
+
+	if (port == PORT_A)
+		return true;
 
 	if (!dev_priv->vbt.child_dev_num)
 		return false;
@@ -3328,7 +3336,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
 		p_child = dev_priv->vbt.child_dev + i;
 
-		if (p_child->common.dvo_port == PORT_IDPD &&
+		if (p_child->common.dvo_port == port_mapping[port] &&
 		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
 		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
 			return true;
@@ -3606,26 +3614,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	intel_dp->DP = I915_READ(intel_dp->output_reg);
 	intel_dp->attached_connector = intel_connector;
 
-	type = DRM_MODE_CONNECTOR_DisplayPort;
-	/*
-	 * FIXME : We need to initialize built-in panels before external panels.
-	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
-	 */
-	switch (port) {
-	case PORT_A:
+	if (intel_dp_is_edp(dev, port))
 		type = DRM_MODE_CONNECTOR_eDP;
-		break;
-	case PORT_C:
-		if (IS_VALLEYVIEW(dev))
-			type = DRM_MODE_CONNECTOR_eDP;
-		break;
-	case PORT_D:
-		if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
-			type = DRM_MODE_CONNECTOR_eDP;
-		break;
-	default:	/* silence GCC warning */
-		break;
-	}
+	else
+		type = DRM_MODE_CONNECTOR_DisplayPort;
 
 	/*
 	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 5dea38967523..ea6267338209 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -720,7 +720,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 void intel_dp_check_link_status(struct intel_dp *intel_dp);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
 			     struct intel_crtc_config *pipe_config);
-bool intel_dpd_is_edp(struct drm_device *dev);
+bool intel_dp_is_edp(struct drm_device *dev, enum port port);
 void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
 void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
 void ironlake_edp_panel_on(struct intel_dp *intel_dp);
@@ -834,6 +834,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
 				    uint32_t sprite_width, int pixel_size,
 				    bool enabled, bool scaled);
 void intel_init_pm(struct drm_device *dev);
+void intel_pm_setup(struct drm_device *dev);
 bool intel_fbc_enabled(struct drm_device *dev);
 void intel_update_fbc(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index cba4be88eddb..0731338c0bfd 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1187,7 +1187,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
 	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
 	clock = adjusted_mode->crtc_clock;
-	htotal = adjusted_mode->htotal;
+	htotal = adjusted_mode->crtc_htotal;
 	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
@@ -1274,7 +1274,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
 	crtc = intel_get_crtc_for_plane(dev, plane);
 	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
 	clock = adjusted_mode->crtc_clock;
-	htotal = adjusted_mode->htotal;
+	htotal = adjusted_mode->crtc_htotal;
 	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
@@ -1505,7 +1505,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 		const struct drm_display_mode *adjusted_mode =
 			&to_intel_crtc(crtc)->config.adjusted_mode;
 		int clock = adjusted_mode->crtc_clock;
-		int htotal = adjusted_mode->htotal;
+		int htotal = adjusted_mode->crtc_htotal;
 		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 		int pixel_size = crtc->fb->bits_per_pixel / 8;
 		unsigned long line_time_us;
@@ -1631,7 +1631,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 		const struct drm_display_mode *adjusted_mode =
 			&to_intel_crtc(enabled)->config.adjusted_mode;
 		int clock = adjusted_mode->crtc_clock;
-		int htotal = adjusted_mode->htotal;
+		int htotal = adjusted_mode->crtc_htotal;
 		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
 		int pixel_size = enabled->fb->bits_per_pixel / 8;
 		unsigned long line_time_us;
@@ -1783,7 +1783,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
 	crtc = intel_get_crtc_for_plane(dev, plane);
 	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
 	clock = adjusted_mode->crtc_clock;
-	htotal = adjusted_mode->htotal;
+	htotal = adjusted_mode->crtc_htotal;
 	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
@@ -2476,8 +2476,9 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
 	/* The WM are computed with base on how long it takes to fill a single
 	 * row at the given clock rate, multiplied by 8.
 	 * */
-	linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
-	ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
+	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
+				     mode->crtc_clock);
+	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
 					 intel_ddi_get_cdclk_freq(dev_priv));
 
 	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
@@ -6179,10 +6180,19 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
 }
 
-void intel_pm_init(struct drm_device *dev)
+void intel_pm_setup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	mutex_init(&dev_priv->rps.hw_lock);
+
+	mutex_init(&dev_priv->pc8.lock);
+	dev_priv->pc8.requirements_met = false;
+	dev_priv->pc8.gpu_idle = false;
+	dev_priv->pc8.irqs_disabled = false;
+	dev_priv->pc8.enabled = false;
+	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
 	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
 			  intel_gen6_powersave_work);
 }