author | Dave Airlie <airlied@redhat.com> | 2016-07-15 13:50:58 +1000 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2016-07-15 13:50:58 +1000 |
commit | ff37c05a996bb96eccc21f4fb1b32ba0e24f3443 (patch) | |
tree | c09b09b37521f2f8f3f7a9bb3b0a33a2b3bde1a1 | |
parent | 6c181c82106e12dced317e93a7a396cbb8c64f75 (diff) | |
parent | 0b2c0582f1570bfc95aa9ac1cd340a215d8e8335 (diff) | |
download | linux-ff37c05a996bb96eccc21f4fb1b32ba0e24f3443.tar.bz2 |
Merge tag 'drm-intel-next-2016-07-11' of git://anongit.freedesktop.org/drm-intel into drm-next
- select igt testing dependencies for CONFIG_DRM_I915_DEBUG (Chris)
- track outputs in crtc state and clean up all our ad-hoc connector/encoder
walking in modeset code (Ville)
- demidlayer drm_device/drm_i915_private (Chris Wilson); a sketch of the pattern follows this list
- thundering herd fix from Chris Wilson, with lots of help from Tvrtko Ursulin (see the sketch after the shortlog below)
- piles of assorted cleanups and fallout from the thundering herd fix
- documentation and more tuning for waitboosting (Chris)
- pooled EU support on bxt (Arun Siluvery)
- bxt support is no longer considered preliminary!
- ring/engine vfunc cleanup from Tvrtko
- introduce intel_wait_for_register helper (Chris); a usage sketch follows the diffstat below
- opregion updates (Jani Nikula)
- tuning and fixes for wait_for macros (Tvrtko & Imre)
- more kabylake pci ids (Rodrigo)
- pps cleanup and fixes for bxt (Imre)
- move sink crc support over to atomic state (Maarten)
- fix up async fbdev init ordering (Chris)
- fbc fixes from Paulo and Chris
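
On the demidlayering item above: this series embeds struct drm_device directly in struct drm_i915_private (as the drm member), so the old dev->dev_private indirection becomes a container_of() upcast via to_i915(), and driver code reaches DRM core state as dev_priv->drm.*; both conversions recur throughout the diff below. A reduced sketch of the pattern:

    struct drm_i915_private {
            struct drm_device drm;  /* embedded, no longer a separate allocation */
            /* ... driver-private state ... */
    };

    static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
    {
            /* recover the containing private struct from the embedded drm_device */
            return container_of(dev, struct drm_i915_private, drm);
    }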
* tag 'drm-intel-next-2016-07-11' of git://anongit.freedesktop.org/drm-intel: (223 commits)
drm/i915: Update DRIVER_DATE to 20160711
drm/i915: Select DRM_VGEM for igt
drm/i915: Select X86_MSR for igt
drm/i915: Fill unused GGTT with scratch pages for VT-d
drm/i915: Introduce Kabypoint PCH for Kabylake H/DT.
drm/i915:gen9: implement WaMediaPoolStateCmdInWABB
drm/i915: Check for invalid cloning earlier during modeset
drm/i915: Simplify hdmi_12bpc_possible()
drm/i915: Kill has_dsi_encoder
drm/i915: s/INTEL_OUTPUT_DISPLAYPORT/INTEL_OUTPUT_DP/
drm/i915: Replace some open coded intel_crtc_has_dp_encoder()s
drm/i915: Kill has_dp_encoder from pipe_config
drm/i915: Replace manual lvds and sdvo/hdmi counting with intel_crtc_has_type()
drm/i915: Unify intel_pipe_has_type() and intel_pipe_will_have_type()
drm/i915: Add output_types bitmask into the crtc state
drm/i915: Remove encoder type checks from MST suspend/resume
drm/i915: Don't mark eDP encoders as MST capable
drm/i915: avoid wait_for_atomic() in non-atomic host2guc_action()
drm/i915: Group the irq breadcrumb variables into the same cacheline
drm/i915: Wake up the bottom-half if we steal their interrupt
...
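
On the thundering herd fix listed above: rather than every waiter enabling the user interrupt and being woken on each breadcrumb, a single bottom-half waiter owns the interrupt while the remaining waiters park in a seqno-sorted rb-tree and are woken in order. A reduced sketch of the structures the new debugfs code in this diff walks (only the fields used there; the real definitions carry more state):

    struct intel_wait {
            struct rb_node node;       /* link in engine->breadcrumbs.waiters */
            struct task_struct *tsk;   /* sleeping task to wake when the seqno passes */
            u32 seqno;                 /* breadcrumb being waited on */
    };

    struct intel_breadcrumbs {
            spinlock_t lock;           /* protects the waiter tree */
            struct rb_root waiters;    /* waiters sorted by seqno */
            unsigned long irq_wakeups; /* wakeup count reported in debugfs */
    };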
72 files changed, 6344 insertions, 5415 deletions
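
The intel_wait_for_register() helper introduced in this series consolidates the open-coded register-poll loops previously built directly on the wait_for()/wait_for_atomic() macros; it tries a short atomic spin first and then falls back to a sleeping wait. A usage sketch, with placeholder register and bit names (illustrations only, not real i915 registers):

    /* Poll until the busy bit reads back as zero, giving up after 50ms. */
    if (intel_wait_for_register(dev_priv,
                                SOME_STATUS_REG, /* placeholder i915_reg_t */
                                SOME_BUSY_BIT,   /* bits to mask off the readback */
                                0,               /* value the masked bits must equal */
                                50))             /* timeout in milliseconds */
            DRM_ERROR("timed out waiting for idle\n");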
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 8f404103341d..cee87bfd10c4 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -18,6 +18,9 @@ config DRM_I915_WERROR config DRM_I915_DEBUG bool "Enable additional driver debugging" depends on DRM_I915 + select PREEMPT_COUNT + select X86_MSR # used by igt/pm_rpm + select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) default n help Choose this option to turn on extra driver debugging that may affect diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 276abf1cac2b..684fc1cd08fa 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -10,9 +10,11 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror i915-y := i915_drv.o \ i915_irq.o \ i915_params.o \ + i915_pci.o \ i915_suspend.o \ i915_sysfs.o \ intel_csr.o \ + intel_device_info.o \ intel_pm.o \ intel_runtime_pm.o @@ -37,6 +39,7 @@ i915-y += i915_cmd_parser.o \ i915_gem_userptr.o \ i915_gpu_error.o \ i915_trace_points.o \ + intel_breadcrumbs.o \ intel_lrc.o \ intel_mocs.o \ intel_ringbuffer.o \ @@ -101,9 +104,6 @@ i915-y += dvo_ch7017.o \ # virtual gpu code i915-y += i915_vgpu.o -# legacy horrors -i915-y += i915_dma.o - ifeq ($(CONFIG_DRM_I915_GVT),y) i915-y += intel_gvt.o include $(src)/gvt/Makefile diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 5b7526697838..844fea795bae 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -265,7 +265,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; u64 total_obj_size, total_gtt_size; LIST_HEAD(stolen); @@ -440,15 +440,15 @@ static void print_context_stats(struct seq_file *m, memset(&stats, 0, sizeof(stats)); - mutex_lock(&dev_priv->dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); if (dev_priv->kernel_context) per_file_ctx_stats(0, dev_priv->kernel_context, &stats); - list_for_each_entry(file, &dev_priv->dev->filelist, lhead) { + list_for_each_entry(file, &dev_priv->drm.filelist, lhead) { struct drm_i915_file_private *fpriv = file->driver_priv; idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats); } - mutex_unlock(&dev_priv->dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); print_file_stats(m, "[k]contexts", stats); } @@ -591,7 +591,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data) struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; uintptr_t list = (uintptr_t) node->info_ent->data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; u64 total_obj_size, total_gtt_size; int count, ret; @@ -625,7 +625,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc; int ret; @@ -662,8 +662,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) engine->name, i915_gem_request_get_seqno(work->flip_queued_req), dev_priv->next_seqno, - engine->get_seqno(engine), - 
i915_gem_request_completed(work->flip_queued_req, true)); + intel_engine_get_seqno(engine), + i915_gem_request_completed(work->flip_queued_req)); } else seq_printf(m, "Flip not associated with any ring\n"); seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", @@ -695,7 +695,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; struct intel_engine_cs *engine; int total = 0; @@ -740,7 +740,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; struct drm_i915_gem_request *req; int ret, any; @@ -788,17 +788,29 @@ static int i915_gem_request_info(struct seq_file *m, void *data) static void i915_ring_seqno_info(struct seq_file *m, struct intel_engine_cs *engine) { + struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct rb_node *rb; + seq_printf(m, "Current sequence (%s): %x\n", - engine->name, engine->get_seqno(engine)); - seq_printf(m, "Current user interrupts (%s): %x\n", - engine->name, READ_ONCE(engine->user_interrupts)); + engine->name, intel_engine_get_seqno(engine)); + seq_printf(m, "Current user interrupts (%s): %lx\n", + engine->name, READ_ONCE(engine->breadcrumbs.irq_wakeups)); + + spin_lock(&b->lock); + for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { + struct intel_wait *w = container_of(rb, typeof(*w), node); + + seq_printf(m, "Waiting (%s): %s [%d] on %x\n", + engine->name, w->tsk->comm, w->tsk->pid, w->seqno); + } + spin_unlock(&b->lock); } static int i915_gem_seqno_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; int ret; @@ -821,7 +833,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; int ret, i, pipe; @@ -1012,7 +1024,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i, ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -1040,7 +1052,7 @@ static int i915_hws_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; const u32 *hws; int i; @@ -1151,7 +1163,7 @@ static int i915_next_seqno_get(void *data, u64 *val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -1188,7 +1200,7 @@ static int i915_frequency_info(struct seq_file *m, void 
*unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; intel_runtime_pm_get(dev_priv); @@ -1391,7 +1403,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; u64 acthd[I915_NUM_ENGINES]; u32 seqno[I915_NUM_ENGINES]; @@ -1408,7 +1420,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) for_each_engine_id(engine, dev_priv, id) { acthd[id] = intel_ring_get_active_head(engine); - seqno[id] = engine->get_seqno(engine); + seqno[id] = intel_engine_get_seqno(engine); } i915_get_extra_instdone(dev_priv, instdone); @@ -1428,9 +1440,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) engine->hangcheck.seqno, seqno[id], engine->last_submitted_seqno); - seq_printf(m, "\tuser interrupts = %x [current %x]\n", + seq_printf(m, "\twaiters? %d\n", + intel_engine_has_waiter(engine)); + seq_printf(m, "\tuser interrupts = %lx [current %lx]\n", engine->hangcheck.user_interrupts, - READ_ONCE(engine->user_interrupts)); + READ_ONCE(engine->breadcrumbs.irq_wakeups)); seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", (long long)engine->hangcheck.acthd, (long long)acthd[id]); @@ -1460,7 +1474,7 @@ static int ironlake_drpc_info(struct seq_file *m) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 rgvmodectl, rstdbyctl; u16 crstandvid; int ret; @@ -1528,7 +1542,7 @@ static int i915_forcewake_domains(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore_forcewake_domain *fw_domain; spin_lock_irq(&dev_priv->uncore.lock); @@ -1546,7 +1560,7 @@ static int vlv_drpc_info(struct seq_file *m) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 rpmodectl1, rcctl1, pw_status; intel_runtime_pm_get(dev_priv); @@ -1586,7 +1600,7 @@ static int gen6_drpc_info(struct seq_file *m) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; unsigned forcewake_count; int count = 0, ret; @@ -1698,7 +1712,7 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); seq_printf(m, "FB tracking busy bits: 0x%08x\n", dev_priv->fb_tracking.busy_bits); @@ -1713,7 +1727,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!HAS_FBC(dev)) { 
seq_puts(m, "FBC unsupported on this chipset\n"); @@ -1743,7 +1757,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) static int i915_fbc_fc_get(void *data, u64 *val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) return -ENODEV; @@ -1756,7 +1770,7 @@ static int i915_fbc_fc_get(void *data, u64 *val) static int i915_fbc_fc_set(void *data, u64 val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 reg; if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) @@ -1783,7 +1797,7 @@ static int i915_ips_status(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!HAS_IPS(dev)) { seq_puts(m, "not supported\n"); @@ -1813,7 +1827,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); bool sr_enabled = false; intel_runtime_pm_get(dev_priv); @@ -1842,7 +1856,7 @@ static int i915_emon_status(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long temp, chipset, gfx; int ret; @@ -1870,7 +1884,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; int gpu_freq, ia_freq; unsigned int max_gpu_freq, min_gpu_freq; @@ -1925,7 +1939,7 @@ static int i915_opregion(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_opregion *opregion = &dev_priv->opregion; int ret; @@ -1946,7 +1960,7 @@ static int i915_vbt(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_opregion *opregion = &dev_priv->opregion; if (opregion->vbt) @@ -1968,19 +1982,19 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) return ret; #ifdef CONFIG_DRM_FBDEV_EMULATION - if (to_i915(dev)->fbdev) { - fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb); - - seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", - fbdev_fb->base.width, - fbdev_fb->base.height, - fbdev_fb->base.depth, - fbdev_fb->base.bits_per_pixel, - fbdev_fb->base.modifier[0], - drm_framebuffer_read_refcount(&fbdev_fb->base)); - describe_obj(m, fbdev_fb->obj); - seq_putc(m, '\n'); - } + if (to_i915(dev)->fbdev) { + fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb); + + seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", + fbdev_fb->base.width, + fbdev_fb->base.height, + 
fbdev_fb->base.depth, + fbdev_fb->base.bits_per_pixel, + fbdev_fb->base.modifier[0], + drm_framebuffer_read_refcount(&fbdev_fb->base)); + describe_obj(m, fbdev_fb->obj); + seq_putc(m, '\n'); + } #endif mutex_lock(&dev->mode_config.fb_lock); @@ -2017,7 +2031,7 @@ static int i915_context_status(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; struct i915_gem_context *ctx; int ret; @@ -2114,7 +2128,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; struct i915_gem_context *ctx; int ret; @@ -2141,7 +2155,7 @@ static int i915_execlists(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; u32 status_pointer; u8 read_pointer; @@ -2244,7 +2258,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -2317,7 +2331,7 @@ static int per_file_ctx(int id, void *ptr, void *data) static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; int i; @@ -2338,7 +2352,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; if (IS_GEN6(dev_priv)) @@ -2372,7 +2386,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_file *file; int ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -2415,7 +2429,7 @@ static int count_irq_waiters(struct drm_i915_private *i915) int count = 0; for_each_engine(engine, i915) - count += engine->irq_refcount; + count += intel_engine_has_waiter(engine); return count; } @@ -2424,11 +2438,12 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_file *file; seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled); - seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy); + seq_printf(m, "GPU busy? %s [%x]\n", + yesno(dev_priv->gt.awake), dev_priv->gt.active_engines); seq_printf(m, "CPU waiting? 
%d\n", count_irq_waiters(dev_priv)); seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n", intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), @@ -2469,7 +2484,7 @@ static int i915_llc(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const bool edram = INTEL_GEN(dev_priv) > 8; seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); @@ -2482,7 +2497,7 @@ static int i915_llc(struct seq_file *m, void *data) static int i915_guc_load_status_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; - struct drm_i915_private *dev_priv = node->minor->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(node->minor->dev); struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; u32 tmp, i; @@ -2544,9 +2559,9 @@ static void i915_guc_client_info(struct seq_file *m, for_each_engine(engine, dev_priv) { seq_printf(m, "\tSubmissions: %llu %s\n", - client->submissions[engine->guc_id], + client->submissions[engine->id], engine->name); - tot += client->submissions[engine->guc_id]; + tot += client->submissions[engine->id]; } seq_printf(m, "\tTotal: %llu\n", tot); } @@ -2555,7 +2570,7 @@ static int i915_guc_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc guc; struct i915_guc_client client = {}; struct intel_engine_cs *engine; @@ -2587,9 +2602,9 @@ static int i915_guc_info(struct seq_file *m, void *data) seq_printf(m, "\nGuC submissions:\n"); for_each_engine(engine, dev_priv) { seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n", - engine->name, guc.submissions[engine->guc_id], - guc.last_seqno[engine->guc_id]); - total += guc.submissions[engine->guc_id]; + engine->name, guc.submissions[engine->id], + guc.last_seqno[engine->id]); + total += guc.submissions[engine->id]; } seq_printf(m, "\t%s: %llu\n", "Total", total); @@ -2605,7 +2620,7 @@ static int i915_guc_log_dump(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj; u32 *log; int i = 0, pg; @@ -2633,7 +2648,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 psrperf = 0; u32 stat[3]; enum pipe pipe; @@ -2701,7 +2716,6 @@ static int i915_sink_crc(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct intel_encoder *encoder; struct intel_connector *connector; struct intel_dp *intel_dp = NULL; int ret; @@ -2709,18 +2723,19 @@ static int i915_sink_crc(struct seq_file *m, void *data) drm_modeset_lock_all(dev); for_each_intel_connector(dev, connector) { + struct drm_crtc *crtc; - if (connector->base.dpms != DRM_MODE_DPMS_ON) + if (!connector->base.state->best_encoder) continue; - if (!connector->base.encoder) + crtc = connector->base.state->crtc; + if (!crtc->state->active) continue; - encoder = 
to_intel_encoder(connector->base.encoder); - if (encoder->type != INTEL_OUTPUT_EDP) + if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) continue; - intel_dp = enc_to_intel_dp(&encoder->base); + intel_dp = enc_to_intel_dp(connector->base.state->best_encoder); ret = intel_dp_sink_crc(intel_dp, crc); if (ret) @@ -2741,7 +2756,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u64 power; u32 units; @@ -2767,12 +2782,12 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!HAS_RUNTIME_PM(dev_priv)) seq_puts(m, "Runtime power management not supported\n"); - seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); + seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake)); seq_printf(m, "IRQs disabled: %s\n", yesno(!intel_irqs_enabled(dev_priv))); #ifdef CONFIG_PM @@ -2782,8 +2797,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); #endif seq_printf(m, "PCI device power state: %s [%d]\n", - pci_power_name(dev_priv->dev->pdev->current_state), - dev_priv->dev->pdev->current_state); + pci_power_name(dev_priv->drm.pdev->current_state), + dev_priv->drm.pdev->current_state); return 0; } @@ -2792,7 +2807,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_power_domains *power_domains = &dev_priv->power_domains; int i; @@ -2827,7 +2842,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_csr *csr; if (!HAS_CSR(dev)) { @@ -2950,7 +2965,7 @@ static void intel_dp_info(struct seq_file *m, seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); - if (intel_encoder->type == INTEL_OUTPUT_EDP) + if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) intel_panel_info(m, &intel_connector->panel); } @@ -2989,14 +3004,26 @@ static void intel_connector_info(struct seq_file *m, seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev); } - if (intel_encoder) { - if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || - intel_encoder->type == INTEL_OUTPUT_EDP) - intel_dp_info(m, intel_connector); - else if (intel_encoder->type == INTEL_OUTPUT_HDMI) - intel_hdmi_info(m, intel_connector); - else if (intel_encoder->type == INTEL_OUTPUT_LVDS) + + if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) + return; + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_DisplayPort: + case DRM_MODE_CONNECTOR_eDP: + intel_dp_info(m, intel_connector); + break; + case DRM_MODE_CONNECTOR_LVDS: + if (intel_encoder->type == INTEL_OUTPUT_LVDS) intel_lvds_info(m, intel_connector); + break; + case DRM_MODE_CONNECTOR_HDMIA: + if (intel_encoder->type == INTEL_OUTPUT_HDMI 
|| + intel_encoder->type == INTEL_OUTPUT_UNKNOWN) + intel_hdmi_info(m, intel_connector); + break; + default: + break; } seq_printf(m, "\tmodes:\n"); @@ -3006,7 +3033,7 @@ static void intel_connector_info(struct seq_file *m, static bool cursor_active(struct drm_device *dev, int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 state; if (IS_845G(dev) || IS_I865G(dev)) @@ -3019,7 +3046,7 @@ static bool cursor_active(struct drm_device *dev, int pipe) static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 pos; pos = I915_READ(CURPOS(pipe)); @@ -3140,7 +3167,7 @@ static int i915_display_info(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc; struct drm_connector *connector; @@ -3195,7 +3222,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; int num_rings = hweight32(INTEL_INFO(dev)->ring_mask); enum intel_engine_id id; @@ -3268,7 +3295,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; drm_modeset_lock_all(dev); @@ -3298,7 +3325,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused) struct intel_engine_cs *engine; struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_workarounds *workarounds = &dev_priv->workarounds; enum intel_engine_id id; @@ -3336,7 +3363,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct skl_ddb_allocation *ddb; struct skl_ddb_entry *entry; enum pipe pipe; @@ -3374,31 +3401,16 @@ static int i915_ddb_info(struct seq_file *m, void *unused) static void drrs_status_per_crtc(struct seq_file *m, struct drm_device *dev, struct intel_crtc *intel_crtc) { - struct intel_encoder *intel_encoder; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_drrs *drrs = &dev_priv->drrs; int vrefresh = 0; + struct drm_connector *connector; - for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) { - /* Encoder connected on this CRTC */ - switch (intel_encoder->type) { - case INTEL_OUTPUT_EDP: - seq_puts(m, "eDP:\n"); - break; - case INTEL_OUTPUT_DSI: - seq_puts(m, "DSI:\n"); - break; - case INTEL_OUTPUT_HDMI: - seq_puts(m, "HDMI:\n"); - break; - case INTEL_OUTPUT_DISPLAYPORT: - seq_puts(m, "DP:\n"); - break; - default: - seq_printf(m, "Other encoder (id=%d).\n", - intel_encoder->type); - return; - } + 
drm_for_each_connector(connector, dev) { + if (connector->state->crtc != &intel_crtc->base) + continue; + + seq_printf(m, "%s:\n", connector->name); } if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT) @@ -3461,18 +3473,16 @@ static int i915_drrs_status(struct seq_file *m, void *unused) struct intel_crtc *intel_crtc; int active_crtc_cnt = 0; + drm_modeset_lock_all(dev); for_each_intel_crtc(dev, intel_crtc) { - drm_modeset_lock(&intel_crtc->base.mutex, NULL); - if (intel_crtc->base.state->active) { active_crtc_cnt++; seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); drrs_status_per_crtc(m, dev, intel_crtc); } - - drm_modeset_unlock(&intel_crtc->base.mutex); } + drm_modeset_unlock_all(dev); if (!active_crtc_cnt) seq_puts(m, "No active crtc found\n"); @@ -3490,17 +3500,23 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct drm_encoder *encoder; struct intel_encoder *intel_encoder; struct intel_digital_port *intel_dig_port; + struct drm_connector *connector; + drm_modeset_lock_all(dev); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - intel_encoder = to_intel_encoder(encoder); - if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT) + drm_for_each_connector(connector, dev) { + if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) + continue; + + intel_encoder = intel_attached_encoder(connector); + if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) continue; - intel_dig_port = enc_to_dig_port(encoder); + + intel_dig_port = enc_to_dig_port(&intel_encoder->base); if (!intel_dig_port->dp.can_mst) continue; + seq_printf(m, "MST Source Port %c\n", port_name(intel_dig_port->port)); drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); @@ -3512,7 +3528,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused) static int i915_pipe_crc_open(struct inode *inode, struct file *filep) { struct pipe_crc_info *info = inode->i_private; - struct drm_i915_private *dev_priv = info->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(info->dev); struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; if (info->pipe >= INTEL_INFO(info->dev)->num_pipes) @@ -3536,7 +3552,7 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep) static int i915_pipe_crc_release(struct inode *inode, struct file *filep) { struct pipe_crc_info *info = inode->i_private; - struct drm_i915_private *dev_priv = info->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(info->dev); struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; spin_lock_irq(&pipe_crc->lock); @@ -3564,7 +3580,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, { struct pipe_crc_info *info = filep->private_data; struct drm_device *dev = info->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; char buf[PIPE_CRC_BUFFER_LEN]; int n_entries; @@ -3697,7 +3713,7 @@ static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) static int display_crc_ctl_show(struct seq_file *m, void *data) { struct drm_device *dev = m->private; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; for (i = 0; i < I915_MAX_PIPES; i++) @@ -3758,7 +3774,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, 
enum pipe pipe, case INTEL_OUTPUT_TVOUT: *source = INTEL_PIPE_CRC_SOURCE_TV; break; - case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_DP: case INTEL_OUTPUT_EDP: dig_port = enc_to_dig_port(&encoder->base); switch (dig_port->port) { @@ -3791,7 +3807,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev, enum intel_pipe_crc_source *source, uint32_t *val) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); bool need_stable_symbols = false; if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { @@ -3862,7 +3878,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev, enum intel_pipe_crc_source *source, uint32_t *val) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); bool need_stable_symbols = false; if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { @@ -3936,7 +3952,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev, static void vlv_undo_pipe_scramble_reset(struct drm_device *dev, enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t tmp = I915_READ(PORT_DFT2_G4X); switch (pipe) { @@ -3961,7 +3977,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_device *dev, static void g4x_undo_pipe_scramble_reset(struct drm_device *dev, enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t tmp = I915_READ(PORT_DFT2_G4X); if (pipe == PIPE_A) @@ -4004,7 +4020,7 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); struct intel_crtc_state *pipe_config; @@ -4072,7 +4088,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev, static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, enum intel_pipe_crc_source source) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); @@ -4579,7 +4595,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) static int pri_wm_latency_show(struct seq_file *m, void *data) { struct drm_device *dev = m->private; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const uint16_t *latencies; if (INTEL_INFO(dev)->gen >= 9) @@ -4595,7 +4611,7 @@ static int pri_wm_latency_show(struct seq_file *m, void *data) static int spr_wm_latency_show(struct seq_file *m, void *data) { struct drm_device *dev = m->private; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const uint16_t *latencies; if (INTEL_INFO(dev)->gen >= 9) @@ -4611,7 +4627,7 @@ static int spr_wm_latency_show(struct seq_file *m, void *data) static int cur_wm_latency_show(struct seq_file *m, void *data) { struct drm_device *dev = m->private; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const uint16_t *latencies; if (INTEL_INFO(dev)->gen >= 9) @@ -4702,7 +4718,7 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, { struct seq_file *m 
= file->private_data; struct drm_device *dev = m->private; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint16_t *latencies; if (INTEL_INFO(dev)->gen >= 9) @@ -4718,7 +4734,7 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, { struct seq_file *m = file->private_data; struct drm_device *dev = m->private; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint16_t *latencies; if (INTEL_INFO(dev)->gen >= 9) @@ -4734,7 +4750,7 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, { struct seq_file *m = file->private_data; struct drm_device *dev = m->private; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint16_t *latencies; if (INTEL_INFO(dev)->gen >= 9) @@ -4776,7 +4792,7 @@ static int i915_wedged_get(void *data, u64 *val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); *val = i915_terminally_wedged(&dev_priv->gpu_error); @@ -4787,7 +4803,7 @@ static int i915_wedged_set(void *data, u64 val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* * There is no safeguard against this debugfs entry colliding @@ -4815,44 +4831,10 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, "%llu\n"); static int -i915_ring_stop_get(void *data, u64 *val) -{ - struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; - - *val = dev_priv->gpu_error.stop_rings; - - return 0; -} - -static int -i915_ring_stop_set(void *data, u64 val) -{ - struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val); - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - dev_priv->gpu_error.stop_rings = val; - mutex_unlock(&dev->struct_mutex); - - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops, - i915_ring_stop_get, i915_ring_stop_set, - "0x%08llx\n"); - -static int i915_ring_missed_irq_get(void *data, u64 *val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); *val = dev_priv->gpu_error.missed_irq_rings; return 0; @@ -4862,7 +4844,7 @@ static int i915_ring_missed_irq_set(void *data, u64 val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; /* Lock against concurrent debugfs callers */ @@ -4883,7 +4865,7 @@ static int i915_ring_test_irq_get(void *data, u64 *val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); *val = dev_priv->gpu_error.test_irq_rings; @@ -4894,18 +4876,11 @@ static int i915_ring_test_irq_set(void *data, u64 val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; + struct drm_i915_private *dev_priv = to_i915(dev); + val &= INTEL_INFO(dev_priv)->ring_mask; DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); - - /* Lock against concurrent debugfs callers */ - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - dev_priv->gpu_error.test_irq_rings = val; - 
mutex_unlock(&dev->struct_mutex); return 0; } @@ -4934,7 +4909,7 @@ static int i915_drop_caches_set(void *data, u64 val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; DRM_DEBUG("Dropping caches: 0x%08llx\n", val); @@ -4946,7 +4921,7 @@ i915_drop_caches_set(void *data, u64 val) return ret; if (val & DROP_ACTIVE) { - ret = i915_gpu_idle(dev); + ret = i915_gem_wait_for_idle(dev_priv); if (ret) goto unlock; } @@ -4974,7 +4949,7 @@ static int i915_max_freq_get(void *data, u64 *val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; if (INTEL_INFO(dev)->gen < 6) @@ -4996,7 +4971,7 @@ static int i915_max_freq_set(void *data, u64 val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 hw_max, hw_min; int ret; @@ -5041,7 +5016,7 @@ static int i915_min_freq_get(void *data, u64 *val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; if (INTEL_INFO(dev)->gen < 6) @@ -5063,7 +5038,7 @@ static int i915_min_freq_set(void *data, u64 val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 hw_max, hw_min; int ret; @@ -5108,7 +5083,7 @@ static int i915_cache_sharing_get(void *data, u64 *val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 snpcr; int ret; @@ -5123,7 +5098,7 @@ i915_cache_sharing_get(void *data, u64 *val) snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); intel_runtime_pm_put(dev_priv); - mutex_unlock(&dev_priv->dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; @@ -5134,7 +5109,7 @@ static int i915_cache_sharing_set(void *data, u64 val) { struct drm_device *dev = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 snpcr; if (!(IS_GEN6(dev) || IS_GEN7(dev))) @@ -5171,7 +5146,7 @@ struct sseu_dev_status { static void cherryview_sseu_device_status(struct drm_device *dev, struct sseu_dev_status *stat) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ss_max = 2; int ss; u32 sig1[ss_max], sig2[ss_max]; @@ -5203,7 +5178,7 @@ static void cherryview_sseu_device_status(struct drm_device *dev, static void gen9_sseu_device_status(struct drm_device *dev, struct sseu_dev_status *stat) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int s_max = 3, ss_max = 4; int s, ss; u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; @@ -5268,7 +5243,7 @@ static void gen9_sseu_device_status(struct drm_device *dev, static void broadwell_sseu_device_status(struct drm_device *dev, struct sseu_dev_status *stat) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int s; u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); @@ -5347,7 +5322,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused) static int i915_forcewake_open(struct inode *inode, struct file *file) { struct drm_device *dev = inode->i_private; - struct drm_i915_private 
*dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (INTEL_INFO(dev)->gen < 6) return 0; @@ -5361,7 +5336,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) static int i915_forcewake_release(struct inode *inode, struct file *file) { struct drm_device *dev = inode->i_private; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (INTEL_INFO(dev)->gen < 6) return 0; @@ -5477,7 +5452,6 @@ static const struct i915_debugfs_files { {"i915_max_freq", &i915_max_freq_fops}, {"i915_min_freq", &i915_min_freq_fops}, {"i915_cache_sharing", &i915_cache_sharing_fops}, - {"i915_ring_stop", &i915_ring_stop_fops}, {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, {"i915_ring_test_irq", &i915_ring_test_irq_fops}, {"i915_gem_drop_caches", &i915_drop_caches_fops}, @@ -5495,7 +5469,7 @@ static const struct i915_debugfs_files { void intel_display_crc_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; for_each_pipe(dev_priv, pipe) { @@ -5507,8 +5481,9 @@ void intel_display_crc_init(struct drm_device *dev) } } -int i915_debugfs_init(struct drm_minor *minor) +int i915_debugfs_register(struct drm_i915_private *dev_priv) { + struct drm_minor *minor = dev_priv->drm.primary; int ret, i; ret = i915_forcewake_create(minor->debugfs_root, minor); @@ -5534,8 +5509,9 @@ int i915_debugfs_init(struct drm_minor *minor) minor->debugfs_root, minor); } -void i915_debugfs_cleanup(struct drm_minor *minor) +void i915_debugfs_unregister(struct drm_i915_private *dev_priv) { + struct drm_minor *minor = dev_priv->drm.primary; int i; drm_debugfs_remove_files(i915_debugfs_list, diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c deleted file mode 100644 index d15a461fa84a..000000000000 --- a/drivers/gpu/drm/i915/i915_dma.c +++ /dev/null @@ -1,1692 +0,0 @@ -/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- - */ -/* - * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. - * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <drm/drmP.h> -#include <drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_legacy.h> -#include "intel_drv.h" -#include <drm/i915_drm.h> -#include "i915_drv.h" -#include "i915_vgpu.h" -#include "i915_trace.h" -#include <linux/pci.h> -#include <linux/console.h> -#include <linux/vt.h> -#include <linux/vgaarb.h> -#include <linux/acpi.h> -#include <linux/pnp.h> -#include <linux/vga_switcheroo.h> -#include <linux/slab.h> -#include <acpi/video.h> -#include <linux/pm.h> -#include <linux/pm_runtime.h> -#include <linux/oom.h> - -static unsigned int i915_load_fail_count; - -bool __i915_inject_load_failure(const char *func, int line) -{ - if (i915_load_fail_count >= i915.inject_load_failure) - return false; - - if (++i915_load_fail_count == i915.inject_load_failure) { - DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n", - i915.inject_load_failure, func, line); - return true; - } - - return false; -} - -#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI" -#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \ - "providing the dmesg log by booting with drm.debug=0xf" - -void -__i915_printk(struct drm_i915_private *dev_priv, const char *level, - const char *fmt, ...) -{ - static bool shown_bug_once; - struct device *dev = dev_priv->dev->dev; - bool is_error = level[1] <= KERN_ERR[1]; - bool is_debug = level[1] == KERN_DEBUG[1]; - struct va_format vaf; - va_list args; - - if (is_debug && !(drm_debug & DRM_UT_DRIVER)) - return; - - va_start(args, fmt); - - vaf.fmt = fmt; - vaf.va = &args; - - dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV", - __builtin_return_address(0), &vaf); - - if (is_error && !shown_bug_once) { - dev_notice(dev, "%s", FDO_BUG_MSG); - shown_bug_once = true; - } - - va_end(args); -} - -static bool i915_error_injected(struct drm_i915_private *dev_priv) -{ - return i915.inject_load_failure && - i915_load_fail_count == i915.inject_load_failure; -} - -#define i915_load_error(dev_priv, fmt, ...) \ - __i915_printk(dev_priv, \ - i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \ - fmt, ##__VA_ARGS__) - -static int i915_getparam(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - drm_i915_getparam_t *param = data; - int value; - - switch (param->param) { - case I915_PARAM_IRQ_ACTIVE: - case I915_PARAM_ALLOW_BATCHBUFFER: - case I915_PARAM_LAST_DISPATCH: - /* Reject all old ums/dri params. */ - return -ENODEV; - case I915_PARAM_CHIPSET_ID: - value = dev->pdev->device; - break; - case I915_PARAM_REVISION: - value = dev->pdev->revision; - break; - case I915_PARAM_HAS_GEM: - value = 1; - break; - case I915_PARAM_NUM_FENCES_AVAIL: - value = dev_priv->num_fence_regs; - break; - case I915_PARAM_HAS_OVERLAY: - value = dev_priv->overlay ? 
1 : 0; - break; - case I915_PARAM_HAS_PAGEFLIPPING: - value = 1; - break; - case I915_PARAM_HAS_EXECBUF2: - /* depends on GEM */ - value = 1; - break; - case I915_PARAM_HAS_BSD: - value = intel_engine_initialized(&dev_priv->engine[VCS]); - break; - case I915_PARAM_HAS_BLT: - value = intel_engine_initialized(&dev_priv->engine[BCS]); - break; - case I915_PARAM_HAS_VEBOX: - value = intel_engine_initialized(&dev_priv->engine[VECS]); - break; - case I915_PARAM_HAS_BSD2: - value = intel_engine_initialized(&dev_priv->engine[VCS2]); - break; - case I915_PARAM_HAS_RELAXED_FENCING: - value = 1; - break; - case I915_PARAM_HAS_COHERENT_RINGS: - value = 1; - break; - case I915_PARAM_HAS_EXEC_CONSTANTS: - value = INTEL_INFO(dev)->gen >= 4; - break; - case I915_PARAM_HAS_RELAXED_DELTA: - value = 1; - break; - case I915_PARAM_HAS_GEN7_SOL_RESET: - value = 1; - break; - case I915_PARAM_HAS_LLC: - value = HAS_LLC(dev); - break; - case I915_PARAM_HAS_WT: - value = HAS_WT(dev); - break; - case I915_PARAM_HAS_ALIASING_PPGTT: - value = USES_PPGTT(dev); - break; - case I915_PARAM_HAS_WAIT_TIMEOUT: - value = 1; - break; - case I915_PARAM_HAS_SEMAPHORES: - value = i915_semaphore_is_enabled(dev_priv); - break; - case I915_PARAM_HAS_PRIME_VMAP_FLUSH: - value = 1; - break; - case I915_PARAM_HAS_SECURE_BATCHES: - value = capable(CAP_SYS_ADMIN); - break; - case I915_PARAM_HAS_PINNED_BATCHES: - value = 1; - break; - case I915_PARAM_HAS_EXEC_NO_RELOC: - value = 1; - break; - case I915_PARAM_HAS_EXEC_HANDLE_LUT: - value = 1; - break; - case I915_PARAM_CMD_PARSER_VERSION: - value = i915_cmd_parser_get_version(dev_priv); - break; - case I915_PARAM_HAS_COHERENT_PHYS_GTT: - value = 1; - break; - case I915_PARAM_MMAP_VERSION: - value = 1; - break; - case I915_PARAM_SUBSLICE_TOTAL: - value = INTEL_INFO(dev)->subslice_total; - if (!value) - return -ENODEV; - break; - case I915_PARAM_EU_TOTAL: - value = INTEL_INFO(dev)->eu_total; - if (!value) - return -ENODEV; - break; - case I915_PARAM_HAS_GPU_RESET: - value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv); - break; - case I915_PARAM_HAS_RESOURCE_STREAMER: - value = HAS_RESOURCE_STREAMER(dev); - break; - case I915_PARAM_HAS_EXEC_SOFTPIN: - value = 1; - break; - default: - DRM_DEBUG("Unknown parameter %d\n", param->param); - return -EINVAL; - } - - if (copy_to_user(param->value, &value, sizeof(int))) { - DRM_ERROR("copy_to_user failed\n"); - return -EFAULT; - } - - return 0; -} - -static int i915_get_bridge_dev(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); - if (!dev_priv->bridge_dev) { - DRM_ERROR("bridge device not found\n"); - return -1; - } - return 0; -} - -/* Allocate space for the MCH regs if needed, return nonzero on error */ -static int -intel_alloc_mchbar_resource(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int reg = INTEL_INFO(dev)->gen >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; - u32 temp_lo, temp_hi = 0; - u64 mchbar_addr; - int ret; - - if (INTEL_INFO(dev)->gen >= 4) - pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); - pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); - mchbar_addr = ((u64)temp_hi << 32) | temp_lo; - - /* If ACPI doesn't have it, assume we need to allocate it ourselves */ -#ifdef CONFIG_PNP - if (mchbar_addr && - pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) - return 0; -#endif - - /* Get some space for it */ - dev_priv->mch_res.name = "i915 MCHBAR"; - dev_priv->mch_res.flags = IORESOURCE_MEM; - ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, - &dev_priv->mch_res, - MCHBAR_SIZE, MCHBAR_SIZE, - PCIBIOS_MIN_MEM, - 0, pcibios_align_resource, - dev_priv->bridge_dev); - if (ret) { - DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); - dev_priv->mch_res.start = 0; - return ret; - } - - if (INTEL_INFO(dev)->gen >= 4) - pci_write_config_dword(dev_priv->bridge_dev, reg + 4, - upper_32_bits(dev_priv->mch_res.start)); - - pci_write_config_dword(dev_priv->bridge_dev, reg, - lower_32_bits(dev_priv->mch_res.start)); - return 0; -} - -/* Setup MCHBAR if possible, return true if we should disable it again */ -static void -intel_setup_mchbar(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; - u32 temp; - bool enabled; - - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) - return; - - dev_priv->mchbar_need_disable = false; - - if (IS_I915G(dev) || IS_I915GM(dev)) { - pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp); - enabled = !!(temp & DEVEN_MCHBAR_EN); - } else { - pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); - enabled = temp & 1; - } - - /* If it's already enabled, don't have to do anything */ - if (enabled) - return; - - if (intel_alloc_mchbar_resource(dev)) - return; - - dev_priv->mchbar_need_disable = true; - - /* Space is allocated or reserved, so enable it. */ - if (IS_I915G(dev) || IS_I915GM(dev)) { - pci_write_config_dword(dev_priv->bridge_dev, DEVEN, - temp | DEVEN_MCHBAR_EN); - } else { - pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); - pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); - } -} - -static void -intel_teardown_mchbar(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; - - if (dev_priv->mchbar_need_disable) { - if (IS_I915G(dev) || IS_I915GM(dev)) { - u32 deven_val; - - pci_read_config_dword(dev_priv->bridge_dev, DEVEN, - &deven_val); - deven_val &= ~DEVEN_MCHBAR_EN; - pci_write_config_dword(dev_priv->bridge_dev, DEVEN, - deven_val); - } else { - u32 mchbar_val; - - pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, - &mchbar_val); - mchbar_val &= ~1; - pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, - mchbar_val); - } - } - - if (dev_priv->mch_res.start) - release_resource(&dev_priv->mch_res); -} - -/* true = enable decode, false = disable decoder */ -static unsigned int i915_vga_set_decode(void *cookie, bool state) -{ - struct drm_device *dev = cookie; - - intel_modeset_vga_set_state(dev, state); - if (state) - return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; - else - return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; -} - -static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) -{ - struct drm_device *dev = pci_get_drvdata(pdev); - pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; - - if (state == VGA_SWITCHEROO_ON) { - pr_info("switched on\n"); - dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - /* i915 resume handler doesn't set to D0 */ - pci_set_power_state(dev->pdev, PCI_D0); - i915_resume_switcheroo(dev); - dev->switch_power_state = DRM_SWITCH_POWER_ON; - } else { - pr_info("switched off\n"); - dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - i915_suspend_switcheroo(dev, pmm); - dev->switch_power_state = DRM_SWITCH_POWER_OFF; - } -} - -static bool i915_switcheroo_can_switch(struct pci_dev *pdev) -{ - struct drm_device *dev = pci_get_drvdata(pdev); - - /* - * FIXME: open_count is protected by drm_global_mutex but that would lead to - * locking inversion with the driver load path. And the access here is - * completely racy anyway. So don't bother with locking for now. - */ - return dev->open_count == 0; -} - -static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { - .set_gpu_state = i915_switcheroo_set_state, - .reprobe = NULL, - .can_switch = i915_switcheroo_can_switch, -}; - -static void i915_gem_fini(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - - /* - * Neither the BIOS, ourselves or any other kernel - * expects the system to be in execlists mode on startup, - * so we need to reset the GPU back to legacy mode. And the only - * known way to disable logical contexts is through a GPU reset. - * - * So in order to leave the system in a known default configuration, - * always reset the GPU upon unload. Afterwards we then clean up the - * GEM state tracking, flushing off the requests and leaving the - * system in a known idle state. - * - * Note that is of the upmost importance that the GPU is idle and - * all stray writes are flushed *before* we dismantle the backing - * storage for the pinned objects. - * - * However, since we are uncertain that reseting the GPU on older - * machines is a good idea, we don't - just in case it leaves the - * machine in an unusable condition. 
- */ - if (HAS_HW_CONTEXTS(dev)) { - int reset = intel_gpu_reset(dev_priv, ALL_ENGINES); - WARN_ON(reset && reset != -ENODEV); - } - - mutex_lock(&dev->struct_mutex); - i915_gem_reset(dev); - i915_gem_cleanup_engines(dev); - i915_gem_context_fini(dev); - mutex_unlock(&dev->struct_mutex); - - WARN_ON(!list_empty(&to_i915(dev)->context_list)); -} - -static int i915_load_modeset_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - if (i915_inject_load_failure()) - return -ENODEV; - - ret = intel_bios_init(dev_priv); - if (ret) - DRM_INFO("failed to find VBIOS tables\n"); - - /* If we have more than one VGA card, then we need to arbitrate access - * to the common VGA resources. - * - * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), - * then we do not take part in VGA arbitration and the - * vga_client_register() fails with -ENODEV. - */ - ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); - if (ret && ret != -ENODEV) - goto out; - - intel_register_dsm_handler(); - - ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false); - if (ret) - goto cleanup_vga_client; - - /* must happen before intel_power_domains_init_hw() on VLV/CHV */ - intel_update_rawclk(dev_priv); - - intel_power_domains_init_hw(dev_priv, false); - - intel_csr_ucode_init(dev_priv); - - ret = intel_irq_install(dev_priv); - if (ret) - goto cleanup_csr; - - intel_setup_gmbus(dev); - - /* Important: The output setup functions called by modeset_init need - * working irqs for e.g. gmbus and dp aux transfers. */ - intel_modeset_init(dev); - - intel_guc_init(dev); - - ret = i915_gem_init(dev); - if (ret) - goto cleanup_irq; - - intel_modeset_gem_init(dev); - - if (INTEL_INFO(dev)->num_pipes == 0) - return 0; - - ret = intel_fbdev_init(dev); - if (ret) - goto cleanup_gem; - - /* Only enable hotplug handling once the fbdev is fully set up. */ - intel_hpd_init(dev_priv); - - /* - * Some ports require correctly set-up hpd registers for detection to - * work properly (leading to ghost connected connector status), e.g. VGA - * on gm45. Hence we can only set up the initial fbdev config after hpd - * irqs are fully enabled. Now we should scan for the initial config - * only once hotplug handling is enabled, but due to screwed-up locking - * around kms/fbdev init we can't protect the fbdev initial config - * scanning against hotplug events. Hence do this first and ignore the - * tiny window where we will lose hotplug notifications.
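The cleanup labels that follow (cleanup_gem, cleanup_irq, cleanup_csr, cleanup_vga_client) unwind i915_load_modeset_init() in reverse order of the setup calls above, with each failure jumping to the label that tears down only what was already brought up. A self-contained sketch of the pattern, with hypothetical init/fini steps:

#include <stdio.h>

static int init_a(void) { return 0; }          /* e.g. vga client */
static int init_b(void) { return 0; }          /* e.g. irqs */
static int init_c(void) { return -1; }         /* e.g. gem; fails here */
static void fini_b(void) { puts("undo b"); }
static void fini_a(void) { puts("undo a"); }

static int load(void)
{
        int ret;

        ret = init_a();
        if (ret)
                goto out;
        ret = init_b();
        if (ret)
                goto cleanup_a;
        ret = init_c();
        if (ret)
                goto cleanup_b;
        return 0;

cleanup_b:              /* unwind in exact reverse order of setup */
        fini_b();
cleanup_a:
        fini_a();
out:
        return ret;
}

int main(void)
{
        return load() ? 1 : 0;  /* prints "undo b" then "undo a" */
}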
- */ - intel_fbdev_initial_config_async(dev); - - drm_kms_helper_poll_init(dev); - - return 0; - -cleanup_gem: - i915_gem_fini(dev); -cleanup_irq: - intel_guc_fini(dev); - drm_irq_uninstall(dev); - intel_teardown_gmbus(dev); -cleanup_csr: - intel_csr_ucode_fini(dev_priv); - intel_power_domains_fini(dev_priv); - vga_switcheroo_unregister_client(dev->pdev); -cleanup_vga_client: - vga_client_register(dev->pdev, NULL, NULL, NULL); -out: - return ret; -} - -#if IS_ENABLED(CONFIG_FB) -static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) -{ - struct apertures_struct *ap; - struct pci_dev *pdev = dev_priv->dev->pdev; - struct i915_ggtt *ggtt = &dev_priv->ggtt; - bool primary; - int ret; - - ap = alloc_apertures(1); - if (!ap) - return -ENOMEM; - - ap->ranges[0].base = ggtt->mappable_base; - ap->ranges[0].size = ggtt->mappable_end; - - primary = - pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; - - ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary); - - kfree(ap); - - return ret; -} -#else -static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) -{ - return 0; -} -#endif - -#if !defined(CONFIG_VGA_CONSOLE) -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - return 0; -} -#elif !defined(CONFIG_DUMMY_CONSOLE) -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - return -ENODEV; -} -#else -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - int ret = 0; - - DRM_INFO("Replacing VGA console driver\n"); - - console_lock(); - if (con_is_bound(&vga_con)) - ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); - if (ret == 0) { - ret = do_unregister_con_driver(&vga_con); - - /* Ignore "already unregistered". */ - if (ret == -ENODEV) - ret = 0; - } - console_unlock(); - - return ret; -} -#endif - -static void i915_dump_device_info(struct drm_i915_private *dev_priv) -{ - const struct intel_device_info *info = &dev_priv->info; - -#define PRINT_S(name) "%s" -#define SEP_EMPTY -#define PRINT_FLAG(name) info->name ? #name "," : "" -#define SEP_COMMA , - DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags=" - DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY), - info->gen, - dev_priv->dev->pdev->device, - dev_priv->dev->pdev->revision, - DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA)); -#undef PRINT_S -#undef SEP_EMPTY -#undef PRINT_FLAG -#undef SEP_COMMA -} - -static void cherryview_sseu_info_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_device_info *info; - u32 fuse, eu_dis; - - info = (struct intel_device_info *)&dev_priv->info; - fuse = I915_READ(CHV_FUSE_GT); - - info->slice_total = 1; - - if (!(fuse & CHV_FGT_DISABLE_SS0)) { - info->subslice_per_slice++; - eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK | - CHV_FGT_EU_DIS_SS0_R1_MASK); - info->eu_total += 8 - hweight32(eu_dis); - } - - if (!(fuse & CHV_FGT_DISABLE_SS1)) { - info->subslice_per_slice++; - eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK | - CHV_FGT_EU_DIS_SS1_R1_MASK); - info->eu_total += 8 - hweight32(eu_dis); - } - - info->subslice_total = info->subslice_per_slice; - /* - * CHV expected to always have a uniform distribution of EU - * across subslices. - */ - info->eu_per_subslice = info->subslice_total ? - info->eu_total / info->subslice_total : - 0; - /* - * CHV supports subslice power gating on devices with more than - * one subslice, and supports EU power gating on devices with - * more than one EU pair per subslice. 
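The CHV path above and the gen9/BDW paths below all derive slice/subslice/EU totals the same way: popcount the disable fuses and subtract from the maximum. A self-contained illustration with made-up fuse values (hweight8() here plays the role of the kernel's population-count helper of the same name; eu_per_subslice is rounded up because a single EU may be fused off for die recovery):

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int hweight8(uint8_t v)
{
        return (unsigned int)__builtin_popcount(v);
}

int main(void)
{
        /* Hypothetical fuses: 2 subslices enabled, 8 EUs each,
         * with one EU fused off in the second subslice. */
        uint8_t eu_disable[2] = { 0x00, 0x01 };
        unsigned int eu_total = 0, ss;

        for (ss = 0; ss < 2; ss++)
                eu_total += 8 - hweight8(eu_disable[ss]);

        printf("eu_total=%u eu_per_subslice=%u\n",
               eu_total, DIV_ROUND_UP(eu_total, 2));
        return 0;       /* prints eu_total=15 eu_per_subslice=8 */
}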
- */ - info->has_slice_pg = 0; - info->has_subslice_pg = (info->subslice_total > 1); - info->has_eu_pg = (info->eu_per_subslice > 2); -} - -static void gen9_sseu_info_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_device_info *info; - int s_max = 3, ss_max = 4, eu_max = 8; - int s, ss; - u32 fuse2, s_enable, ss_disable, eu_disable; - u8 eu_mask = 0xff; - - info = (struct intel_device_info *)&dev_priv->info; - fuse2 = I915_READ(GEN8_FUSE2); - s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> - GEN8_F2_S_ENA_SHIFT; - ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> - GEN9_F2_SS_DIS_SHIFT; - - info->slice_total = hweight32(s_enable); - /* - * The subslice disable field is global, i.e. it applies - * to each of the enabled slices. - */ - info->subslice_per_slice = ss_max - hweight32(ss_disable); - info->subslice_total = info->slice_total * - info->subslice_per_slice; - - /* - * Iterate through enabled slices and subslices to - * count the total enabled EU. - */ - for (s = 0; s < s_max; s++) { - if (!(s_enable & (0x1 << s))) - /* skip disabled slice */ - continue; - - eu_disable = I915_READ(GEN9_EU_DISABLE(s)); - for (ss = 0; ss < ss_max; ss++) { - int eu_per_ss; - - if (ss_disable & (0x1 << ss)) - /* skip disabled subslice */ - continue; - - eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) & - eu_mask); - - /* - * Record which subslice(s) has(have) 7 EUs. We - * can tune the hash used to spread work among - * subslices if they are unbalanced. - */ - if (eu_per_ss == 7) - info->subslice_7eu[s] |= 1 << ss; - - info->eu_total += eu_per_ss; - } - } - - /* - * SKL is expected to always have a uniform distribution - * of EU across subslices with the exception that any one - * EU in any one subslice may be fused off for die - * recovery. BXT is expected to be perfectly uniform in EU - * distribution. - */ - info->eu_per_subslice = info->subslice_total ? - DIV_ROUND_UP(info->eu_total, - info->subslice_total) : 0; - /* - * SKL supports slice power gating on devices with more than - * one slice, and supports EU power gating on devices with - * more than one EU pair per subslice. BXT supports subslice - * power gating on devices with more than one subslice, and - * supports EU power gating on devices with more than one EU - * pair per subslice. - */ - info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && - (info->slice_total > 1)); - info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1)); - info->has_eu_pg = (info->eu_per_subslice > 2); - - if (IS_BROXTON(dev)) { -#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & (0x1 << ss)) - /* - * There is a HW issue in 2x6 fused down parts that requires - * Pooled EU to be enabled as a WA. The pool configuration - * changes depending upon which subslice is fused down. This - * doesn't apply if the device has all 3 subslices enabled.
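The WaEnablePooledEuFor2x6 hunk that follows picks min_eu_in_pool from whichever subslice was fused down. Distilled into a sketch (the subslice indices mirror the IS_SS_DISABLED() checks below):

#include <stdio.h>

/* Sketch of the pool-size choice made by the hunk below:
 *   subslice 0 or 2 fused off -> min_eu_in_pool = 3
 *   subslice 1 fused off      -> min_eu_in_pool = 6
 *   all three present         -> min_eu_in_pool = 9
 */
static int bxt_min_eu_in_pool(unsigned int ss_disable_mask)
{
        if (ss_disable_mask & ((1u << 0) | (1u << 2)))
                return 3;
        if (ss_disable_mask & (1u << 1))
                return 6;
        return 9;
}

int main(void)
{
        printf("%d %d %d\n", bxt_min_eu_in_pool(0x1),
               bxt_min_eu_in_pool(0x2), bxt_min_eu_in_pool(0x0));
        return 0;       /* prints: 3 6 9 */
}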
- */ - /* WaEnablePooledEuFor2x6:bxt */ - info->has_pooled_eu = ((info->subslice_per_slice == 3) || - (info->subslice_per_slice == 2 && - INTEL_REVID(dev) < BXT_REVID_C0)); - - info->min_eu_in_pool = 0; - if (info->has_pooled_eu) { - if (IS_SS_DISABLED(ss_disable, 0) || - IS_SS_DISABLED(ss_disable, 2)) - info->min_eu_in_pool = 3; - else if (IS_SS_DISABLED(ss_disable, 1)) - info->min_eu_in_pool = 6; - else - info->min_eu_in_pool = 9; - } -#undef IS_SS_DISABLED - } -} - -static void broadwell_sseu_info_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_device_info *info; - const int s_max = 3, ss_max = 3, eu_max = 8; - int s, ss; - u32 fuse2, eu_disable[s_max], s_enable, ss_disable; - - fuse2 = I915_READ(GEN8_FUSE2); - s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; - ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT; - - eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK; - eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) | - ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) << - (32 - GEN8_EU_DIS0_S1_SHIFT)); - eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) | - ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) << - (32 - GEN8_EU_DIS1_S2_SHIFT)); - - - info = (struct intel_device_info *)&dev_priv->info; - info->slice_total = hweight32(s_enable); - - /* - * The subslice disable field is global, i.e. it applies - * to each of the enabled slices. - */ - info->subslice_per_slice = ss_max - hweight32(ss_disable); - info->subslice_total = info->slice_total * info->subslice_per_slice; - - /* - * Iterate through enabled slices and subslices to - * count the total enabled EU. - */ - for (s = 0; s < s_max; s++) { - if (!(s_enable & (0x1 << s))) - /* skip disabled slice */ - continue; - - for (ss = 0; ss < ss_max; ss++) { - u32 n_disabled; - - if (ss_disable & (0x1 << ss)) - /* skip disabled subslice */ - continue; - - n_disabled = hweight8(eu_disable[s] >> (ss * eu_max)); - - /* - * Record which subslices have 7 EUs. - */ - if (eu_max - n_disabled == 7) - info->subslice_7eu[s] |= 1 << ss; - - info->eu_total += eu_max - n_disabled; - } - } - - /* - * BDW is expected to always have a uniform distribution of EU across - * subslices with the exception that any one EU in any one subslice may - * be fused off for die recovery. - */ - info->eu_per_subslice = info->subslice_total ? - DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0; - - /* - * BDW supports slice power gating on devices with more than - * one slice. - */ - info->has_slice_pg = (info->slice_total > 1); - info->has_subslice_pg = 0; - info->has_eu_pg = 0; -} - -/* - * Determine various intel_device_info fields at runtime. - * - * Use it when either: - * - it's judged too laborious to fill n static structures with the limit - * when a simple if statement does the job, - * - run-time checks (eg read fuse/strap registers) are needed. - * - * This function needs to be called: - * - after the MMIO has been setup as we are reading registers, - * - after the PCH has been detected, - * - before the first usage of the fields it can tweak. 
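The comment above describes the split this function implements: per-PCI-ID capabilities live in static tables, while fuse-dependent fields are filled in at runtime once MMIO works. A sketch of that pattern (the struct and field names here are illustrative, not the driver's):

struct device_caps {
        int num_pipes;                  /* static: known per PCI ID */
        unsigned int eu_total;          /* runtime: read back from fuses */
};

/* The static, per-PCI-ID half of the split described above. */
static const struct device_caps example_static_caps = { .num_pipes = 3 };

/* The runtime half: fix up a writable copy once registers are readable. */
static void caps_runtime_init(struct device_caps *caps,
                              unsigned int fused_eu_total,
                              int display_fused_off)
{
        caps->eu_total = fused_eu_total;
        if (display_fused_off)
                caps->num_pipes = 0;
}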
- */ -static void intel_device_info_runtime_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_device_info *info; - enum pipe pipe; - - info = (struct intel_device_info *)&dev_priv->info; - - /* - * Skylake and Broxton currently don't expose the topmost plane as its - * use is exclusive with the legacy cursor and we only want to expose - * one of those, not both. Until we can safely expose the topmost plane - * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported, - * we don't expose the topmost plane at all to prevent ABI breakage - * down the line. - */ - if (IS_BROXTON(dev)) { - info->num_sprites[PIPE_A] = 2; - info->num_sprites[PIPE_B] = 2; - info->num_sprites[PIPE_C] = 1; - } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) - for_each_pipe(dev_priv, pipe) - info->num_sprites[pipe] = 2; - else - for_each_pipe(dev_priv, pipe) - info->num_sprites[pipe] = 1; - - if (i915.disable_display) { - DRM_INFO("Display disabled (module parameter)\n"); - info->num_pipes = 0; - } else if (info->num_pipes > 0 && - (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) && - HAS_PCH_SPLIT(dev)) { - u32 fuse_strap = I915_READ(FUSE_STRAP); - u32 sfuse_strap = I915_READ(SFUSE_STRAP); - - /* - * SFUSE_STRAP is supposed to have a bit signalling the display - * is fused off. Unfortunately it seems that, at least in - * certain cases, fused off display means that PCH display - * reads don't land anywhere. In that case, we read 0s. - * - * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK - * should be set when taking over after the firmware. - */ - if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE || - sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED || - (dev_priv->pch_type == PCH_CPT && - !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { - DRM_INFO("Display fused off, disabling\n"); - info->num_pipes = 0; - } else if (fuse_strap & IVB_PIPE_C_DISABLE) { - DRM_INFO("PipeC fused off\n"); - info->num_pipes -= 1; - } - } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) { - u32 dfsm = I915_READ(SKL_DFSM); - u8 disabled_mask = 0; - bool invalid; - int num_bits; - - if (dfsm & SKL_DFSM_PIPE_A_DISABLE) - disabled_mask |= BIT(PIPE_A); - if (dfsm & SKL_DFSM_PIPE_B_DISABLE) - disabled_mask |= BIT(PIPE_B); - if (dfsm & SKL_DFSM_PIPE_C_DISABLE) - disabled_mask |= BIT(PIPE_C); - - num_bits = hweight8(disabled_mask); - - switch (disabled_mask) { - case BIT(PIPE_A): - case BIT(PIPE_B): - case BIT(PIPE_A) | BIT(PIPE_B): - case BIT(PIPE_A) | BIT(PIPE_C): - invalid = true; - break; - default: - invalid = false; - } - - if (num_bits > info->num_pipes || invalid) - DRM_ERROR("invalid pipe fuse configuration: 0x%x\n", - disabled_mask); - else - info->num_pipes -= num_bits; - } - - /* Initialize slice/subslice/EU info */ - if (IS_CHERRYVIEW(dev)) - cherryview_sseu_info_init(dev); - else if (IS_BROADWELL(dev)) - broadwell_sseu_info_init(dev); - else if (INTEL_INFO(dev)->gen >= 9) - gen9_sseu_info_init(dev); - - info->has_snoop = !info->has_llc; - - /* Snooping is broken on BXT A stepping. */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) - info->has_snoop = false; - - DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total); - DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total); - DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice); - DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total); - DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice); - DRM_DEBUG_DRIVER("Has Pooled EU: %s\n", HAS_POOLED_EU(dev) ? 
"y" : "n"); - if (HAS_POOLED_EU(dev)) - DRM_DEBUG_DRIVER("Min EU in pool: %u\n", info->min_eu_in_pool); - DRM_DEBUG_DRIVER("has slice power gating: %s\n", - info->has_slice_pg ? "y" : "n"); - DRM_DEBUG_DRIVER("has subslice power gating: %s\n", - info->has_subslice_pg ? "y" : "n"); - DRM_DEBUG_DRIVER("has EU power gating: %s\n", - info->has_eu_pg ? "y" : "n"); - - i915.enable_execlists = - intel_sanitize_enable_execlists(dev_priv, - i915.enable_execlists); - - /* - * i915.enable_ppgtt is read-only, so do an early pass to validate the - * user's requested state against the hardware/driver capabilities. We - * do this now so that we can print out any log messages once rather - * than every time we check intel_enable_ppgtt(). - */ - i915.enable_ppgtt = - intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt); - DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); -} - -static void intel_init_dpio(struct drm_i915_private *dev_priv) -{ - /* - * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), - * CHV x1 PHY (DP/HDMI D) - * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) - */ - if (IS_CHERRYVIEW(dev_priv)) { - DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; - DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; - } else if (IS_VALLEYVIEW(dev_priv)) { - DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; - } -} - -static int i915_workqueues_init(struct drm_i915_private *dev_priv) -{ - /* - * The i915 workqueue is primarily used for batched retirement of - * requests (and thus managing bo) once the task has been completed - * by the GPU. i915_gem_retire_requests() is called directly when we - * need high-priority retirement, such as waiting for an explicit - * bo. - * - * It is also used for periodic low-priority events, such as - * idle-timers and recording error state. - * - * All tasks on the workqueue are expected to acquire the dev mutex - * so there is no point in running more than one instance of the - * workqueue at any time. Use an ordered one. - */ - dev_priv->wq = alloc_ordered_workqueue("i915", 0); - if (dev_priv->wq == NULL) - goto out_err; - - dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); - if (dev_priv->hotplug.dp_wq == NULL) - goto out_free_wq; - - dev_priv->gpu_error.hangcheck_wq = - alloc_ordered_workqueue("i915-hangcheck", 0); - if (dev_priv->gpu_error.hangcheck_wq == NULL) - goto out_free_dp_wq; - - return 0; - -out_free_dp_wq: - destroy_workqueue(dev_priv->hotplug.dp_wq); -out_free_wq: - destroy_workqueue(dev_priv->wq); -out_err: - DRM_ERROR("Failed to allocate workqueues.\n"); - - return -ENOMEM; -} - -static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) -{ - destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); - destroy_workqueue(dev_priv->hotplug.dp_wq); - destroy_workqueue(dev_priv->wq); -} - -/** - * i915_driver_init_early - setup state not requiring device access - * @dev_priv: device private - * - * Initialize everything that is a "SW-only" state, that is state not - * requiring accessing the device or exposing the driver via kernel internal - * or userspace interfaces. Example steps belonging here: lock initialization, - * system memory allocation, setting up device specific attributes and - * function hooks not requiring accessing the device. 
- */ -static int i915_driver_init_early(struct drm_i915_private *dev_priv, - struct drm_device *dev, - struct intel_device_info *info) -{ - struct intel_device_info *device_info; - int ret = 0; - - if (i915_inject_load_failure()) - return -ENODEV; - - /* Setup the write-once "constant" device info */ - device_info = (struct intel_device_info *)&dev_priv->info; - memcpy(device_info, info, sizeof(dev_priv->info)); - device_info->device_id = dev->pdev->device; - - BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); - device_info->gen_mask = BIT(device_info->gen - 1); - - spin_lock_init(&dev_priv->irq_lock); - spin_lock_init(&dev_priv->gpu_error.lock); - mutex_init(&dev_priv->backlight_lock); - spin_lock_init(&dev_priv->uncore.lock); - spin_lock_init(&dev_priv->mm.object_stat_lock); - spin_lock_init(&dev_priv->mmio_flip_lock); - mutex_init(&dev_priv->sb_lock); - mutex_init(&dev_priv->modeset_restore_lock); - mutex_init(&dev_priv->av_mutex); - mutex_init(&dev_priv->wm.wm_mutex); - mutex_init(&dev_priv->pps_mutex); - - ret = i915_workqueues_init(dev_priv); - if (ret < 0) - return ret; - - ret = intel_gvt_init(dev_priv); - if (ret < 0) - goto err_workqueues; - - /* This must be called before any calls to HAS_PCH_* */ - intel_detect_pch(dev); - - intel_pm_setup(dev); - intel_init_dpio(dev_priv); - intel_power_domains_init(dev_priv); - intel_irq_init(dev_priv); - intel_init_display_hooks(dev_priv); - intel_init_clock_gating_hooks(dev_priv); - intel_init_audio_hooks(dev_priv); - i915_gem_load_init(dev); - - intel_display_crc_init(dev); - - i915_dump_device_info(dev_priv); - - /* Not all pre-production machines fall into this category, only the - * very first ones. Almost everything should work, except for maybe - * suspend/resume. And we don't implement workarounds that affect only - * pre-production machines. */ - if (IS_HSW_EARLY_SDV(dev)) - DRM_INFO("This is an early pre-production Haswell machine. " - "It may not be fully functional.\n"); - - return 0; - -err_workqueues: - i915_workqueues_cleanup(dev_priv); - return ret; -} - -/** - * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early() - * @dev_priv: device private - */ -static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) -{ - i915_gem_load_cleanup(dev_priv->dev); - i915_workqueues_cleanup(dev_priv); -} - -static int i915_mmio_setup(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - int mmio_bar; - int mmio_size; - - mmio_bar = IS_GEN2(dev) ? 1 : 0; - /* - * Before gen4, the registers and the GTT are behind different BARs. - * However, from gen4 onwards, the registers and the GTT are shared - * in the same BAR, so we want to restrict this ioremap from - * clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately, - * the register BAR remains the same size for all the earlier - * generations up to Ironlake.
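The BAR selection in the hunk that follows reduces to a small table: gen2 keeps its registers behind BAR 1, everything later uses BAR 0, and the mapping is clamped to 512KiB before gen5 so it cannot spill into the GTT half of the shared BAR. A sketch of the selection (the helper is hypothetical; the constants mirror the code below):

/* Hypothetical helper mirroring the selection in the hunk below. */
static void pick_mmio_window(int gen, int *bar, unsigned int *size)
{
        *bar = (gen == 2) ? 1 : 0;              /* gen2 regs sit in BAR 1 */
        *size = (gen < 5) ? 512 * 1024          /* stop short of the GTT */
                          : 2 * 1024 * 1024;
}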
- */ - if (INTEL_INFO(dev)->gen < 5) - mmio_size = 512 * 1024; - else - mmio_size = 2 * 1024 * 1024; - dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); - if (dev_priv->regs == NULL) { - DRM_ERROR("failed to map registers\n"); - - return -EIO; - } - - /* Try to make sure MCHBAR is enabled before poking at it */ - intel_setup_mchbar(dev); - - return 0; -} - -static void i915_mmio_cleanup(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - - intel_teardown_mchbar(dev); - pci_iounmap(dev->pdev, dev_priv->regs); -} - -/** - * i915_driver_init_mmio - setup device MMIO - * @dev_priv: device private - * - * Setup minimal device state necessary for MMIO accesses later in the - * initialization sequence. The setup here should avoid any other device-wide - * side effects or exposing the driver via kernel internal or user space - * interfaces. - */ -static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - int ret; - - if (i915_inject_load_failure()) - return -ENODEV; - - if (i915_get_bridge_dev(dev)) - return -EIO; - - ret = i915_mmio_setup(dev); - if (ret < 0) - goto put_bridge; - - intel_uncore_init(dev_priv); - - return 0; - -put_bridge: - pci_dev_put(dev_priv->bridge_dev); - - return ret; -} - -/** - * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio() - * @dev_priv: device private - */ -static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - - intel_uncore_fini(dev_priv); - i915_mmio_cleanup(dev); - pci_dev_put(dev_priv->bridge_dev); -} - -/** - * i915_driver_init_hw - setup state requiring device access - * @dev_priv: device private - * - * Setup state that requires accessing the device, but doesn't require - * exposing the driver via kernel internal or userspace interfaces. - */ -static int i915_driver_init_hw(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - struct i915_ggtt *ggtt = &dev_priv->ggtt; - uint32_t aperture_size; - int ret; - - if (i915_inject_load_failure()) - return -ENODEV; - - intel_device_info_runtime_init(dev); - - ret = i915_ggtt_init_hw(dev); - if (ret) - return ret; - - ret = i915_ggtt_enable_hw(dev); - if (ret) { - DRM_ERROR("failed to enable GGTT\n"); - goto out_ggtt; - } - - /* WARNING: Apparently we must kick fbdev drivers before vgacon, - * otherwise the vga fbdev driver falls over. */ - ret = i915_kick_out_firmware_fb(dev_priv); - if (ret) { - DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); - goto out_ggtt; - } - - ret = i915_kick_out_vgacon(dev_priv); - if (ret) { - DRM_ERROR("failed to remove conflicting VGA console\n"); - goto out_ggtt; - } - - pci_set_master(dev->pdev); - - /* overlay on gen2 is broken and can't address above 1G */ - if (IS_GEN2(dev)) { - ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); - if (ret) { - DRM_ERROR("failed to set DMA mask\n"); - - goto out_ggtt; - } - } - - - /* 965GM sometimes incorrectly writes to hardware status page (HWS) - * using 32bit addressing, overwriting memory if HWS is located - * above 4GB. - * - * The documentation also mentions an issue with undefined - * behaviour if any general state is accessed within a page above 4GB, - * which also needs to be handled carefully. 
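i915_driver_init_hw() applies two different coherent-DMA clamps, one per quirk: 30 bits for the gen2 overlay limit just above, and 32 bits for the 965G-class HWS issue this comment describes. A sketch of the choice (the helper and macro names are made up; the mask values mirror the DMA_BIT_MASK() calls around this comment):

#include <stdint.h>

/* Same value the kernel's DMA_BIT_MASK(n) yields; renamed as a sketch. */
#define EXAMPLE_DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static uint64_t pick_coherent_dma_mask(int is_gen2, int is_965g_class)
{
        if (is_gen2)            /* overlay can't address above 1GiB */
                return EXAMPLE_DMA_BIT_MASK(30);
        if (is_965g_class)      /* keep the HWS below 4GiB */
                return EXAMPLE_DMA_BIT_MASK(32);
        return EXAMPLE_DMA_BIT_MASK(64);
}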
- */ - if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) { - ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); - - if (ret) { - DRM_ERROR("failed to set DMA mask\n"); - - goto out_ggtt; - } - } - - aperture_size = ggtt->mappable_end; - - ggtt->mappable = - io_mapping_create_wc(ggtt->mappable_base, - aperture_size); - if (!ggtt->mappable) { - ret = -EIO; - goto out_ggtt; - } - - ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, - aperture_size); - - pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); - - intel_uncore_sanitize(dev_priv); - - intel_opregion_setup(dev_priv); - - i915_gem_load_init_fences(dev_priv); - - /* On the 945G/GM, the chipset reports the MSI capability on the - * integrated graphics even though the support isn't actually there - * according to the published specs. It doesn't appear to function - * correctly in testing on 945G. - * This may be a side effect of MSI having been made available for PEG - * and the registers being closely associated. - * - * According to chipset errata, on the 965GM, MSI interrupts may - * be lost or delayed, but we use them anyways to avoid - * stuck interrupts on some machines. - */ - if (!IS_I945G(dev) && !IS_I945GM(dev)) { - if (pci_enable_msi(dev->pdev) < 0) - DRM_DEBUG_DRIVER("can't enable MSI"); - } - - return 0; - -out_ggtt: - i915_ggtt_cleanup_hw(dev); - - return ret; -} - -/** - * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw() - * @dev_priv: device private - */ -static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - struct i915_ggtt *ggtt = &dev_priv->ggtt; - - if (dev->pdev->msi_enabled) - pci_disable_msi(dev->pdev); - - pm_qos_remove_request(&dev_priv->pm_qos); - arch_phys_wc_del(ggtt->mtrr); - io_mapping_free(ggtt->mappable); - i915_ggtt_cleanup_hw(dev); -} - -/** - * i915_driver_register - register the driver with the rest of the system - * @dev_priv: device private - * - * Perform any steps necessary to make the driver available via kernel - * internal or userspace interfaces. - */ -static void i915_driver_register(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - - i915_gem_shrinker_init(dev_priv); - /* - * Notify a valid surface after modesetting, - * when running inside a VM. 
- */ - if (intel_vgpu_active(dev_priv)) - I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); - - i915_setup_sysfs(dev); - - if (INTEL_INFO(dev_priv)->num_pipes) { - /* Must be done after probing outputs */ - intel_opregion_register(dev_priv); - acpi_video_register(); - } - - if (IS_GEN5(dev_priv)) - intel_gpu_ips_init(dev_priv); - - i915_audio_component_init(dev_priv); -} - -/** - * i915_driver_unregister - cleanup the registration done in i915_driver_register() - * @dev_priv: device private - */ -static void i915_driver_unregister(struct drm_i915_private *dev_priv) -{ - i915_audio_component_cleanup(dev_priv); - intel_gpu_ips_teardown(); - acpi_video_unregister(); - intel_opregion_unregister(dev_priv); - i915_teardown_sysfs(dev_priv->dev); - i915_gem_shrinker_cleanup(dev_priv); -} - -/** - * i915_driver_load - setup chip and create an initial config - * @dev: DRM device - * @flags: startup flags - * - * The driver load routine has to do several things: - * - drive output discovery via intel_modeset_init() - * - initialize the memory manager - * - allocate initial config memory - * - setup the DRM framebuffer with the allocated memory - */ -int i915_driver_load(struct drm_device *dev, unsigned long flags) -{ - struct drm_i915_private *dev_priv; - int ret = 0; - - dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); - if (dev_priv == NULL) - return -ENOMEM; - - dev->dev_private = dev_priv; - /* Must be set before calling __i915_printk */ - dev_priv->dev = dev; - - ret = i915_driver_init_early(dev_priv, dev, - (struct intel_device_info *)flags); - - if (ret < 0) - goto out_free_priv; - - intel_runtime_pm_get(dev_priv); - - ret = i915_driver_init_mmio(dev_priv); - if (ret < 0) - goto out_runtime_pm_put; - - ret = i915_driver_init_hw(dev_priv); - if (ret < 0) - goto out_cleanup_mmio; - - /* - * TODO: move the vblank init and parts of modeset init steps into one - * of the i915_driver_init_/i915_driver_register functions according - * to the role/effect of the given init step.
- */ - if (INTEL_INFO(dev)->num_pipes) { - ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes); - if (ret) - goto out_cleanup_hw; - } - - ret = i915_load_modeset_init(dev); - if (ret < 0) - goto out_cleanup_vblank; - - i915_driver_register(dev_priv); - - intel_runtime_pm_enable(dev_priv); - - intel_runtime_pm_put(dev_priv); - - return 0; - -out_cleanup_vblank: - drm_vblank_cleanup(dev); -out_cleanup_hw: - i915_driver_cleanup_hw(dev_priv); -out_cleanup_mmio: - i915_driver_cleanup_mmio(dev_priv); -out_runtime_pm_put: - intel_runtime_pm_put(dev_priv); - i915_driver_cleanup_early(dev_priv); -out_free_priv: - i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret); - - kfree(dev_priv); - - return ret; -} - -int i915_driver_unload(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - intel_fbdev_fini(dev); - - intel_gvt_cleanup(dev_priv); - - ret = i915_gem_suspend(dev); - if (ret) { - DRM_ERROR("failed to idle hardware: %d\n", ret); - return ret; - } - - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); - - i915_driver_unregister(dev_priv); - - drm_vblank_cleanup(dev); - - intel_modeset_cleanup(dev); - - /* - * free the memory space allocated for the child device - * config parsed from VBT - */ - if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) { - kfree(dev_priv->vbt.child_dev); - dev_priv->vbt.child_dev = NULL; - dev_priv->vbt.child_dev_num = 0; - } - kfree(dev_priv->vbt.sdvo_lvds_vbt_mode); - dev_priv->vbt.sdvo_lvds_vbt_mode = NULL; - kfree(dev_priv->vbt.lfp_lvds_vbt_mode); - dev_priv->vbt.lfp_lvds_vbt_mode = NULL; - - vga_switcheroo_unregister_client(dev->pdev); - vga_client_register(dev->pdev, NULL, NULL, NULL); - - intel_csr_ucode_fini(dev_priv); - - /* Free error state after interrupts are fully disabled. */ - cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); - i915_destroy_error_state(dev); - - /* Flush any outstanding unpin_work. */ - flush_workqueue(dev_priv->wq); - - intel_guc_fini(dev); - i915_gem_fini(dev); - intel_fbc_cleanup_cfb(dev_priv); - - intel_power_domains_fini(dev_priv); - - i915_driver_cleanup_hw(dev_priv); - i915_driver_cleanup_mmio(dev_priv); - - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); - - i915_driver_cleanup_early(dev_priv); - kfree(dev_priv); - - return 0; -} - -int i915_driver_open(struct drm_device *dev, struct drm_file *file) -{ - int ret; - - ret = i915_gem_open(dev, file); - if (ret) - return ret; - - return 0; -} - -/** - * i915_driver_lastclose - clean up after all DRM clients have exited - * @dev: DRM device - * - * Take care of cleaning up after all DRM clients have exited. In the - * mode setting case, we want to restore the kernel's initial mode (just - * in case the last client left us in a bad state). - * - * Additionally, in the non-mode setting case, we'll tear down the GTT - * and DMA structures, since the kernel won't be using them, and clean up any GEM state.
- */ -void i915_driver_lastclose(struct drm_device *dev) -{ - intel_fbdev_restore_mode(dev); - vga_switcheroo_process_delayed_switch(); -} - -void i915_driver_preclose(struct drm_device *dev, struct drm_file *file) -{ - mutex_lock(&dev->struct_mutex); - i915_gem_context_close(dev, file); - i915_gem_release(dev, file); - mutex_unlock(&dev->struct_mutex); -} - -void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) -{ - struct drm_i915_file_private *file_priv = file->driver_priv; - - kfree(file_priv); -} - -static int -i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - return -ENODEV; -} - -const struct drm_ioctl_desc i915_ioctls[] = { - DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, 
i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), - DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), -}; - -int i915_max_ioctl = ARRAY_SIZE(i915_ioctls); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 3eb47fbcea73..b9a811750ca8 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -27,403 +27,92 @@ * */ -#include <linux/device.h> #include <linux/acpi.h> +#include <linux/device.h> +#include <linux/oom.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> +#include <linux/pnp.h> +#include <linux/slab.h> +#include <linux/vgaarb.h> +#include <linux/vga_switcheroo.h> +#include <linux/vt.h> +#include <acpi/video.h> + #include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> #include <drm/i915_drm.h> + #include "i915_drv.h" #include "i915_trace.h" +#include "i915_vgpu.h" #include "intel_drv.h" -#include <linux/console.h> -#include <linux/module.h> -#include <linux/pm_runtime.h> -#include <linux/vga_switcheroo.h> -#include <drm/drm_crtc_helper.h> - static struct drm_driver driver; -#define GEN_DEFAULT_PIPEOFFSETS \ - .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ - PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ - .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ - TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \ - .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET } - -#define GEN_CHV_PIPEOFFSETS \ - .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ - CHV_PIPE_C_OFFSET }, \ - .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ - CHV_TRANSCODER_C_OFFSET, }, \ - .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \ - CHV_PALETTE_C_OFFSET } - -#define CURSOR_OFFSETS \ - .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET } - -#define IVB_CURSOR_OFFSETS \ - .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET } - -#define BDW_COLORS \ - .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 } -#define CHV_COLORS \ - .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 } - -static const struct intel_device_info intel_i830_info = { - .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, - .has_overlay = 1, .overlay_needs_physical = 1, - .ring_mask = RENDER_RING, - 
GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_845g_info = { - .gen = 2, .num_pipes = 1, - .has_overlay = 1, .overlay_needs_physical = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_i85x_info = { - .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2, - .cursor_needs_physical = 1, - .has_overlay = 1, .overlay_needs_physical = 1, - .has_fbc = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_i865g_info = { - .gen = 2, .num_pipes = 1, - .has_overlay = 1, .overlay_needs_physical = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_i915g_info = { - .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2, - .has_overlay = 1, .overlay_needs_physical = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; -static const struct intel_device_info intel_i915gm_info = { - .gen = 3, .is_mobile = 1, .num_pipes = 2, - .cursor_needs_physical = 1, - .has_overlay = 1, .overlay_needs_physical = 1, - .supports_tv = 1, - .has_fbc = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; -static const struct intel_device_info intel_i945g_info = { - .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2, - .has_overlay = 1, .overlay_needs_physical = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; -static const struct intel_device_info intel_i945gm_info = { - .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2, - .has_hotplug = 1, .cursor_needs_physical = 1, - .has_overlay = 1, .overlay_needs_physical = 1, - .supports_tv = 1, - .has_fbc = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_i965g_info = { - .gen = 4, .is_broadwater = 1, .num_pipes = 2, - .has_hotplug = 1, - .has_overlay = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_i965gm_info = { - .gen = 4, .is_crestline = 1, .num_pipes = 2, - .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, - .has_overlay = 1, - .supports_tv = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_g33_info = { - .gen = 3, .is_g33 = 1, .num_pipes = 2, - .need_gfx_hws = 1, .has_hotplug = 1, - .has_overlay = 1, - .ring_mask = RENDER_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_g45_info = { - .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2, - .has_pipe_cxsr = 1, .has_hotplug = 1, - .ring_mask = RENDER_RING | BSD_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_gm45_info = { - .gen = 4, .is_g4x = 1, .num_pipes = 2, - .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, - .has_pipe_cxsr = 1, .has_hotplug = 1, - .supports_tv = 1, - .ring_mask = RENDER_RING | BSD_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_pineview_info = { - .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2, - .need_gfx_hws = 1, .has_hotplug = 1, - .has_overlay = 1, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_ironlake_d_info = { - .gen = 5, .num_pipes = 2, - 
.need_gfx_hws = 1, .has_hotplug = 1, - .ring_mask = RENDER_RING | BSD_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_ironlake_m_info = { - .gen = 5, .is_mobile = 1, .num_pipes = 2, - .need_gfx_hws = 1, .has_hotplug = 1, - .has_fbc = 1, - .ring_mask = RENDER_RING | BSD_RING, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_sandybridge_d_info = { - .gen = 6, .num_pipes = 2, - .need_gfx_hws = 1, .has_hotplug = 1, - .has_fbc = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING, - .has_llc = 1, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -static const struct intel_device_info intel_sandybridge_m_info = { - .gen = 6, .is_mobile = 1, .num_pipes = 2, - .need_gfx_hws = 1, .has_hotplug = 1, - .has_fbc = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING, - .has_llc = 1, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS, -}; - -#define GEN7_FEATURES \ - .gen = 7, .num_pipes = 3, \ - .need_gfx_hws = 1, .has_hotplug = 1, \ - .has_fbc = 1, \ - .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ - .has_llc = 1, \ - GEN_DEFAULT_PIPEOFFSETS, \ - IVB_CURSOR_OFFSETS - -static const struct intel_device_info intel_ivybridge_d_info = { - GEN7_FEATURES, - .is_ivybridge = 1, -}; - -static const struct intel_device_info intel_ivybridge_m_info = { - GEN7_FEATURES, - .is_ivybridge = 1, - .is_mobile = 1, -}; - -static const struct intel_device_info intel_ivybridge_q_info = { - GEN7_FEATURES, - .is_ivybridge = 1, - .num_pipes = 0, /* legal, last one wins */ -}; - -#define VLV_FEATURES \ - .gen = 7, .num_pipes = 2, \ - .need_gfx_hws = 1, .has_hotplug = 1, \ - .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ - .display_mmio_offset = VLV_DISPLAY_BASE, \ - GEN_DEFAULT_PIPEOFFSETS, \ - CURSOR_OFFSETS - -static const struct intel_device_info intel_valleyview_m_info = { - VLV_FEATURES, - .is_valleyview = 1, - .is_mobile = 1, -}; - -static const struct intel_device_info intel_valleyview_d_info = { - VLV_FEATURES, - .is_valleyview = 1, -}; - -#define HSW_FEATURES \ - GEN7_FEATURES, \ - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ - .has_ddi = 1, \ - .has_fpga_dbg = 1 +static unsigned int i915_load_fail_count; -static const struct intel_device_info intel_haswell_d_info = { - HSW_FEATURES, - .is_haswell = 1, -}; +bool __i915_inject_load_failure(const char *func, int line) +{ + if (i915_load_fail_count >= i915.inject_load_failure) + return false; -static const struct intel_device_info intel_haswell_m_info = { - HSW_FEATURES, - .is_haswell = 1, - .is_mobile = 1, -}; + if (++i915_load_fail_count == i915.inject_load_failure) { + DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n", + i915.inject_load_failure, func, line); + return true; + } -#define BDW_FEATURES \ - HSW_FEATURES, \ - BDW_COLORS + return false; +} -static const struct intel_device_info intel_broadwell_d_info = { - BDW_FEATURES, - .gen = 8, - .is_broadwell = 1, -}; +#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI" +#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \ + "providing the dmesg log by booting with drm.debug=0xf" -static const struct intel_device_info intel_broadwell_m_info = { - BDW_FEATURES, - .gen = 8, .is_mobile = 1, - .is_broadwell = 1, -}; +void +__i915_printk(struct drm_i915_private *dev_priv, const char *level, + const char *fmt, ...) 
+{ + static bool shown_bug_once; + struct device *dev = dev_priv->drm.dev; + bool is_error = level[1] <= KERN_ERR[1]; + bool is_debug = level[1] == KERN_DEBUG[1]; + struct va_format vaf; + va_list args; + + if (is_debug && !(drm_debug & DRM_UT_DRIVER)) + return; -static const struct intel_device_info intel_broadwell_gt3d_info = { - BDW_FEATURES, - .gen = 8, - .is_broadwell = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, -}; + va_start(args, fmt); -static const struct intel_device_info intel_broadwell_gt3m_info = { - BDW_FEATURES, - .gen = 8, .is_mobile = 1, - .is_broadwell = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, -}; + vaf.fmt = fmt; + vaf.va = &args; -static const struct intel_device_info intel_cherryview_info = { - .gen = 8, .num_pipes = 3, - .need_gfx_hws = 1, .has_hotplug = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, - .is_cherryview = 1, - .display_mmio_offset = VLV_DISPLAY_BASE, - GEN_CHV_PIPEOFFSETS, - CURSOR_OFFSETS, - CHV_COLORS, -}; + dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV", + __builtin_return_address(0), &vaf); -static const struct intel_device_info intel_skylake_info = { - BDW_FEATURES, - .is_skylake = 1, - .gen = 9, -}; - -static const struct intel_device_info intel_skylake_gt3_info = { - BDW_FEATURES, - .is_skylake = 1, - .gen = 9, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, -}; - -static const struct intel_device_info intel_broxton_info = { - .is_preliminary = 1, - .is_broxton = 1, - .gen = 9, - .need_gfx_hws = 1, .has_hotplug = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, - .num_pipes = 3, - .has_ddi = 1, - .has_fpga_dbg = 1, - .has_fbc = 1, - .has_pooled_eu = 0, - GEN_DEFAULT_PIPEOFFSETS, - IVB_CURSOR_OFFSETS, - BDW_COLORS, -}; + if (is_error && !shown_bug_once) { + dev_notice(dev, "%s", FDO_BUG_MSG); + shown_bug_once = true; + } -static const struct intel_device_info intel_kabylake_info = { - BDW_FEATURES, - .is_kabylake = 1, - .gen = 9, -}; + va_end(args); +} -static const struct intel_device_info intel_kabylake_gt3_info = { - BDW_FEATURES, - .is_kabylake = 1, - .gen = 9, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, -}; +static bool i915_error_injected(struct drm_i915_private *dev_priv) +{ + return i915.inject_load_failure && + i915_load_fail_count == i915.inject_load_failure; +} -/* - * Make sure any device matches here are from most specific to most - * general. For example, since the Quanta match is based on the subsystem - * and subvendor IDs, we need it to come before the more general IVB - * PCI ID matches, otherwise we'll use the wrong info struct above. 
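The ordering comment above matters because the PCI core walks an ID table linearly and takes the first entry that matches. A self-contained sketch with illustrative IDs, showing why a subsystem-specific (Quanta-style) entry must precede the generic wildcard entry:

#include <stdio.h>
#include <stdint.h>

#define ANY_ID 0xffffu                  /* stands in for PCI_ANY_ID */

struct id_entry {
        uint16_t device, subvendor;
        const char *info;
};

/* Most specific first: the subvendor match would never be reached
 * if the wildcard entry came before it. */
static const struct id_entry table[] = {
        { 0x0156, 0x152d, "IVB Quanta-specific info" },
        { 0x0156, ANY_ID, "generic IVB info" },
};

static const char *match(uint16_t device, uint16_t subvendor)
{
        unsigned int i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i].device == device &&
                    (table[i].subvendor == ANY_ID ||
                     table[i].subvendor == subvendor))
                        return table[i].info;
        return "no match";
}

int main(void)
{
        printf("%s\n", match(0x0156, 0x152d)); /* Quanta entry wins */
        printf("%s\n", match(0x0156, 0x1028)); /* falls through to generic */
        return 0;
}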
- */ -static const struct pci_device_id pciidlist[] = { - INTEL_I830_IDS(&intel_i830_info), - INTEL_I845G_IDS(&intel_845g_info), - INTEL_I85X_IDS(&intel_i85x_info), - INTEL_I865G_IDS(&intel_i865g_info), - INTEL_I915G_IDS(&intel_i915g_info), - INTEL_I915GM_IDS(&intel_i915gm_info), - INTEL_I945G_IDS(&intel_i945g_info), - INTEL_I945GM_IDS(&intel_i945gm_info), - INTEL_I965G_IDS(&intel_i965g_info), - INTEL_G33_IDS(&intel_g33_info), - INTEL_I965GM_IDS(&intel_i965gm_info), - INTEL_GM45_IDS(&intel_gm45_info), - INTEL_G45_IDS(&intel_g45_info), - INTEL_PINEVIEW_IDS(&intel_pineview_info), - INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), - INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), - INTEL_SNB_D_IDS(&intel_sandybridge_d_info), - INTEL_SNB_M_IDS(&intel_sandybridge_m_info), - INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ - INTEL_IVB_M_IDS(&intel_ivybridge_m_info), - INTEL_IVB_D_IDS(&intel_ivybridge_d_info), - INTEL_HSW_D_IDS(&intel_haswell_d_info), - INTEL_HSW_M_IDS(&intel_haswell_m_info), - INTEL_VLV_M_IDS(&intel_valleyview_m_info), - INTEL_VLV_D_IDS(&intel_valleyview_d_info), - INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), - INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), - INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), - INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), - INTEL_CHV_IDS(&intel_cherryview_info), - INTEL_SKL_GT1_IDS(&intel_skylake_info), - INTEL_SKL_GT2_IDS(&intel_skylake_info), - INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), - INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info), - INTEL_BXT_IDS(&intel_broxton_info), - INTEL_KBL_GT1_IDS(&intel_kabylake_info), - INTEL_KBL_GT2_IDS(&intel_kabylake_info), - INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), - INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), - {0, 0, 0} -}; +#define i915_load_error(dev_priv, fmt, ...) \ + __i915_printk(dev_priv, \ + i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \ + fmt, ##__VA_ARGS__) -MODULE_DEVICE_TABLE(pci, pciidlist); static enum intel_pch intel_virt_detect_pch(struct drm_device *dev) { @@ -453,9 +142,9 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev) return ret; } -void intel_detect_pch(struct drm_device *dev) +static void intel_detect_pch(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pch = NULL; /* In all current cases, num_pipes is equivalent to the PCH_NOP setting @@ -515,6 +204,10 @@ void intel_detect_pch(struct drm_device *dev) DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); WARN_ON(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev)); + } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { + dev_priv->pch_type = PCH_KBP; + DRM_DEBUG_KMS("Found KabyPoint PCH\n"); + WARN_ON(!IS_KABYLAKE(dev)); } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && @@ -556,9 +249,1163 @@ bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv) return true; } +static int i915_getparam(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + drm_i915_getparam_t *param = data; + int value; + + switch (param->param) { + case I915_PARAM_IRQ_ACTIVE: + case I915_PARAM_ALLOW_BATCHBUFFER: + case I915_PARAM_LAST_DISPATCH: + /* Reject all old ums/dri params. 
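Userspace reaches the i915_getparam() handler below through the classic getparam ioctl; the new I915_PARAM_HAS_POOLED_EU and I915_PARAM_MIN_EU_IN_POOL values are queried exactly like the older parameters. A minimal userspace sketch (error handling trimmed; the libdrm include paths may differ per installation, build with -ldrm):

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>            /* drmIoctl() */
#include <i915_drm.h>           /* drm_i915_getparam_t, I915_PARAM_* */

int main(void)
{
        drm_i915_getparam_t gp = {0};
        int fd = open("/dev/dri/card0", O_RDWR);
        int value = 0;

        gp.param = I915_PARAM_HAS_POOLED_EU;
        gp.value = &value;
        if (fd >= 0 && drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
                printf("pooled EU: %s\n", value ? "yes" : "no");
        return 0;
}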
*/ + return -ENODEV; + case I915_PARAM_CHIPSET_ID: + value = dev->pdev->device; + break; + case I915_PARAM_REVISION: + value = dev->pdev->revision; + break; + case I915_PARAM_HAS_GEM: + value = 1; + break; + case I915_PARAM_NUM_FENCES_AVAIL: + value = dev_priv->num_fence_regs; + break; + case I915_PARAM_HAS_OVERLAY: + value = dev_priv->overlay ? 1 : 0; + break; + case I915_PARAM_HAS_PAGEFLIPPING: + value = 1; + break; + case I915_PARAM_HAS_EXECBUF2: + /* depends on GEM */ + value = 1; + break; + case I915_PARAM_HAS_BSD: + value = intel_engine_initialized(&dev_priv->engine[VCS]); + break; + case I915_PARAM_HAS_BLT: + value = intel_engine_initialized(&dev_priv->engine[BCS]); + break; + case I915_PARAM_HAS_VEBOX: + value = intel_engine_initialized(&dev_priv->engine[VECS]); + break; + case I915_PARAM_HAS_BSD2: + value = intel_engine_initialized(&dev_priv->engine[VCS2]); + break; + case I915_PARAM_HAS_RELAXED_FENCING: + value = 1; + break; + case I915_PARAM_HAS_COHERENT_RINGS: + value = 1; + break; + case I915_PARAM_HAS_EXEC_CONSTANTS: + value = INTEL_INFO(dev)->gen >= 4; + break; + case I915_PARAM_HAS_RELAXED_DELTA: + value = 1; + break; + case I915_PARAM_HAS_GEN7_SOL_RESET: + value = 1; + break; + case I915_PARAM_HAS_LLC: + value = HAS_LLC(dev); + break; + case I915_PARAM_HAS_WT: + value = HAS_WT(dev); + break; + case I915_PARAM_HAS_ALIASING_PPGTT: + value = USES_PPGTT(dev); + break; + case I915_PARAM_HAS_WAIT_TIMEOUT: + value = 1; + break; + case I915_PARAM_HAS_SEMAPHORES: + value = i915_semaphore_is_enabled(dev_priv); + break; + case I915_PARAM_HAS_PRIME_VMAP_FLUSH: + value = 1; + break; + case I915_PARAM_HAS_SECURE_BATCHES: + value = capable(CAP_SYS_ADMIN); + break; + case I915_PARAM_HAS_PINNED_BATCHES: + value = 1; + break; + case I915_PARAM_HAS_EXEC_NO_RELOC: + value = 1; + break; + case I915_PARAM_HAS_EXEC_HANDLE_LUT: + value = 1; + break; + case I915_PARAM_CMD_PARSER_VERSION: + value = i915_cmd_parser_get_version(dev_priv); + break; + case I915_PARAM_HAS_COHERENT_PHYS_GTT: + value = 1; + break; + case I915_PARAM_MMAP_VERSION: + value = 1; + break; + case I915_PARAM_SUBSLICE_TOTAL: + value = INTEL_INFO(dev)->subslice_total; + if (!value) + return -ENODEV; + break; + case I915_PARAM_EU_TOTAL: + value = INTEL_INFO(dev)->eu_total; + if (!value) + return -ENODEV; + break; + case I915_PARAM_HAS_GPU_RESET: + value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv); + break; + case I915_PARAM_HAS_RESOURCE_STREAMER: + value = HAS_RESOURCE_STREAMER(dev); + break; + case I915_PARAM_HAS_EXEC_SOFTPIN: + value = 1; + break; + case I915_PARAM_HAS_POOLED_EU: + value = HAS_POOLED_EU(dev); + break; + case I915_PARAM_MIN_EU_IN_POOL: + value = INTEL_INFO(dev)->min_eu_in_pool; + break; + default: + DRM_DEBUG("Unknown parameter %d\n", param->param); + return -EINVAL; + } + + if (put_user(value, param->value)) + return -EFAULT; + + return 0; +} + +static int i915_get_bridge_dev(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); + if (!dev_priv->bridge_dev) { + DRM_ERROR("bridge device not found\n"); + return -1; + } + return 0; +} + +/* Allocate space for the MCH regs if needed, return nonzero on error */ +static int +intel_alloc_mchbar_resource(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + int reg = INTEL_INFO(dev)->gen >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; + u32 temp_lo, temp_hi = 0; + u64 mchbar_addr; + int ret; + + if (INTEL_INFO(dev)->gen >= 4) + pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); + pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); + mchbar_addr = ((u64)temp_hi << 32) | temp_lo; + + /* If ACPI doesn't have it, assume we need to allocate it ourselves */ +#ifdef CONFIG_PNP + if (mchbar_addr && + pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) + return 0; +#endif + + /* Get some space for it */ + dev_priv->mch_res.name = "i915 MCHBAR"; + dev_priv->mch_res.flags = IORESOURCE_MEM; + ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, + &dev_priv->mch_res, + MCHBAR_SIZE, MCHBAR_SIZE, + PCIBIOS_MIN_MEM, + 0, pcibios_align_resource, + dev_priv->bridge_dev); + if (ret) { + DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); + dev_priv->mch_res.start = 0; + return ret; + } + + if (INTEL_INFO(dev)->gen >= 4) + pci_write_config_dword(dev_priv->bridge_dev, reg + 4, + upper_32_bits(dev_priv->mch_res.start)); + + pci_write_config_dword(dev_priv->bridge_dev, reg, + lower_32_bits(dev_priv->mch_res.start)); + return 0; +} + +/* Setup MCHBAR if possible; set mchbar_need_disable if we should disable it again on teardown */ +static void +intel_setup_mchbar(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; + u32 temp; + bool enabled; + + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) + return; + + dev_priv->mchbar_need_disable = false; + + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp); + enabled = !!(temp & DEVEN_MCHBAR_EN); + } else { + pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); + enabled = temp & 1; + } + + /* If it's already enabled, don't have to do anything */ + if (enabled) + return; + + if (intel_alloc_mchbar_resource(dev)) + return; + + dev_priv->mchbar_need_disable = true; + + /* Space is allocated or reserved, so enable it. */ + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_write_config_dword(dev_priv->bridge_dev, DEVEN, + temp | DEVEN_MCHBAR_EN); + } else { + pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); + pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); + } +} + +static void +intel_teardown_mchbar(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ?
MCHBAR_I965 : MCHBAR_I915; + + if (dev_priv->mchbar_need_disable) { + if (IS_I915G(dev) || IS_I915GM(dev)) { + u32 deven_val; + + pci_read_config_dword(dev_priv->bridge_dev, DEVEN, + &deven_val); + deven_val &= ~DEVEN_MCHBAR_EN; + pci_write_config_dword(dev_priv->bridge_dev, DEVEN, + deven_val); + } else { + u32 mchbar_val; + + pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, + &mchbar_val); + mchbar_val &= ~1; + pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, + mchbar_val); + } + } + + if (dev_priv->mch_res.start) + release_resource(&dev_priv->mch_res); +} + +/* true = enable decode, false = disable decode */ +static unsigned int i915_vga_set_decode(void *cookie, bool state) +{ + struct drm_device *dev = cookie; + + intel_modeset_vga_set_state(dev, state); + if (state) + return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | + VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; + else + return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; +} + +static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; + + if (state == VGA_SWITCHEROO_ON) { + pr_info("switched on\n"); + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + /* i915 resume handler doesn't set to D0 */ + pci_set_power_state(dev->pdev, PCI_D0); + i915_resume_switcheroo(dev); + dev->switch_power_state = DRM_SWITCH_POWER_ON; + } else { + pr_info("switched off\n"); + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + i915_suspend_switcheroo(dev, pmm); + dev->switch_power_state = DRM_SWITCH_POWER_OFF; + } +} + +static bool i915_switcheroo_can_switch(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + /* + * FIXME: open_count is protected by drm_global_mutex but that would lead to + * locking inversion with the driver load path. And the access here is + * completely racy anyway. So don't bother with locking for now. + */ + return dev->open_count == 0; +} + +static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { + .set_gpu_state = i915_switcheroo_set_state, + .reprobe = NULL, + .can_switch = i915_switcheroo_can_switch, +}; + +static void i915_gem_fini(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + /* + * Neither the BIOS, ourselves nor any other kernel + * expects the system to be in execlists mode on startup, + * so we need to reset the GPU back to legacy mode. And the only + * known way to disable logical contexts is through a GPU reset. + * + * So in order to leave the system in a known default configuration, + * always reset the GPU upon unload. Afterwards we then clean up the + * GEM state tracking, flushing off the requests and leaving the + * system in a known idle state. + * + * Note that it is of the utmost importance that the GPU is idle and + * all stray writes are flushed *before* we dismantle the backing + * storage for the pinned objects. + * + * However, since we are uncertain that resetting the GPU on older + * machines is a good idea, we don't - just in case it leaves the + * machine in an unusable condition. 
+ */ + if (HAS_HW_CONTEXTS(dev)) { + int reset = intel_gpu_reset(dev_priv, ALL_ENGINES); + WARN_ON(reset && reset != -ENODEV); + } + + mutex_lock(&dev->struct_mutex); + i915_gem_reset(dev); + i915_gem_cleanup_engines(dev); + i915_gem_context_fini(dev); + mutex_unlock(&dev->struct_mutex); + + WARN_ON(!list_empty(&to_i915(dev)->context_list)); +} + +static int i915_load_modeset_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + int ret; + + if (i915_inject_load_failure()) + return -ENODEV; + + ret = intel_bios_init(dev_priv); + if (ret) + DRM_INFO("failed to find VBIOS tables\n"); + + /* If we have > 1 VGA cards, then we need to arbitrate access + * to the common VGA resources. + * + * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), + * then we do not take part in VGA arbitration and the + * vga_client_register() fails with -ENODEV. + */ + ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); + if (ret && ret != -ENODEV) + goto out; + + intel_register_dsm_handler(); + + ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false); + if (ret) + goto cleanup_vga_client; + + /* must happen before intel_power_domains_init_hw() on VLV/CHV */ + intel_update_rawclk(dev_priv); + + intel_power_domains_init_hw(dev_priv, false); + + intel_csr_ucode_init(dev_priv); + + ret = intel_irq_install(dev_priv); + if (ret) + goto cleanup_csr; + + intel_setup_gmbus(dev); + + /* Important: The output setup functions called by modeset_init need + * working irqs for e.g. gmbus and dp aux transfers. */ + intel_modeset_init(dev); + + intel_guc_init(dev); + + ret = i915_gem_init(dev); + if (ret) + goto cleanup_irq; + + intel_modeset_gem_init(dev); + + if (INTEL_INFO(dev)->num_pipes == 0) + return 0; + + ret = intel_fbdev_init(dev); + if (ret) + goto cleanup_gem; + + /* Only enable hotplug handling once the fbdev is fully set up. 
*/ + intel_hpd_init(dev_priv); + + drm_kms_helper_poll_init(dev); + + return 0; + +cleanup_gem: + i915_gem_fini(dev); +cleanup_irq: + intel_guc_fini(dev); + drm_irq_uninstall(dev); + intel_teardown_gmbus(dev); +cleanup_csr: + intel_csr_ucode_fini(dev_priv); + intel_power_domains_fini(dev_priv); + vga_switcheroo_unregister_client(dev->pdev); +cleanup_vga_client: + vga_client_register(dev->pdev, NULL, NULL, NULL); +out: + return ret; +} + +#if IS_ENABLED(CONFIG_FB) +static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) +{ + struct apertures_struct *ap; + struct pci_dev *pdev = dev_priv->drm.pdev; + struct i915_ggtt *ggtt = &dev_priv->ggtt; + bool primary; + int ret; + + ap = alloc_apertures(1); + if (!ap) + return -ENOMEM; + + ap->ranges[0].base = ggtt->mappable_base; + ap->ranges[0].size = ggtt->mappable_end; + + primary = + pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; + + ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary); + + kfree(ap); + + return ret; +} +#else +static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) +{ + return 0; +} +#endif + +#if !defined(CONFIG_VGA_CONSOLE) +static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) +{ + return 0; +} +#elif !defined(CONFIG_DUMMY_CONSOLE) +static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) +{ + return -ENODEV; +} +#else +static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) +{ + int ret = 0; + + DRM_INFO("Replacing VGA console driver\n"); + + console_lock(); + if (con_is_bound(&vga_con)) + ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); + if (ret == 0) { + ret = do_unregister_con_driver(&vga_con); + + /* Ignore "already unregistered". */ + if (ret == -ENODEV) + ret = 0; + } + console_unlock(); + + return ret; +} +#endif + +static void intel_init_dpio(struct drm_i915_private *dev_priv) +{ + /* + * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), + * CHV x1 PHY (DP/HDMI D) + * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) + */ + if (IS_CHERRYVIEW(dev_priv)) { + DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; + DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; + } else if (IS_VALLEYVIEW(dev_priv)) { + DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; + } +} + +static int i915_workqueues_init(struct drm_i915_private *dev_priv) +{ + /* + * The i915 workqueue is primarily used for batched retirement of + * requests (and thus managing bo) once the task has been completed + * by the GPU. i915_gem_retire_requests() is called directly when we + * need high-priority retirement, such as waiting for an explicit + * bo. + * + * It is also used for periodic low-priority events, such as + * idle-timers and recording error state. + * + * All tasks on the workqueue are expected to acquire the dev mutex + * so there is no point in running more than one instance of the + * workqueue at any time. Use an ordered one. 
+ */ + dev_priv->wq = alloc_ordered_workqueue("i915", 0); + if (dev_priv->wq == NULL) + goto out_err; + + dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); + if (dev_priv->hotplug.dp_wq == NULL) + goto out_free_wq; + + return 0; + +out_free_wq: + destroy_workqueue(dev_priv->wq); +out_err: + DRM_ERROR("Failed to allocate workqueues.\n"); + + return -ENOMEM; +} + +static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) +{ + destroy_workqueue(dev_priv->hotplug.dp_wq); + destroy_workqueue(dev_priv->wq); +} + +/** + * i915_driver_init_early - setup state not requiring device access + * @dev_priv: device private + * + * Initialize everything that is a "SW-only" state, that is state not + * requiring accessing the device or exposing the driver via kernel internal + * or userspace interfaces. Example steps belonging here: lock initialization, + * system memory allocation, setting up device specific attributes and + * function hooks not requiring accessing the device. + */ +static int i915_driver_init_early(struct drm_i915_private *dev_priv, + const struct pci_device_id *ent) +{ + const struct intel_device_info *match_info = + (struct intel_device_info *)ent->driver_data; + struct intel_device_info *device_info; + int ret = 0; + + if (i915_inject_load_failure()) + return -ENODEV; + + /* Setup the write-once "constant" device info */ + device_info = mkwrite_device_info(dev_priv); + memcpy(device_info, match_info, sizeof(*device_info)); + device_info->device_id = dev_priv->drm.pdev->device; + + BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); + device_info->gen_mask = BIT(device_info->gen - 1); + + spin_lock_init(&dev_priv->irq_lock); + spin_lock_init(&dev_priv->gpu_error.lock); + mutex_init(&dev_priv->backlight_lock); + spin_lock_init(&dev_priv->uncore.lock); + spin_lock_init(&dev_priv->mm.object_stat_lock); + spin_lock_init(&dev_priv->mmio_flip_lock); + mutex_init(&dev_priv->sb_lock); + mutex_init(&dev_priv->modeset_restore_lock); + mutex_init(&dev_priv->av_mutex); + mutex_init(&dev_priv->wm.wm_mutex); + mutex_init(&dev_priv->pps_mutex); + + ret = i915_workqueues_init(dev_priv); + if (ret < 0) + return ret; + + ret = intel_gvt_init(dev_priv); + if (ret < 0) + goto err_workqueues; + + /* This must be called before any calls to HAS_PCH_* */ + intel_detect_pch(&dev_priv->drm); + + intel_pm_setup(&dev_priv->drm); + intel_init_dpio(dev_priv); + intel_power_domains_init(dev_priv); + intel_irq_init(dev_priv); + intel_init_display_hooks(dev_priv); + intel_init_clock_gating_hooks(dev_priv); + intel_init_audio_hooks(dev_priv); + i915_gem_load_init(&dev_priv->drm); + + intel_display_crc_init(&dev_priv->drm); + + intel_device_info_dump(dev_priv); + + /* Not all pre-production machines fall into this category, only the + * very first ones. Almost everything should work, except for maybe + * suspend/resume. And we don't implement workarounds that affect only + * pre-production machines. */ + if (IS_HSW_EARLY_SDV(dev_priv)) + DRM_INFO("This is an early pre-production Haswell machine. 
" + "It may not be fully functional.\n"); + + return 0; + +err_workqueues: + i915_workqueues_cleanup(dev_priv); + return ret; +} + +/** + * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early() + * @dev_priv: device private + */ +static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) +{ + i915_gem_load_cleanup(&dev_priv->drm); + i915_workqueues_cleanup(dev_priv); +} + +static int i915_mmio_setup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + int mmio_bar; + int mmio_size; + + mmio_bar = IS_GEN2(dev) ? 1 : 0; + /* + * Before gen4, the registers and the GTT are behind different BARs. + * However, from gen4 onwards, the registers and the GTT are shared + * in the same BAR, so we want to restrict this ioremap from + * clobbering the GTT which we want ioremap_wc instead. Fortunately, + * the register BAR remains the same size for all the earlier + * generations up to Ironlake. + */ + if (INTEL_INFO(dev)->gen < 5) + mmio_size = 512 * 1024; + else + mmio_size = 2 * 1024 * 1024; + dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); + if (dev_priv->regs == NULL) { + DRM_ERROR("failed to map registers\n"); + + return -EIO; + } + + /* Try to make sure MCHBAR is enabled before poking at it */ + intel_setup_mchbar(dev); + + return 0; +} + +static void i915_mmio_cleanup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + intel_teardown_mchbar(dev); + pci_iounmap(dev->pdev, dev_priv->regs); +} + +/** + * i915_driver_init_mmio - setup device MMIO + * @dev_priv: device private + * + * Setup minimal device state necessary for MMIO accesses later in the + * initialization sequence. The setup here should avoid any other device-wide + * side effects or exposing the driver via kernel internal or user space + * interfaces. + */ +static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = &dev_priv->drm; + int ret; + + if (i915_inject_load_failure()) + return -ENODEV; + + if (i915_get_bridge_dev(dev)) + return -EIO; + + ret = i915_mmio_setup(dev); + if (ret < 0) + goto put_bridge; + + intel_uncore_init(dev_priv); + + return 0; + +put_bridge: + pci_dev_put(dev_priv->bridge_dev); + + return ret; +} + +/** + * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio() + * @dev_priv: device private + */ +static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = &dev_priv->drm; + + intel_uncore_fini(dev_priv); + i915_mmio_cleanup(dev); + pci_dev_put(dev_priv->bridge_dev); +} + +static void intel_sanitize_options(struct drm_i915_private *dev_priv) +{ + i915.enable_execlists = + intel_sanitize_enable_execlists(dev_priv, + i915.enable_execlists); + + /* + * i915.enable_ppgtt is read-only, so do an early pass to validate the + * user's requested state against the hardware/driver capabilities. We + * do this now so that we can print out any log messages once rather + * than every time we check intel_enable_ppgtt(). + */ + i915.enable_ppgtt = + intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt); + DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); +} + +/** + * i915_driver_init_hw - setup state requiring device access + * @dev_priv: device private + * + * Setup state that requires accessing the device, but doesn't require + * exposing the driver via kernel internal or userspace interfaces. 
+ */ +static int i915_driver_init_hw(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = &dev_priv->drm; + struct i915_ggtt *ggtt = &dev_priv->ggtt; + uint32_t aperture_size; + int ret; + + if (i915_inject_load_failure()) + return -ENODEV; + + intel_device_info_runtime_init(dev_priv); + + intel_sanitize_options(dev_priv); + + ret = i915_ggtt_init_hw(dev); + if (ret) + return ret; + + ret = i915_ggtt_enable_hw(dev); + if (ret) { + DRM_ERROR("failed to enable GGTT\n"); + goto out_ggtt; + } + + /* WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga fbdev driver falls over. */ + ret = i915_kick_out_firmware_fb(dev_priv); + if (ret) { + DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); + goto out_ggtt; + } + + ret = i915_kick_out_vgacon(dev_priv); + if (ret) { + DRM_ERROR("failed to remove conflicting VGA console\n"); + goto out_ggtt; + } + + pci_set_master(dev->pdev); + + /* overlay on gen2 is broken and can't address above 1G */ + if (IS_GEN2(dev)) { + ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); + if (ret) { + DRM_ERROR("failed to set DMA mask\n"); + + goto out_ggtt; + } + } + + + /* 965GM sometimes incorrectly writes to hardware status page (HWS) + * using 32bit addressing, overwriting memory if HWS is located + * above 4GB. + * + * The documentation also mentions an issue with undefined + * behaviour if any general state is accessed within a page above 4GB, + * which also needs to be handled carefully. + */ + if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) { + ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); + + if (ret) { + DRM_ERROR("failed to set DMA mask\n"); + + goto out_ggtt; + } + } + + aperture_size = ggtt->mappable_end; + + ggtt->mappable = + io_mapping_create_wc(ggtt->mappable_base, + aperture_size); + if (!ggtt->mappable) { + ret = -EIO; + goto out_ggtt; + } + + ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, + aperture_size); + + pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + + intel_uncore_sanitize(dev_priv); + + intel_opregion_setup(dev_priv); + + i915_gem_load_init_fences(dev_priv); + + /* On the 945G/GM, the chipset reports the MSI capability on the + * integrated graphics even though the support isn't actually there + * according to the published specs. It doesn't appear to function + * correctly in testing on 945G. + * This may be a side effect of MSI having been made available for PEG + * and the registers being closely associated. + * + * According to chipset errata, on the 965GM, MSI interrupts may + * be lost or delayed, but we use them anyways to avoid + * stuck interrupts on some machines. 
+ */ + if (!IS_I945G(dev) && !IS_I945GM(dev)) { + if (pci_enable_msi(dev->pdev) < 0) + DRM_DEBUG_DRIVER("can't enable MSI"); + } + + return 0; + +out_ggtt: + i915_ggtt_cleanup_hw(dev); + + return ret; +} + +/** + * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw() + * @dev_priv: device private + */ +static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = &dev_priv->drm; + struct i915_ggtt *ggtt = &dev_priv->ggtt; + + if (dev->pdev->msi_enabled) + pci_disable_msi(dev->pdev); + + pm_qos_remove_request(&dev_priv->pm_qos); + arch_phys_wc_del(ggtt->mtrr); + io_mapping_free(ggtt->mappable); + i915_ggtt_cleanup_hw(dev); +} + +/** + * i915_driver_register - register the driver with the rest of the system + * @dev_priv: device private + * + * Perform any steps necessary to make the driver available via kernel + * internal or userspace interfaces. + */ +static void i915_driver_register(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = &dev_priv->drm; + + i915_gem_shrinker_init(dev_priv); + + /* + * Notify a valid surface after modesetting, + * when running inside a VM. + */ + if (intel_vgpu_active(dev_priv)) + I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); + + /* Reveal our presence to userspace */ + if (drm_dev_register(dev, 0) == 0) { + i915_debugfs_register(dev_priv); + i915_setup_sysfs(dev); + } else + DRM_ERROR("Failed to register driver for userspace access!\n"); + + if (INTEL_INFO(dev_priv)->num_pipes) { + /* Must be done after probing outputs */ + intel_opregion_register(dev_priv); + acpi_video_register(); + } + + if (IS_GEN5(dev_priv)) + intel_gpu_ips_init(dev_priv); + + i915_audio_component_init(dev_priv); + + /* + * Some ports require correctly set-up hpd registers for detection to + * work properly (leading to ghost connected connector status), e.g. VGA + * on gm45. Hence we can only set up the initial fbdev config after hpd + * irqs are fully enabled. We do it last so that the async config + * cannot run before the connectors are registered. 
+ */ + intel_fbdev_initial_config_async(dev); +} + +/** + * i915_driver_unregister - cleanup the registration done in i915_driver_register() + * @dev_priv: device private + */ +static void i915_driver_unregister(struct drm_i915_private *dev_priv) +{ + i915_audio_component_cleanup(dev_priv); + + intel_gpu_ips_teardown(); + acpi_video_unregister(); + intel_opregion_unregister(dev_priv); + + i915_teardown_sysfs(&dev_priv->drm); + i915_debugfs_unregister(dev_priv); + drm_dev_unregister(&dev_priv->drm); + + i915_gem_shrinker_cleanup(dev_priv); +} + +/** + * i915_driver_load - setup chip and create an initial config + * @pdev: PCI device + * @ent: matching PCI device ID entry + * + * The driver load routine has to do several things: + * - drive output discovery via intel_modeset_init() + * - initialize the memory manager + * - allocate initial config memory + * - setup the DRM framebuffer with the allocated memory + */ +int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct drm_i915_private *dev_priv; + int ret; + + if (i915.nuclear_pageflip) + driver.driver_features |= DRIVER_ATOMIC; + + ret = -ENOMEM; + dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); + if (dev_priv) + ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev); + if (ret) { + dev_printk(KERN_ERR, &pdev->dev, + "[" DRM_NAME ":%s] allocation failed\n", __func__); + kfree(dev_priv); + return ret; + } + + dev_priv->drm.pdev = pdev; + dev_priv->drm.dev_private = dev_priv; + + ret = pci_enable_device(pdev); + if (ret) + goto out_free_priv; + + pci_set_drvdata(pdev, &dev_priv->drm); + + ret = i915_driver_init_early(dev_priv, ent); + if (ret < 0) + goto out_pci_disable; + + intel_runtime_pm_get(dev_priv); + + ret = i915_driver_init_mmio(dev_priv); + if (ret < 0) + goto out_runtime_pm_put; + + ret = i915_driver_init_hw(dev_priv); + if (ret < 0) + goto out_cleanup_mmio; + + /* + * TODO: move the vblank init and parts of modeset init steps into one + * of the i915_driver_init_/i915_driver_register functions according + * to the role/effect of the given init step. 
+ */ + if (INTEL_INFO(dev_priv)->num_pipes) { + ret = drm_vblank_init(&dev_priv->drm, + INTEL_INFO(dev_priv)->num_pipes); + if (ret) + goto out_cleanup_hw; + } + + ret = i915_load_modeset_init(&dev_priv->drm); + if (ret < 0) + goto out_cleanup_vblank; + + i915_driver_register(dev_priv); + + intel_runtime_pm_enable(dev_priv); + + intel_runtime_pm_put(dev_priv); + + return 0; + +out_cleanup_vblank: + drm_vblank_cleanup(&dev_priv->drm); +out_cleanup_hw: + i915_driver_cleanup_hw(dev_priv); +out_cleanup_mmio: + i915_driver_cleanup_mmio(dev_priv); +out_runtime_pm_put: + intel_runtime_pm_put(dev_priv); + i915_driver_cleanup_early(dev_priv); +out_pci_disable: + pci_disable_device(pdev); +out_free_priv: + i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret); + drm_dev_unref(&dev_priv->drm); + return ret; +} + +void i915_driver_unload(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + intel_fbdev_fini(dev); + + if (i915_gem_suspend(dev)) + DRM_ERROR("failed to idle hardware; continuing to unload!\n"); + + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + + i915_driver_unregister(dev_priv); + + drm_vblank_cleanup(dev); + + intel_modeset_cleanup(dev); + + /* + * free the memory space allocated for the child device + * config parsed from VBT + */ + if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) { + kfree(dev_priv->vbt.child_dev); + dev_priv->vbt.child_dev = NULL; + dev_priv->vbt.child_dev_num = 0; + } + kfree(dev_priv->vbt.sdvo_lvds_vbt_mode); + dev_priv->vbt.sdvo_lvds_vbt_mode = NULL; + kfree(dev_priv->vbt.lfp_lvds_vbt_mode); + dev_priv->vbt.lfp_lvds_vbt_mode = NULL; + + vga_switcheroo_unregister_client(dev->pdev); + vga_client_register(dev->pdev, NULL, NULL, NULL); + + intel_csr_ucode_fini(dev_priv); + + /* Free error state after interrupts are fully disabled. */ + cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); + i915_destroy_error_state(dev); + + /* Flush any outstanding unpin_work. */ + flush_workqueue(dev_priv->wq); + + intel_guc_fini(dev); + i915_gem_fini(dev); + intel_fbc_cleanup_cfb(dev_priv); + + intel_power_domains_fini(dev_priv); + + i915_driver_cleanup_hw(dev_priv); + i915_driver_cleanup_mmio(dev_priv); + + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + + i915_driver_cleanup_early(dev_priv); +} + +static int i915_driver_open(struct drm_device *dev, struct drm_file *file) +{ + int ret; + + ret = i915_gem_open(dev, file); + if (ret) + return ret; + + return 0; +} + +/** + * i915_driver_lastclose - clean up after all DRM clients have exited + * @dev: DRM device + * + * Take care of cleaning up after all DRM clients have exited. In the + * mode setting case, we want to restore the kernel's initial mode (just + * in case the last client left us in a bad state). + * + * Additionally, in the non-mode setting case, we'll tear down the GTT + * and DMA structures, since the kernel won't be using them, and clean + * up any GEM state. 
+ */ +static void i915_driver_lastclose(struct drm_device *dev) +{ + intel_fbdev_restore_mode(dev); + vga_switcheroo_process_delayed_switch(); +} + +static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file) +{ + mutex_lock(&dev->struct_mutex); + i915_gem_context_close(dev, file); + i915_gem_release(dev, file); + mutex_unlock(&dev->struct_mutex); +} + +static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + + kfree(file_priv); +} + static void intel_suspend_encoders(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct intel_encoder *encoder; drm_modeset_lock_all(dev); @@ -583,7 +1430,7 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv) static int i915_drm_suspend(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); pci_power_t opregion_target_state; int error; @@ -650,7 +1497,7 @@ out: static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) { - struct drm_i915_private *dev_priv = drm_dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(drm_dev); bool fw_csr; int ret; @@ -712,7 +1559,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) { int error; - if (!dev || !dev->dev_private) { + if (!dev) { DRM_ERROR("dev: %p\n", dev); DRM_ERROR("DRM not initialized, aborting suspend.\n"); return -ENODEV; @@ -734,7 +1581,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) static int i915_drm_resume(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; disable_rpm_wakeref_asserts(dev_priv); @@ -768,7 +1615,7 @@ static int i915_drm_resume(struct drm_device *dev) mutex_lock(&dev->struct_mutex); if (i915_gem_init_hw(dev)) { DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); - atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter); + atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter); } mutex_unlock(&dev->struct_mutex); @@ -814,7 +1661,7 @@ static int i915_drm_resume(struct drm_device *dev) static int i915_drm_resume_early(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; /* @@ -926,7 +1773,7 @@ int i915_resume_switcheroo(struct drm_device *dev) */ int i915_reset(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct i915_gpu_error *error = &dev_priv->gpu_error; unsigned reset_counter; int ret; @@ -945,24 +1792,11 @@ int i915_reset(struct drm_i915_private *dev_priv) goto error; } + pr_notice("drm/i915: Resetting chip after gpu hang\n"); + i915_gem_reset(dev); ret = intel_gpu_reset(dev_priv, ALL_ENGINES); - - /* Also reset the gpu hangman. 
*/ - if (error->stop_rings != 0) { - DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); - error->stop_rings = 0; - if (ret == -ENODEV) { - DRM_INFO("Reset not implemented, but ignoring " - "error for simulated gpu hangs\n"); - ret = 0; - } - } - - if (i915_stop_ring_allow_warn(dev_priv)) - pr_notice("drm/i915: Resetting chip after gpu hang\n"); - if (ret) { if (ret != -ENODEV) DRM_ERROR("Failed to reset chip: %i\n", ret); @@ -1012,45 +1846,12 @@ error: return ret; } -static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct intel_device_info *intel_info = - (struct intel_device_info *) ent->driver_data; - - if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) { - DRM_INFO("This hardware requires preliminary hardware support.\n" - "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n"); - return -ENODEV; - } - - /* Only bind to function 0 of the device. Early generations - * used function 1 as a placeholder for multi-head. This causes - * us confusion instead, especially on the systems where both - * functions have the same PCI-ID! - */ - if (PCI_FUNC(pdev->devfn)) - return -ENODEV; - - if (vga_switcheroo_client_probe_defer(pdev)) - return -EPROBE_DEFER; - - return drm_get_pci_dev(pdev, ent, &driver); -} - -static void -i915_pci_remove(struct pci_dev *pdev) -{ - struct drm_device *dev = pci_get_drvdata(pdev); - - drm_put_dev(dev); -} - static int i915_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - if (!drm_dev || !drm_dev->dev_private) { + if (!drm_dev) { dev_err(dev, "DRM not initialized, aborting suspend.\n"); return -ENODEV; } @@ -1063,7 +1864,7 @@ static int i915_pm_suspend(struct device *dev) static int i915_pm_suspend_late(struct device *dev) { - struct drm_device *drm_dev = dev_to_i915(dev)->dev; + struct drm_device *drm_dev = &dev_to_i915(dev)->drm; /* * We have a suspend ordering issue with the snd-hda driver also @@ -1082,7 +1883,7 @@ static int i915_pm_suspend_late(struct device *dev) static int i915_pm_poweroff_late(struct device *dev) { - struct drm_device *drm_dev = dev_to_i915(dev)->dev; + struct drm_device *drm_dev = &dev_to_i915(dev)->drm; if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; @@ -1092,7 +1893,7 @@ static int i915_pm_poweroff_late(struct device *dev) static int i915_pm_resume_early(struct device *dev) { - struct drm_device *drm_dev = dev_to_i915(dev)->dev; + struct drm_device *drm_dev = &dev_to_i915(dev)->drm; if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; @@ -1102,7 +1903,7 @@ static int i915_pm_resume_early(struct device *dev) static int i915_pm_resume(struct device *dev) { - struct drm_device *drm_dev = dev_to_i915(dev)->dev; + struct drm_device *drm_dev = &dev_to_i915(dev)->drm; if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; @@ -1352,8 +2153,6 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) u32 val; int err; -#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT) - val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); val &= ~VLV_GFX_CLK_FORCE_ON_BIT; if (force_on) @@ -1363,13 +2162,16 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) if (!force_on) return 0; - err = wait_for(COND, 20); + err = intel_wait_for_register(dev_priv, + VLV_GTLC_SURVIVABILITY_REG, + VLV_GFX_CLK_STATUS_BIT, + VLV_GFX_CLK_STATUS_BIT, + 20); if (err) DRM_ERROR("timeout waiting for GFX clock 
force-on (%08x)\n", I915_READ(VLV_GTLC_SURVIVABILITY_REG)); return err; -#undef COND } static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) @@ -1384,13 +2186,15 @@ static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) I915_WRITE(VLV_GTLC_WAKE_CTRL, val); POSTING_READ(VLV_GTLC_WAKE_CTRL); -#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \ - allow) - err = wait_for(COND, 1); + err = intel_wait_for_register(dev_priv, + VLV_GTLC_PW_STATUS, + VLV_GTLC_ALLOWWAKEACK, + allow, + 1); if (err) DRM_ERROR("timeout disabling GT waking\n"); + return err; -#undef COND } static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, @@ -1402,8 +2206,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; val = wait_for_on ? mask : 0; -#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val) - if (COND) + if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val) return 0; DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n", @@ -1414,13 +2217,14 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, * RC6 transitioning can be delayed up to 2 msec (see * valleyview_enable_rps), use 3 msec for safety. */ - err = wait_for(COND, 3); + err = intel_wait_for_register(dev_priv, + VLV_GTLC_PW_STATUS, mask, val, + 3); if (err) DRM_ERROR("timeout waiting for GT wells to go %s\n", onoff(wait_for_on)); return err; -#undef COND } static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) @@ -1477,7 +2281,7 @@ err1: static int vlv_resume_prepare(struct drm_i915_private *dev_priv, bool rpm_resume) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; int err; int ret; @@ -1513,7 +2317,7 @@ static int intel_runtime_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct drm_device *dev = pci_get_drvdata(pdev); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6()))) @@ -1551,11 +2355,8 @@ static int intel_runtime_suspend(struct device *device) i915_gem_release_all_mmaps(dev_priv); mutex_unlock(&dev->struct_mutex); - cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); - intel_guc_suspend(dev); - intel_suspend_gt_powersave(dev_priv); intel_runtime_pm_disable_interrupts(dev_priv); ret = 0; @@ -1620,7 +2421,7 @@ static int intel_runtime_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct drm_device *dev = pci_get_drvdata(pdev); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) @@ -1670,8 +2471,6 @@ static int intel_runtime_resume(struct device *device) if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) intel_hpd_init(dev_priv); - intel_enable_gt_powersave(dev_priv); - enable_rpm_wakeref_asserts(dev_priv); if (ret) @@ -1682,7 +2481,7 @@ static int intel_runtime_resume(struct device *device) return ret; } -static const struct dev_pm_ops i915_pm_ops = { +const struct dev_pm_ops i915_pm_ops = { /* * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, * PMSG_RESUME] @@ -1741,6 +2540,68 @@ static const struct file_operations i915_driver_fops = { .llseek = noop_llseek, }; +static int +i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + return -ENODEV; +} + 
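For reference, the intel_wait_for_register() conversions above all express the same contract that the removed COND macros open-coded: poll a register until (read & mask) == value, or give up with -ETIMEDOUT after the given number of milliseconds. A minimal sketch of that contract, assuming naive jiffies-based polling; the hypothetical helper name and the backoff interval are illustrative, and the real helper in intel_uncore.c additionally handles forcewake and uses the driver's wait_for() machinery:

static int wait_for_register_sketch(struct drm_i915_private *dev_priv,
				    i915_reg_t reg, u32 mask, u32 value,
				    unsigned long timeout_ms)
{
	/* Illustrative only; not the driver's actual implementation. */
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		/* Done once the masked bits match the expected value. */
		if ((I915_READ(reg) & mask) == value)
			return 0;
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		usleep_range(10, 50);	/* back off between register reads */
	}
}

vlv_force_gfx_clock() above, for example, now states its 20 ms wait for VLV_GFX_CLK_STATUS_BIT as a single such call instead of defining and undefining a local COND macro around wait_for().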
+static const struct drm_ioctl_desc i915_ioctls[] = { + DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), + DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, 
i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), +}; + static struct drm_driver driver = { /* Don't use MTRRs here; the Xserver or userspace app should * deal with them for Intel hardware. @@ -1748,18 +2609,12 @@ static struct drm_driver driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET, - .load = i915_driver_load, - .unload = i915_driver_unload, .open = i915_driver_open, .lastclose = i915_driver_lastclose, .preclose = i915_driver_preclose, .postclose = i915_driver_postclose, .set_busid = drm_pci_set_busid, -#if defined(CONFIG_DEBUG_FS) - .debugfs_init = i915_debugfs_init, - .debugfs_cleanup = i915_debugfs_cleanup, -#endif .gem_free_object = i915_gem_free_object, .gem_vm_ops = &i915_gem_vm_ops, @@ -1772,6 +2627,7 @@ static struct drm_driver driver = { .dumb_map_offset = i915_gem_mmap_gtt, .dumb_destroy = drm_gem_dumb_destroy, .ioctls = i915_ioctls, + .num_ioctls = ARRAY_SIZE(i915_ioctls), .fops = &i915_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -1780,56 +2636,3 @@ static struct drm_driver driver = { .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, }; - -static struct pci_driver i915_pci_driver = { - .name = DRIVER_NAME, - .id_table = pciidlist, - .probe = i915_pci_probe, - .remove = i915_pci_remove, - .driver.pm = &i915_pm_ops, -}; - -static int __init i915_init(void) -{ - driver.num_ioctls = i915_max_ioctl; - - /* - * Enable KMS by default, unless explicitly overriden by - * either the i915.modeset prarameter or by the - * vga_text_mode_force boot option. - */ - - if (i915.modeset == 0) - driver.driver_features &= ~DRIVER_MODESET; - - if (vgacon_text_force() && i915.modeset == -1) - driver.driver_features &= ~DRIVER_MODESET; - - if (!(driver.driver_features & DRIVER_MODESET)) { - /* Silently fail loading to not upset userspace. */ - DRM_DEBUG_DRIVER("KMS and UMS disabled.\n"); - return 0; - } - - if (i915.nuclear_pageflip) - driver.driver_features |= DRIVER_ATOMIC; - - return drm_pci_init(&driver, &i915_pci_driver); -} - -static void __exit i915_exit(void) -{ - if (!(driver.driver_features & DRIVER_MODESET)) - return; /* Never loaded a driver. 
*/ - - drm_pci_exit(&driver, &i915_pci_driver); -} - -module_init(i915_init); -module_exit(i915_exit); - -MODULE_AUTHOR("Tungsten Graphics, Inc."); -MODULE_AUTHOR("Intel Corporation"); - -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 24a86c64d22e..03e1bfaa5a41 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -69,7 +69,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20160620" +#define DRIVER_DATE "20160711" #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@ -320,15 +320,16 @@ struct i915_hotplug { for_each_if ((__ports_mask) & (1 << (__port))) #define for_each_crtc(dev, crtc) \ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head) #define for_each_intel_plane(dev, intel_plane) \ list_for_each_entry(intel_plane, \ - &dev->mode_config.plane_list, \ + &(dev)->mode_config.plane_list, \ base.head) #define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \ - list_for_each_entry(intel_plane, &dev->mode_config.plane_list, \ + list_for_each_entry(intel_plane, \ + &(dev)->mode_config.plane_list, \ base.head) \ for_each_if ((plane_mask) & \ (1 << drm_plane_index(&intel_plane->base))) @@ -339,11 +340,15 @@ struct i915_hotplug { base.head) \ for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe) -#define for_each_intel_crtc(dev, intel_crtc) \ - list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) +#define for_each_intel_crtc(dev, intel_crtc) \ + list_for_each_entry(intel_crtc, \ + &(dev)->mode_config.crtc_list, \ + base.head) -#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \ - list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) \ +#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \ + list_for_each_entry(intel_crtc, \ + &(dev)->mode_config.crtc_list, \ + base.head) \ for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base))) #define for_each_intel_encoder(dev, intel_encoder) \ @@ -353,7 +358,7 @@ struct i915_hotplug { #define for_each_intel_connector(dev, intel_connector) \ list_for_each_entry(intel_connector, \ - &dev->mode_config.connector_list, \ + &(dev)->mode_config.connector_list, \ base.head) #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ @@ -475,6 +480,7 @@ struct drm_i915_error_state { struct timeval time; char error_msg[128]; + bool simulated; int iommu; u32 reset_count; u32 suspend_count; @@ -506,6 +512,7 @@ struct drm_i915_error_state { bool valid; /* Software tracked state */ bool waiting; + int num_waiters; int hangcheck_score; enum intel_ring_hangcheck_action hangcheck_action; int num_requests; @@ -551,6 +558,12 @@ struct drm_i915_error_state { u32 tail; } *requests; + struct drm_i915_error_waiter { + char comm[TASK_COMM_LEN]; + pid_t pid; + u32 seqno; + } *waiters; + struct { u32 gfx_mode; union { @@ -868,9 +881,12 @@ struct i915_gem_context { /* Unique identifier for this context, used by the hw for tracking */ unsigned long flags; +#define CONTEXT_NO_ZEROMAP BIT(0) +#define CONTEXT_NO_ERROR_CAPTURE BIT(1) unsigned hw_id; u32 user_handle; -#define CONTEXT_NO_ZEROMAP (1<<0) + + u32 ggtt_alignment; struct intel_context { struct drm_i915_gem_object *state; @@ -1011,6 +1027,7 @@ enum intel_pch { PCH_CPT, /* Cougarpoint PCH */ PCH_LPT, /* Lynxpoint PCH */ PCH_SPT, /* Sunrisepoint PCH */ + PCH_KBP, /* 
Kabypoint PCH */ PCH_NOP, }; @@ -1305,37 +1322,11 @@ struct i915_gem_mm { struct list_head fence_list; /** - * We leave the user IRQ off as much as possible, - * but this means that requests will finish and never - * be retired once the system goes idle. Set a timer to - * fire periodically while the ring is running. When it - * fires, go retire requests. - */ - struct delayed_work retire_work; - - /** - * When we detect an idle GPU, we want to turn on - * powersaving features. So once we see that there - * are no more requests outstanding and no more - * arrive within a small period of time, we fire - * off the idle_work. - */ - struct delayed_work idle_work; - - /** * Are we in a non-interruptible section of code like * modesetting? */ bool interruptible; - /** - * Is the GPU currently considered idle, or busy executing userspace - * requests? Whilst idle, we attempt to power down the hardware and - * display clocks. In order to reduce the effect on performance, there - * is a slight delay before we do so. - */ - bool busy; - /* the indicator for dispatch video commands on two BSD rings */ unsigned int bsd_ring_dispatch_index; @@ -1372,7 +1363,6 @@ struct i915_gpu_error { /* Hang gpu twice in this window and your context gets banned */ #define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000) - struct workqueue_struct *hangcheck_wq; struct delayed_work hangcheck_work; /* For reset and error_state handling. */ @@ -1409,20 +1399,19 @@ struct i915_gpu_error { #define I915_WEDGED (1 << 31) /** + * Waitqueue to signal when a hang is detected. Used for waiters + * to release the struct_mutex for the reset to proceed. + */ + wait_queue_head_t wait_queue; + + /** * Waitqueue to signal when the reset has completed. Used by clients * that wait for dev_priv->mm.wedged to settle. */ wait_queue_head_t reset_queue; - /* Userspace knobs for gpu hang simulation; - * combines both a ring mask, and extra flags - */ - u32 stop_rings; -#define I915_STOP_RING_ALLOW_BAN (1 << 31) -#define I915_STOP_RING_ALLOW_WARN (1 << 30) - /* For missed irq/seqno simulation. */ - unsigned int test_irq_rings; + unsigned long test_irq_rings; }; enum modeset_restore { @@ -1733,7 +1722,8 @@ struct intel_wm_config { }; struct drm_i915_private { - struct drm_device *dev; + struct drm_device drm; + struct kmem_cache *objects; struct kmem_cache *vmas; struct kmem_cache *requests; @@ -2029,6 +2019,34 @@ struct drm_i915_private { int (*init_engines)(struct drm_device *dev); void (*cleanup_engine)(struct intel_engine_cs *engine); void (*stop_engine)(struct intel_engine_cs *engine); + + /** + * Is the GPU currently considered idle, or busy executing + * userspace requests? Whilst idle, we allow runtime power + * management to power down the hardware and display clocks. + * In order to reduce the effect on performance, there + * is a slight delay before we do so. + */ + unsigned int active_engines; + bool awake; + + /** + * We leave the user IRQ off as much as possible, + * but this means that requests will finish and never + * be retired once the system goes idle. Set a timer to + * fire periodically while the ring is running. When it + * fires, go retire requests. + */ + struct delayed_work retire_work; + + /** + * When we detect an idle GPU, we want to turn on + * powersaving features. So once we see that there + * are no more requests outstanding and no more + * arrive within a small period of time, we fire + * off the idle_work. 
+ */ + struct delayed_work idle_work; } gt; /* perform PHY state sanity checks? */ @@ -2044,7 +2062,7 @@ struct drm_i915_private { static inline struct drm_i915_private *to_i915(const struct drm_device *dev) { - return dev->dev_private; + return container_of(dev, struct drm_i915_private, drm); } static inline struct drm_i915_private *dev_to_i915(struct device *dev) @@ -2215,6 +2233,7 @@ struct drm_i915_gem_object { unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; + unsigned int has_wc_mmap; unsigned int pin_display; struct sg_table *pages; @@ -2267,6 +2286,12 @@ struct drm_i915_gem_object { }; #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) +static inline bool +i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) +{ + return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE; +} + /* * Optimised SGL iterator for GEM objects */ @@ -2357,7 +2382,7 @@ struct drm_i915_gem_request { /** On Which ring this request was generated */ struct drm_i915_private *i915; struct intel_engine_cs *engine; - unsigned reset_counter; + struct intel_signal_node signaling; /** GEM sequence number associated with the previous request, * when the HWS breadcrumb is equal to this the GPU is processing @@ -2613,7 +2638,7 @@ struct drm_i915_cmd_table { #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) #define REVID_FOREVER 0xff -#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) +#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision) #define GEN_FOREVER (0) /* @@ -2743,29 +2768,34 @@ struct drm_i915_cmd_table { * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular * chips, etc.). */ -#define IS_GEN2(dev) (INTEL_INFO(dev)->gen_mask & BIT(1)) -#define IS_GEN3(dev) (INTEL_INFO(dev)->gen_mask & BIT(2)) -#define IS_GEN4(dev) (INTEL_INFO(dev)->gen_mask & BIT(3)) -#define IS_GEN5(dev) (INTEL_INFO(dev)->gen_mask & BIT(4)) -#define IS_GEN6(dev) (INTEL_INFO(dev)->gen_mask & BIT(5)) -#define IS_GEN7(dev) (INTEL_INFO(dev)->gen_mask & BIT(6)) -#define IS_GEN8(dev) (INTEL_INFO(dev)->gen_mask & BIT(7)) -#define IS_GEN9(dev) (INTEL_INFO(dev)->gen_mask & BIT(8)) - -#define RENDER_RING (1<<RCS) -#define BSD_RING (1<<VCS) -#define BLT_RING (1<<BCS) -#define VEBOX_RING (1<<VECS) -#define BSD2_RING (1<<VCS2) -#define ALL_ENGINES (~0) - -#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING) -#define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING) -#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING) -#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) +#define IS_GEN2(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(1))) +#define IS_GEN3(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(2))) +#define IS_GEN4(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(3))) +#define IS_GEN5(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(4))) +#define IS_GEN6(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(5))) +#define IS_GEN7(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(6))) +#define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7))) +#define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8))) + +#define ENGINE_MASK(id) BIT(id) +#define RENDER_RING ENGINE_MASK(RCS) +#define BSD_RING ENGINE_MASK(VCS) +#define BLT_RING ENGINE_MASK(BCS) +#define VEBOX_RING ENGINE_MASK(VECS) +#define BSD2_RING ENGINE_MASK(VCS2) +#define ALL_ENGINES (~0) + +#define HAS_ENGINE(dev_priv, id) \ + (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id))) + +#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS) +#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2) +#define HAS_BLT(dev_priv) 
HAS_ENGINE(dev_priv, BCS) +#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS) + #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) #define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop) -#define HAS_EDRAM(dev) (__I915__(dev)->edram_cap & EDRAM_ENABLED) +#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED)) #define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ HAS_EDRAM(dev)) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) @@ -2783,9 +2813,10 @@ struct drm_i915_cmd_table { #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) /* WaRsDisableCoarsePowerGating:skl,bxt */ -#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \ - IS_SKL_GT3(dev) || \ - IS_SKL_GT4(dev)) +#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ + (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \ + IS_SKL_GT3(dev_priv) || \ + IS_SKL_GT4(dev_priv)) /* * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts @@ -2832,7 +2863,7 @@ struct drm_i915_cmd_table { * command submission once loaded. But these are logically independent * properties, so we have separate macros to test them. */ -#define HAS_GUC(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) +#define HAS_GUC(dev) (IS_GEN9(dev)) #define HAS_GUC_UCODE(dev) (HAS_GUC(dev)) #define HAS_GUC_SCHED(dev) (HAS_GUC(dev)) @@ -2853,11 +2884,13 @@ struct drm_i915_cmd_table { #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 +#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) +#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP) #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) #define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) @@ -2879,8 +2912,14 @@ struct drm_i915_cmd_table { #include "i915_trace.h" -extern const struct drm_ioctl_desc i915_ioctls[]; -extern int i915_max_ioctl; +static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) +{ +#ifdef CONFIG_INTEL_IOMMU + if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped) + return true; +#endif + return false; +} extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); extern int i915_resume_switcheroo(struct drm_device *dev); @@ -2888,7 +2927,7 @@ extern int i915_resume_switcheroo(struct drm_device *dev); int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, int enable_ppgtt); -/* i915_dma.c */ +/* i915_drv.c */ void __printf(3, 4) __i915_printk(struct drm_i915_private *dev_priv, const char *level, const char *fmt, ...); @@ -2896,14 +2935,6 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, #define i915_report_error(dev_priv, fmt, ...) 
\ __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) -extern int i915_driver_load(struct drm_device *, unsigned long flags); -extern int i915_driver_unload(struct drm_device *); -extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); -extern void i915_driver_lastclose(struct drm_device * dev); -extern void i915_driver_preclose(struct drm_device *dev, - struct drm_file *file); -extern void i915_driver_postclose(struct drm_device *dev, - struct drm_file *file); #ifdef CONFIG_COMPAT extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); @@ -2928,7 +2959,23 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); /* i915_irq.c */ -void i915_queue_hangcheck(struct drm_i915_private *dev_priv); +static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) +{ + unsigned long delay; + + if (unlikely(!i915.enable_hangcheck)) + return; + + /* Don't continually defer the hangcheck so that it is always run at + * least once after work has been scheduled on any ring. Otherwise, + * we will ignore a hung ring if a second ring is kept busy. + */ + + delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES); + queue_delayed_work(system_long_wq, + &dev_priv->gpu_error.hangcheck_work, delay); +} + __printf(3, 4) void i915_handle_error(struct drm_i915_private *dev_priv, u32 engine_mask, @@ -2963,6 +3010,17 @@ u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); +int intel_wait_for_register(struct drm_i915_private *dev_priv, + i915_reg_t reg, + const u32 mask, + const u32 value, + const unsigned long timeout_ms); +int intel_wait_for_register_fw(struct drm_i915_private *dev_priv, + i915_reg_t reg, + const u32 mask, + const u32 value, + const unsigned long timeout_ms); + static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) { return dev_priv->gvt.initialized; @@ -3027,7 +3085,6 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) ibx_display_interrupt_update(dev_priv, bits, 0); } - /* i915_gem.c */ int i915_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -3244,31 +3301,34 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) return (int32_t)(seq1 - seq2) >= 0; } -static inline bool i915_gem_request_started(struct drm_i915_gem_request *req, - bool lazy_coherency) +static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req) { - if (!lazy_coherency && req->engine->irq_seqno_barrier) - req->engine->irq_seqno_barrier(req->engine); - return i915_seqno_passed(req->engine->get_seqno(req->engine), + return i915_seqno_passed(intel_engine_get_seqno(req->engine), req->previous_seqno); } -static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, - bool lazy_coherency) +static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req) { - if (!lazy_coherency && req->engine->irq_seqno_barrier) - req->engine->irq_seqno_barrier(req->engine); - return i915_seqno_passed(req->engine->get_seqno(req->engine), + return i915_seqno_passed(intel_engine_get_seqno(req->engine), req->seqno); } +bool __i915_spin_request(const struct drm_i915_gem_request *request, + int state, unsigned long timeout_us); +static inline bool i915_spin_request(const struct drm_i915_gem_request *request, + int state, unsigned long timeout_us) +{ + return (i915_gem_request_started(request) && + 
__i915_spin_request(request, state, timeout_us)); +} + int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno); int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); struct drm_i915_gem_request * i915_gem_find_active_request(struct intel_engine_cs *engine); -bool i915_gem_retire_requests(struct drm_i915_private *dev_priv); +void i915_gem_retire_requests(struct drm_i915_private *dev_priv); void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); static inline u32 i915_reset_counter(struct i915_gpu_error *error) @@ -3311,18 +3371,6 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error) return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2; } -static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv) -{ - return dev_priv->gpu_error.stop_rings == 0 || - dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN; -} - -static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv) -{ - return dev_priv->gpu_error.stop_rings == 0 || - dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN; -} - void i915_gem_reset(struct drm_device *dev); bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); int __must_check i915_gem_init(struct drm_device *dev); @@ -3330,7 +3378,7 @@ int i915_gem_init_engines(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); void i915_gem_init_swizzling(struct drm_device *dev); void i915_gem_cleanup_engines(struct drm_device *dev); -int __must_check i915_gpu_idle(struct drm_device *dev); +int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv); int __must_check i915_gem_suspend(struct drm_device *dev); void __i915_add_request(struct drm_i915_gem_request *req, struct drm_i915_gem_object *batch_obj, @@ -3484,7 +3532,7 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) { struct i915_gem_context *ctx; - lockdep_assert_held(&file_priv->dev_priv->dev->struct_mutex); + lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex); ctx = idr_find(&file_priv->context_idr, id); if (!ctx) @@ -3500,7 +3548,7 @@ static inline void i915_gem_context_reference(struct i915_gem_context *ctx) static inline void i915_gem_context_unreference(struct i915_gem_context *ctx) { - lockdep_assert_held(&ctx->i915->dev->struct_mutex); + lockdep_assert_held(&ctx->i915->drm.struct_mutex); kref_put(&ctx->ref, i915_gem_context_free); } @@ -3576,7 +3624,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv); /* i915_gem_tiling.c */ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && obj->tiling_mode != I915_TILING_NONE; @@ -3590,12 +3638,14 @@ int i915_verify_lists(struct drm_device *dev); #endif /* i915_debugfs.c */ -int i915_debugfs_init(struct drm_minor *minor); -void i915_debugfs_cleanup(struct drm_minor *minor); #ifdef CONFIG_DEBUG_FS +int i915_debugfs_register(struct drm_i915_private *dev_priv); +void i915_debugfs_unregister(struct drm_i915_private *dev_priv); int i915_debugfs_connector_add(struct drm_connector *connector); void intel_display_crc_init(struct drm_device *dev); #else +static inline int i915_debugfs_register(struct drm_i915_private *) {return 0;} +static inline void i915_debugfs_unregister(struct drm_i915_private *) {} static inline int 
i915_debugfs_connector_add(struct drm_connector *connector) { return 0; } static inline void intel_display_crc_init(struct drm_device *dev) {} @@ -3686,8 +3736,8 @@ extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv); #else static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; } -static inline void intel_opregion_init(struct drm_i915_private *dev) { } -static inline void intel_opregion_fini(struct drm_i915_private *dev) { } +static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { } +static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { } static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) { } @@ -3716,11 +3766,22 @@ static inline void intel_register_dsm_handler(void) { return; } static inline void intel_unregister_dsm_handler(void) { return; } #endif /* CONFIG_ACPI */ +/* intel_device_info.c */ +static inline struct intel_device_info * +mkwrite_device_info(struct drm_i915_private *dev_priv) +{ + return (struct intel_device_info *)&dev_priv->info; +} + +void intel_device_info_runtime_init(struct drm_i915_private *dev_priv); +void intel_device_info_dump(struct drm_i915_private *dev_priv); + /* modesetting */ extern void intel_modeset_init_hw(struct drm_device *dev); extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_gem_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); +extern int intel_connector_register(struct drm_connector *); extern void intel_connector_unregister(struct drm_connector *); extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); extern void intel_display_resume(struct drm_device *dev); @@ -3731,7 +3792,6 @@ extern void intel_init_pch_refclk(struct drm_device *dev); extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val); extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); -extern void intel_detect_pch(struct drm_device *dev); extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv); int i915_reg_read_ioctl(struct drm_device *dev, void *data, @@ -3864,6 +3924,7 @@ __raw_write(64, q) */ #define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__)) #define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__)) +#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__)) #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__) /* "Broadcast RGB" property */ @@ -3927,12 +3988,80 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) schedule_timeout_uninterruptible(remaining_jiffies); } } - -static inline void i915_trace_irq_get(struct intel_engine_cs *engine, - struct drm_i915_gem_request *req) +static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req) { - if (engine->trace_irq_req == NULL && engine->irq_get(engine)) - i915_gem_request_assign(&engine->trace_irq_req, req); + struct intel_engine_cs *engine = req->engine; + + /* Before we do the heavier coherent read of the seqno, + * check the value (hopefully) in the CPU cacheline. + */ + if (i915_gem_request_completed(req)) + return true; + + /* Ensure our read of the seqno is coherent so that we + * do not "miss an interrupt" (i.e. 
if this is the last + * request and the seqno write from the GPU is not visible + * by the time the interrupt fires, we will see that the + * request is incomplete and go back to sleep awaiting + * another interrupt that will never come.) + * + * Strictly, we only need to do this once after an interrupt, + * but it is easier and safer to do it every time the waiter + * is woken. + */ + if (engine->irq_seqno_barrier && + READ_ONCE(engine->breadcrumbs.irq_seqno_bh) == current && + cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) { + struct task_struct *tsk; + + /* The ordering of irq_posted versus applying the barrier + * is crucial. The clearing of the current irq_posted must + * be visible before we perform the barrier operation, + * such that if a subsequent interrupt arrives, irq_posted + * is reasserted and our task rewoken (which causes us to + * do another __i915_request_irq_complete() immediately + * and reapply the barrier). Conversely, if the clear + * occurs after the barrier, then an interrupt that arrived + * whilst we waited on the barrier would not trigger a + * barrier on the next pass, and the read may not see the + * seqno update. + */ + engine->irq_seqno_barrier(engine); + + /* If we consume the irq, but we are no longer the bottom-half, + * the real bottom-half may not have serialised their own + * seqno check with the irq-barrier (i.e. may have inspected + * the seqno before we believe it coherent since they see + * irq_posted == false but we are still running). + */ + rcu_read_lock(); + tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh); + if (tsk && tsk != current) + /* Note that if the bottom-half is changed as we + * are sending the wake-up, the new bottom-half will + * be woken by whomever made the change. We only have + * to worry about when we steal the irq-posted for + * ourself. + */ + wake_up_process(tsk); + rcu_read_unlock(); + + if (i915_gem_request_completed(req)) + return true; + } + + /* We need to check whether any gpu reset happened in between + * the request being submitted and now. If a reset has occurred, + * the seqno will have been advance past ours and our request + * is complete. If we are in the process of handling a reset, + * the request is effectively complete as the rendering will + * be discarded, but we need to return in order to drop the + * struct_mutex. 
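The flag-then-barrier ordering argued in the comment above is easier to see stripped of driver detail. Below is a userspace model of the same handshake using C11 atomics; it is an illustrative sketch, not driver code, and on_interrupt(), waiter_check() and the barrier_and_read_seqno() callback are invented stand-ins for the interrupt handler, __i915_request_irq_complete() and the coherent seqno read.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int irq_posted;

/* Interrupt side: the seqno write is already visible by the time the
 * flag is raised (release ordering); the wake-up of the waiting task
 * is elided here.
 */
static void on_interrupt(void)
{
	atomic_store_explicit(&irq_posted, 1, memory_order_release);
}

/* Waiter side: consume the flag *before* running the heavyweight
 * coherency barrier. If another interrupt lands after the exchange,
 * the flag is raised again and the waiter repeats this check, so the
 * barrier is reapplied; clearing the flag only after the barrier would
 * open exactly the missed-seqno window the comment above describes.
 */
static bool waiter_check(bool (*barrier_and_read_seqno)(void))
{
	if (atomic_exchange_explicit(&irq_posted, 0,
				     memory_order_acq_rel) == 0)
		return false;	/* no interrupt since the last check */

	return barrier_and_read_seqno();
}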
+ */ + if (i915_reset_in_progress(&req->i915->gpu_error)) + return true; + + return false; } #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 21d0dea57312..8f50919ba9b4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -128,7 +128,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error) int i915_mutex_lock_interruptible(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; ret = i915_gem_wait_for_error(&dev_priv->gpu_error); @@ -377,13 +377,13 @@ out: void *i915_gem_object_alloc(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL); } void i915_gem_object_free(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); kmem_cache_free(dev_priv->objects, obj); } @@ -508,7 +508,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, *needs_clflush = 0; - if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0)) + if (WARN_ON(!i915_gem_object_has_struct_page(obj))) return -EINVAL; if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { @@ -636,7 +636,7 @@ i915_gem_gtt_pread(struct drm_device *dev, struct drm_i915_gem_object *obj, uint64_t size, uint64_t data_offset, uint64_t data_ptr) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_mm_node node; char __user *user_data; @@ -760,7 +760,7 @@ i915_gem_shmem_pread(struct drm_device *dev, int needs_clflush = 0; struct sg_page_iter sg_iter; - if (!obj->base.filp) + if (!i915_gem_object_has_struct_page(obj)) return -ENODEV; user_data = u64_to_user_ptr(args->data_ptr); @@ -1250,7 +1250,7 @@ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_pwrite *args = data; struct drm_i915_gem_object *obj; int ret; @@ -1298,7 +1298,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * pread/pwrite currently are reading and writing from the CPU * perspective, requiring manual detiling by the client. */ - if (!obj->base.filp || cpu_write_needs_clflush(obj)) { + if (!i915_gem_object_has_struct_page(obj) || + cpu_write_needs_clflush(obj)) { ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file); /* Note that the gtt paths might fail with non-page-backed user * pointers (e.g. 
gtt mappings when moving data between @@ -1308,7 +1309,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (ret == -EFAULT) { if (obj->phys_handle) ret = i915_gem_phys_pwrite(obj, args, file); - else if (obj->base.filp) + else if (i915_gem_object_has_struct_page(obj)) ret = i915_gem_shmem_pwrite(dev, obj, args, file); else ret = -ENODEV; @@ -1342,17 +1343,6 @@ i915_gem_check_wedge(unsigned reset_counter, bool interruptible) return 0; } -static void fake_irq(unsigned long data) -{ - wake_up_process((struct task_struct *)data); -} - -static bool missed_irq(struct drm_i915_private *dev_priv, - struct intel_engine_cs *engine) -{ - return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings); -} - static unsigned long local_clock_us(unsigned *cpu) { unsigned long t; @@ -1385,9 +1375,9 @@ static bool busywait_stop(unsigned long timeout, unsigned cpu) return this_cpu != cpu; } -static int __i915_spin_request(struct drm_i915_gem_request *req, int state) +bool __i915_spin_request(const struct drm_i915_gem_request *req, + int state, unsigned long timeout_us) { - unsigned long timeout; unsigned cpu; /* When waiting for high frequency requests, e.g. during synchronous @@ -1400,31 +1390,21 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) * takes to sleep on a request, on the order of a microsecond. */ - if (req->engine->irq_refcount) - return -EBUSY; - - /* Only spin if we know the GPU is processing this request */ - if (!i915_gem_request_started(req, true)) - return -EAGAIN; - - timeout = local_clock_us(&cpu) + 5; - while (!need_resched()) { - if (i915_gem_request_completed(req, true)) - return 0; + timeout_us += local_clock_us(&cpu); + do { + if (i915_gem_request_completed(req)) + return true; if (signal_pending_state(state, current)) break; - if (busywait_stop(timeout, cpu)) + if (busywait_stop(timeout_us, cpu)) break; cpu_relax_lowlatency(); - } + } while (!need_resched()); - if (i915_gem_request_completed(req, false)) - return 0; - - return -EAGAIN; + return false; } /** @@ -1449,25 +1429,22 @@ int __i915_wait_request(struct drm_i915_gem_request *req, s64 *timeout, struct intel_rps_client *rps) { - struct intel_engine_cs *engine = i915_gem_request_get_engine(req); - struct drm_i915_private *dev_priv = req->i915; - const bool irq_test_in_progress = - ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine); int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; - DEFINE_WAIT(wait); - unsigned long timeout_expire; + DEFINE_WAIT(reset); + struct intel_wait wait; + unsigned long timeout_remain; s64 before = 0; /* Only to silence a compiler warning. */ - int ret; + int ret = 0; - WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled"); + might_sleep(); if (list_empty(&req->list)) return 0; - if (i915_gem_request_completed(req, true)) + if (i915_gem_request_completed(req)) return 0; - timeout_expire = 0; + timeout_remain = MAX_SCHEDULE_TIMEOUT; if (timeout) { if (WARN_ON(*timeout < 0)) return -EINVAL; @@ -1475,7 +1452,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, if (*timeout == 0) return -ETIME; - timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout); + timeout_remain = nsecs_to_jiffies_timeout(*timeout); /* * Record current time in case interrupted by signal, or wedged. 
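The rewritten __i915_spin_request() above now takes an explicit microsecond budget instead of guessing at a jiffie. Its shape can be modelled in ordinary userspace C; in this sketch poll_done() stands in for the coherent seqno check, now_us() for local_clock_us(), and sched_yield() loosely for cpu_relax_lowlatency(), since need_resched() has no userspace equivalent (the kernel version also aborts early if the task migrates to another CPU or a signal is pending).

#include <sched.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000 + (uint64_t)ts.tv_nsec / 1000;
}

/* Spin for at most timeout_us waiting for poll_done() to flip, then
 * report failure so the caller falls back to a real sleep; this mirrors
 * the budget-plus-early-out structure of __i915_spin_request().
 */
static bool spin_request(bool (*poll_done)(void), unsigned long timeout_us)
{
	uint64_t deadline = now_us() + timeout_us;

	do {
		if (poll_done())
			return true;	/* completed while spinning */
		sched_yield();		/* stand-in for cpu_relax() */
	} while (now_us() < deadline);

	return false;			/* budget spent: go to sleep */
}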
@@ -1483,75 +1460,85 @@ int __i915_wait_request(struct drm_i915_gem_request *req, before = ktime_get_raw_ns(); } - if (INTEL_INFO(dev_priv)->gen >= 6) - gen6_rps_boost(dev_priv, rps, req->emitted_jiffies); - trace_i915_gem_request_wait_begin(req); - /* Optimistic spin for the next jiffie before touching IRQs */ - ret = __i915_spin_request(req, state); - if (ret == 0) - goto out; - - if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) { - ret = -ENODEV; - goto out; - } + /* This client is about to stall waiting for the GPU. In many cases + * this is undesirable and limits the throughput of the system, as + * many clients cannot continue processing user input/output whilst + * blocked. RPS autotuning may take tens of milliseconds to respond + * to the GPU load and thus incurs additional latency for the client. + * We can circumvent that by promoting the GPU frequency to maximum + * before we wait. This makes the GPU throttle up much more quickly + * (good for benchmarks and user experience, e.g. window animations), + * but at a cost of spending more power processing the workload + * (bad for battery). Not all clients even want their results + * immediately and for them we should just let the GPU select its own + * frequency to maximise efficiency. To prevent a single client from + * forcing the clocks too high for the whole system, we only allow + * each client to waitboost once in a busy period. + */ + if (INTEL_INFO(req->i915)->gen >= 6) + gen6_rps_boost(req->i915, rps, req->emitted_jiffies); - for (;;) { - struct timer_list timer; + /* Optimistic spin for the next ~jiffie before touching IRQs */ + if (i915_spin_request(req, state, 5)) + goto complete; - prepare_to_wait(&engine->irq_queue, &wait, state); + set_current_state(state); + add_wait_queue(&req->i915->gpu_error.wait_queue, &reset); - /* We need to check whether any gpu reset happened in between - * the request being submitted and now. If a reset has occurred, - * the request is effectively complete (we either are in the - * process of or have discarded the rendering and completely - * reset the GPU. The results of the request are lost and we - * are free to continue on with the original operation. + intel_wait_init(&wait, req->seqno); + if (intel_engine_add_wait(req->engine, &wait)) + /* In order to check that we haven't missed the interrupt + * as we enabled it, we need to kick ourselves to do a + * coherent check on the seqno before we sleep. */ - if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) { - ret = 0; - break; - } - - if (i915_gem_request_completed(req, false)) { - ret = 0; - break; - } + goto wakeup; + for (;;) { if (signal_pending_state(state, current)) { ret = -ERESTARTSYS; break; } - if (timeout && time_after_eq(jiffies, timeout_expire)) { + /* Ensure that even if the GPU hangs, we get woken up. + * + * However, note that if no one is waiting, we never notice + * a gpu hang. Eventually, we will have to wait for a resource + * held by the GPU and so trigger a hangcheck. In the most + * pathological case, this will be upon memory starvation! + */ + i915_queue_hangcheck(req->i915); + + timeout_remain = io_schedule_timeout(timeout_remain); + if (timeout_remain == 0) { ret = -ETIME; break; } - timer.function = NULL; - if (timeout || missed_irq(dev_priv, engine)) { - unsigned long expire; + if (intel_wait_complete(&wait)) + break; - setup_timer_on_stack(&timer, fake_irq, (unsigned long)current); - expire = missed_irq(dev_priv, engine) ? 
jiffies + 1 : timeout_expire; - mod_timer(&timer, expire); - } + set_current_state(state); - io_schedule(); +wakeup: + /* Carefully check if the request is complete, giving time + * for the seqno to be visible following the interrupt. + * We also have to check in case we are kicked by the GPU + * reset in order to drop the struct_mutex. + */ + if (__i915_request_irq_complete(req)) + break; - if (timer.function) { - del_singleshot_timer_sync(&timer); - destroy_timer_on_stack(&timer); - } + /* Only spin if we know the GPU is processing this request */ + if (i915_spin_request(req, state, 2)) + break; } - if (!irq_test_in_progress) - engine->irq_put(engine); - - finish_wait(&engine->irq_queue, &wait); + remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset); -out: + intel_engine_remove_wait(req->engine, &wait); + __set_current_state(TASK_RUNNING); +complete: trace_i915_gem_request_wait_end(req); if (timeout) { @@ -1570,6 +1557,22 @@ out: *timeout = 0; } + if (rps && req->seqno == req->engine->last_submitted_seqno) { + /* The GPU is now idle and this client has stalled. + * Since no other client has submitted a request in the + * meantime, assume that this client is the only one + * supplying work to the GPU but is unable to keep that + * work supplied because it is waiting. Since the GPU is + * then never kept fully busy, RPS autoclocking will + * keep the clocks relatively low, causing further delays. + * Compensate by giving the synchronous client credit for + * a waitboost next time. + */ + spin_lock(&req->i915->rps.client_lock); + list_del_init(&rps->link); + spin_unlock(&req->i915->rps.client_lock); + } + return ret; } @@ -1648,7 +1651,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req) struct intel_engine_cs *engine = req->engine; struct drm_i915_gem_request *tmp; - lockdep_assert_held(&engine->i915->dev->struct_mutex); + lockdep_assert_held(&engine->i915->drm.struct_mutex); if (list_empty(&req->list)) return; @@ -1677,14 +1680,14 @@ i915_wait_request(struct drm_i915_gem_request *req) interruptible = dev_priv->mm.interruptible; - BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); + BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex)); ret = __i915_wait_request(req, interruptible, NULL, NULL); if (ret) return ret; /* If the GPU hung, we want to keep the requests to find the guilty. */ - if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error)) + if (!i915_reset_in_progress(&dev_priv->gpu_error)) __i915_gem_request_retire__upto(req); return 0; @@ -1745,7 +1748,7 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj, else if (obj->last_write_req == req) i915_gem_object_retire__write(obj); - if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error)) + if (!i915_reset_in_progress(&req->i915->gpu_error)) __i915_gem_request_retire__upto(req); } @@ -1758,7 +1761,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, bool readonly) { struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_request *requests[I915_NUM_ENGINES]; int ret, i, n = 0; @@ -1809,6 +1812,13 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file) return &fpriv->rps; } +static enum fb_op_origin +write_origin(struct drm_i915_gem_object *obj, unsigned domain) +{ + return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ? 
+ ORIGIN_GTT : ORIGIN_CPU; +} + /** * Called when user space prepares to use an object with the CPU, either * through the mmap ioctl's mapping or a GTT mapping. @@ -1865,9 +1875,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); if (write_domain != 0) - intel_fb_obj_invalidate(obj, - write_domain == I915_GEM_DOMAIN_GTT ? - ORIGIN_GTT : ORIGIN_CPU); + intel_fb_obj_invalidate(obj, write_origin(obj, write_domain)); unref: drm_gem_object_unreference(&obj->base); @@ -1974,6 +1982,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, else addr = -ENOMEM; up_write(&mm->mmap_sem); + + /* This may race, but that's ok, it only gets set */ + WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true); } drm_gem_object_unreference_unlocked(obj); if (IS_ERR((void *)addr)) @@ -2262,7 +2273,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); int ret; dev_priv->mm.shrinker_no_lock_stealing = true; @@ -2478,7 +2489,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); int page_count, i; struct address_space *mapping; struct sg_table *st; @@ -2609,7 +2620,7 @@ err_pages: int i915_gem_object_get_pages(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); const struct drm_i915_gem_object_ops *ops = obj->ops; int ret; @@ -2773,6 +2784,13 @@ i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno) } i915_gem_retire_requests(dev_priv); + /* If the seqno wraps around, we need to clear the breadcrumb rbtree */ + if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) { + while (intel_kick_waiters(dev_priv) || + intel_kick_signalers(dev_priv)) + yield(); + } + /* Finally reset hw state */ for_each_engine(engine, dev_priv) intel_ring_init_seqno(engine, seqno); @@ -2782,7 +2800,7 @@ i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno) int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; if (seqno == 0) @@ -2822,6 +2840,26 @@ i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno) return 0; } +static void i915_gem_mark_busy(const struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + + dev_priv->gt.active_engines |= intel_engine_flag(engine); + if (dev_priv->gt.awake) + return; + + intel_runtime_pm_get_noresume(dev_priv); + dev_priv->gt.awake = true; + + i915_update_gfx_val(dev_priv); + if (INTEL_GEN(dev_priv) >= 6) + gen6_rps_busy(dev_priv); + + queue_delayed_work(dev_priv->wq, + &dev_priv->gt.retire_work, + round_jiffies_up_relative(HZ)); +} + /* * NB: This function is not allowed to fail. 
Doing so would mean that the * request is not being tracked for completion but the work itself is @@ -2832,7 +2870,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches) { struct intel_engine_cs *engine; - struct drm_i915_private *dev_priv; struct intel_ringbuffer *ringbuf; u32 request_start; u32 reserved_tail; @@ -2842,7 +2879,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, return; engine = request->engine; - dev_priv = request->i915; ringbuf = request->ringbuf; /* @@ -2908,14 +2944,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, } /* Not allowed to fail! */ WARN(ret, "emit|add_request failed: %d!\n", ret); - - i915_queue_hangcheck(engine->i915); - - queue_delayed_work(dev_priv->wq, - &dev_priv->mm.retire_work, - round_jiffies_up_relative(HZ)); - intel_mark_busy(dev_priv); - /* Sanity check that the reserved size was large enough. */ ret = intel_ring_get_tail(ringbuf) - request_start; if (ret < 0) @@ -2924,46 +2952,34 @@ void __i915_add_request(struct drm_i915_gem_request *request, "Not enough space reserved (%d bytes) " "for adding the request (%d bytes)\n", reserved_tail, ret); + + i915_gem_mark_busy(engine); } -static bool i915_context_is_banned(struct drm_i915_private *dev_priv, - const struct i915_gem_context *ctx) +static bool i915_context_is_banned(const struct i915_gem_context *ctx) { unsigned long elapsed; - elapsed = get_seconds() - ctx->hang_stats.guilty_ts; - if (ctx->hang_stats.banned) return true; + elapsed = get_seconds() - ctx->hang_stats.guilty_ts; if (ctx->hang_stats.ban_period_seconds && elapsed <= ctx->hang_stats.ban_period_seconds) { - if (!i915_gem_context_is_default(ctx)) { - DRM_DEBUG("context hanging too fast, banning!\n"); - return true; - } else if (i915_stop_ring_allow_ban(dev_priv)) { - if (i915_stop_ring_allow_warn(dev_priv)) - DRM_ERROR("gpu hanging too fast, banning!\n"); - return true; - } + DRM_DEBUG("context hanging too fast, banning!\n"); + return true; } return false; } -static void i915_set_reset_status(struct drm_i915_private *dev_priv, - struct i915_gem_context *ctx, +static void i915_set_reset_status(struct i915_gem_context *ctx, const bool guilty) { - struct i915_ctx_hang_stats *hs; - - if (WARN_ON(!ctx)) - return; - - hs = &ctx->hang_stats; + struct i915_ctx_hang_stats *hs = &ctx->hang_stats; if (guilty) { - hs->banned = i915_context_is_banned(dev_priv, ctx); + hs->banned = i915_context_is_banned(ctx); hs->batch_active++; hs->guilty_ts = get_seconds(); } else { @@ -3012,7 +3028,6 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine, kref_init(&req->ref); req->i915 = dev_priv; req->engine = engine; - req->reset_counter = reset_counter; req->ctx = ctx; i915_gem_context_reference(req->ctx); @@ -3072,8 +3087,16 @@ i915_gem_find_active_request(struct intel_engine_cs *engine) { struct drm_i915_gem_request *request; + /* We are called by the error capture and reset at a random + * point in time. In particular, note that neither is crucially + * ordered with an interrupt. After a hang, the GPU is dead and we + * assume that no more writes can happen (we waited long enough for + * all writes that were in transaction to be flushed) - adding an + * extra delay for a recent interrupt is pointless. Hence, we do + * not need an engine->irq_seqno_barrier() before the seqno reads. 
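Collapsed as it is here, the new busy-tracking entry point is hard to read, so here is i915_gem_mark_busy() from the earlier hunk pieced back together (a readability reconstruction of the added lines; only the layout is mine). The first request submitted while the GPU is idle takes a prolonged wakeref and arms the retire timer; every later request merely marks its engine as active.

static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->gt.active_engines |= intel_engine_flag(engine);
	if (dev_priv->gt.awake)
		return;

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	i915_update_gfx_val(dev_priv);
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_busy(dev_priv);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}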
+ */ list_for_each_entry(request, &engine->request_list, list) { - if (i915_gem_request_completed(request, false)) + if (i915_gem_request_completed(request)) continue; return request; @@ -3082,27 +3105,23 @@ i915_gem_find_active_request(struct intel_engine_cs *engine) return NULL; } -static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv, - struct intel_engine_cs *engine) +static void i915_gem_reset_engine_status(struct intel_engine_cs *engine) { struct drm_i915_gem_request *request; bool ring_hung; request = i915_gem_find_active_request(engine); - if (request == NULL) return; ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; - i915_set_reset_status(dev_priv, request->ctx, ring_hung); - + i915_set_reset_status(request->ctx, ring_hung); list_for_each_entry_continue(request, &engine->request_list, list) - i915_set_reset_status(dev_priv, request->ctx, false); + i915_set_reset_status(request->ctx, false); } -static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv, - struct intel_engine_cs *engine) +static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine) { struct intel_ringbuffer *buffer; @@ -3163,7 +3182,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv, void i915_gem_reset(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; /* @@ -3172,10 +3191,10 @@ void i915_gem_reset(struct drm_device *dev) * their reference to the objects, the inspection must be done first. */ for_each_engine(engine, dev_priv) - i915_gem_reset_engine_status(dev_priv, engine); + i915_gem_reset_engine_status(engine); for_each_engine(engine, dev_priv) - i915_gem_reset_engine_cleanup(dev_priv, engine); + i915_gem_reset_engine_cleanup(engine); i915_gem_context_reset(dev); @@ -3205,7 +3224,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine) struct drm_i915_gem_request, list); - if (!i915_gem_request_completed(request, true)) + if (!i915_gem_request_completed(request)) break; i915_gem_request_retire(request); @@ -3228,55 +3247,52 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine) i915_gem_object_retire__read(obj, engine->id); } - if (unlikely(engine->trace_irq_req && - i915_gem_request_completed(engine->trace_irq_req, true))) { - engine->irq_put(engine); - i915_gem_request_assign(&engine->trace_irq_req, NULL); - } - WARN_ON(i915_verify_lists(engine->dev)); } -bool -i915_gem_retire_requests(struct drm_i915_private *dev_priv) +void i915_gem_retire_requests(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; - bool idle = true; + + lockdep_assert_held(&dev_priv->drm.struct_mutex); + + if (dev_priv->gt.active_engines == 0) + return; + + GEM_BUG_ON(!dev_priv->gt.awake); for_each_engine(engine, dev_priv) { i915_gem_retire_requests_ring(engine); - idle &= list_empty(&engine->request_list); - if (i915.enable_execlists) { - spin_lock_bh(&engine->execlist_lock); - idle &= list_empty(&engine->execlist_queue); - spin_unlock_bh(&engine->execlist_lock); - } + if (list_empty(&engine->request_list)) + dev_priv->gt.active_engines &= ~intel_engine_flag(engine); } - if (idle) - mod_delayed_work(dev_priv->wq, - &dev_priv->mm.idle_work, + if (dev_priv->gt.active_engines == 0) + queue_delayed_work(dev_priv->wq, + &dev_priv->gt.idle_work, msecs_to_jiffies(100)); - - return idle; } static void i915_gem_retire_work_handler(struct work_struct *work) { struct drm_i915_private *dev_priv = - 
container_of(work, typeof(*dev_priv), mm.retire_work.work); - struct drm_device *dev = dev_priv->dev; - bool idle; + container_of(work, typeof(*dev_priv), gt.retire_work.work); + struct drm_device *dev = &dev_priv->drm; /* Come back later if the device is busy... */ - idle = false; if (mutex_trylock(&dev->struct_mutex)) { - idle = i915_gem_retire_requests(dev_priv); + i915_gem_retire_requests(dev_priv); mutex_unlock(&dev->struct_mutex); } - if (!idle) - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, + + /* Keep the retire handler running until we are finally idle. + * We do not need to do this test under locking as in the worst-case + * we queue the retire worker once too often. + */ + if (READ_ONCE(dev_priv->gt.awake)) + queue_delayed_work(dev_priv->wq, + &dev_priv->gt.retire_work, round_jiffies_up_relative(HZ)); } @@ -3284,25 +3300,55 @@ static void i915_gem_idle_work_handler(struct work_struct *work) { struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), mm.idle_work.work); - struct drm_device *dev = dev_priv->dev; + container_of(work, typeof(*dev_priv), gt.idle_work.work); + struct drm_device *dev = &dev_priv->drm; struct intel_engine_cs *engine; + unsigned int stuck_engines; + bool rearm_hangcheck; + + if (!READ_ONCE(dev_priv->gt.awake)) + return; + + if (READ_ONCE(dev_priv->gt.active_engines)) + return; + + rearm_hangcheck = + cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); + + if (!mutex_trylock(&dev->struct_mutex)) { + /* Currently busy, come back later */ + mod_delayed_work(dev_priv->wq, + &dev_priv->gt.idle_work, + msecs_to_jiffies(50)); + goto out_rearm; + } + + if (dev_priv->gt.active_engines) + goto out_unlock; for_each_engine(engine, dev_priv) - if (!list_empty(&engine->request_list)) - return; + i915_gem_batch_pool_fini(&engine->batch_pool); - /* we probably should sync with hangcheck here, using cancel_work_sync. - * Also locking seems to be fubar here, engine->request_list is protected - * by dev->struct_mutex. 
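Pieced back together the same way, the reworked retire handler from the hunk above reads as follows (again a readability reconstruction, not new code): a best-effort retire under trylock, then unconditional re-arming for as long as the device is marked awake, accepting one spurious run in the worst case.

static void i915_gem_retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.retire_work.work);
	struct drm_device *dev = &dev_priv->drm;

	/* Come back later if the device is busy... */
	if (mutex_trylock(&dev->struct_mutex)) {
		i915_gem_retire_requests(dev_priv);
		mutex_unlock(&dev->struct_mutex);
	}

	/* Keep the retire handler running until we are finally idle.
	 * We do not need to do this test under locking as in the worst-case
	 * we queue the retire worker once too often.
	 */
	if (READ_ONCE(dev_priv->gt.awake))
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.retire_work,
				   round_jiffies_up_relative(HZ));
}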
*/ + GEM_BUG_ON(!dev_priv->gt.awake); + dev_priv->gt.awake = false; + rearm_hangcheck = false; - intel_mark_idle(dev_priv); + stuck_engines = intel_kick_waiters(dev_priv); + if (unlikely(stuck_engines)) { + DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n"); + dev_priv->gpu_error.missed_irq_rings |= stuck_engines; + } - if (mutex_trylock(&dev->struct_mutex)) { - for_each_engine(engine, dev_priv) - i915_gem_batch_pool_fini(&engine->batch_pool); + if (INTEL_GEN(dev_priv) >= 6) + gen6_rps_idle(dev_priv); + intel_runtime_pm_put(dev_priv); +out_unlock: + mutex_unlock(&dev->struct_mutex); - mutex_unlock(&dev->struct_mutex); +out_rearm: + if (rearm_hangcheck) { + GEM_BUG_ON(!dev_priv->gt.awake); + i915_queue_hangcheck(dev_priv); } } @@ -3327,7 +3373,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj) if (req == NULL) continue; - if (i915_gem_request_completed(req, true)) + if (i915_gem_request_completed(req)) i915_gem_object_retire__read(obj, i); } @@ -3435,7 +3481,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, if (to == from) return 0; - if (i915_gem_request_completed(from_req, true)) + if (i915_gem_request_completed(from_req)) return 0; if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) { @@ -3586,7 +3632,7 @@ static void __i915_vma_iounmap(struct i915_vma *vma) static int __i915_vma_unbind(struct i915_vma *vma, bool wait) { struct drm_i915_gem_object *obj = vma->obj; - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); int ret; if (list_empty(&vma->obj_link)) @@ -3662,26 +3708,16 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma) return __i915_vma_unbind(vma, false); } -int i915_gpu_idle(struct drm_device *dev) +int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; int ret; - /* Flush everything onto the inactive list. 
*/ - for_each_engine(engine, dev_priv) { - if (!i915.enable_execlists) { - struct drm_i915_gem_request *req; + lockdep_assert_held(&dev_priv->drm.struct_mutex); - req = i915_gem_request_alloc(engine, NULL); - if (IS_ERR(req)) - return PTR_ERR(req); - - ret = i915_switch_context(req); - i915_add_request_no_flush(req); - if (ret) - return ret; - } + for_each_engine(engine, dev_priv) { + if (engine->last_context == NULL) + continue; ret = intel_engine_idle(engine); if (ret) @@ -4214,7 +4250,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_caching *args = data; struct drm_i915_gem_object *obj; enum i915_cache_level level; @@ -4408,7 +4444,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) static int i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_file_private *file_priv = file->driver_priv; unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; struct drm_i915_gem_request *request, *target = NULL; @@ -4444,9 +4480,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) return 0; ret = __i915_wait_request(target, true, NULL, NULL); - if (ret == 0) - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); - i915_gem_request_unreference(target); return ret; @@ -4505,7 +4538,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj, uint32_t alignment, uint64_t flags) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct i915_vma *vma; unsigned bound; int ret; @@ -4669,7 +4702,7 @@ int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_madvise *args = data; struct drm_i915_gem_object *obj; int ret; @@ -4739,7 +4772,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, obj->fence_reg = I915_FENCE_REG_NONE; obj->madv = I915_MADV_WILLNEED; - i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size); + i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); } static const struct drm_i915_gem_object_ops i915_gem_object_ops = { @@ -4834,7 +4867,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) { struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_vma *vma, *next; intel_runtime_pm_get(dev_priv); @@ -4938,7 +4971,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma) static void i915_gem_stop_engines(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; for_each_engine(engine, dev_priv) @@ -4948,11 +4981,11 @@ i915_gem_stop_engines(struct drm_device *dev) int i915_gem_suspend(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; mutex_lock(&dev->struct_mutex); - ret = i915_gpu_idle(dev); + ret = 
i915_gem_wait_for_idle(dev_priv); if (ret) goto err; @@ -4963,13 +4996,13 @@ i915_gem_suspend(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); - cancel_delayed_work_sync(&dev_priv->mm.retire_work); - flush_delayed_work(&dev_priv->mm.idle_work); + cancel_delayed_work_sync(&dev_priv->gt.retire_work); + flush_delayed_work(&dev_priv->gt.idle_work); /* Assert that we successfully flushed all the work and * reset the GPU back to its idle, low power state. */ - WARN_ON(dev_priv->mm.busy); + WARN_ON(dev_priv->gt.awake); return 0; @@ -4980,7 +5013,7 @@ err: void i915_gem_init_swizzling(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (INTEL_INFO(dev)->gen < 5 || dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) @@ -5005,7 +5038,7 @@ void i915_gem_init_swizzling(struct drm_device *dev) static void init_unused_ring(struct drm_device *dev, u32 base) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(RING_CTL(base), 0); I915_WRITE(RING_HEAD(base), 0); @@ -5032,7 +5065,7 @@ static void init_unused_rings(struct drm_device *dev) int i915_gem_init_engines(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; ret = intel_init_render_ring_buffer(dev); @@ -5080,7 +5113,7 @@ cleanup_render_ring: int i915_gem_init_hw(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; int ret; @@ -5138,12 +5171,6 @@ i915_gem_init_hw(struct drm_device *dev) if (ret) goto out; - /* - * Increment the next seqno by 0x100 so we have a visible break - * on re-initialisation - */ - ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100); - out: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return ret; @@ -5151,7 +5178,7 @@ out: int i915_gem_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; mutex_lock(&dev->struct_mutex); @@ -5208,7 +5235,7 @@ out_unlock: void i915_gem_cleanup_engines(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; for_each_engine(engine, dev_priv) @@ -5225,7 +5252,7 @@ init_engine_lists(struct intel_engine_cs *engine) void i915_gem_load_init_fences(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) @@ -5249,7 +5276,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) void i915_gem_load_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; dev_priv->objects = @@ -5277,22 +5304,15 @@ i915_gem_load_init(struct drm_device *dev) init_engine_lists(&dev_priv->engine[i]); for (i = 0; i < I915_MAX_NUM_FENCES; i++) INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); - INIT_DELAYED_WORK(&dev_priv->mm.retire_work, + INIT_DELAYED_WORK(&dev_priv->gt.retire_work, i915_gem_retire_work_handler); - INIT_DELAYED_WORK(&dev_priv->mm.idle_work, + INIT_DELAYED_WORK(&dev_priv->gt.idle_work, i915_gem_idle_work_handler); + 
init_waitqueue_head(&dev_priv->gpu_error.wait_queue); init_waitqueue_head(&dev_priv->gpu_error.reset_queue); dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; - /* - * Set initial sequence number for requests. - * Using this number allows the wraparound to happen early, - * catching any obvious problems. - */ - dev_priv->next_seqno = ((u32)~0 - 0x1100); - dev_priv->last_seqno = ((u32)~0 - 0x1101); - INIT_LIST_HEAD(&dev_priv->mm.fence_list); init_waitqueue_head(&dev_priv->pending_flip_queue); @@ -5378,7 +5398,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file) return -ENOMEM; file->driver_priv = file_priv; - file_priv->dev_priv = dev->dev_private; + file_priv->dev_priv = to_i915(dev); file_priv->file = file; INIT_LIST_HEAD(&file_priv->rps.link); @@ -5424,7 +5444,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, struct i915_address_space *vm) { - struct drm_i915_private *dev_priv = o->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(o->base.dev); struct i915_vma *vma; WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); @@ -5528,7 +5548,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n) struct page *page; /* Only default objects have per-page dirty tracking */ - if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0)) + if (WARN_ON(!i915_gem_object_has_struct_page(obj))) return NULL; page = i915_gem_object_get_page(obj, n); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 30d9b4fd30f3..3c97f0e7a003 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -154,7 +154,7 @@ void i915_gem_context_free(struct kref *ctx_ref) struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); int i; - lockdep_assert_held(&ctx->i915->dev->struct_mutex); + lockdep_assert_held(&ctx->i915->drm.struct_mutex); trace_i915_context_free(ctx); /* @@ -250,7 +250,7 @@ static struct i915_gem_context * __create_hw_context(struct drm_device *dev, struct drm_i915_file_private *file_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_gem_context *ctx; int ret; @@ -268,6 +268,8 @@ __create_hw_context(struct drm_device *dev, list_add_tail(&ctx->link, &dev_priv->context_list); ctx->i915 = dev_priv; + ctx->ggtt_alignment = get_context_alignment(dev_priv); + if (dev_priv->hw_context_size) { struct drm_i915_gem_object *obj = i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size); @@ -394,7 +396,7 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx, void i915_gem_context_reset(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); lockdep_assert_held(&dev->struct_mutex); @@ -410,7 +412,7 @@ void i915_gem_context_reset(struct drm_device *dev) int i915_gem_context_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_gem_context *ctx; /* Init should only be called once per module load. Eventually the @@ -451,26 +453,6 @@ int i915_gem_context_init(struct drm_device *dev) return PTR_ERR(ctx); } - if (!i915.enable_execlists && ctx->engine[RCS].state) { - int ret; - - /* We may need to do things with the shrinker which - * require us to immediately switch back to the default - * context. 
This can cause a problem as pinning the - * default context also requires GTT space which may not - * be available. To avoid this we always pin the default - * context. - */ - ret = i915_gem_obj_ggtt_pin(ctx->engine[RCS].state, - get_context_alignment(dev_priv), 0); - if (ret) { - DRM_ERROR("Failed to pinned default global context (error %d)\n", - ret); - i915_gem_context_unreference(ctx); - return ret; - } - } - dev_priv->kernel_context = ctx; DRM_DEBUG_DRIVER("%s context support initialized\n", @@ -483,33 +465,45 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; - lockdep_assert_held(&dev_priv->dev->struct_mutex); + lockdep_assert_held(&dev_priv->drm.struct_mutex); for_each_engine(engine, dev_priv) { if (engine->last_context) { i915_gem_context_unpin(engine->last_context, engine); engine->last_context = NULL; } - - /* Force the GPU state to be reinitialised on enabling */ - dev_priv->kernel_context->engine[engine->id].initialised = - engine->init_context == NULL; } - /* Force the GPU state to be reinitialised on enabling */ - dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv); + /* Force the GPU state to be restored on enabling */ + if (!i915.enable_execlists) { + struct i915_gem_context *ctx; + + list_for_each_entry(ctx, &dev_priv->context_list, link) { + if (!i915_gem_context_is_default(ctx)) + continue; + + for_each_engine(engine, dev_priv) + ctx->engine[engine->id].initialised = false; + + ctx->remap_slice = ALL_L3_SLICES(dev_priv); + } + + for_each_engine(engine, dev_priv) { + struct intel_context *kce = + &dev_priv->kernel_context->engine[engine->id]; + + kce->initialised = true; + } + } } void i915_gem_context_fini(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_gem_context *dctx = dev_priv->kernel_context; lockdep_assert_held(&dev->struct_mutex); - if (!i915.enable_execlists && dctx->engine[RCS].state) - i915_gem_object_ggtt_unpin(dctx->engine[RCS].state); - i915_gem_context_unreference(dctx); dev_priv->kernel_context = NULL; @@ -759,7 +753,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) /* Trying to pin first makes error handling easier. */ ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state, - get_context_alignment(engine->i915), + to->ggtt_alignment, 0); if (ret) return ret; @@ -901,7 +895,7 @@ int i915_switch_context(struct drm_i915_gem_request *req) struct intel_engine_cs *engine = req->engine; WARN_ON(i915.enable_execlists); - lockdep_assert_held(&req->i915->dev->struct_mutex); + lockdep_assert_held(&req->i915->drm.struct_mutex); if (!req->ctx->engine[engine->id].state) { struct i915_gem_context *to = req->ctx; @@ -1032,6 +1026,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, else args->value = to_i915(dev)->ggtt.base.total; break; + case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: + args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE); + break; default: ret = -EINVAL; break; @@ -1077,6 +1074,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, ctx->flags |= args->value ? 
CONTEXT_NO_ZEROMAP : 0; } break; + case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: + if (args->size) { + ret = -EINVAL; + } else { + if (args->value) + ctx->flags |= CONTEXT_NO_ERROR_CAPTURE; + else + ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE; + } + break; default: ret = -EINVAL; break; @@ -1089,7 +1096,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_reset_stats *args = data; struct i915_ctx_hang_stats *hs; struct i915_gem_context *ctx; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index b144c3f5c650..3c1280ec7ff6 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -33,6 +33,37 @@ #include "intel_drv.h" #include "i915_trace.h" +static int switch_to_pinned_context(struct drm_i915_private *dev_priv) +{ + struct intel_engine_cs *engine; + + if (i915.enable_execlists) + return 0; + + for_each_engine(engine, dev_priv) { + struct drm_i915_gem_request *req; + int ret; + + if (engine->last_context == NULL) + continue; + + if (engine->last_context == dev_priv->kernel_context) + continue; + + req = i915_gem_request_alloc(engine, dev_priv->kernel_context); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = i915_switch_context(req); + i915_add_request_no_flush(req); + if (ret) + return ret; + } + + return 0; +} + + static bool mark_free(struct i915_vma *vma, struct list_head *unwind) { @@ -150,11 +181,19 @@ none: /* Only idle the GPU and repeat the search once */ if (pass++ == 0) { - ret = i915_gpu_idle(dev); + struct drm_i915_private *dev_priv = to_i915(dev); + + if (i915_is_ggtt(vm)) { + ret = switch_to_pinned_context(dev_priv); + if (ret) + return ret; + } + + ret = i915_gem_wait_for_idle(dev_priv); if (ret) return ret; - i915_gem_retire_requests(to_i915(dev)); + i915_gem_retire_requests(dev_priv); goto search_again; } @@ -261,11 +300,19 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) trace_i915_gem_evict_vm(vm); if (do_idle) { - ret = i915_gpu_idle(vm->dev); + struct drm_i915_private *dev_priv = to_i915(vm->dev); + + if (i915_is_ggtt(vm)) { + ret = switch_to_pinned_context(dev_priv); + if (ret) + return ret; + } + + ret = i915_gem_wait_for_idle(dev_priv); if (ret) return ret; - i915_gem_retire_requests(to_i915(vm->dev)); + i915_gem_retire_requests(dev_priv); WARN_ON(!list_empty(&vm->active_list)); } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 7941f1fe9cd2..1978633e7549 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1142,7 +1142,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, struct drm_i915_gem_request *req) { struct intel_engine_cs *engine = req->engine; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret, i; if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) { @@ -1225,7 +1225,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, { struct drm_device *dev = params->dev; struct intel_engine_cs *engine = params->engine; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u64 exec_start, exec_len; int instp_mode; u32 instp_mask; @@ -1328,10 +1328,10 @@ 
gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file) /* Check whether the file_priv has already selected one ring. */ if ((int)file_priv->bsd_ring < 0) { /* If not, use the ping-pong mechanism to select one. */ - mutex_lock(&dev_priv->dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index; dev_priv->mm.bsd_ring_dispatch_index ^= 1; - mutex_unlock(&dev_priv->dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); } return file_priv->bsd_ring; @@ -1477,6 +1477,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, dispatch_flags |= I915_DISPATCH_RS; } + /* Take a local wakeref for preparing to dispatch the execbuf as + * we expect to access the hardware fairly frequently in the + * process. Upon first dispatch, we acquire another prolonged + * wakeref that we hold until the GPU has been idle for at least + * 100ms. + */ intel_runtime_pm_get(dev_priv); ret = i915_mutex_lock_interruptible(dev); diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c index 2b6bdc267fb5..251d7a95af89 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence.c +++ b/drivers/gpu/drm/i915/i915_gem_fence.c @@ -58,7 +58,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t fence_reg_lo, fence_reg_hi; int fence_pitch_shift; @@ -117,7 +117,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, static void i915_write_fence_reg(struct drm_device *dev, int reg, struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val; if (obj) { @@ -156,7 +156,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg, static void i830_write_fence_reg(struct drm_device *dev, int reg, struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t val; if (obj) { @@ -193,7 +193,7 @@ inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj) static void i915_gem_write_fence(struct drm_device *dev, int reg, struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* Ensure that all CPU reads are completed before installing a fence * and all writes before removing the fence. @@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, struct drm_i915_fence_reg *fence, bool enable) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); int reg = fence_number(dev_priv, fence); i915_gem_write_fence(obj->base.dev, reg, enable ? 
obj : NULL); @@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) int i915_gem_object_put_fence(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct drm_i915_fence_reg *fence; int ret; @@ -311,7 +311,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj) static struct drm_i915_fence_reg * i915_find_fence_reg(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_fence_reg *reg, *avail; int i; @@ -367,7 +367,7 @@ int i915_gem_object_get_fence(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); bool enable = obj->tiling_mode != I915_TILING_NONE; struct drm_i915_fence_reg *reg; int ret; @@ -433,7 +433,7 @@ bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) { if (obj->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj); WARN_ON(!ggtt_vma || @@ -457,7 +457,7 @@ void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) { if (obj->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0); dev_priv->fence_regs[obj->fence_reg].pin_count--; } @@ -472,7 +472,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) */ void i915_gem_restore_fences(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; for (i = 0; i < dev_priv->num_fence_regs; i++) { @@ -549,7 +549,7 @@ void i915_gem_restore_fences(struct drm_device *dev) void i915_gem_detect_bit_6_swizzle(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 5890017b9832..10f1e32767e6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -153,7 +153,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, #endif /* Early VLV doesn't have this */ - if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) { + if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) { DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); return 0; } @@ -1570,13 +1570,13 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) struct i915_page_table *unused; gen6_pte_t scratch_pte; uint32_t pd_entry; - uint32_t pte, pde, temp; + uint32_t pte, pde; uint32_t start = ppgtt->base.start, length = ppgtt->base.total; scratch_pte = vm->pte_encode(px_dma(vm->scratch_page), I915_CACHE_LLC, true, 0); - gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) { + gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) { u32 expected; gen6_pte_t *pt_vaddr; const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]); @@ -1640,9 +1640,9 @@ static void gen6_write_page_range(struct drm_i915_private 
*dev_priv, { struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_page_table *pt; - uint32_t pde, temp; + uint32_t pde; - gen6_for_each_pde(pt, pd, start, length, temp, pde) + gen6_for_each_pde(pt, pd, start, length, pde) gen6_write_pde(pd, pde, pt); /* Make sure write is complete before other code can use this page @@ -1683,17 +1683,6 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, return 0; } -static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt, - struct drm_i915_gem_request *req) -{ - struct intel_engine_cs *engine = req->engine; - struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); - - I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); - I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt)); - return 0; -} - static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_request *req) { @@ -1731,21 +1720,16 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_request *req) { struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = ppgtt->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - + struct drm_i915_private *dev_priv = req->i915; I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt)); - - POSTING_READ(RING_PP_DIR_DCLV(engine)); - return 0; } static void gen8_ppgtt_enable(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; for_each_engine(engine, dev_priv) { @@ -1757,7 +1741,7 @@ static void gen8_ppgtt_enable(struct drm_device *dev) static void gen7_ppgtt_enable(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; uint32_t ecochk, ecobits; @@ -1782,7 +1766,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev) static void gen6_ppgtt_enable(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t ecochk, gab_ctl, ecobits; ecobits = I915_READ(GAC_ECO_BITS); @@ -1875,7 +1859,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_page_table *pt; uint32_t start, length, start_save, length_save; - uint32_t pde, temp; + uint32_t pde; int ret; if (WARN_ON(start_in + length_in > ppgtt->base.total)) @@ -1891,7 +1875,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, * need allocation. The second stage marks use ptes within the page * tables. 
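/*
 * A minimal userspace sketch of the two-stage pattern gen6_alloc_va_range()
 * describes above: stage one allocates any missing page tables and can
 * unwind cleanly on failure (nothing has been published yet); stage two
 * cannot fail and just marks the PTEs in use.  Sizes and helper names are
 * illustrative, not the driver's; the caller guarantees the range fits.
 */
#include <stdbool.h>
#include <stdlib.h>

#define N_PDES 16
#define N_PTES 32

struct toy_pt { bool used[N_PTES]; };
struct toy_pd { struct toy_pt *pt[N_PDES]; };

static int toy_alloc_range(struct toy_pd *pd, unsigned first, unsigned count)
{
	bool allocated_here[N_PDES] = { false };
	unsigned pde, pte;

	/* Stage 1: ensure every PDE covered by the range has a table. */
	for (pde = first / N_PTES; pde <= (first + count - 1) / N_PTES; pde++) {
		if (pd->pt[pde])
			continue;
		pd->pt[pde] = calloc(1, sizeof(*pd->pt[pde]));
		if (!pd->pt[pde])
			goto unwind;
		allocated_here[pde] = true;
	}

	/* Stage 2: no failures possible; mark individual PTEs in use. */
	for (pte = first; pte < first + count; pte++)
		pd->pt[pte / N_PTES]->used[pte % N_PTES] = true;
	return 0;

unwind:
	/* Free only the tables this call created; pre-existing ones stay. */
	for (pde = 0; pde < N_PDES; pde++)
		if (allocated_here[pde]) {
			free(pd->pt[pde]);
			pd->pt[pde] = NULL;
		}
	return -1;
}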
*/ - gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { + gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) { if (pt != vm->scratch_pt) { WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES)); continue; @@ -1916,7 +1900,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, start = start_save; length = length_save; - gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { + gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) { DECLARE_BITMAP(tmp_bitmap, GEN6_PTES); bitmap_zero(tmp_bitmap, GEN6_PTES); @@ -1985,15 +1969,16 @@ static void gen6_free_scratch(struct i915_address_space *vm) static void gen6_ppgtt_cleanup(struct i915_address_space *vm) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct i915_page_directory *pd = &ppgtt->pd; + struct drm_device *dev = vm->dev; struct i915_page_table *pt; uint32_t pde; drm_mm_remove_node(&ppgtt->node); - gen6_for_all_pdes(pt, ppgtt, pde) { + gen6_for_all_pdes(pt, pd, pde) if (pt != vm->scratch_pt) - free_pt(ppgtt->base.dev, pt); - } + free_pt(dev, pt); gen6_free_scratch(vm); } @@ -2059,9 +2044,9 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt, uint64_t start, uint64_t length) { struct i915_page_table *unused; - uint32_t pde, temp; + uint32_t pde; - gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) + gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt; } @@ -2073,18 +2058,15 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) int ret; ppgtt->base.pte_encode = ggtt->base.pte_encode; - if (IS_GEN6(dev)) { + if (intel_vgpu_active(dev_priv) || IS_GEN6(dev)) ppgtt->switch_mm = gen6_mm_switch; - } else if (IS_HASWELL(dev)) { + else if (IS_HASWELL(dev)) ppgtt->switch_mm = hsw_mm_switch; - } else if (IS_GEN7(dev)) { + else if (IS_GEN7(dev)) ppgtt->switch_mm = gen7_mm_switch; - } else + else BUG(); - if (intel_vgpu_active(dev_priv)) - ppgtt->switch_mm = vgpu_mm_switch; - ret = gen6_ppgtt_alloc(ppgtt); if (ret) return ret; @@ -2133,7 +2115,7 @@ static void i915_address_space_init(struct i915_address_space *vm, struct drm_i915_private *dev_priv) { drm_mm_init(&vm->mm, vm->start, vm->total); - vm->dev = dev_priv->dev; + vm->dev = &dev_priv->drm; INIT_LIST_HEAD(&vm->active_list); INIT_LIST_HEAD(&vm->inactive_list); list_add_tail(&vm->global_link, &dev_priv->vm_list); @@ -2141,7 +2123,7 @@ static void i915_address_space_init(struct i915_address_space *vm, static void gtt_write_workarounds(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* This function is for gtt related workarounds. 
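/*
 * The gen6_ppgtt_init() hunk above folds vgpu_mm_switch() into
 * gen6_mm_switch(): with the POSTING_READ dropped, the two bodies became
 * identical, so the vGPU case now simply shares the gen6 vfunc.  A toy
 * model of that selection, with plain flags standing in for the driver's
 * IS_*()/intel_vgpu_active() checks:
 */
#include <stdio.h>

struct toy_ppgtt;
typedef int (*switch_mm_fn)(struct toy_ppgtt *);

static int gen6_switch(struct toy_ppgtt *p) { (void)p; return 0; }
static int hsw_switch(struct toy_ppgtt *p)  { (void)p; return 0; }
static int gen7_switch(struct toy_ppgtt *p) { (void)p; return 0; }

static switch_mm_fn pick_switch_mm(int gen, int is_hsw, int vgpu_active)
{
	if (vgpu_active || gen == 6)	/* vGPU shares the gen6 path */
		return gen6_switch;
	else if (is_hsw)
		return hsw_switch;
	else if (gen == 7)
		return gen7_switch;
	return NULL;			/* the driver BUG()s instead */
}

int main(void)
{
	puts(pick_switch_mm(7, 0, 1) == gen6_switch
	     ? "active vGPU takes the gen6 path" : "?");
	return 0;
}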
This function is * called on driver load and after a GPU reset, so you can place @@ -2160,7 +2142,7 @@ static void gtt_write_workarounds(struct drm_device *dev) static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; ret = __hw_ppgtt_init(dev, ppgtt); @@ -2261,8 +2243,8 @@ static bool do_idling(struct drm_i915_private *dev_priv) if (unlikely(ggtt->do_idle_maps)) { dev_priv->mm.interruptible = false; - if (i915_gpu_idle(dev_priv->dev)) { - DRM_ERROR("Couldn't idle GPU\n"); + if (i915_gem_wait_for_idle(dev_priv)) { + DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); /* Wait a bit, in hopes it avoids the hang */ udelay(10); } @@ -2610,7 +2592,7 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm, uint64_t start, enum i915_cache_level cache_level, u32 unused) { - struct drm_i915_private *dev_priv = vm->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(vm->dev); unsigned int flags = (cache_level == I915_CACHE_NONE) ? AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; int rpm_atomic_seq; @@ -2628,7 +2610,7 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm, uint64_t length, bool unused) { - struct drm_i915_private *dev_priv = vm->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(vm->dev); unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; int rpm_atomic_seq; @@ -2709,7 +2691,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, static void ggtt_unbind_vma(struct i915_vma *vma) { struct drm_device *dev = vma->vm->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj = vma->obj; const uint64_t size = min_t(uint64_t, obj->base.size, @@ -2735,7 +2717,7 @@ static void ggtt_unbind_vma(struct i915_vma *vma) void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); bool interruptible; interruptible = do_idling(dev_priv); @@ -3137,7 +3119,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->base.unbind_vma = ggtt_unbind_vma; ggtt->base.insert_page = gen8_ggtt_insert_page; ggtt->base.clear_range = nop_clear_range; - if (!USES_FULL_PPGTT(dev_priv)) + if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv)) ggtt->base.clear_range = gen8_ggtt_clear_range; ggtt->base.insert_entries = gen8_ggtt_insert_entries; @@ -3197,7 +3179,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) struct drm_i915_private *dev_priv = to_i915(dev); int ret; - ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL); + ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL); if (!ret) { DRM_ERROR("failed to set up gmch\n"); return -EIO; @@ -3206,7 +3188,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size, &ggtt->mappable_base, &ggtt->mappable_end); - ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev); + ggtt->do_idle_maps = needs_idle_maps(&dev_priv->drm); ggtt->base.insert_page = i915_ggtt_insert_page; ggtt->base.insert_entries = i915_ggtt_insert_entries; ggtt->base.clear_range = i915_ggtt_clear_range; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 163b564fb87d..aa5f31d1c2ed 100644 --- 
a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -390,27 +390,27 @@ struct i915_hw_ppgtt { void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); }; -/* For each pde iterates over every pde between from start until start + length. - * If start, and start+length are not perfectly divisible, the macro will round - * down, and up as needed. The macro modifies pde, start, and length. Dev is - * only used to differentiate shift values. Temp is temp. On gen6/7, start = 0, - * and length = 2G effectively iterates over every PDE in the system. - * - * XXX: temp is not actually needed, but it saves doing the ALIGN operation. +/* + * gen6_for_each_pde() iterates over every pde from start until start+length. + * If start and start+length are not perfectly divisible, the macro will round + * down and up as needed. Start=0 and length=2G effectively iterates over + * every PDE in the system. The macro modifies ALL its parameters except 'pd', + * so each of the other parameters should preferably be a simple variable, or + * at most an lvalue with no side-effects! */ -#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \ - for (iter = gen6_pde_index(start); \ - length > 0 && iter < I915_PDES ? \ - (pt = (pd)->page_table[iter]), 1 : 0; \ - iter++, \ - temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \ - temp = min_t(unsigned, temp, length), \ - start += temp, length -= temp) - -#define gen6_for_all_pdes(pt, ppgtt, iter) \ - for (iter = 0; \ - pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \ - iter++) +#define gen6_for_each_pde(pt, pd, start, length, iter) \ + for (iter = gen6_pde_index(start); \ + length > 0 && iter < I915_PDES && \ + (pt = (pd)->page_table[iter], true); \ + ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \ + temp = min(temp - start, length); \ + start += temp, length -= temp; }), ++iter) + +#define gen6_for_all_pdes(pt, pd, iter) \ + for (iter = 0; \ + iter < I915_PDES && \ + (pt = (pd)->page_table[iter], true); \ + ++iter) static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift) { diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index b7c1b5fb61ea..f75bbd67a13a 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -58,7 +58,7 @@ static int render_state_init(struct render_state *so, if (so->rodata->batch_items * 4 > 4096) return -EINVAL; - so->obj = i915_gem_object_create(dev_priv->dev, 4096); + so->obj = i915_gem_object_create(&dev_priv->drm, 4096); if (IS_ERR(so->obj)) return PTR_ERR(so->obj); diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 538c30499848..067632ad2f29 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -257,7 +257,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) { struct drm_i915_private *dev_priv = container_of(shrinker, struct drm_i915_private, mm.shrinker); - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_i915_gem_object *obj; unsigned long count; bool unlock; @@ -265,6 +265,8 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) if (!i915_gem_shrinker_lock(dev, &unlock)) return 0; + i915_gem_retire_requests(dev_priv); + count = 0; list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) if (can_release_pages(obj)) @@ -286,7 +288,7 @@ 
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) { struct drm_i915_private *dev_priv = container_of(shrinker, struct drm_i915_private, mm.shrinker); - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; unsigned long freed; bool unlock; @@ -321,7 +323,7 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, { unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1; - while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) { + while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) { schedule_timeout_killable(1); if (fatal_signal_pending(current)) return false; @@ -342,7 +344,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv, { dev_priv->mm.interruptible = slu->was_interruptible; if (slu->unlock) - mutex_unlock(&dev_priv->dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); } static int @@ -408,7 +410,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr return NOTIFY_DONE; /* Force everything onto the inactive lists */ - ret = i915_gpu_idle(dev_priv->dev); + ret = i915_gem_wait_for_idle(dev_priv); if (ret) goto out; diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index e9cd82290408..66be299a1486 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -270,7 +270,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) void i915_gem_cleanup_stolen(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!drm_mm_initialized(&dev_priv->mm.stolen)) return; @@ -550,7 +550,7 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj) static void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); if (obj->stolen) { i915_gem_stolen_remove_node(dev_priv, obj->stolen); @@ -601,7 +601,7 @@ cleanup: struct drm_i915_gem_object * i915_gem_object_create_stolen(struct drm_device *dev, u32 size) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; int ret; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index a6eb5c47a49c..8030199731db 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -162,7 +162,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_set_tiling *args = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; int ret = 0; @@ -294,7 +294,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_get_tiling *args = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; obj = to_intel_bo(drm_gem_object_lookup(file, args->handle)); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 34ff2459ceea..9d73d2216adc 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -332,7 +332,7 @@ int i915_error_state_to_str(struct 
drm_i915_error_state_buf *m, const struct i915_error_state_file_priv *error_priv) { struct drm_device *dev = error_priv->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_error_state *error = error_priv->error; struct drm_i915_error_object *obj; int i, j, offset, elt; @@ -463,6 +463,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, } } + if (error->ring[i].num_waiters) { + err_printf(m, "%s --- %d waiters\n", + dev_priv->engine[i].name, + error->ring[i].num_waiters); + for (j = 0; j < error->ring[i].num_waiters; j++) { + err_printf(m, " seqno 0x%08x for %s [%d]\n", + error->ring[i].waiters[j].seqno, + error->ring[i].waiters[j].comm, + error->ring[i].waiters[j].pid); + } + } + if ((obj = error->ring[i].ringbuffer)) { err_printf(m, "%s --- ringbuffer = 0x%08x\n", dev_priv->engine[i].name, @@ -488,7 +500,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, hws_page[elt+1], hws_page[elt+2], hws_page[elt+3]); - offset += 16; + offset += 16; } } @@ -605,8 +617,9 @@ static void i915_error_state_free(struct kref *error_ref) i915_error_object_free(error->ring[i].ringbuffer); i915_error_object_free(error->ring[i].hws_page); i915_error_object_free(error->ring[i].ctx); - kfree(error->ring[i].requests); i915_error_object_free(error->ring[i].wa_ctx); + kfree(error->ring[i].requests); + kfree(error->ring[i].waiters); } i915_error_object_free(error->semaphore_obj); @@ -892,6 +905,48 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv, } } +static void engine_record_waiters(struct intel_engine_cs *engine, + struct drm_i915_error_ring *ering) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct drm_i915_error_waiter *waiter; + struct rb_node *rb; + int count; + + ering->num_waiters = 0; + ering->waiters = NULL; + + spin_lock(&b->lock); + count = 0; + for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb)) + count++; + spin_unlock(&b->lock); + + waiter = NULL; + if (count) + waiter = kmalloc_array(count, + sizeof(struct drm_i915_error_waiter), + GFP_ATOMIC); + if (!waiter) + return; + + ering->waiters = waiter; + + spin_lock(&b->lock); + for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { + struct intel_wait *w = container_of(rb, typeof(*w), node); + + strcpy(waiter->comm, w->tsk->comm); + waiter->pid = w->tsk->pid; + waiter->seqno = w->seqno; + waiter++; + + if (++ering->num_waiters == count) + break; + } + spin_unlock(&b->lock); +} + static void i915_record_ring_state(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error, struct intel_engine_cs *engine, @@ -926,10 +981,10 @@ static void i915_record_ring_state(struct drm_i915_private *dev_priv, ering->instdone = I915_READ(GEN2_INSTDONE); } - ering->waiting = waitqueue_active(&engine->irq_queue); + ering->waiting = intel_engine_has_waiter(engine); ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base)); ering->acthd = intel_ring_get_active_head(engine); - ering->seqno = engine->get_seqno(engine); + ering->seqno = intel_engine_get_seqno(engine); ering->last_seqno = engine->last_submitted_seqno; ering->start = I915_READ_START(engine); ering->head = I915_READ_HEAD(engine); @@ -1022,7 +1077,6 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv, for (i = 0; i < I915_NUM_ENGINES; i++) { struct intel_engine_cs *engine = &dev_priv->engine[i]; - struct intel_ringbuffer *rbuf; error->ring[i].pid = -1; @@ -1032,14 +1086,15 @@ static void i915_gem_record_rings(struct 
drm_i915_private *dev_priv, error->ring[i].valid = true; i915_record_ring_state(dev_priv, error, engine, &error->ring[i]); + engine_record_waiters(engine, &error->ring[i]); request = i915_gem_find_active_request(engine); if (request) { struct i915_address_space *vm; + struct intel_ringbuffer *rb; - vm = request->ctx && request->ctx->ppgtt ? - &request->ctx->ppgtt->base : - &ggtt->base; + vm = request->ctx->ppgtt ? + &request->ctx->ppgtt->base : &ggtt->base; /* We need to copy these to an anonymous buffer * as the simplest method to avoid being overwritten @@ -1066,26 +1121,17 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv, } rcu_read_unlock(); } - } - - if (i915.enable_execlists) { - /* TODO: This is only a small fix to keep basic error - * capture working, but we need to add more information - * for it to be useful (e.g. dump the context being - * executed). - */ - if (request) - rbuf = request->ctx->engine[engine->id].ringbuf; - else - rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf; - } else - rbuf = engine->buffer; - error->ring[i].cpu_ring_head = rbuf->head; - error->ring[i].cpu_ring_tail = rbuf->tail; + error->simulated |= + request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE; - error->ring[i].ringbuffer = - i915_error_ggtt_object_create(dev_priv, rbuf->obj); + rb = request->ringbuf; + error->ring[i].cpu_ring_head = rb->head; + error->ring[i].cpu_ring_tail = rb->tail; + error->ring[i].ringbuffer = + i915_error_ggtt_object_create(dev_priv, + rb->obj); + } error->ring[i].hws_page = i915_error_ggtt_object_create(dev_priv, @@ -1230,7 +1276,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, static void i915_capture_reg_state(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; int i; /* General organization @@ -1355,6 +1401,9 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error; unsigned long flags; + if (READ_ONCE(dev_priv->gpu_error.first_error)) + return; + /* Account for pipe specific data like PIPE*STAT */ error = kzalloc(sizeof(*error), GFP_ATOMIC); if (!error) { @@ -1378,12 +1427,14 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv, i915_error_capture_msg(dev_priv, error, engine_mask, error_msg); DRM_INFO("%s\n", error->error_msg); - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); - if (dev_priv->gpu_error.first_error == NULL) { - dev_priv->gpu_error.first_error = error; - error = NULL; + if (!error->simulated) { + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + if (!dev_priv->gpu_error.first_error) { + dev_priv->gpu_error.first_error = error; + error = NULL; + } + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); } - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); if (error) { i915_error_state_free(&error->ref); @@ -1395,7 +1446,8 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv, DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); - DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index); + DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", + dev_priv->drm.primary->index); warned = true; } } @@ -1403,7 +1455,7 @@ 
void i915_capture_error_state(struct drm_i915_private *dev_priv, void i915_error_state_get(struct drm_device *dev, struct i915_error_state_file_priv *error_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); spin_lock_irq(&dev_priv->gpu_error.lock); error_priv->error = dev_priv->gpu_error.first_error; @@ -1421,7 +1473,7 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv) void i915_destroy_error_state(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_error_state *error; spin_lock_irq(&dev_priv->gpu_error.lock); diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 22a55ac4e51c..2112e029db6a 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -97,8 +97,14 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len) I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER); - /* No HOST2GUC command should take longer than 10ms */ - ret = wait_for_atomic(host2guc_action_response(dev_priv, &status), 10); + /* + * Fast commands should complete in less than 10us, so sample quickly + * up to that length of time, then switch to a slower sleep-wait loop. + * No HOST2GUC command should ever take longer than 10ms. + */ + ret = wait_for_us(host2guc_action_response(dev_priv, &status), 10); + if (ret) + ret = wait_for(host2guc_action_response(dev_priv, &status), 10); if (status != GUC2HOST_STATUS_SUCCESS) { /* * Either the GuC explicitly returned an error (which @@ -153,12 +159,11 @@ static int host2guc_sample_forcewake(struct intel_guc *guc, struct i915_guc_client *client) { struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct drm_device *dev = dev_priv->dev; u32 data[2]; data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; /* WaRsDisableCoarsePowerGating:skl,bxt */ - if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev)) + if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) data[1] = 0; else /* bit 0 and 1 are for Render and Media domain separately */ @@ -582,7 +587,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc) */ int i915_guc_submit(struct drm_i915_gem_request *rq) { - unsigned int engine_id = rq->engine->guc_id; + unsigned int engine_id = rq->engine->id; struct intel_guc *guc = &rq->i915->guc; struct i915_guc_client *client = guc->execbuf_client; int b_ret; @@ -623,7 +628,7 @@ gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size) { struct drm_i915_gem_object *obj; - obj = i915_gem_object_create(dev_priv->dev, size); + obj = i915_gem_object_create(&dev_priv->drm, size); if (IS_ERR(obj)) return NULL; @@ -1034,7 +1039,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv) */ int intel_guc_suspend(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc *guc = &dev_priv->guc; struct i915_gem_context *ctx; u32 data[3]; @@ -1060,7 +1065,7 @@ int intel_guc_suspend(struct drm_device *dev) */ int intel_guc_resume(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc *guc = &dev_priv->guc; struct i915_gem_context *ctx; u32 data[3]; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4378a659d962..1c2aec392412 100644 --- 
a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, dev_priv->gt_irq_mask &= ~interrupt_mask; dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); } void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) { ilk_update_gt_irq(dev_priv, mask, mask); + POSTING_READ_FW(GTIMR); } void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) @@ -351,9 +351,8 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) { spin_lock_irq(&dev_priv->irq_lock); - - WARN_ON(dev_priv->rps.pm_iir); - WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); + WARN_ON_ONCE(dev_priv->rps.pm_iir); + WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); dev_priv->rps.interrupts_enabled = true; I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) | dev_priv->pm_rps_events); @@ -371,11 +370,6 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) { spin_lock_irq(&dev_priv->irq_lock); dev_priv->rps.interrupts_enabled = false; - spin_unlock_irq(&dev_priv->irq_lock); - - cancel_work_sync(&dev_priv->rps.work); - - spin_lock_irq(&dev_priv->irq_lock); I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0)); @@ -384,8 +378,15 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) ~dev_priv->pm_rps_events); spin_unlock_irq(&dev_priv->irq_lock); + synchronize_irq(dev_priv->drm.irq); - synchronize_irq(dev_priv->dev->irq); + /* Now that we will not be generating any more work, flush any + * outsanding tasks. As we are called on the RPS idle path, + * we will reset the GPU to minimum frequencies, so the current + * state of the worker can be discarded. 
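/*
 * The gen6_disable_rps_interrupts() reorder above masks the interrupt and
 * synchronizes it *before* cancelling the worker, closing the window in
 * which a late IRQ could requeue work that was just flushed.  A loose
 * userspace analogue of that shutdown ordering (pthread_join() stands in
 * for cancel_work_sync(); synchronize_irq() has no direct equivalent):
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

static atomic_bool rps_enabled = true;

/* Stands in for the RPS worker: runs until the event source is cut off. */
static void *rps_work(void *arg)
{
	(void)arg;
	while (atomic_load(&rps_enabled))
		usleep(1000);		/* pretend to re-evaluate frequency */
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, rps_work, NULL);

	/* 1. Mask the source first: no new work can be generated.       */
	atomic_store(&rps_enabled, false);
	/* 2. (driver only) synchronize_irq(): drain in-flight handlers. */
	/* 3. Flush the worker last; nothing can requeue it now, so its  */
	/*    pending state can simply be discarded.                     */
	pthread_join(worker, NULL);
	return 0;
}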
+ */ + cancel_work_sync(&dev_priv->rps.work); + gen6_reset_rps_interrupts(dev_priv); } /** @@ -565,7 +566,7 @@ i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, u32 enable_mask; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, + enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, status_mask); else enable_mask = status_mask << 16; @@ -579,7 +580,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, u32 enable_mask; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, + enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, status_mask); else enable_mask = status_mask << 16; @@ -666,7 +667,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe) */ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t high_frame, low_frame; u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; struct intel_crtc *intel_crtc = @@ -713,7 +714,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); } @@ -722,7 +723,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) static int __intel_get_crtc_scanline(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const struct drm_display_mode *mode = &crtc->base.hwmode; enum pipe pipe = crtc->pipe; int position, vtotal; @@ -774,7 +775,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, ktime_t *stime, ktime_t *etime, const struct drm_display_mode *mode) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int position; @@ -895,7 +896,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, int intel_get_crtc_scanline(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); unsigned long irqflags; int position; @@ -976,13 +977,11 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) static void notify_ring(struct intel_engine_cs *engine) { - if (!intel_engine_initialized(engine)) - return; - - trace_i915_gem_request_notify(engine); - engine->user_interrupts++; - - wake_up_all(&engine->irq_queue); + smp_store_mb(engine->breadcrumbs.irq_posted, true); + if (intel_engine_wakeup(engine)) { + trace_i915_gem_request_notify(engine); + engine->breadcrumbs.irq_wakeups++; + } } static void vlv_c0_read(struct drm_i915_private *dev_priv, @@ -1063,7 +1062,7 @@ static bool any_waiters(struct drm_i915_private *dev_priv) struct intel_engine_cs *engine; for_each_engine(engine, dev_priv) - if (engine->irq_refcount) + if (intel_engine_has_waiter(engine)) return true; return false; @@ -1084,13 +1083,6 @@ static void gen6_pm_rps_work(struct work_struct *work) return; } - /* - * 
The RPS work is synced during runtime suspend, we don't require a - * wakeref. TODO: instead of disabling the asserts make sure that we - * always hold an RPM reference while the work is running. - */ - DISABLE_RPM_WAKEREF_ASSERTS(dev_priv); - pm_iir = dev_priv->rps.pm_iir; dev_priv->rps.pm_iir = 0; /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ @@ -1103,7 +1095,7 @@ static void gen6_pm_rps_work(struct work_struct *work) WARN_ON(pm_iir & ~dev_priv->pm_rps_events); if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) - goto out; + return; mutex_lock(&dev_priv->rps.hw_lock); @@ -1158,8 +1150,6 @@ static void gen6_pm_rps_work(struct work_struct *work) intel_set_rps(dev_priv, new_delay); mutex_unlock(&dev_priv->rps.hw_lock); -out: - ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); } @@ -1185,7 +1175,7 @@ static void ivybridge_parity_work(struct work_struct *work) * In order to prevent a get/put style interface, acquire struct mutex * any time we access those registers. */ - mutex_lock(&dev_priv->dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); /* If we've screwed up tracking, just let the interrupt fire again */ if (WARN_ON(!dev_priv->l3_parity.which_slice)) @@ -1221,7 +1211,7 @@ static void ivybridge_parity_work(struct work_struct *work) parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); parity_event[5] = NULL; - kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, + kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, KOBJ_CHANGE, parity_event); DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", @@ -1241,7 +1231,7 @@ out: gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); spin_unlock_irq(&dev_priv->irq_lock); - mutex_unlock(&dev_priv->dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); } static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, @@ -1267,8 +1257,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) { - if (gt_iir & - (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) + if (gt_iir & GT_RENDER_USER_INTERRUPT) notify_ring(&dev_priv->engine[RCS]); if (gt_iir & ILK_BSD_USER_INTERRUPT) notify_ring(&dev_priv->engine[VCS]); @@ -1277,9 +1266,7 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) { - - if (gt_iir & - (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) + if (gt_iir & GT_RENDER_USER_INTERRUPT) notify_ring(&dev_priv->engine[RCS]); if (gt_iir & GT_BSD_USER_INTERRUPT) notify_ring(&dev_priv->engine[VCS]); @@ -1526,7 +1513,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, entry = &pipe_crc->entries[head]; - entry->frame = dev_priv->dev->driver->get_vblank_counter(dev_priv->dev, + entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); entry->crc[0] = crc0; entry->crc[1] = crc1; @@ -1602,7 +1589,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); if (dev_priv->rps.interrupts_enabled) { dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; - queue_work(dev_priv->wq, &dev_priv->rps.work); + schedule_work(&dev_priv->rps.work); } spin_unlock(&dev_priv->irq_lock); } @@ -1624,7 +1611,7 @@ static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv, { bool ret; - ret = 
drm_handle_vblank(dev_priv->dev, pipe); + ret = drm_handle_vblank(&dev_priv->drm, pipe); if (ret) intel_finish_page_flip_mmio(dev_priv, pipe); @@ -1757,7 +1744,7 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, static irqreturn_t valleyview_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -1840,7 +1827,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) static irqreturn_t cherryview_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -2225,7 +2212,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, static irqreturn_t ironlake_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 de_iir, gt_iir, de_ier, sde_ier = 0; irqreturn_t ret = IRQ_NONE; @@ -2438,7 +2425,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(SDEIIR, iir); ret = IRQ_HANDLED; - if (HAS_PCH_SPT(dev_priv)) + if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) spt_irq_handler(dev_priv, iir); else cpt_irq_handler(dev_priv, iir); @@ -2457,7 +2444,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) static irqreturn_t gen8_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 master_ctl; u32 gt_iir[4] = {}; irqreturn_t ret; @@ -2488,11 +2475,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) return ret; } -static void i915_error_wake_up(struct drm_i915_private *dev_priv, - bool reset_completed) +static void i915_error_wake_up(struct drm_i915_private *dev_priv) { - struct intel_engine_cs *engine; - /* * Notify all waiters for GPU completion events that reset state has * been changed, and that they need to restart their wait after @@ -2501,18 +2485,10 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv, */ /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ - for_each_engine(engine, dev_priv) - wake_up_all(&engine->irq_queue); + wake_up_all(&dev_priv->gpu_error.wait_queue); /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ wake_up_all(&dev_priv->pending_flip_queue); - - /* - * Signal tasks blocked in i915_gem_wait_for_error that the pending - * reset state is cleared. - */ - if (reset_completed) - wake_up_all(&dev_priv->gpu_error.reset_queue); } /** @@ -2524,7 +2500,7 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv, */ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) { - struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj; + struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; @@ -2577,7 +2553,7 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) * Note: The wake_up also serves as a memory barrier so that * waiters see the update value of the reset counter atomic_t. 
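/*
 * The comment above leans on wake_up_all() doubling as a memory barrier so
 * that woken waiters observe the new reset counter.  The portable pthreads
 * form of the same publish-then-wake discipline, where the mutex provides
 * the ordering (names here are illustrative):
 */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reset_queue = PTHREAD_COND_INITIALIZER;
static unsigned int reset_counter;

static void signal_reset_complete(void)
{
	pthread_mutex_lock(&lock);
	reset_counter++;		/* publish the new state first... */
	pthread_mutex_unlock(&lock);
	pthread_cond_broadcast(&reset_queue);	/* ...then wake everyone */
}

static void wait_for_reset(unsigned int seen)
{
	pthread_mutex_lock(&lock);
	while (reset_counter == seen)	/* always re-check after waking */
		pthread_cond_wait(&reset_queue, &lock);
	pthread_mutex_unlock(&lock);
}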
*/ - i915_error_wake_up(dev_priv, true); + wake_up_all(&dev_priv->gpu_error.reset_queue); } } @@ -2714,7 +2690,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv, * ensure that the waiters see the updated value of the reset * counter atomic_t. */ - i915_error_wake_up(dev_priv, false); + i915_error_wake_up(dev_priv); } i915_reset_and_wakeup(dev_priv); @@ -2725,7 +2701,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv, */ static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -2742,7 +2718,7 @@ static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe) static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); @@ -2756,7 +2732,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -2769,7 +2745,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe) static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -2784,7 +2760,7 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) */ static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -2796,7 +2772,7 @@ static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe) static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); @@ -2808,7 +2784,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -2819,7 +2795,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe) static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -2835,9 +2811,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno) } static bool -ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr) +ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr) { - if (INTEL_GEN(dev_priv) >= 8) { + if (INTEL_GEN(engine->i915) >= 8) { return (ipehr >> 23) == 0x1c; } else { ipehr &= ~MI_SEMAPHORE_SYNC_MASK; @@ -2908,7 +2884,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) return NULL; ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); - if (!ipehr_is_semaphore_wait(engine->i915, ipehr)) + if (!ipehr_is_semaphore_wait(engine, ipehr)) return NULL; /* @@ -2966,7 +2942,7 @@ static int semaphore_passed(struct intel_engine_cs *engine) if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES) return -1; - if (i915_seqno_passed(signaller->get_seqno(signaller), seqno)) + if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno)) return 1; /* cursory check for an unkickable deadlock */ @@ -3078,23 +3054,21 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd) return HANGCHECK_HUNG; } -static unsigned kick_waiters(struct intel_engine_cs *engine) +static unsigned long kick_waiters(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; - unsigned user_interrupts = READ_ONCE(engine->user_interrupts); + unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups); - if (engine->hangcheck.user_interrupts == user_interrupts && + if (engine->hangcheck.user_interrupts == irq_count && !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) { - if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine))) + if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings)) DRM_ERROR("Hangcheck timer elapsed... %s idle\n", engine->name); - else - DRM_INFO("Fake missed irq on %s\n", - engine->name); - wake_up_all(&engine->irq_queue); + + intel_engine_enable_fake_irq(engine); } - return user_interrupts; + return irq_count; } /* * This is called when the chip hasn't reported back with completed @@ -3110,9 +3084,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work) container_of(work, typeof(*dev_priv), gpu_error.hangcheck_work.work); struct intel_engine_cs *engine; - enum intel_engine_id id; - int busy_count = 0, rings_hung = 0; - bool stuck[I915_NUM_ENGINES] = { 0 }; + unsigned int hung = 0, stuck = 0; + int busy_count = 0; #define BUSY 1 #define KICK 5 #define HUNG 20 @@ -3121,12 +3094,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (!i915.enable_hangcheck) return; - /* - * The hangcheck work is synced during runtime suspend, we don't - * require a wakeref. TODO: instead of disabling the asserts make - * sure that we hold a reference when this work is running. 
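/*
 * kick_waiters() above now detects missed interrupts by comparing the
 * per-engine wakeup count sampled on the previous hangcheck pass with the
 * current one: waiters present but no new wakeups means the IRQ probably
 * went missing, so the timer-based fake IRQ is armed.  A stripped-down
 * model of that check; the struct layout and enable_fake_irq() below are
 * stand-ins, not the driver's:
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_engine {
	const char *name;
	unsigned long irq_wakeups;	/* bumped by the IRQ handler */
	unsigned long seen_wakeups;	/* snapshot from the last pass */
	bool fake_irq;
};

static void enable_fake_irq(struct toy_engine *e)
{
	e->fake_irq = true;		/* poll the seqno from a timer */
}

static void hangcheck_pass(struct toy_engine *e, bool has_waiter)
{
	unsigned long now = e->irq_wakeups;

	if (has_waiter && e->seen_wakeups == now) {
		fprintf(stderr, "missed irq on %s?\n", e->name);
		enable_fake_irq(e);
	}
	e->seen_wakeups = now;		/* baseline for the next pass */
}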
- */ - DISABLE_RPM_WAKEREF_ASSERTS(dev_priv); + if (!READ_ONCE(dev_priv->gt.awake)) + return; /* As enabling the GPU requires fairly extensive mmio access, * periodically arm the mmio checker to see if we are triggering @@ -3134,11 +3103,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work) */ intel_uncore_arm_unclaimed_mmio_detection(dev_priv); - for_each_engine_id(engine, dev_priv, id) { + for_each_engine(engine, dev_priv) { + bool busy = intel_engine_has_waiter(engine); u64 acthd; u32 seqno; unsigned user_interrupts; - bool busy = true; semaphore_clear_deadlocks(dev_priv); @@ -3153,7 +3122,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) engine->irq_seqno_barrier(engine); acthd = intel_ring_get_active_head(engine); - seqno = engine->get_seqno(engine); + seqno = intel_engine_get_seqno(engine); /* Reset stuck interrupts between batch advances */ user_interrupts = 0; @@ -3161,12 +3130,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (engine->hangcheck.seqno == seqno) { if (ring_idle(engine, seqno)) { engine->hangcheck.action = HANGCHECK_IDLE; - if (waitqueue_active(&engine->irq_queue)) { + if (busy) { /* Safeguard against driver failure */ user_interrupts = kick_waiters(engine); engine->hangcheck.score += BUSY; - } else - busy = false; + } } else { /* We always increment the hangcheck score * if the ring is busy and still processing @@ -3198,10 +3166,15 @@ static void i915_hangcheck_elapsed(struct work_struct *work) break; case HANGCHECK_HUNG: engine->hangcheck.score += HUNG; - stuck[id] = true; break; } } + + if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { + hung |= intel_engine_flag(engine); + if (engine->hangcheck.action != HANGCHECK_HUNG) + stuck |= intel_engine_flag(engine); + } } else { engine->hangcheck.action = HANGCHECK_ACTIVE; @@ -3226,48 +3199,33 @@ static void i915_hangcheck_elapsed(struct work_struct *work) busy_count += busy; } - for_each_engine_id(engine, dev_priv, id) { - if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { - DRM_INFO("%s on %s\n", - stuck[id] ? "stuck" : "no progress", - engine->name); - rings_hung |= intel_engine_flag(engine); - } - } + if (hung) { + char msg[80]; + int len; - if (rings_hung) { - i915_handle_error(dev_priv, rings_hung, "Engine(s) hung"); - goto out; + /* If some rings hung but others were still busy, only + * blame the hanging rings in the synopsis. + */ + if (stuck != hung) + hung &= ~stuck; + len = scnprintf(msg, sizeof(msg), + "%s on ", stuck == hung ? "No progress" : "Hang"); + for_each_engine_masked(engine, dev_priv, hung) + len += scnprintf(msg + len, sizeof(msg) - len, + "%s, ", engine->name); + msg[len-2] = '\0'; + + return i915_handle_error(dev_priv, hung, msg); } + /* Reset timer in case GPU hangs without another request being added */ if (busy_count) - /* Reset timer case chip hangs without another request - * being added */ i915_queue_hangcheck(dev_priv); - -out: - ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); -} - -void i915_queue_hangcheck(struct drm_i915_private *dev_priv) -{ - struct i915_gpu_error *e = &dev_priv->gpu_error; - - if (!i915.enable_hangcheck) - return; - - /* Don't continually defer the hangcheck so that it is always run at - * least once after work has been scheduled on any ring. Otherwise, - * we will ignore a hung ring if a second ring is kept busy. 
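/*
 * The hangcheck rewrite above assembles its synopsis ("Hang on rcs, vcs")
 * by scnprintf-ing each hung engine's name into one buffer and trimming
 * the trailing ", ".  The same technique in plain C; scnprintf() here is
 * a minimal userspace stand-in for the kernel helper, which returns the
 * number of characters actually stored rather than the would-be length:
 */
#include <stdarg.h>
#include <stdio.h>

static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (n < 0)
		return 0;
	return (size_t)n < size ? n : (int)(size - 1);
}

int main(void)
{
	const char *names[] = { "rcs", "bcs", "vcs", "vecs" };
	unsigned hung = 0x5;		/* one bit per engine, as above */
	char msg[80];
	int len, i;

	len = scnprintf(msg, sizeof(msg), "%s on ", "Hang");
	for (i = 0; i < 4; i++)
		if (hung & (1u << i))
			len += scnprintf(msg + len, sizeof(msg) - len,
					 "%s, ", names[i]);
	msg[len - 2] = '\0';		/* drop the final ", " */
	puts(msg);			/* prints "Hang on rcs, vcs" */
	return 0;
}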
- */ - - queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work, - round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES)); } static void ibx_irq_reset(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (HAS_PCH_NOP(dev)) return; @@ -3288,7 +3246,7 @@ static void ibx_irq_reset(struct drm_device *dev) */ static void ibx_irq_pre_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (HAS_PCH_NOP(dev)) return; @@ -3300,7 +3258,7 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev) static void gen5_gt_irq_reset(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); GEN5_IRQ_RESET(GT); if (INTEL_INFO(dev)->gen >= 6) @@ -3360,7 +3318,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) */ static void ironlake_irq_reset(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(HWSTAM, 0xffffffff); @@ -3375,7 +3333,7 @@ static void ironlake_irq_reset(struct drm_device *dev) static void valleyview_irq_preinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(VLV_MASTER_IER, 0); POSTING_READ(VLV_MASTER_IER); @@ -3398,7 +3356,7 @@ static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) static void gen8_irq_reset(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe; I915_WRITE(GEN8_MASTER_IRQ, 0); @@ -3444,12 +3402,12 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, spin_unlock_irq(&dev_priv->irq_lock); /* make sure we're done processing display irqs */ - synchronize_irq(dev_priv->dev->irq); + synchronize_irq(dev_priv->drm.irq); } static void cherryview_irq_preinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(GEN8_MASTER_IRQ, 0); POSTING_READ(GEN8_MASTER_IRQ); @@ -3470,7 +3428,7 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, struct intel_encoder *encoder; u32 enabled_irqs = 0; - for_each_intel_encoder(dev_priv->dev, encoder) + for_each_intel_encoder(&dev_priv->drm, encoder) if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) enabled_irqs |= hpd[encoder->hpd_pin]; @@ -3601,7 +3559,7 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) static void ibx_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 mask; if (HAS_PCH_NOP(dev)) @@ -3618,7 +3576,7 @@ static void ibx_irq_postinstall(struct drm_device *dev) static void gen5_gt_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 pm_irqs, gt_irqs; pm_irqs = gt_irqs = 0; @@ -3632,8 +3590,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) gt_irqs |= GT_RENDER_USER_INTERRUPT; if (IS_GEN5(dev)) { - gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | - ILK_BSD_USER_INTERRUPT; + gt_irqs |= ILK_BSD_USER_INTERRUPT; } else { gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; } @@ -3655,7 +3612,7 @@ 
static void gen5_gt_irq_postinstall(struct drm_device *dev) static int ironlake_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 display_mask, extra_mask; if (INTEL_INFO(dev)->gen >= 7) { @@ -3734,7 +3691,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) static int valleyview_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); gen5_gt_irq_postinstall(dev); @@ -3827,7 +3784,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) static int gen8_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (HAS_PCH_SPLIT(dev)) ibx_irq_pre_postinstall(dev); @@ -3846,7 +3803,7 @@ static int gen8_irq_postinstall(struct drm_device *dev) static int cherryview_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); gen8_gt_irq_postinstall(dev_priv); @@ -3863,7 +3820,7 @@ static int cherryview_irq_postinstall(struct drm_device *dev) static void gen8_irq_uninstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!dev_priv) return; @@ -3873,7 +3830,7 @@ static void gen8_irq_uninstall(struct drm_device *dev) static void valleyview_irq_uninstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!dev_priv) return; @@ -3893,7 +3850,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev) static void cherryview_irq_uninstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!dev_priv) return; @@ -3913,7 +3870,7 @@ static void cherryview_irq_uninstall(struct drm_device *dev) static void ironlake_irq_uninstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!dev_priv) return; @@ -3923,7 +3880,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev) static void i8xx_irq_preinstall(struct drm_device * dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe; for_each_pipe(dev_priv, pipe) @@ -3935,7 +3892,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev) static int i8xx_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); @@ -3998,7 +3955,7 @@ check_page_flip: static irqreturn_t i8xx_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u16 iir, new_iir; u32 pipe_stats[2]; int pipe; @@ -4075,7 +4032,7 @@ out: static void i8xx_irq_uninstall(struct drm_device * dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe; for_each_pipe(dev_priv, pipe) { @@ -4090,7 +4047,7 @@ static void i8xx_irq_uninstall(struct drm_device * dev) static void i915_irq_preinstall(struct drm_device * dev) { - struct 
drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe; if (I915_HAS_HOTPLUG(dev)) { @@ -4108,7 +4065,7 @@ static void i915_irq_preinstall(struct drm_device * dev) static int i915_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 enable_mask; I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); @@ -4187,7 +4144,7 @@ check_page_flip: static irqreturn_t i915_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; u32 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | @@ -4292,7 +4249,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) static void i915_irq_uninstall(struct drm_device * dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe; if (I915_HAS_HOTPLUG(dev)) { @@ -4314,7 +4271,7 @@ static void i915_irq_uninstall(struct drm_device * dev) static void i965_irq_preinstall(struct drm_device * dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe; i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); @@ -4330,7 +4287,7 @@ static void i965_irq_preinstall(struct drm_device * dev) static int i965_irq_postinstall(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 enable_mask; u32 error_mask; @@ -4414,7 +4371,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) static irqreturn_t i965_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 iir, new_iir; u32 pipe_stats[I915_MAX_PIPES]; int ret = IRQ_NONE, pipe; @@ -4523,7 +4480,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) static void i965_irq_uninstall(struct drm_device * dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe; if (!dev_priv) @@ -4553,7 +4510,7 @@ static void i965_irq_uninstall(struct drm_device * dev) */ void intel_irq_init(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; intel_hpd_init_work(dev_priv); @@ -4631,7 +4588,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->disable_vblank = gen8_disable_vblank; if (IS_BROXTON(dev)) dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; - else if (HAS_PCH_SPT(dev)) + else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev)) dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; else dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; @@ -4687,7 +4644,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv) */ dev_priv->pm.irqs_enabled = true; - return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq); + return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq); } /** @@ -4699,7 +4656,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv) */ void intel_irq_uninstall(struct drm_i915_private *dev_priv) { - drm_irq_uninstall(dev_priv->dev); + drm_irq_uninstall(&dev_priv->drm); intel_hpd_cancel_work(dev_priv); dev_priv->pm.irqs_enabled = false; } @@ -4713,9 +4670,9 @@ void 
intel_irq_uninstall(struct drm_i915_private *dev_priv) */ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) { - dev_priv->dev->driver->irq_uninstall(dev_priv->dev); + dev_priv->drm.driver->irq_uninstall(&dev_priv->drm); dev_priv->pm.irqs_enabled = false; - synchronize_irq(dev_priv->dev->irq); + synchronize_irq(dev_priv->drm.irq); } /** @@ -4728,6 +4685,6 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) { dev_priv->pm.irqs_enabled = true; - dev_priv->dev->driver->irq_preinstall(dev_priv->dev); - dev_priv->dev->driver->irq_postinstall(dev_priv->dev); + dev_priv->drm.driver->irq_preinstall(&dev_priv->drm); + dev_priv->drm.driver->irq_postinstall(&dev_priv->drm); } diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 7effe68d552c..8b13bfa47fba 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -224,6 +224,6 @@ module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600 MODULE_PARM_DESC(enable_dpcd_backlight, "Enable support for DPCD backlight control (default:false)"); -module_param_named(enable_gvt, i915.enable_gvt, bool, 0600); +module_param_named(enable_gvt, i915.enable_gvt, bool, 0400); MODULE_PARM_DESC(enable_gvt, "Enable support for Intel GVT-g graphics virtualization host support(default:false)"); diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c new file mode 100644 index 000000000000..949c01686a66 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -0,0 +1,503 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
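Nearly every hunk above is the same mechanical substitution: now that the drm_device is embedded inside drm_i915_private as its drm member, dev->dev_private lookups become to_i915() upcasts. A minimal sketch of the idiom (the real definitions live in i915_drv.h):

	struct drm_i915_private {
		struct drm_device drm;	/* embedded; no separately allocated drm_device */
		/* ...driver-private state... */
	};

	static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
	{
		/* recover the container from a pointer to its embedded member */
		return container_of(dev, struct drm_i915_private, drm);
	}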
+ * + */ + +#include <linux/console.h> +#include <linux/vgaarb.h> +#include <linux/vga_switcheroo.h> + +#include "i915_drv.h" + +#define GEN_DEFAULT_PIPEOFFSETS \ + .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ + PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ + .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ + TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \ + .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET } + +#define GEN_CHV_PIPEOFFSETS \ + .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ + CHV_PIPE_C_OFFSET }, \ + .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ + CHV_TRANSCODER_C_OFFSET, }, \ + .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \ + CHV_PALETTE_C_OFFSET } + +#define CURSOR_OFFSETS \ + .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET } + +#define IVB_CURSOR_OFFSETS \ + .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET } + +#define BDW_COLORS \ + .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 } +#define CHV_COLORS \ + .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 } + +static const struct intel_device_info intel_i830_info = { + .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, + .has_overlay = 1, .overlay_needs_physical = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_845g_info = { + .gen = 2, .num_pipes = 1, + .has_overlay = 1, .overlay_needs_physical = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_i85x_info = { + .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2, + .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, + .has_fbc = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_i865g_info = { + .gen = 2, .num_pipes = 1, + .has_overlay = 1, .overlay_needs_physical = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_i915g_info = { + .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2, + .has_overlay = 1, .overlay_needs_physical = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; +static const struct intel_device_info intel_i915gm_info = { + .gen = 3, .is_mobile = 1, .num_pipes = 2, + .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, + .supports_tv = 1, + .has_fbc = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; +static const struct intel_device_info intel_i945g_info = { + .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2, + .has_overlay = 1, .overlay_needs_physical = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; +static const struct intel_device_info intel_i945gm_info = { + .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2, + .has_hotplug = 1, .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, + .supports_tv = 1, + .has_fbc = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_i965g_info = { + .gen = 4, .is_broadwater = 1, .num_pipes = 2, + .has_hotplug = 1, + .has_overlay = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_i965gm_info = { + .gen = 4, .is_crestline 
= 1, .num_pipes = 2, + .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, + .has_overlay = 1, + .supports_tv = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_g33_info = { + .gen = 3, .is_g33 = 1, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_overlay = 1, + .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_g45_info = { + .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2, + .has_pipe_cxsr = 1, .has_hotplug = 1, + .ring_mask = RENDER_RING | BSD_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_gm45_info = { + .gen = 4, .is_g4x = 1, .num_pipes = 2, + .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, + .has_pipe_cxsr = 1, .has_hotplug = 1, + .supports_tv = 1, + .ring_mask = RENDER_RING | BSD_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_pineview_info = { + .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_overlay = 1, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_ironlake_d_info = { + .gen = 5, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .ring_mask = RENDER_RING | BSD_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_ironlake_m_info = { + .gen = 5, .is_mobile = 1, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_fbc = 1, + .ring_mask = RENDER_RING | BSD_RING, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_sandybridge_d_info = { + .gen = 6, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_fbc = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING, + .has_llc = 1, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +static const struct intel_device_info intel_sandybridge_m_info = { + .gen = 6, .is_mobile = 1, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_fbc = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING, + .has_llc = 1, + GEN_DEFAULT_PIPEOFFSETS, + CURSOR_OFFSETS, +}; + +#define GEN7_FEATURES \ + .gen = 7, .num_pipes = 3, \ + .need_gfx_hws = 1, .has_hotplug = 1, \ + .has_fbc = 1, \ + .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ + .has_llc = 1, \ + GEN_DEFAULT_PIPEOFFSETS, \ + IVB_CURSOR_OFFSETS + +static const struct intel_device_info intel_ivybridge_d_info = { + GEN7_FEATURES, + .is_ivybridge = 1, +}; + +static const struct intel_device_info intel_ivybridge_m_info = { + GEN7_FEATURES, + .is_ivybridge = 1, + .is_mobile = 1, +}; + +static const struct intel_device_info intel_ivybridge_q_info = { + GEN7_FEATURES, + .is_ivybridge = 1, + .num_pipes = 0, /* legal, last one wins */ +}; + +#define VLV_FEATURES \ + .gen = 7, .num_pipes = 2, \ + .need_gfx_hws = 1, .has_hotplug = 1, \ + .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ + .display_mmio_offset = VLV_DISPLAY_BASE, \ + GEN_DEFAULT_PIPEOFFSETS, \ + CURSOR_OFFSETS + +static const struct intel_device_info intel_valleyview_m_info = { + VLV_FEATURES, + .is_valleyview = 1, + .is_mobile = 1, +}; + +static const struct intel_device_info intel_valleyview_d_info = { + VLV_FEATURES, + .is_valleyview = 1, +}; + +#define HSW_FEATURES \ + GEN7_FEATURES, \ + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ + .has_ddi = 1, \ + .has_fpga_dbg = 1 + +static const struct intel_device_info 
intel_haswell_d_info = { + HSW_FEATURES, + .is_haswell = 1, +}; + +static const struct intel_device_info intel_haswell_m_info = { + HSW_FEATURES, + .is_haswell = 1, + .is_mobile = 1, +}; + +#define BDW_FEATURES \ + HSW_FEATURES, \ + BDW_COLORS + +static const struct intel_device_info intel_broadwell_d_info = { + BDW_FEATURES, + .gen = 8, + .is_broadwell = 1, +}; + +static const struct intel_device_info intel_broadwell_m_info = { + BDW_FEATURES, + .gen = 8, .is_mobile = 1, + .is_broadwell = 1, +}; + +static const struct intel_device_info intel_broadwell_gt3d_info = { + BDW_FEATURES, + .gen = 8, + .is_broadwell = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, +}; + +static const struct intel_device_info intel_broadwell_gt3m_info = { + BDW_FEATURES, + .gen = 8, .is_mobile = 1, + .is_broadwell = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, +}; + +static const struct intel_device_info intel_cherryview_info = { + .gen = 8, .num_pipes = 3, + .need_gfx_hws = 1, .has_hotplug = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, + .is_cherryview = 1, + .display_mmio_offset = VLV_DISPLAY_BASE, + GEN_CHV_PIPEOFFSETS, + CURSOR_OFFSETS, + CHV_COLORS, +}; + +static const struct intel_device_info intel_skylake_info = { + BDW_FEATURES, + .is_skylake = 1, + .gen = 9, +}; + +static const struct intel_device_info intel_skylake_gt3_info = { + BDW_FEATURES, + .is_skylake = 1, + .gen = 9, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, +}; + +static const struct intel_device_info intel_broxton_info = { + .is_broxton = 1, + .gen = 9, + .need_gfx_hws = 1, .has_hotplug = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, + .num_pipes = 3, + .has_ddi = 1, + .has_fpga_dbg = 1, + .has_fbc = 1, + .has_pooled_eu = 0, + GEN_DEFAULT_PIPEOFFSETS, + IVB_CURSOR_OFFSETS, + BDW_COLORS, +}; + +static const struct intel_device_info intel_kabylake_info = { + BDW_FEATURES, + .is_kabylake = 1, + .gen = 9, +}; + +static const struct intel_device_info intel_kabylake_gt3_info = { + BDW_FEATURES, + .is_kabylake = 1, + .gen = 9, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, +}; + +/* + * Make sure any device matches here are from most specific to most + * general. For example, since the Quanta match is based on the subsystem + * and subvendor IDs, we need it to come before the more general IVB + * PCI ID matches, otherwise we'll use the wrong info struct above. 
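For context on the ID table that follows: pci_device_id tables are matched first-hit-wins, which is why the ordering comment above is load-bearing, and each INTEL_*_IDS() macro expands to entries whose driver_data carries the pointer to the matching intel_device_info. A hedged sketch of the underlying entry macro, along the lines of include/drm/i915_pciids.h (details may differ):

	/* Illustrative only: Intel vendor ID, any subsystem, VGA display class. */
	#define INTEL_VGA_DEVICE(id, info) {		\
		0x8086, id,				\
		~0, ~0,					\
		0x030000, 0xff0000,			\
		(unsigned long) info }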
+ */ +static const struct pci_device_id pciidlist[] = { + INTEL_I830_IDS(&intel_i830_info), + INTEL_I845G_IDS(&intel_845g_info), + INTEL_I85X_IDS(&intel_i85x_info), + INTEL_I865G_IDS(&intel_i865g_info), + INTEL_I915G_IDS(&intel_i915g_info), + INTEL_I915GM_IDS(&intel_i915gm_info), + INTEL_I945G_IDS(&intel_i945g_info), + INTEL_I945GM_IDS(&intel_i945gm_info), + INTEL_I965G_IDS(&intel_i965g_info), + INTEL_G33_IDS(&intel_g33_info), + INTEL_I965GM_IDS(&intel_i965gm_info), + INTEL_GM45_IDS(&intel_gm45_info), + INTEL_G45_IDS(&intel_g45_info), + INTEL_PINEVIEW_IDS(&intel_pineview_info), + INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), + INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), + INTEL_SNB_D_IDS(&intel_sandybridge_d_info), + INTEL_SNB_M_IDS(&intel_sandybridge_m_info), + INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ + INTEL_IVB_M_IDS(&intel_ivybridge_m_info), + INTEL_IVB_D_IDS(&intel_ivybridge_d_info), + INTEL_HSW_D_IDS(&intel_haswell_d_info), + INTEL_HSW_M_IDS(&intel_haswell_m_info), + INTEL_VLV_M_IDS(&intel_valleyview_m_info), + INTEL_VLV_D_IDS(&intel_valleyview_d_info), + INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), + INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), + INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), + INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), + INTEL_CHV_IDS(&intel_cherryview_info), + INTEL_SKL_GT1_IDS(&intel_skylake_info), + INTEL_SKL_GT2_IDS(&intel_skylake_info), + INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), + INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info), + INTEL_BXT_IDS(&intel_broxton_info), + INTEL_KBL_GT1_IDS(&intel_kabylake_info), + INTEL_KBL_GT2_IDS(&intel_kabylake_info), + INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), + INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), + {0, 0, 0} +}; +MODULE_DEVICE_TABLE(pci, pciidlist); + +extern int i915_driver_load(struct pci_dev *pdev, + const struct pci_device_id *ent); + +static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct intel_device_info *intel_info = + (struct intel_device_info *) ent->driver_data; + + if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) { + DRM_INFO("This hardware requires preliminary hardware support.\n" + "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n"); + return -ENODEV; + } + + /* Only bind to function 0 of the device. Early generations + * used function 1 as a placeholder for multi-head. This causes + * us confusion instead, especially on the systems where both + * functions have the same PCI-ID! + */ + if (PCI_FUNC(pdev->devfn)) + return -ENODEV; + + /* + * apple-gmux is needed on dual GPU MacBook Pro + * to probe the panel if we're the inactive GPU. + */ + if (vga_switcheroo_client_probe_defer(pdev)) + return -EPROBE_DEFER; + + return i915_driver_load(pdev, ent); +} + +extern void i915_driver_unload(struct drm_device *dev); + +static void i915_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + i915_driver_unload(dev); + drm_dev_unref(dev); +} + +extern const struct dev_pm_ops i915_pm_ops; + +static struct pci_driver i915_pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = i915_pci_probe, + .remove = i915_pci_remove, + .driver.pm = &i915_pm_ops, +}; + +static int __init i915_init(void) +{ + bool use_kms = true; + + /* + * Enable KMS by default, unless explicitly overridden by + * either the i915.modeset parameter or by the + * vga_text_mode_force boot option.
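The i915.modeset parameter referenced in this comment is a tri-state: -1 (auto, the default), 0 (KMS off) and 1 (KMS on); only the auto setting defers to vgacon_text_force(), i.e. the nomodeset boot option. A compact restatement of the decision implemented just below, purely for reference and not the driver's actual helper:

	/* i915.modeset: -1 = auto (default), 0 = KMS off, 1 = KMS on */
	static bool i915_use_kms(void)
	{
		if (i915.modeset == 0)			/* explicit modeset=0 */
			return false;
		if (i915.modeset == -1 && vgacon_text_force())
			return false;			/* auto, but "nomodeset" was booted */
		return true;
	}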
+ */ + + if (i915.modeset == 0) + use_kms = false; + + if (vgacon_text_force() && i915.modeset == -1) + use_kms = false; + + if (!use_kms) { + /* Silently fail loading to not upset userspace. */ + DRM_DEBUG_DRIVER("KMS disabled.\n"); + return 0; + } + + return pci_register_driver(&i915_pci_driver); +} + +static void __exit i915_exit(void) +{ + if (!i915_pci_driver.driver.owner) + return; + + pci_unregister_driver(&i915_pci_driver); +} + +module_init(i915_init); +module_exit(i915_exit); + +MODULE_AUTHOR("Tungsten Graphics, Inc."); +MODULE_AUTHOR("Intel Corporation"); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c6bfbf8d7cca..8bfde75789f6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7070,7 +7070,8 @@ enum { #define GEN6_RPDEUC _MMIO(0xA084) #define GEN6_RPDEUCSW _MMIO(0xA088) #define GEN6_RC_STATE _MMIO(0xA094) -#define RC6_STATE (1 << 18) +#define RC_SW_TARGET_STATE_SHIFT 16 +#define RC_SW_TARGET_STATE_MASK (7 << RC_SW_TARGET_STATE_SHIFT) #define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098) #define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C) #define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0) @@ -7085,12 +7086,16 @@ enum { #define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0) #define GEN6_PMINTRMSK _MMIO(0xA168) #define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31) +#define GEN8_MISC_CTRL0 _MMIO(0xA180) #define VLV_PWRDWNUPCTL _MMIO(0xA294) #define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4) #define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8) #define GEN9_PG_ENABLE _MMIO(0xA210) #define GEN9_RENDER_PG_ENABLE (1<<0) #define GEN9_MEDIA_PG_ENABLE (1<<1) +#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248) +#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250) +#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C) #define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C) #define PIXEL_OVERLAP_CNT_MASK (3 << 30) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 34e061a9ef06..5cfe4c7716b4 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -31,7 +31,7 @@ static void i915_save_display(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* Display arbitration control */ if (INTEL_INFO(dev)->gen <= 4) @@ -63,7 +63,7 @@ static void i915_save_display(struct drm_device *dev) static void i915_restore_display(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 mask = 0xffffffff; /* Display arbitration */ @@ -103,7 +103,7 @@ static void i915_restore_display(struct drm_device *dev) int i915_save_state(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; mutex_lock(&dev->struct_mutex); @@ -148,7 +148,7 @@ int i915_save_state(struct drm_device *dev) int i915_restore_state(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 02507bfc8def..d61829e54f93 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -38,7 +38,7 @@ static u32 calc_residency(struct drm_device *dev, i915_reg_t reg) { - struct drm_i915_private *dev_priv = 
dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u64 raw_time; /* 32b value may overflow during fixed point math */ u64 units = 128ULL, div = 100000ULL; u32 ret; @@ -166,7 +166,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct drm_minor *dminor = dev_to_drm_minor(dev); struct drm_device *drm_dev = dminor->dev; - struct drm_i915_private *dev_priv = drm_dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(drm_dev); int slice = (int)(uintptr_t)attr->private; int ret; @@ -202,7 +202,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct drm_minor *dminor = dev_to_drm_minor(dev); struct drm_device *drm_dev = dminor->dev; - struct drm_i915_private *dev_priv = drm_dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(drm_dev); struct i915_gem_context *ctx; u32 *temp = NULL; /* Just here to make handling failures easy */ int slice = (int)(uintptr_t)attr->private; @@ -227,13 +227,6 @@ i915_l3_write(struct file *filp, struct kobject *kobj, } } - ret = i915_gpu_idle(drm_dev); - if (ret) { - kfree(temp); - mutex_unlock(&drm_dev->struct_mutex); - return ret; - } - /* TODO: Ideally we really want a GPU reset here to make sure errors * aren't propagated. Since I cannot find a stable way to reset the GPU * at this point it is left as a TODO. @@ -275,7 +268,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, { struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -309,7 +302,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, { struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -330,7 +323,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, { struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); return snprintf(buf, PAGE_SIZE, "%d\n", @@ -341,7 +334,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute { struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -359,7 +352,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, { struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val; ssize_t ret; @@ -409,7 +402,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute { struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -427,7 +420,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, { struct drm_minor *minor = dev_to_drm_minor(kdev); struct 
drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val; ssize_t ret; @@ -487,7 +480,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr { struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val; if (attr == &dev_attr_gt_RP0_freq_mhz) diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 6768db032f84..534154e05fbe 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -118,7 +118,7 @@ TRACE_EVENT(i915_gem_shrink, ), TP_fast_assign( - __entry->dev = i915->dev->primary->index; + __entry->dev = i915->drm.primary->index; __entry->target = target; __entry->flags = flags; ), @@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to, ), TP_fast_assign( - __entry->dev = from->i915->dev->primary->index; + __entry->dev = from->i915->drm.primary->index; __entry->sync_from = from->id; __entry->sync_to = to_req->engine->id; __entry->seqno = i915_gem_request_get_seqno(req); @@ -486,11 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch, ), TP_fast_assign( - __entry->dev = req->i915->dev->primary->index; + __entry->dev = req->i915->drm.primary->index; __entry->ring = req->engine->id; __entry->seqno = req->seqno; __entry->flags = flags; - i915_trace_irq_get(req->engine, req); + intel_engine_enable_signaling(req); ), TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", @@ -509,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush, ), TP_fast_assign( - __entry->dev = req->i915->dev->primary->index; + __entry->dev = req->i915->drm.primary->index; __entry->ring = req->engine->id; __entry->invalidate = invalidate; __entry->flush = flush; @@ -531,7 +531,7 @@ DECLARE_EVENT_CLASS(i915_gem_request, ), TP_fast_assign( - __entry->dev = req->i915->dev->primary->index; + __entry->dev = req->i915->drm.primary->index; __entry->ring = req->engine->id; __entry->seqno = req->seqno; ), @@ -556,9 +556,9 @@ TRACE_EVENT(i915_gem_request_notify, ), TP_fast_assign( - __entry->dev = engine->i915->dev->primary->index; + __entry->dev = engine->i915->drm.primary->index; __entry->ring = engine->id; - __entry->seqno = engine->get_seqno(engine); + __entry->seqno = intel_engine_get_seqno(engine); ), TP_printk("dev=%u, ring=%u, seqno=%u", @@ -593,11 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin, * less desirable. */ TP_fast_assign( - __entry->dev = req->i915->dev->primary->index; + __entry->dev = req->i915->drm.primary->index; __entry->ring = req->engine->id; __entry->seqno = req->seqno; __entry->blocking = - mutex_is_locked(&req->i915->dev->struct_mutex); + mutex_is_locked(&req->i915->drm.struct_mutex); ), TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", @@ -746,7 +746,7 @@ DECLARE_EVENT_CLASS(i915_context, TP_fast_assign( __entry->ctx = ctx; __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL; - __entry->dev = ctx->i915->dev->primary->index; + __entry->dev = ctx->i915->drm.primary->index; ), TP_printk("dev=%u, ctx=%p, ctx_vm=%p", @@ -786,7 +786,7 @@ TRACE_EVENT(switch_mm, __entry->ring = engine->id; __entry->to = to; __entry->vm = to->ppgtt? 
&to->ppgtt->base : NULL; - __entry->dev = engine->i915->dev->primary->index; + __entry->dev = engine->i915->drm.primary->index; ), TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p", diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index b9329c2a670a..6700a7be7f78 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -154,7 +154,7 @@ static bool audio_rate_need_prog(struct intel_crtc *crtc, { if (((mode->clock == TMDS_297M) || (mode->clock == TMDS_296M)) && - intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) + intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) return true; else return false; @@ -165,7 +165,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector, i915_reg_t reg_elda, uint32_t bits_elda, i915_reg_t reg_edid) { - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); uint8_t *eld = connector->eld; uint32_t tmp; int i; @@ -189,7 +189,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector, static void g4x_audio_codec_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); uint32_t eldv, tmp; DRM_DEBUG_KMS("Disable audio codec\n"); @@ -210,7 +210,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector, struct intel_encoder *encoder, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); uint8_t *eld = connector->eld; uint32_t eldv; uint32_t tmp; @@ -247,7 +247,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector, static void hsw_audio_codec_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); enum pipe pipe = intel_crtc->pipe; uint32_t tmp; @@ -262,7 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder) tmp |= AUD_CONFIG_N_PROG_ENABLE; tmp &= ~AUD_CONFIG_UPPER_N_MASK; tmp &= ~AUD_CONFIG_LOWER_N_MASK; - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) + if (intel_crtc_has_dp_encoder(intel_crtc->config)) tmp |= AUD_CONFIG_N_VALUE_INDEX; I915_WRITE(HSW_AUD_CFG(pipe), tmp); @@ -279,7 +279,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector, struct intel_encoder *encoder, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); enum pipe pipe = intel_crtc->pipe; struct i915_audio_component *acomp = dev_priv->audio_component; @@ -328,7 +328,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector, tmp = I915_READ(HSW_AUD_CFG(pipe)); tmp &= ~AUD_CONFIG_N_VALUE_INDEX; tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) + if (intel_crtc_has_dp_encoder(intel_crtc->config)) tmp |= AUD_CONFIG_N_VALUE_INDEX; else tmp |= audio_config_hdmi_pixel_clock(adjusted_mode); @@ -357,7 +357,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector, static void ilk_audio_codec_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = 
encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); @@ -389,7 +389,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder) tmp |= AUD_CONFIG_N_PROG_ENABLE; tmp &= ~AUD_CONFIG_UPPER_N_MASK; tmp &= ~AUD_CONFIG_LOWER_N_MASK; - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) + if (intel_crtc_has_dp_encoder(intel_crtc->config)) tmp |= AUD_CONFIG_N_VALUE_INDEX; I915_WRITE(aud_config, tmp); @@ -405,7 +405,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector, struct intel_encoder *encoder, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); @@ -475,7 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector, tmp &= ~AUD_CONFIG_N_VALUE_INDEX; tmp &= ~AUD_CONFIG_N_PROG_ENABLE; tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) + if (intel_crtc_has_dp_encoder(intel_crtc->config)) tmp |= AUD_CONFIG_N_VALUE_INDEX; else tmp |= audio_config_hdmi_pixel_clock(adjusted_mode); @@ -496,7 +496,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder) const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; struct drm_connector *connector; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_audio_component *acomp = dev_priv->audio_component; struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); enum port port = intel_dig_port->port; @@ -513,7 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder) /* ELD Conn_Type */ connector->eld[5] &= ~(3 << 2); - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) + if (intel_crtc_has_dp_encoder(crtc->config)) connector->eld[5] |= (1 << 2); connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; @@ -543,7 +543,7 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder) { struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct i915_audio_component *acomp = dev_priv->audio_component; struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); enum port port = intel_dig_port->port; @@ -749,14 +749,14 @@ static int i915_audio_component_bind(struct device *i915_dev, if (WARN_ON(acomp->ops || acomp->dev)) return -EEXIST; - drm_modeset_lock_all(dev_priv->dev); + drm_modeset_lock_all(&dev_priv->drm); acomp->ops = &i915_audio_component_ops; acomp->dev = i915_dev; BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS); for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++) acomp->aud_sample_rate[i] = 0; dev_priv->audio_component = acomp; - drm_modeset_unlock_all(dev_priv->dev); + drm_modeset_unlock_all(&dev_priv->drm); return 0; } @@ -767,11 +767,11 @@ static void i915_audio_component_unbind(struct device *i915_dev, struct i915_audio_component *acomp = data; struct drm_i915_private *dev_priv = dev_to_i915(i915_dev); - drm_modeset_lock_all(dev_priv->dev); 
+ drm_modeset_lock_all(&dev_priv->drm); acomp->ops = NULL; acomp->dev = NULL; dev_priv->audio_component = NULL; - drm_modeset_unlock_all(dev_priv->dev); + drm_modeset_unlock_all(&dev_priv->drm); } static const struct component_ops i915_audio_component_bind_ops = { @@ -799,7 +799,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv) { int ret; - ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops); + ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); if (ret < 0) { DRM_ERROR("failed to add audio component (%d)\n", ret); /* continue with reduced functionality */ @@ -821,6 +821,6 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv) if (!dev_priv->audio_component_registered) return; - component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops); + component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops); dev_priv->audio_component_registered = false; } diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index da5ed4a850b9..c6e69e4cfa83 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1426,7 +1426,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size) int intel_bios_init(struct drm_i915_private *dev_priv) { - struct pci_dev *pdev = dev_priv->dev->pdev; + struct pci_dev *pdev = dev_priv->drm.pdev; const struct vbt_header *vbt = dev_priv->opregion.vbt; const struct bdb_header *bdb; u8 __iomem *bios = NULL; diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c new file mode 100644 index 000000000000..d89b2c963618 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -0,0 +1,586 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include <linux/kthread.h> + +#include "i915_drv.h" + +static void intel_breadcrumbs_fake_irq(unsigned long data) +{ + struct intel_engine_cs *engine = (struct intel_engine_cs *)data; + + /* + * The timer persists in case we cannot enable interrupts, + * or if we have previously seen seqno/interrupt incoherency + * ("missed interrupt" syndrome). Here the worker will wake up + * every jiffie in order to kick the oldest waiter to do the + * coherent seqno check. 
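The shape here is a self-rearming timer: the callback does one poll-and-kick, then re-arms itself for the next jiffy only while there is still a waiter to kick. Stripped of the i915 specifics, a sketch of the pattern using the same pre-4.15 timer API this code uses (struct and predicate hypothetical):

	#include <linux/timer.h>

	struct poller {
		struct timer_list timer;
		/* ... */
	};

	static void poll_cb(unsigned long data)
	{
		struct poller *p = (struct poller *)data;	/* hypothetical context */

		if (kick_oldest_waiter(p))		/* hypothetical predicate */
			mod_timer(&p->timer, jiffies + 1);	/* re-arm for next jiffy */
	}

	/* set up once, arm on demand: */
	setup_timer(&p->timer, poll_cb, (unsigned long)p);
	mod_timer(&p->timer, jiffies + 1);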
+ */ + rcu_read_lock(); + if (intel_engine_wakeup(engine)) + mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1); + rcu_read_unlock(); +} + +static void irq_enable(struct intel_engine_cs *engine) +{ + /* Enabling the IRQ may miss the generation of the interrupt, but + * we still need to force the barrier before reading the seqno, + * just in case. + */ + engine->breadcrumbs.irq_posted = true; + + spin_lock_irq(&engine->i915->irq_lock); + engine->irq_enable(engine); + spin_unlock_irq(&engine->i915->irq_lock); +} + +static void irq_disable(struct intel_engine_cs *engine) +{ + spin_lock_irq(&engine->i915->irq_lock); + engine->irq_disable(engine); + spin_unlock_irq(&engine->i915->irq_lock); + + engine->breadcrumbs.irq_posted = false; +} + +static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b) +{ + struct intel_engine_cs *engine = + container_of(b, struct intel_engine_cs, breadcrumbs); + struct drm_i915_private *i915 = engine->i915; + + assert_spin_locked(&b->lock); + if (b->rpm_wakelock) + return; + + /* Since we are waiting on a request, the GPU should be busy + * and should have its own rpm reference. For completeness, + * record an rpm reference for ourselves to cover the + * interrupt we unmask. + */ + intel_runtime_pm_get_noresume(i915); + b->rpm_wakelock = true; + + /* No interrupts? Kick the waiter every jiffie! */ + if (intel_irqs_enabled(i915)) { + if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings)) + irq_enable(engine); + b->irq_enabled = true; + } + + if (!b->irq_enabled || + test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) + mod_timer(&b->fake_irq, jiffies + 1); +} + +static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b) +{ + struct intel_engine_cs *engine = + container_of(b, struct intel_engine_cs, breadcrumbs); + + assert_spin_locked(&b->lock); + if (!b->rpm_wakelock) + return; + + if (b->irq_enabled) { + irq_disable(engine); + b->irq_enabled = false; + } + + intel_runtime_pm_put(engine->i915); + b->rpm_wakelock = false; +} + +static inline struct intel_wait *to_wait(struct rb_node *node) +{ + return container_of(node, struct intel_wait, node); +} + +static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b, + struct intel_wait *wait) +{ + assert_spin_locked(&b->lock); + + /* This request is completed, so remove it from the tree, mark it as + * complete, and *then* wake up the associated task. + */ + rb_erase(&wait->node, &b->waiters); + RB_CLEAR_NODE(&wait->node); + + wake_up_process(wait->tsk); /* implicit smp_wmb() */ +} + +static bool __intel_engine_add_wait(struct intel_engine_cs *engine, + struct intel_wait *wait) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct rb_node **p, *parent, *completed; + bool first; + u32 seqno; + + /* Insert the request into the retirement ordered list + * of waiters by walking the rbtree. If we are the oldest + * seqno in the tree (the first to be retired), then + * set ourselves as the bottom-half. + * + * As we descend the tree, prune completed branches since we hold the + * spinlock we know that the first_waiter must be delayed and can + * reduce some of the sequential wake up latency if we take action + * ourselves and wake up the completed tasks in parallel. Also, by + * removing stale elements in the tree, we may be able to reduce the + * ping-pong between the old bottom-half and ourselves as first-waiter. 
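Every ordering decision in the tree walk below funnels through i915_seqno_passed(), which compares 32-bit seqnos safely across wraparound; elsewhere in the driver it reduces to a signed subtraction:

	static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
	{
		/* true if seq1 is at or after seq2, modulo u32 wraparound */
		return (s32)(seq1 - seq2) >= 0;
	}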
+ */ + first = true; + parent = NULL; + completed = NULL; + seqno = intel_engine_get_seqno(engine); + + /* If the request completed before we managed to grab the spinlock, + * return now before adding ourselves to the rbtree. We let the + * current bottom-half handle any pending wakeups and instead + * try and get out of the way quickly. + */ + if (i915_seqno_passed(seqno, wait->seqno)) { + RB_CLEAR_NODE(&wait->node); + return first; + } + + p = &b->waiters.rb_node; + while (*p) { + parent = *p; + if (wait->seqno == to_wait(parent)->seqno) { + /* We have multiple waiters on the same seqno, select + * the highest priority task (that with the smallest + * task->prio) to serve as the bottom-half for this + * group. + */ + if (wait->tsk->prio > to_wait(parent)->tsk->prio) { + p = &parent->rb_right; + first = false; + } else { + p = &parent->rb_left; + } + } else if (i915_seqno_passed(wait->seqno, + to_wait(parent)->seqno)) { + p = &parent->rb_right; + if (i915_seqno_passed(seqno, to_wait(parent)->seqno)) + completed = parent; + else + first = false; + } else { + p = &parent->rb_left; + } + } + rb_link_node(&wait->node, parent, p); + rb_insert_color(&wait->node, &b->waiters); + GEM_BUG_ON(!first && !b->irq_seqno_bh); + + if (completed) { + struct rb_node *next = rb_next(completed); + + GEM_BUG_ON(!next && !first); + if (next && next != &wait->node) { + GEM_BUG_ON(first); + b->first_wait = to_wait(next); + smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk); + /* As there is a delay between reading the current + * seqno, processing the completed tasks and selecting + * the next waiter, we may have missed the interrupt + * and so need for the next bottom-half to wakeup. + * + * Also as we enable the IRQ, we may miss the + * interrupt for that seqno, so we have to wake up + * the next bottom-half in order to do a coherent check + * in case the seqno passed. + */ + __intel_breadcrumbs_enable_irq(b); + if (READ_ONCE(b->irq_posted)) + wake_up_process(to_wait(next)->tsk); + } + + do { + struct intel_wait *crumb = to_wait(completed); + completed = rb_prev(completed); + __intel_breadcrumbs_finish(b, crumb); + } while (completed); + } + + if (first) { + GEM_BUG_ON(rb_first(&b->waiters) != &wait->node); + b->first_wait = wait; + smp_store_mb(b->irq_seqno_bh, wait->tsk); + /* After assigning ourselves as the new bottom-half, we must + * perform a cursory check to prevent a missed interrupt. + * Either we miss the interrupt whilst programming the hardware, + * or if there was a previous waiter (for a later seqno) they + * may be woken instead of us (due to the inherent race + * in the unlocked read of b->irq_seqno_bh in the irq handler) + * and so we miss the wake up. 
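This is the classic lost-wakeup defence: publish the waiter with a full barrier (smp_store_mb()), arm the interrupt, then re-check the condition before sleeping, because the event may have fired in the unprotected window. In schematic form (the irq-enable and re-check helpers are hypothetical stand-ins):

	smp_store_mb(b->irq_seqno_bh, current);	/* publish waiter, full barrier */
	enable_irq_source();			/* hypothetical: unmask the event */

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (seqno_completed())		/* hypothetical re-check closes the race */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);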
+ */ + __intel_breadcrumbs_enable_irq(b); + } + GEM_BUG_ON(!b->irq_seqno_bh); + GEM_BUG_ON(!b->first_wait); + GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node); + + return first; +} + +bool intel_engine_add_wait(struct intel_engine_cs *engine, + struct intel_wait *wait) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + bool first; + + spin_lock(&b->lock); + first = __intel_engine_add_wait(engine, wait); + spin_unlock(&b->lock); + + return first; +} + +void intel_engine_enable_fake_irq(struct intel_engine_cs *engine) +{ + mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1); +} + +static inline bool chain_wakeup(struct rb_node *rb, int priority) +{ + return rb && to_wait(rb)->tsk->prio <= priority; +} + +static inline int wakeup_priority(struct intel_breadcrumbs *b, + struct task_struct *tsk) +{ + if (tsk == b->signaler) + return INT_MIN; + else + return tsk->prio; +} + +void intel_engine_remove_wait(struct intel_engine_cs *engine, + struct intel_wait *wait) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + + /* Quick check to see if this waiter was already decoupled from + * the tree by the bottom-half to avoid contention on the spinlock + * by the herd. + */ + if (RB_EMPTY_NODE(&wait->node)) + return; + + spin_lock(&b->lock); + + if (RB_EMPTY_NODE(&wait->node)) + goto out_unlock; + + if (b->first_wait == wait) { + const int priority = wakeup_priority(b, wait->tsk); + struct rb_node *next; + + GEM_BUG_ON(b->irq_seqno_bh != wait->tsk); + + /* We are the current bottom-half. Find the next candidate, + * the first waiter in the queue on the remaining oldest + * request. As multiple seqnos may complete in the time it + * takes us to wake up and find the next waiter, we have to + * wake up that waiter for it to perform its own coherent + * completion check. + */ + next = rb_next(&wait->node); + if (chain_wakeup(next, priority)) { + /* If the next waiter is already complete, + * wake it up and continue onto the next waiter. So + * if we have a small herd, they will wake up in parallel + * rather than sequentially, which should reduce + * the overall latency in waking all the completed + * clients. + * + * However, waking up a chain adds extra latency to + * the first_waiter. This is undesirable if that + * waiter is a high priority task. + */ + u32 seqno = intel_engine_get_seqno(engine); + + while (i915_seqno_passed(seqno, to_wait(next)->seqno)) { + struct rb_node *n = rb_next(next); + + __intel_breadcrumbs_finish(b, to_wait(next)); + next = n; + if (!chain_wakeup(next, priority)) + break; + } + } + + if (next) { + /* In our haste, we may have completed the first waiter + * before we enabled the interrupt. Do so now as we + * have a second waiter for a future seqno. Afterwards, + * we have to wake up that waiter in case we missed + * the interrupt, or if we have to handle an + * exception rather than a seqno completion. + */ + b->first_wait = to_wait(next); + smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk); + if (b->first_wait->seqno != wait->seqno) + __intel_breadcrumbs_enable_irq(b); + wake_up_process(b->irq_seqno_bh); + } else { + b->first_wait = NULL; + WRITE_ONCE(b->irq_seqno_bh, NULL); + __intel_breadcrumbs_disable_irq(b); + } + } else { + GEM_BUG_ON(rb_first(&b->waiters) == &wait->node); + } + + GEM_BUG_ON(RB_EMPTY_NODE(&wait->node)); + rb_erase(&wait->node, &b->waiters); + +out_unlock: + GEM_BUG_ON(b->first_wait == wait); + GEM_BUG_ON(rb_first(&b->waiters) != + (b->first_wait ?
&b->first_wait->node : NULL)); + GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters)); + spin_unlock(&b->lock); +} + +static bool signal_complete(struct drm_i915_gem_request *request) +{ + if (!request) + return false; + + /* If another process served as the bottom-half it may have already + * signalled that this wait is already completed. + */ + if (intel_wait_complete(&request->signaling.wait)) + return true; + + /* Carefully check if the request is complete, giving time for the + * seqno to be visible or if the GPU hung. + */ + if (__i915_request_irq_complete(request)) + return true; + + return false; +} + +static struct drm_i915_gem_request *to_signaler(struct rb_node *rb) +{ + return container_of(rb, struct drm_i915_gem_request, signaling.node); +} + +static void signaler_set_rtpriority(void) +{ + struct sched_param param = { .sched_priority = 1 }; + + sched_setscheduler_nocheck(current, SCHED_FIFO, ¶m); +} + +static int intel_breadcrumbs_signaler(void *arg) +{ + struct intel_engine_cs *engine = arg; + struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct drm_i915_gem_request *request; + + /* Install ourselves with high priority to reduce signalling latency */ + signaler_set_rtpriority(); + + do { + set_current_state(TASK_INTERRUPTIBLE); + + /* We are either woken up by the interrupt bottom-half, + * or by a client adding a new signaller. In both cases, + * the GPU seqno may have advanced beyond our oldest signal. + * If it has, propagate the signal, remove the waiter and + * check again with the next oldest signal. Otherwise we + * need to wait for a new interrupt from the GPU or for + * a new client. + */ + request = READ_ONCE(b->first_signal); + if (signal_complete(request)) { + /* Wake up all other completed waiters and select the + * next bottom-half for the next user interrupt. + */ + intel_engine_remove_wait(engine, + &request->signaling.wait); + + /* Find the next oldest signal. Note that as we have + * not been holding the lock, another client may + * have installed an even older signal than the one + * we just completed - so double check we are still + * the oldest before picking the next one. + */ + spin_lock(&b->lock); + if (request == b->first_signal) { + struct rb_node *rb = + rb_next(&request->signaling.node); + b->first_signal = rb ? to_signaler(rb) : NULL; + } + rb_erase(&request->signaling.node, &b->signals); + spin_unlock(&b->lock); + + i915_gem_request_unreference(request); + } else { + if (kthread_should_stop()) + break; + + schedule(); + } + } while (1); + __set_current_state(TASK_RUNNING); + + return 0; +} + +void intel_engine_enable_signaling(struct drm_i915_gem_request *request) +{ + struct intel_engine_cs *engine = request->engine; + struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct rb_node *parent, **p; + bool first, wakeup; + + if (unlikely(READ_ONCE(request->signaling.wait.tsk))) + return; + + spin_lock(&b->lock); + if (unlikely(request->signaling.wait.tsk)) { + wakeup = false; + goto unlock; + } + + request->signaling.wait.tsk = b->signaler; + request->signaling.wait.seqno = request->seqno; + i915_gem_request_reference(request); + + /* First add ourselves into the list of waiters, but register our + * bottom-half as the signaller thread. As per usual, only the oldest + * waiter (not just signaller) is tasked as the bottom-half waking + * up all completed waiters after the user interrupt. + * + * If we are the oldest waiter, enable the irq (after which we + * must double check that the seqno did not complete). 
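A side note on the signaler above: intel_breadcrumbs_signaler() follows the standard kthread loop contract, i.e. mark TASK_INTERRUPTIBLE before testing for work, and exit only on kthread_should_stop() so the fini path can join it with kthread_stop(). Reduced to its skeleton (the work helpers are hypothetical):

	static int signaler_skeleton(void *arg)
	{
		do {
			set_current_state(TASK_INTERRUPTIBLE);

			if (have_completed_signal(arg)) {	/* hypothetical */
				process_one_signal(arg);	/* hypothetical */
			} else {
				if (kthread_should_stop())
					break;
				schedule();
			}
		} while (1);
		__set_current_state(TASK_RUNNING);

		return 0;
	}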
+ */ + wakeup = __intel_engine_add_wait(engine, &request->signaling.wait); + + /* Now insert ourselves into the retirement ordered list of signals + * on this engine. We track the oldest seqno as that will be the + * first signal to complete. + */ + parent = NULL; + first = true; + p = &b->signals.rb_node; + while (*p) { + parent = *p; + if (i915_seqno_passed(request->seqno, + to_signaler(parent)->seqno)) { + p = &parent->rb_right; + first = false; + } else { + p = &parent->rb_left; + } + } + rb_link_node(&request->signaling.node, parent, p); + rb_insert_color(&request->signaling.node, &b->signals); + if (first) + smp_store_mb(b->first_signal, request); + +unlock: + spin_unlock(&b->lock); + + if (wakeup) + wake_up_process(b->signaler); +} + +int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct task_struct *tsk; + + spin_lock_init(&b->lock); + setup_timer(&b->fake_irq, + intel_breadcrumbs_fake_irq, + (unsigned long)engine); + + /* Spawn a thread to provide a common bottom-half for all signals. + * As this is an asynchronous interface we cannot steal the current + * task for handling the bottom-half to the user interrupt, therefore + * we create a thread to do the coherent seqno dance after the + * interrupt and then signal the waitqueue (via the dma-buf/fence). + */ + tsk = kthread_run(intel_breadcrumbs_signaler, engine, + "i915/signal:%d", engine->id); + if (IS_ERR(tsk)) + return PTR_ERR(tsk); + + b->signaler = tsk; + + return 0; +} + +void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + + if (!IS_ERR_OR_NULL(b->signaler)) + kthread_stop(b->signaler); + + del_timer_sync(&b->fake_irq); +} + +unsigned int intel_kick_waiters(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + unsigned int mask = 0; + + /* To avoid the task_struct disappearing beneath us as we wake up + * the process, we must first inspect the task_struct->state under the + * RCU lock, i.e. as we call wake_up_process() we must be holding the + * rcu_read_lock(). 
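Concretely, intel_engine_wakeup() dereferences a task_struct that may be freed (via RCU) the moment the waiter unregisters, so the read and the wake_up_process() must sit inside the same RCU read-side section. Roughly, as a sketch of the helper's shape rather than its verbatim body:

	struct task_struct *tsk;

	rcu_read_lock();
	tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
	if (tsk)
		wake_up_process(tsk);	/* tsk cannot be freed while under RCU */
	rcu_read_unlock();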
+ */ + rcu_read_lock(); + for_each_engine(engine, i915) + if (unlikely(intel_engine_wakeup(engine))) + mask |= intel_engine_flag(engine); + rcu_read_unlock(); + + return mask; +} + +unsigned int intel_kick_signalers(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + unsigned int mask = 0; + + for_each_engine(engine, i915) { + if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) { + wake_up_process(engine->breadcrumbs.signaler); + mask |= intel_engine_flag(engine); + } + } + + return mask; +} diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 522f5a2de015..bc0fef3d3335 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -96,7 +96,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state) { struct drm_crtc *crtc = crtc_state->crtc; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int i, pipe = intel_crtc->pipe; uint16_t coeffs[9] = { 0, }; @@ -207,7 +207,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state) { struct drm_crtc *crtc = state->crtc; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = to_intel_crtc(crtc)->pipe; uint32_t mode; @@ -255,7 +255,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state) void intel_color_set_csc(struct drm_crtc_state *crtc_state) { struct drm_device *dev = crtc_state->crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (dev_priv->display.load_csc_matrix) dev_priv->display.load_csc_matrix(crtc_state); @@ -266,13 +266,13 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc, struct drm_property_blob *blob) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum pipe pipe = intel_crtc->pipe; int i; if (HAS_GMCH_DISPLAY(dev)) { - if (intel_crtc->config->has_dsi_encoder) + if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) assert_dsi_pll_enabled(dev_priv); else assert_pll_enabled(dev_priv, pipe); @@ -313,7 +313,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state) { struct drm_crtc *crtc = crtc_state->crtc; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state); @@ -343,7 +343,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state) { struct drm_crtc *crtc = state->crtc; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc_state *intel_state = to_intel_crtc_state(state); enum pipe pipe = to_intel_crtc(crtc)->pipe; uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size; @@ -426,7 +426,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state) { struct drm_crtc *crtc = state->crtc; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; 
struct drm_color_lut *lut; uint32_t i, lut_size; @@ -485,7 +485,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state) void intel_color_load_luts(struct drm_crtc_state *crtc_state) { struct drm_device *dev = crtc_state->crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->display.load_luts(crtc_state); } @@ -526,7 +526,7 @@ int intel_color_check(struct drm_crtc *crtc, void intel_color_init(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); drm_mode_crtc_set_gamma_size(crtc, 256); diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index e115bcc6766f..5819d524d917 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -67,7 +67,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crt *crt = intel_encoder_to_crt(encoder); enum intel_display_power_domain power_domain; u32 tmp; @@ -98,7 +98,7 @@ out: static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crt *crt = intel_encoder_to_crt(encoder); u32 tmp, flags = 0; @@ -146,7 +146,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder, static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crt *crt = intel_encoder_to_crt(encoder); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; @@ -281,7 +281,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct intel_crt *crt = intel_attached_crt(connector); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 adpa; bool ret; @@ -301,8 +301,10 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) I915_WRITE(crt->adpa_reg, adpa); - if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, - 1000)) + if (intel_wait_for_register(dev_priv, + crt->adpa_reg, + ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0, + 1000)) DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); if (turn_off_dac) { @@ -326,7 +328,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct intel_crt *crt = intel_attached_crt(connector); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 adpa; bool ret; u32 save_adpa; @@ -338,8 +340,10 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) I915_WRITE(crt->adpa_reg, adpa); - if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, - 1000)) { + if (intel_wait_for_register(dev_priv, + crt->adpa_reg, + ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0, + 1000)) { DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); I915_WRITE(crt->adpa_reg, save_adpa); } @@ 
-367,7 +371,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) static bool intel_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 stat; bool ret = false; int i, tries = 0; @@ -394,9 +398,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) CRT_HOTPLUG_FORCE_DETECT, CRT_HOTPLUG_FORCE_DETECT); /* wait for FORCE_DETECT to go off */ - if (wait_for((I915_READ(PORT_HOTPLUG_EN) & - CRT_HOTPLUG_FORCE_DETECT) == 0, - 1000)) + if (intel_wait_for_register(dev_priv, PORT_HOTPLUG_EN, + CRT_HOTPLUG_FORCE_DETECT, 0, + 1000)) DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); } @@ -449,7 +453,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector, static bool intel_crt_detect_ddc(struct drm_connector *connector) { struct intel_crt *crt = intel_attached_crt(connector); - struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); struct edid *edid; struct i2c_adapter *i2c; @@ -485,7 +489,7 @@ static enum drm_connector_status intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe) { struct drm_device *dev = crt->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t save_bclrpat; uint32_t save_vtotal; uint32_t vtotal, vactive; @@ -600,7 +604,7 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crt *crt = intel_attached_crt(connector); struct intel_encoder *intel_encoder = &crt->base; enum intel_display_power_domain power_domain; @@ -681,7 +685,7 @@ static void intel_crt_destroy(struct drm_connector *connector) static int intel_crt_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crt *crt = intel_attached_crt(connector); struct intel_encoder *intel_encoder = &crt->base; enum intel_display_power_domain power_domain; @@ -716,7 +720,7 @@ static int intel_crt_set_property(struct drm_connector *connector, static void intel_crt_reset(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crt *crt = intel_attached_crt(connector); if (INTEL_INFO(dev)->gen >= 5) { @@ -743,6 +747,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = intel_crt_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_crt_destroy, .set_property = intel_crt_set_property, @@ -791,7 +796,7 @@ void intel_crt_init(struct drm_device *dev) struct drm_connector *connector; struct intel_crt *crt; struct intel_connector *intel_connector; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t adpa_reg; u32 adpa; @@ -879,8 +884,6 @@ void intel_crt_init(struct drm_device *dev) drm_connector_helper_add(connector, 
&intel_crt_connector_helper_funcs); - drm_connector_register(connector); - if (!I915_HAS_HOTPLUG(dev)) intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 2b3b428d9cd2..c3b33a10c15c 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -41,15 +41,15 @@ * be moved to FW_FAILED. */ -#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin" +#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin" MODULE_FIRMWARE(I915_CSR_KBL); #define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) -#define I915_CSR_SKL "i915/skl_dmc_ver1.bin" +#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin" MODULE_FIRMWARE(I915_CSR_SKL); -#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23) +#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 26) -#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin" +#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin" MODULE_FIRMWARE(I915_CSR_BXT); #define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) @@ -286,7 +286,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; uint32_t i; uint32_t *dmc_payload; - uint32_t required_min_version; + uint32_t required_version; if (!fw) return NULL; @@ -303,24 +303,23 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, csr->version = css_header->version; if (IS_KABYLAKE(dev_priv)) { - required_min_version = KBL_CSR_VERSION_REQUIRED; + required_version = KBL_CSR_VERSION_REQUIRED; } else if (IS_SKYLAKE(dev_priv)) { - required_min_version = SKL_CSR_VERSION_REQUIRED; + required_version = SKL_CSR_VERSION_REQUIRED; } else if (IS_BROXTON(dev_priv)) { - required_min_version = BXT_CSR_VERSION_REQUIRED; + required_version = BXT_CSR_VERSION_REQUIRED; } else { MISSING_CASE(INTEL_REVID(dev_priv)); - required_min_version = 0; + required_version = 0; } - if (csr->version < required_min_version) { - DRM_INFO("Refusing to load old DMC firmware v%u.%u," - " please upgrade to v%u.%u or later" - " [" FIRMWARE_URL "].\n", + if (csr->version != required_version) { + DRM_INFO("Refusing to load DMC firmware v%u.%u," + " please use v%u.%u [" FIRMWARE_URL "].\n", CSR_VERSION_MAJOR(csr->version), CSR_VERSION_MINOR(csr->version), - CSR_VERSION_MAJOR(required_min_version), - CSR_VERSION_MINOR(required_min_version)); + CSR_VERSION_MAJOR(required_version), + CSR_VERSION_MINOR(required_version)); return NULL; } @@ -413,7 +412,7 @@ static void csr_load_work_fn(struct work_struct *work) csr = &dev_priv->csr; ret = request_firmware(&fw, dev_priv->csr.fw_path, - &dev_priv->dev->pdev->dev); + &dev_priv->drm.pdev->dev); if (fw) dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw); @@ -427,7 +426,7 @@ static void csr_load_work_fn(struct work_struct *work) CSR_VERSION_MAJOR(csr->version), CSR_VERSION_MINOR(csr->version)); } else { - dev_notice(dev_priv->dev->dev, + dev_notice(dev_priv->drm.dev, "Failed to load DMC firmware" " [" FIRMWARE_URL "]," " disabling runtime power management.\n"); diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index ad3b0ee5e55b..dd1d6fe12297 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -318,7 +318,7 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder, default: WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type); /* fallthrough and treat as unknown */ - case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_DP: case INTEL_OUTPUT_EDP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_UNKNOWN: @@ -482,7 
+482,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) ddi_translations = ddi_translations_edp; size = n_edp_entries; break; - case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: ddi_translations = ddi_translations_dp; size = n_dp_entries; @@ -543,7 +543,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, void hsw_fdi_link_train(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; u32 temp, i, rx_ctl_val; @@ -834,7 +834,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) if (pipe_config->has_pch_encoder) dotclock = intel_dotclock_calculate(pipe_config->port_clock, &pipe_config->fdi_m_n); - else if (pipe_config->has_dp_encoder) + else if (intel_crtc_has_dp_encoder(pipe_config)) dotclock = intel_dotclock_calculate(pipe_config->port_clock, &pipe_config->dp_m_n); else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36) @@ -851,7 +851,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) static void skl_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int link_clock = 0; uint32_t dpll_ctl1, dpll; @@ -899,7 +899,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder, static void hsw_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int link_clock = 0; u32 val, pll; @@ -971,7 +971,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv, static void bxt_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = intel_ddi_get_encoder_port(encoder); uint32_t dpll = port; @@ -1061,14 +1061,14 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc, void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; int type = intel_encoder->type; uint32_t temp; - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) { + if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) { WARN_ON(transcoder_is_dsi(cpu_transcoder)); temp = TRANS_MSA_SYNC_CLK; @@ -1096,7 +1096,7 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; uint32_t temp; temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); @@ -1113,7 +1113,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) struct intel_encoder 
*intel_encoder = intel_ddi_get_crtc_encoder(crtc); struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = intel_crtc->pipe; enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; enum port port = intel_ddi_get_encoder_port(intel_encoder); @@ -1182,7 +1182,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) temp |= TRANS_DDI_MODE_SELECT_FDI; temp |= (intel_crtc->config->fdi_lanes - 1) << 1; - } else if (type == INTEL_OUTPUT_DISPLAYPORT || + } else if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -1223,7 +1223,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) { struct drm_device *dev = intel_connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *intel_encoder = intel_connector->encoder; int type = intel_connector->base.connector_type; enum port port = intel_ddi_get_encoder_port(intel_encoder); @@ -1285,7 +1285,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_ddi_get_encoder_port(encoder); enum intel_display_power_domain power_domain; u32 tmp; @@ -1359,7 +1359,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) { struct drm_crtc *crtc = &intel_crtc->base; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); enum port port = intel_ddi_get_encoder_port(intel_encoder); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; @@ -1371,7 +1371,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) { - struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; if (cpu_transcoder != TRANSCODER_EDP) @@ -1392,7 +1392,7 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv, dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level; hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level; - if (type == INTEL_OUTPUT_DISPLAYPORT) { + if (type == INTEL_OUTPUT_DP) { if (dp_iboost) { iboost = dp_iboost; } else { @@ -1450,7 +1450,7 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv, if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { n_entries = ARRAY_SIZE(bxt_ddi_translations_edp); ddi_translations = bxt_ddi_translations_edp; - } else if (type == INTEL_OUTPUT_DISPLAYPORT + } else if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) { n_entries = ARRAY_SIZE(bxt_ddi_translations_dp); ddi_translations = bxt_ddi_translations_dp; @@ -1624,7 +1624,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) intel_ddi_clk_select(intel_encoder, crtc->config); - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { + if (type == INTEL_OUTPUT_DP || type == 
INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_dp_set_link_params(intel_dp, crtc->config); @@ -1648,7 +1648,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) { struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_ddi_get_encoder_port(intel_encoder); int type = intel_encoder->type; uint32_t val; @@ -1669,7 +1669,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) if (wait) intel_wait_ddi_buf_idle(dev_priv, port); - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { + if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); intel_edp_panel_vdd_on(intel_dp); @@ -1695,7 +1695,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_ddi_get_encoder_port(intel_encoder); int type = intel_encoder->type; @@ -1734,7 +1734,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int type = intel_encoder->type; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (intel_crtc->config->has_audio) { intel_audio_codec_disable(intel_encoder); @@ -1808,7 +1808,10 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv, enum dpio_phy phy) { - if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10)) + if (intel_wait_for_register(dev_priv, + BXT_PORT_REF_DW3(phy), + GRC_DONE, GRC_DONE, + 10)) DRM_ERROR("timeout waiting for PHY%d GRC\n", phy); } @@ -2121,7 +2124,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) void intel_ddi_fdi_disable(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); uint32_t val; @@ -2154,7 +2157,7 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc) void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; struct intel_hdmi *intel_hdmi; @@ -2208,7 +2211,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, break; case TRANS_DDI_MODE_SELECT_DP_SST: case TRANS_DDI_MODE_SELECT_DP_MST: - pipe_config->has_dp_encoder = true; pipe_config->lane_count = ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; intel_dp_get_m_n(intel_crtc, pipe_config); @@ -2253,7 +2255,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, static bool intel_ddi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + 
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int type = encoder->type; int port = intel_ddi_get_encoder_port(encoder); int ret; @@ -2319,7 +2321,7 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port) void intel_ddi_init(struct drm_device *dev, enum port port) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c new file mode 100644 index 000000000000..cba137f9ad3e --- /dev/null +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -0,0 +1,388 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include "i915_drv.h" + +void intel_device_info_dump(struct drm_i915_private *dev_priv) +{ + const struct intel_device_info *info = &dev_priv->info; + +#define PRINT_S(name) "%s" +#define SEP_EMPTY +#define PRINT_FLAG(name) info->name ? #name "," : "" +#define SEP_COMMA , + DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags=" + DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY), + info->gen, + dev_priv->drm.pdev->device, + dev_priv->drm.pdev->revision, + DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA)); +#undef PRINT_S +#undef SEP_EMPTY +#undef PRINT_FLAG +#undef SEP_COMMA +} + +static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) +{ + struct intel_device_info *info = mkwrite_device_info(dev_priv); + u32 fuse, eu_dis; + + fuse = I915_READ(CHV_FUSE_GT); + + info->slice_total = 1; + + if (!(fuse & CHV_FGT_DISABLE_SS0)) { + info->subslice_per_slice++; + eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK | + CHV_FGT_EU_DIS_SS0_R1_MASK); + info->eu_total += 8 - hweight32(eu_dis); + } + + if (!(fuse & CHV_FGT_DISABLE_SS1)) { + info->subslice_per_slice++; + eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK | + CHV_FGT_EU_DIS_SS1_R1_MASK); + info->eu_total += 8 - hweight32(eu_dis); + } + + info->subslice_total = info->subslice_per_slice; + /* + * CHV expected to always have a uniform distribution of EU + * across subslices. + */ + info->eu_per_subslice = info->subslice_total ? 
+ info->eu_total / info->subslice_total : + 0; + /* + * CHV supports subslice power gating on devices with more than + * one subslice, and supports EU power gating on devices with + * more than one EU pair per subslice. + */ + info->has_slice_pg = 0; + info->has_subslice_pg = (info->subslice_total > 1); + info->has_eu_pg = (info->eu_per_subslice > 2); +} + +static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) +{ + struct intel_device_info *info = mkwrite_device_info(dev_priv); + int s_max = 3, ss_max = 4, eu_max = 8; + int s, ss; + u32 fuse2, s_enable, ss_disable, eu_disable; + u8 eu_mask = 0xff; + + fuse2 = I915_READ(GEN8_FUSE2); + s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; + ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> GEN9_F2_SS_DIS_SHIFT; + + info->slice_total = hweight32(s_enable); + /* + * The subslice disable field is global, i.e. it applies + * to each of the enabled slices. + */ + info->subslice_per_slice = ss_max - hweight32(ss_disable); + info->subslice_total = info->slice_total * info->subslice_per_slice; + + /* + * Iterate through enabled slices and subslices to + * count the total enabled EU. + */ + for (s = 0; s < s_max; s++) { + if (!(s_enable & BIT(s))) + /* skip disabled slice */ + continue; + + eu_disable = I915_READ(GEN9_EU_DISABLE(s)); + for (ss = 0; ss < ss_max; ss++) { + int eu_per_ss; + + if (ss_disable & BIT(ss)) + /* skip disabled subslice */ + continue; + + eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) & + eu_mask); + + /* + * Record which subslice(s) has(have) 7 EUs. we + * can tune the hash used to spread work among + * subslices if they are unbalanced. + */ + if (eu_per_ss == 7) + info->subslice_7eu[s] |= BIT(ss); + + info->eu_total += eu_per_ss; + } + } + + /* + * SKL is expected to always have a uniform distribution + * of EU across subslices with the exception that any one + * EU in any one subslice may be fused off for die + * recovery. BXT is expected to be perfectly uniform in EU + * distribution. + */ + info->eu_per_subslice = info->subslice_total ? + DIV_ROUND_UP(info->eu_total, + info->subslice_total) : 0; + /* + * SKL supports slice power gating on devices with more than + * one slice, and supports EU power gating on devices with + * more than one EU pair per subslice. BXT supports subslice + * power gating on devices with more than one subslice, and + * supports EU power gating on devices with more than one EU + * pair per subslice. + */ + info->has_slice_pg = + (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && + info->slice_total > 1; + info->has_subslice_pg = + IS_BROXTON(dev_priv) && info->subslice_total > 1; + info->has_eu_pg = info->eu_per_subslice > 2; + + if (IS_BROXTON(dev_priv)) { +#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & BIT(ss)) + /* + * There is a HW issue in 2x6 fused down parts that requires + * Pooled EU to be enabled as a WA. The pool configuration + * changes depending upon which subslice is fused down. This + * doesn't affect if the device has all 3 subslices enabled. 
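gen9_sseu_info_init() above derives the slice/subslice/EU totals by popcounting fuse-register disable masks with the kernel's hweight helpers. The arithmetic can be sketched in isolation as below; the fuse values and bit layouts are invented for illustration, and uniform EU fusing is assumed (the driver sums EUs per subslice instead):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-ins for the kernel's hweight (popcount) helpers. */
    static int hweight32(uint32_t v) { return __builtin_popcount(v); }
    static int hweight8(uint8_t v)   { return __builtin_popcount(v); }

    int main(void)
    {
        const int ss_max = 4, eu_max = 8;

        /* Hypothetical fuse values; the driver reads these via MMIO. */
        uint32_t s_enable   = 0x1;  /* slice 0 enabled */
        uint32_t ss_disable = 0x8;  /* subslice 3 fused off */
        uint8_t  eu_disable = 0x01; /* one EU fused off per subslice */

        int slice_total = hweight32(s_enable);
        /* The subslice disable field applies to every enabled slice. */
        int subslice_per_slice = ss_max - hweight32(ss_disable);
        int subslice_total = slice_total * subslice_per_slice;
        /* Uniform fusing assumed; the real code iterates subslices. */
        int eu_total = subslice_total * (eu_max - hweight8(eu_disable));

        printf("slices=%d subslices=%d eus=%d\n",
               slice_total, subslice_total, eu_total);
        return 0;
    }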
+ */ + /* WaEnablePooledEuFor2x6:bxt */ + info->has_pooled_eu = ((info->subslice_per_slice == 3) || + (info->subslice_per_slice == 2 && + INTEL_REVID(dev_priv) < BXT_REVID_C0)); + + info->min_eu_in_pool = 0; + if (info->has_pooled_eu) { + if (IS_SS_DISABLED(ss_disable, 0) || + IS_SS_DISABLED(ss_disable, 2)) + info->min_eu_in_pool = 3; + else if (IS_SS_DISABLED(ss_disable, 1)) + info->min_eu_in_pool = 6; + else + info->min_eu_in_pool = 9; + } +#undef IS_SS_DISABLED + } +} + +static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) +{ + struct intel_device_info *info = mkwrite_device_info(dev_priv); + const int s_max = 3, ss_max = 3, eu_max = 8; + int s, ss; + u32 fuse2, eu_disable[s_max], s_enable, ss_disable; + + fuse2 = I915_READ(GEN8_FUSE2); + s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; + ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT; + + eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK; + eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) | + ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) << + (32 - GEN8_EU_DIS0_S1_SHIFT)); + eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) | + ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) << + (32 - GEN8_EU_DIS1_S2_SHIFT)); + + info->slice_total = hweight32(s_enable); + + /* + * The subslice disable field is global, i.e. it applies + * to each of the enabled slices. + */ + info->subslice_per_slice = ss_max - hweight32(ss_disable); + info->subslice_total = info->slice_total * info->subslice_per_slice; + + /* + * Iterate through enabled slices and subslices to + * count the total enabled EU. + */ + for (s = 0; s < s_max; s++) { + if (!(s_enable & (0x1 << s))) + /* skip disabled slice */ + continue; + + for (ss = 0; ss < ss_max; ss++) { + u32 n_disabled; + + if (ss_disable & (0x1 << ss)) + /* skip disabled subslice */ + continue; + + n_disabled = hweight8(eu_disable[s] >> (ss * eu_max)); + + /* + * Record which subslices have 7 EUs. + */ + if (eu_max - n_disabled == 7) + info->subslice_7eu[s] |= 1 << ss; + + info->eu_total += eu_max - n_disabled; + } + } + + /* + * BDW is expected to always have a uniform distribution of EU across + * subslices with the exception that any one EU in any one subslice may + * be fused off for die recovery. + */ + info->eu_per_subslice = info->subslice_total ? + DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0; + + /* + * BDW supports slice power gating on devices with more than + * one slice. + */ + info->has_slice_pg = (info->slice_total > 1); + info->has_subslice_pg = 0; + info->has_eu_pg = 0; +} + +/* + * Determine various intel_device_info fields at runtime. + * + * Use it when either: + * - it's judged too laborious to fill n static structures with the limit + * when a simple if statement does the job, + * - run-time checks (eg read fuse/strap registers) are needed. + * + * This function needs to be called: + * - after the MMIO has been setup as we are reading registers, + * - after the PCH has been detected, + * - before the first usage of the fields it can tweak. + */ +void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) +{ + struct intel_device_info *info = mkwrite_device_info(dev_priv); + enum pipe pipe; + + /* + * Skylake and Broxton currently don't expose the topmost plane as its + * use is exclusive with the legacy cursor and we only want to expose + * one of those, not both. 
Until we can safely expose the topmost plane + * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported, + * we don't expose the topmost plane at all to prevent ABI breakage + * down the line. + */ + if (IS_BROXTON(dev_priv)) { + info->num_sprites[PIPE_A] = 2; + info->num_sprites[PIPE_B] = 2; + info->num_sprites[PIPE_C] = 1; + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + for_each_pipe(dev_priv, pipe) + info->num_sprites[pipe] = 2; + else + for_each_pipe(dev_priv, pipe) + info->num_sprites[pipe] = 1; + + if (i915.disable_display) { + DRM_INFO("Display disabled (module parameter)\n"); + info->num_pipes = 0; + } else if (info->num_pipes > 0 && + (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) && + HAS_PCH_SPLIT(dev_priv)) { + u32 fuse_strap = I915_READ(FUSE_STRAP); + u32 sfuse_strap = I915_READ(SFUSE_STRAP); + + /* + * SFUSE_STRAP is supposed to have a bit signalling the display + * is fused off. Unfortunately it seems that, at least in + * certain cases, fused off display means that PCH display + * reads don't land anywhere. In that case, we read 0s. + * + * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK + * should be set when taking over after the firmware. + */ + if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE || + sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED || + (dev_priv->pch_type == PCH_CPT && + !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { + DRM_INFO("Display fused off, disabling\n"); + info->num_pipes = 0; + } else if (fuse_strap & IVB_PIPE_C_DISABLE) { + DRM_INFO("PipeC fused off\n"); + info->num_pipes -= 1; + } + } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) { + u32 dfsm = I915_READ(SKL_DFSM); + u8 disabled_mask = 0; + bool invalid; + int num_bits; + + if (dfsm & SKL_DFSM_PIPE_A_DISABLE) + disabled_mask |= BIT(PIPE_A); + if (dfsm & SKL_DFSM_PIPE_B_DISABLE) + disabled_mask |= BIT(PIPE_B); + if (dfsm & SKL_DFSM_PIPE_C_DISABLE) + disabled_mask |= BIT(PIPE_C); + + num_bits = hweight8(disabled_mask); + + switch (disabled_mask) { + case BIT(PIPE_A): + case BIT(PIPE_B): + case BIT(PIPE_A) | BIT(PIPE_B): + case BIT(PIPE_A) | BIT(PIPE_C): + invalid = true; + break; + default: + invalid = false; + } + + if (num_bits > info->num_pipes || invalid) + DRM_ERROR("invalid pipe fuse configuration: 0x%x\n", + disabled_mask); + else + info->num_pipes -= num_bits; + } + + /* Initialize slice/subslice/EU info */ + if (IS_CHERRYVIEW(dev_priv)) + cherryview_sseu_info_init(dev_priv); + else if (IS_BROADWELL(dev_priv)) + broadwell_sseu_info_init(dev_priv); + else if (INTEL_INFO(dev_priv)->gen >= 9) + gen9_sseu_info_init(dev_priv); + + info->has_snoop = !info->has_llc; + + /* Snooping is broken on BXT A stepping. */ + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) + info->has_snoop = false; + + DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total); + DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total); + DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice); + DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total); + DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice); + DRM_DEBUG_DRIVER("has slice power gating: %s\n", + info->has_slice_pg ? "y" : "n"); + DRM_DEBUG_DRIVER("has subslice power gating: %s\n", + info->has_subslice_pg ? "y" : "n"); + DRM_DEBUG_DRIVER("has EU power gating: %s\n", + info->has_eu_pg ? 
"y" : "n"); +} diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0b2cd669ac05..be3b2cab2640 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -530,52 +530,6 @@ needs_modeset(struct drm_crtc_state *state) return drm_atomic_crtc_needs_modeset(state); } -/** - * Returns whether any output on the specified pipe is of the specified type - */ -bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type) -{ - struct drm_device *dev = crtc->base.dev; - struct intel_encoder *encoder; - - for_each_encoder_on_crtc(dev, &crtc->base, encoder) - if (encoder->type == type) - return true; - - return false; -} - -/** - * Returns whether any output on the specified pipe will have the specified - * type after a staged modeset is complete, i.e., the same as - * intel_pipe_has_type() but looking at encoder->new_crtc instead of - * encoder->crtc. - */ -static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state, - int type) -{ - struct drm_atomic_state *state = crtc_state->base.state; - struct drm_connector *connector; - struct drm_connector_state *connector_state; - struct intel_encoder *encoder; - int i, num_connectors = 0; - - for_each_connector_in_state(state, connector, connector_state, i) { - if (connector_state->crtc != crtc_state->base.crtc) - continue; - - num_connectors++; - - encoder = to_intel_encoder(connector_state->best_encoder); - if (encoder->type == type) - return true; - } - - WARN_ON(num_connectors == 0); - - return false; -} - /* * Platform specific helpers to calculate the port PLL loopback- (clock.m), * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast @@ -688,7 +642,7 @@ i9xx_select_p2_div(const struct intel_limit *limit, { struct drm_device *dev = crtc_state->base.crtc->dev; - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { /* * For LVDS just rely on its current settings for dual-channel. 
* We haven't figured out how to reliably set up different @@ -1080,7 +1034,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t reg = PIPEDSL(pipe); u32 line1, line2; u32 line_mask; @@ -1116,7 +1070,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) static void intel_wait_for_pipe_off(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; enum pipe pipe = crtc->pipe; @@ -1124,8 +1078,9 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) i915_reg_t reg = PIPECONF(cpu_transcoder); /* Wait for the Pipe State to go off */ - if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, - 100)) + if (intel_wait_for_register(dev_priv, + reg, I965_PIPECONF_ACTIVE, 0, + 100)) WARN(1, "pipe_off wait timed out\n"); } else { /* Wait for the display line to settle */ @@ -1234,7 +1189,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; i915_reg_t pp_reg; u32 val; enum pipe panel_pipe = PIPE_A; @@ -1276,7 +1231,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, static void assert_cursor(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; bool cur_state; if (IS_845G(dev) || IS_I865G(dev)) @@ -1338,7 +1293,7 @@ static void assert_plane(struct drm_i915_private *dev_priv, static void assert_planes_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; int i; /* Primary planes are fixed to pipes on gen4+ */ @@ -1364,7 +1319,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv, static void assert_sprites_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; int sprite; if (INTEL_INFO(dev)->gen >= 9) { @@ -1544,7 +1499,11 @@ static void _vlv_enable_pll(struct intel_crtc *crtc, POSTING_READ(DPLL(pipe)); udelay(150); - if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) + if (intel_wait_for_register(dev_priv, + DPLL(pipe), + DPLL_LOCK_VLV, + DPLL_LOCK_VLV, + 1)) DRM_ERROR("DPLL %d failed to lock\n", pipe); } @@ -1593,7 +1552,9 @@ static void _chv_enable_pll(struct intel_crtc *crtc, I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); /* Check PLL is locked */ - if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) + if (intel_wait_for_register(dev_priv, + DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV, + 1)) DRM_ERROR("PLL %d failed to lock\n", pipe); } @@ -1639,9 +1600,10 @@ static int intel_num_dvo_pipes(struct drm_device *dev) struct intel_crtc *crtc; int count = 0; - for_each_intel_crtc(dev, crtc) + for_each_intel_crtc(dev, crtc) { count += crtc->base.state->active && - intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO); + intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO); + } return count; } @@ -1649,7 +1611,7 @@ static int intel_num_dvo_pipes(struct 
drm_device *dev) static void i9xx_enable_pll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t reg = DPLL(crtc->pipe); u32 dpll = crtc->config->dpll_hw_state.dpll; @@ -1721,12 +1683,12 @@ static void i9xx_enable_pll(struct intel_crtc *crtc) static void i9xx_disable_pll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; /* Disable DVO 2x clock on both PLLs if necessary */ if (IS_I830(dev) && - intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) && + intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) && !intel_num_dvo_pipes(dev)) { I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); @@ -1813,7 +1775,9 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, BUG(); } - if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000)) + if (intel_wait_for_register(dev_priv, + dpll_reg, port_mask, expected_mask, + 1000)) WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n", port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask); } @@ -1821,7 +1785,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); i915_reg_t reg; @@ -1854,7 +1818,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, * here for both 8bpc and 12bpc. 
*/ val &= ~PIPECONF_BPC_MASK; - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI)) + if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI)) val |= PIPECONF_8BPC; else val |= pipeconf_val & PIPECONF_BPC_MASK; @@ -1863,7 +1827,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, val &= ~TRANS_INTERLACE_MASK; if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) if (HAS_PCH_IBX(dev_priv) && - intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) + intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) val |= TRANS_LEGACY_INTERLACED_ILK; else val |= TRANS_INTERLACED; @@ -1871,7 +1835,9 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, val |= TRANS_PROGRESSIVE; I915_WRITE(reg, val | TRANS_ENABLE); - if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) + if (intel_wait_for_register(dev_priv, + reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE, + 100)) DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); } @@ -1899,14 +1865,18 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, val |= TRANS_PROGRESSIVE; I915_WRITE(LPT_TRANSCONF, val); - if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100)) + if (intel_wait_for_register(dev_priv, + LPT_TRANSCONF, + TRANS_STATE_ENABLE, + TRANS_STATE_ENABLE, + 100)) DRM_ERROR("Failed to enable PCH transcoder\n"); } static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; i915_reg_t reg; uint32_t val; @@ -1922,7 +1892,9 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, val &= ~TRANS_ENABLE; I915_WRITE(reg, val); /* wait for PCH transcoder off, transcoder state */ - if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) + if (intel_wait_for_register(dev_priv, + reg, TRANS_STATE_ENABLE, 0, + 50)) DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); if (HAS_PCH_CPT(dev)) { @@ -1942,7 +1914,9 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) val &= ~TRANS_ENABLE; I915_WRITE(LPT_TRANSCONF, val); /* wait for PCH transcoder off, transcoder state */ - if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50)) + if (intel_wait_for_register(dev_priv, + LPT_TRANSCONF, TRANS_STATE_ENABLE, 0, + 50)) DRM_ERROR("Failed to disable PCH transcoder\n"); /* Workaround: clear timing override bit. */ @@ -1961,7 +1935,7 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) static void intel_enable_pipe(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; enum pipe pch_transcoder; @@ -1985,7 +1959,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc) * need the check. 
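The recurring conversion in these hunks replaces open-coded wait_for((I915_READ(reg) & mask) == value, timeout) polling with the intel_wait_for_register() helper introduced by this series. Its contract, poll until the masked read matches the expected value or give up after the timeout, can be sketched in userspace as below; read_reg() and the millisecond clock are stand-ins, and the real helper is more careful about how it waits than this plain spin loop:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* Toy register that "becomes ready" (bit 0 set) after a few polls. */
    static uint32_t read_reg(uint32_t reg)
    {
        static int polls;
        (void)reg;
        return ++polls > 3 ? 0x1 : 0x0;
    }

    static uint64_t now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
    }

    /*
     * Poll until (read_reg(reg) & mask) == value or timeout_ms elapses.
     * Returns 0 on success, -1 on timeout.
     */
    static int wait_for_register(uint32_t reg, uint32_t mask, uint32_t value,
                                 unsigned int timeout_ms)
    {
        uint64_t deadline = now_ms() + timeout_ms;

        while (now_ms() < deadline) {
            if ((read_reg(reg) & mask) == value)
                return 0;
        }
        /* Final check so a success racing the deadline is not lost. */
        return (read_reg(reg) & mask) == value ? 0 : -1;
    }

    int main(void)
    {
        /* Wait for bit 0 to be set: mask == value == 0x1. */
        printf("ready: %d\n", wait_for_register(0, 0x1, 0x1, 10));
        return 0;
    }

Passing value == 0 waits for the masked bits to clear (the FORCE_TRIGGER and transcoder-disable cases above); value == mask waits for them to become set.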
*/ if (HAS_GMCH_DISPLAY(dev_priv)) - if (crtc->config->has_dsi_encoder) + if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI)) assert_dsi_pll_enabled(dev_priv); else assert_pll_enabled(dev_priv, pipe); @@ -2034,7 +2008,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc) */ static void intel_disable_pipe(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; enum pipe pipe = crtc->pipe; i915_reg_t reg; @@ -2072,15 +2046,6 @@ static void intel_disable_pipe(struct intel_crtc *crtc) intel_wait_for_pipe_off(crtc); } -static bool need_vtd_wa(struct drm_device *dev) -{ -#ifdef CONFIG_INTEL_IOMMU - if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped) - return true; -#endif - return false; -} - static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) { return IS_GEN2(dev_priv) ? 2048 : 4096; @@ -2245,7 +2210,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) { struct drm_device *dev = fb->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; u32 alignment; @@ -2262,7 +2227,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, * we should always have valid PTE following the scanout preventing * the VT-d warning. */ - if (need_vtd_wa(dev) && alignment < 256 * 1024) + if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024) alignment = 256 * 1024; /* @@ -2547,7 +2512,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, struct intel_initial_plane_config *plane_config) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *c; struct intel_crtc *i; struct drm_i915_gem_object *obj; @@ -2643,7 +2608,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, const struct intel_plane_state *plane_state) { struct drm_device *dev = primary->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); @@ -2756,7 +2721,7 @@ static void i9xx_disable_primary_plane(struct drm_plane *primary, struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int plane = intel_crtc->plane; @@ -2773,7 +2738,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, const struct intel_plane_state *plane_state) { struct drm_device *dev = primary->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); @@ -2901,7 +2866,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane, static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private 
*dev_priv = to_i915(dev); I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); @@ -3011,7 +2976,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane, const struct intel_plane_state *plane_state) { struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); @@ -3095,7 +3060,7 @@ static void skylake_disable_primary_plane(struct drm_plane *primary, struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = to_intel_crtc(crtc)->pipe; I915_WRITE(PLANE_CTL(pipe, 0), 0); @@ -3118,7 +3083,7 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv) { struct intel_crtc *crtc; - for_each_intel_crtc(dev_priv->dev, crtc) + for_each_intel_crtc(&dev_priv->drm, crtc) intel_finish_page_flip_cs(dev_priv, crtc->pipe); } @@ -3152,12 +3117,12 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) return; - drm_modeset_lock_all(dev_priv->dev); + drm_modeset_lock_all(&dev_priv->drm); /* * Disabling the crtcs gracefully seems nicer. Also the * g33 docs say we should at least disable all the planes. */ - intel_display_suspend(dev_priv->dev); + intel_display_suspend(&dev_priv->drm); } void intel_finish_reset(struct drm_i915_private *dev_priv) @@ -3184,7 +3149,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) * FIXME: Atomic will make this obsolete since we won't schedule * CS-based flips (which might get lost in gpu resets) any more. 
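The reset and suspend paths above show the same demidlayering from the other side: dev_priv->dev, a stored pointer, becomes &dev_priv->drm, the address of the embedded member, removing a dereference and a pointer that had to be kept in sync. A compilable fragment using the simplified types from the earlier sketch; lock_all() is a hypothetical stand-in for calls like drm_modeset_lock_all():

    #include <stdio.h>

    struct drm_device { int dev_id; };

    struct drm_i915_private {
        struct drm_device drm; /* embedded, as in the earlier sketch */
    };

    /* Hypothetical stand-in for a core drm entry point. */
    static void lock_all(struct drm_device *dev)
    {
        printf("locking device %d\n", dev->dev_id);
    }

    int main(void)
    {
        struct drm_i915_private i915 = { .drm = { .dev_id = 0 } };

        /* was: lock_all(dev_priv->dev), chasing a stored pointer */
        lock_all(&i915.drm);
        return 0;
    }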
*/ - intel_update_primary_planes(dev_priv->dev); + intel_update_primary_planes(&dev_priv->drm); return; } @@ -3195,18 +3160,18 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) intel_runtime_pm_disable_interrupts(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv); - intel_modeset_init_hw(dev_priv->dev); + intel_modeset_init_hw(&dev_priv->drm); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); - intel_display_resume(dev_priv->dev); + intel_display_resume(&dev_priv->drm); intel_hpd_init(dev_priv); - drm_modeset_unlock_all(dev_priv->dev); + drm_modeset_unlock_all(&dev_priv->drm); } static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) @@ -3231,7 +3196,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *old_crtc_state) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->base.state); @@ -3272,7 +3237,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc, static void intel_fdi_normal_train(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; i915_reg_t reg; @@ -3315,7 +3280,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc) static void ironlake_fdi_link_train(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; i915_reg_t reg; @@ -3416,7 +3381,7 @@ static const int snb_b_fdi_train_param[] = { static void gen6_fdi_link_train(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; i915_reg_t reg; @@ -3549,7 +3514,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; i915_reg_t reg; @@ -3668,7 +3633,7 @@ train_done: static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = intel_crtc->pipe; i915_reg_t reg; u32 temp; @@ -3705,7 +3670,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = intel_crtc->pipe; i915_reg_t reg; u32 temp; @@ -3735,7 +3700,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) static void ironlake_fdi_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = 
dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; i915_reg_t reg; @@ -3831,7 +3796,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc) static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); long ret; WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); @@ -3994,7 +3959,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, enum pipe pch_transcoder) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), @@ -4016,7 +3981,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t temp; temp = I915_READ(SOUTH_CHICKEN1); @@ -4066,7 +4031,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc) struct intel_encoder *encoder; for_each_encoder_on_crtc(dev, crtc, encoder) { - if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || + if (encoder->type == INTEL_OUTPUT_DP || encoder->type == INTEL_OUTPUT_EDP) return enc_to_dig_port(&encoder->base)->port; } @@ -4085,7 +4050,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc) static void ironlake_pch_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; u32 temp; @@ -4135,7 +4100,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) intel_fdi_normal_train(crtc); /* For PCH DP, enable TRANS_DP_CTL */ - if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) { + if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) { const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; @@ -4175,7 +4140,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) static void lpt_pch_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; @@ -4191,7 +4156,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc) static void cpt_verify_modeset(struct drm_device *dev, int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t dslreg = PIPEDSL(pipe); u32 temp; @@ -4369,7 +4334,7 @@ static void skylake_scaler_disable(struct intel_crtc *crtc) static void skylake_pfit_enable(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = crtc->pipe; struct intel_crtc_scaler_state *scaler_state = &crtc->config->scaler_state; @@ -4397,7 +4362,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc) static void 
ironlake_pfit_enable(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = crtc->pipe; if (crtc->config->pch_pfit.enabled) { @@ -4418,7 +4383,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc) void hsw_enable_ips(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!crtc->config->ips_enabled) return; @@ -4446,7 +4411,9 @@ void hsw_enable_ips(struct intel_crtc *crtc) * and don't wait for vblanks until the end of crtc_enable, then * the HW state readout code will complain that the expected * IPS_CTL value is not the one we read. */ - if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50)) + if (intel_wait_for_register(dev_priv, + IPS_CTL, IPS_ENABLE, IPS_ENABLE, + 50)) DRM_ERROR("Timed out waiting for IPS enable\n"); } } @@ -4454,7 +4421,7 @@ void hsw_enable_ips(struct intel_crtc *crtc) void hsw_disable_ips(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!crtc->config->ips_enabled) return; @@ -4465,7 +4432,9 @@ void hsw_disable_ips(struct intel_crtc *crtc) WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); mutex_unlock(&dev_priv->rps.hw_lock); /* wait for pcode to finish disabling IPS, which may take up to 42ms */ - if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42)) + if (intel_wait_for_register(dev_priv, + IPS_CTL, IPS_ENABLE, 0, + 42)) DRM_ERROR("Timed out waiting for IPS disable\n"); } else { I915_WRITE(IPS_CTL, 0); @@ -4480,7 +4449,7 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) { if (intel_crtc->overlay) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); mutex_lock(&dev->struct_mutex); dev_priv->mm.interruptible = false; @@ -4508,7 +4477,7 @@ static void intel_post_enable_primary(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; @@ -4540,7 +4509,7 @@ static void intel_pre_disable_primary(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; @@ -4567,7 +4536,7 @@ static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; @@ -4626,7 +4595,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->base.state); struct drm_atomic_state *old_state = old_crtc_state->base.state; @@ -4729,7 +4698,7 @@ static void 
intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask static void ironlake_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe; @@ -4757,7 +4726,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config->has_pch_encoder) intel_prepare_shared_dpll(intel_crtc); - if (intel_crtc->config->has_dp_encoder) + if (intel_crtc_has_dp_encoder(intel_crtc->config)) intel_dp_set_m_n(intel_crtc, M1_N1); intel_set_pipe_timings(intel_crtc); @@ -4826,7 +4795,7 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) static void haswell_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe, hsw_workaround_pipe; @@ -4848,10 +4817,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config->shared_dpll) intel_enable_shared_dpll(intel_crtc); - if (intel_crtc->config->has_dp_encoder) + if (intel_crtc_has_dp_encoder(intel_crtc->config)) intel_dp_set_m_n(intel_crtc, M1_N1); - if (!intel_crtc->config->has_dsi_encoder) + if (!transcoder_is_dsi(cpu_transcoder)) intel_set_pipe_timings(intel_crtc); intel_set_pipe_src_size(intel_crtc); @@ -4867,7 +4836,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) &intel_crtc->config->fdi_m_n, NULL); } - if (!intel_crtc->config->has_dsi_encoder) + if (!transcoder_is_dsi(cpu_transcoder)) haswell_set_pipeconf(crtc); haswell_set_pipemisc(crtc); @@ -4889,7 +4858,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config->has_pch_encoder) dev_priv->display.fdi_link_train(crtc); - if (!intel_crtc->config->has_dsi_encoder) + if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_enable_pipe_clock(intel_crtc); if (INTEL_INFO(dev)->gen >= 9) @@ -4904,7 +4873,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_color_load_luts(&pipe_config->base); intel_ddi_set_pipe_settings(crtc); - if (!intel_crtc->config->has_dsi_encoder) + if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_enable_transcoder_func(crtc); if (dev_priv->display.initial_watermarks != NULL) @@ -4913,7 +4882,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_update_watermarks(crtc); /* XXX: Do the pipe assertions at the right place for BXT DSI. 
*/ - if (!intel_crtc->config->has_dsi_encoder) + if (!transcoder_is_dsi(cpu_transcoder)) intel_enable_pipe(intel_crtc); if (intel_crtc->config->has_pch_encoder) @@ -4950,7 +4919,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = crtc->pipe; /* To avoid upsetting the power well on haswell only disable the pfit if @@ -4965,7 +4934,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force) static void ironlake_crtc_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe; @@ -5028,7 +4997,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) static void haswell_crtc_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; @@ -5046,13 +5015,13 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) assert_vblank_disabled(crtc); /* XXX: Do the pipe assertions at the right place for BXT DSI. */ - if (!intel_crtc->config->has_dsi_encoder) + if (!transcoder_is_dsi(cpu_transcoder)) intel_disable_pipe(intel_crtc); if (intel_crtc->config->dp_encoder_is_mst) intel_ddi_set_vc_payload_alloc(crtc, false); - if (!intel_crtc->config->has_dsi_encoder) + if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); if (INTEL_INFO(dev)->gen >= 9) @@ -5060,7 +5029,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) else ironlake_pfit_disable(intel_crtc, false); - if (!intel_crtc->config->has_dsi_encoder) + if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_disable_pipe_clock(intel_crtc); for_each_encoder_on_crtc(dev, crtc, encoder) @@ -5080,7 +5049,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) static void i9xx_pfit_enable(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc_state *pipe_config = crtc->config; if (!pipe_config->gmch_pfit.control) @@ -5150,7 +5119,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder) case INTEL_OUTPUT_UNKNOWN: /* Only DDI platforms should ever use this output type */ WARN_ON_ONCE(!HAS_DDI(dev)); - case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_EDP: intel_dig_port = enc_to_dig_port(&intel_encoder->base); @@ -5184,7 +5153,7 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder) * run the DP detection too. 
*/ WARN_ON_ONCE(!HAS_DDI(dev)); - case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_DP: case INTEL_OUTPUT_EDP: intel_dig_port = enc_to_dig_port(&intel_encoder->base); return port_to_aux_power_domain(intel_dig_port->port); @@ -5232,7 +5201,7 @@ static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum intel_display_power_domain domain; unsigned long domains, new_domains, old_domains; @@ -5277,7 +5246,7 @@ static int skl_calc_cdclk(int max_pixclk, int vco); static void intel_update_max_cdclk(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; @@ -5338,7 +5307,7 @@ static void intel_update_max_cdclk(struct drm_device *dev) static void intel_update_cdclk(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev); @@ -5395,7 +5364,9 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) I915_WRITE(BXT_DE_PLL_ENABLE, 0); /* Timeout 200us */ - if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1)) + if (intel_wait_for_register(dev_priv, + BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0, + 1)) DRM_ERROR("timeout waiting for DE PLL unlock\n"); dev_priv->cdclk_pll.vco = 0; @@ -5414,7 +5385,11 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); /* Timeout 200us */ - if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1)) + if (intel_wait_for_register(dev_priv, + BXT_DE_PLL_ENABLE, + BXT_DE_PLL_LOCK, + BXT_DE_PLL_LOCK, + 1)) DRM_ERROR("timeout waiting for DE PLL lock\n"); dev_priv->cdclk_pll.vco = vco; @@ -5495,14 +5470,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk) return; } - intel_update_cdclk(dev_priv->dev); + intel_update_cdclk(&dev_priv->drm); } static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) { u32 cdctl, expected; - intel_update_cdclk(dev_priv->dev); + intel_update_cdclk(&dev_priv->drm); if (dev_priv->cdclk_pll.vco == 0 || dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref) @@ -5635,7 +5610,7 @@ void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco) dev_priv->skl_preferred_vco_freq = vco; if (changed) - intel_update_max_cdclk(dev_priv->dev); + intel_update_max_cdclk(&dev_priv->drm); } static void @@ -5677,7 +5652,9 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE); - if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5)) + if (intel_wait_for_register(dev_priv, + LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, + 5)) DRM_ERROR("DPLL0 not locked\n"); dev_priv->cdclk_pll.vco = vco; @@ -5690,7 +5667,9 @@ static void skl_dpll0_disable(struct drm_i915_private *dev_priv) { I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); - if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1)) + if (intel_wait_for_register(dev_priv, + LCPLL1_CTL, LCPLL_PLL_LOCK, 0, + 1)) DRM_ERROR("Couldn't disable DPLL0\n"); dev_priv->cdclk_pll.vco = 0; @@ -5725,7 +5704,7 @@ static bool 
skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv) static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; u32 freq_select, pcu_ack; WARN_ON((cdclk == 24000) != (vco == 0)); @@ -5823,7 +5802,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0) goto sanitize; - intel_update_cdclk(dev_priv->dev); + intel_update_cdclk(&dev_priv->drm); /* Is PLL enabled and locked ? */ if (dev_priv->cdclk_pll.vco == 0 || dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref) @@ -5854,7 +5833,7 @@ sanitize: /* Adjust CDclk dividers to allow high res or save power if possible */ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val, cmd; WARN_ON(dev_priv->display.get_display_clock_speed(dev) @@ -5919,7 +5898,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val, cmd; WARN_ON(dev_priv->display.get_display_clock_speed(dev) @@ -6007,7 +5986,7 @@ static int intel_mode_max_pixclk(struct drm_device *dev, struct drm_atomic_state *state) { struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; unsigned max_pixclk = 0, i; @@ -6034,7 +6013,7 @@ static int intel_mode_max_pixclk(struct drm_device *dev, static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int max_pixclk = intel_mode_max_pixclk(dev, state); struct intel_atomic_state *intel_state = to_intel_atomic_state(state); @@ -6102,7 +6081,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_atomic_state *old_intel_state = to_intel_atomic_state(old_state); unsigned req_cdclk = old_intel_state->dev_cdclk; @@ -6141,14 +6120,14 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) if (WARN_ON(intel_crtc->active)) return; - if (intel_crtc->config->has_dp_encoder) + if (intel_crtc_has_dp_encoder(intel_crtc->config)) intel_dp_set_m_n(intel_crtc, M1_N1); intel_set_pipe_timings(intel_crtc); intel_set_pipe_src_size(intel_crtc); if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); I915_WRITE(CHV_CANVAS(pipe), 0); @@ -6193,7 +6172,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) static void i9xx_set_pll_dividers(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0); I915_WRITE(FP1(crtc->pipe), 
crtc->config->dpll_hw_state.fp1); @@ -6214,7 +6193,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) i9xx_set_pll_dividers(intel_crtc); - if (intel_crtc->config->has_dp_encoder) + if (intel_crtc_has_dp_encoder(intel_crtc->config)) intel_dp_set_m_n(intel_crtc, M1_N1); intel_set_pipe_timings(intel_crtc); @@ -6250,7 +6229,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) static void i9xx_pfit_disable(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!crtc->config->gmch_pfit.control) return; @@ -6265,7 +6244,7 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc) static void i9xx_crtc_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe; @@ -6291,7 +6270,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) if (encoder->post_disable) encoder->post_disable(encoder); - if (!intel_crtc->config->has_dsi_encoder) { + if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) { if (IS_CHERRYVIEW(dev)) chv_disable_pll(dev_priv, pipe); else if (IS_VALLEYVIEW(dev)) @@ -6609,7 +6588,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); pipe_config->ips_enabled = i915.enable_ips && hsw_crtc_supports_ips(crtc) && @@ -6629,7 +6608,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; int clock_limit = dev_priv->max_dotclk_freq; @@ -6660,7 +6639,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, * - LVDS dual channel mode * - Double wide pipe */ - if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) && + if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) pipe_config->pipe_src_w &= ~1; @@ -6779,7 +6758,7 @@ static int broxton_get_display_clock_speed(struct drm_device *dev) static int broadwell_get_display_clock_speed(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t lcpll = I915_READ(LCPLL_CTL); uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; @@ -6799,7 +6778,7 @@ static int broadwell_get_display_clock_speed(struct drm_device *dev) static int haswell_get_display_clock_speed(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t lcpll = I915_READ(LCPLL_CTL); uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; @@ -6933,7 +6912,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev) static unsigned int intel_hpll_vco(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); static const unsigned int blb_vco[8] = { [0] = 3200000, [1] = 4000000, @@ -7171,7 +7150,7 @@ static void 
i9xx_update_pll_dividers(struct intel_crtc *crtc, crtc_state->dpll_hw_state.fp0 = fp; crtc->lowfreq_avail = false; - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && reduced_clock) { crtc_state->dpll_hw_state.fp1 = fp2; crtc->lowfreq_avail = true; @@ -7213,7 +7192,7 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, struct intel_link_m_n *m_n) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = crtc->pipe; I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); @@ -7227,7 +7206,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, struct intel_link_m_n *m2_n2) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = crtc->pipe; enum transcoder transcoder = crtc->config->cpu_transcoder; @@ -7290,7 +7269,7 @@ static void vlv_compute_dpll(struct intel_crtc *crtc, pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; /* DPLL not used with DSI, but still need the rest set up */ - if (!pipe_config->has_dsi_encoder) + if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV; @@ -7307,7 +7286,7 @@ static void chv_compute_dpll(struct intel_crtc *crtc, pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; /* DPLL not used with DSI, but still need the rest set up */ - if (!pipe_config->has_dsi_encoder) + if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; pipe_config->dpll_hw_state.dpll_md = @@ -7318,7 +7297,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; u32 mdiv; u32 bestn, bestm1, bestm2, bestp1, bestp2; @@ -7377,15 +7356,15 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, /* Set HBR and RBR LPF coefficients */ if (pipe_config->port_clock == 162000 || - intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) || - intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) + intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) || + intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 0x009f0003); else vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 0x00d0000f); - if (pipe_config->has_dp_encoder) { + if (intel_crtc_has_dp_encoder(pipe_config)) { /* Use SSC source */ if (pipe == PIPE_A) vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), @@ -7405,8 +7384,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); coreclk = (coreclk & 0x0000ff00) | 0x01c00000; - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || - intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) + if (intel_crtc_has_dp_encoder(crtc->config)) coreclk |= 0x01000000; vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); @@ -7418,7 +7396,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; enum 
dpio_channel port = vlv_pipe_to_channel(pipe); u32 loopfilter, tribuf_calcntr; @@ -7580,19 +7558,15 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, struct dpll *reduced_clock) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 dpll; - bool is_sdvo; struct dpll *clock = &crtc_state->dpll; i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); - is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) || - intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI); - dpll = DPLL_VGA_MODE_DIS; - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) dpll |= DPLLB_MODE_LVDS; else dpll |= DPLLB_MODE_DAC_SERIAL; @@ -7602,10 +7576,11 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, << SDVO_MULTIPLIER_SHIFT_HIRES; } - if (is_sdvo) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) dpll |= DPLL_SDVO_HIGH_SPEED; - if (crtc_state->has_dp_encoder) + if (intel_crtc_has_dp_encoder(crtc_state)) dpll |= DPLL_SDVO_HIGH_SPEED; /* compute bitmask from p1 value */ @@ -7635,7 +7610,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, if (crtc_state->sdvo_tv_clock) dpll |= PLL_REF_INPUT_TVCLKINBC; - else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else @@ -7656,7 +7631,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, struct dpll *reduced_clock) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 dpll; struct dpll *clock = &crtc_state->dpll; @@ -7664,7 +7639,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, dpll = DPLL_VGA_MODE_DIS; - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; } else { if (clock->p1 == 2) @@ -7675,10 +7650,10 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, dpll |= PLL_P2_DIVIDE_BY_4; } - if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) + if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) dpll |= DPLL_DVO_2X_MODE; - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else @@ -7691,7 +7666,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = intel_crtc->pipe; enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; @@ -7708,7 +7683,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) crtc_vtotal -= 1; crtc_vblank_end -= 1; - if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) + if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; else vsyncshift = adjusted_mode->crtc_hsync_start - @@ -7753,7 
+7728,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = intel_crtc->pipe; /* pipesrc controls the size that is scaled from, which should @@ -7768,7 +7743,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; uint32_t tmp; @@ -7803,7 +7778,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp; tmp = I915_READ(PIPESRC(crtc->pipe)); @@ -7841,7 +7816,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t pipeconf; pipeconf = 0; @@ -7887,7 +7862,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { if (INTEL_INFO(dev)->gen < 4 || - intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) + intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; else pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; @@ -7906,21 +7881,21 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const struct intel_limit *limit; int refclk = 48000; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); } limit = &intel_limits_i8xx_lvds; - } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) { + } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) { limit = &intel_limits_i8xx_dvo; } else { limit = &intel_limits_i8xx_dac; @@ -7942,14 +7917,14 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const struct intel_limit *limit; int refclk = 96000; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); @@ -7959,10 +7934,10 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc, limit = &intel_limits_g4x_dual_channel_lvds; else limit = 
&intel_limits_g4x_single_channel_lvds; - } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) || - intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) { + } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { limit = &intel_limits_g4x_hdmi; - } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) { + } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) { limit = &intel_limits_g4x_sdvo; } else { /* The option is for other outputs */ @@ -7985,14 +7960,14 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const struct intel_limit *limit; int refclk = 96000; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); @@ -8019,14 +7994,14 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const struct intel_limit *limit; int refclk = 96000; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); @@ -8095,7 +8070,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t tmp; if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) @@ -8122,7 +8097,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = pipe_config->cpu_transcoder; struct dpll clock; u32 mdiv; @@ -8150,7 +8125,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val, base, offset; int pipe = crtc->pipe, plane = crtc->plane; int fourcc, pixel_format; @@ -8218,7 +8193,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = pipe_config->cpu_transcoder; enum dpio_channel port = vlv_pipe_to_channel(pipe); struct dpll clock; @@ -8252,7 +8227,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct 
drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; uint32_t tmp; bool ret; @@ -8363,7 +8338,7 @@ out: static void ironlake_init_pch_refclk(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; int i; u32 val, final; @@ -8537,16 +8512,16 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) tmp |= FDI_MPHY_IOSFSB_RESET_CTL; I915_WRITE(SOUTH_CHICKEN2, tmp); - if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS, 100)) + if (wait_for_us(I915_READ(SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS, 100)) DRM_ERROR("FDI mPHY reset assert timeout\n"); tmp = I915_READ(SOUTH_CHICKEN2); tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; I915_WRITE(SOUTH_CHICKEN2, tmp); - if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) + if (wait_for_us((I915_READ(SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) DRM_ERROR("FDI mPHY reset de-assert timeout\n"); } @@ -8634,7 +8609,7 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, bool with_fdi) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t reg, tmp; if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) @@ -8673,7 +8648,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, /* Sequence to disable CLKOUT_DP */ static void lpt_disable_clkout_dp(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t reg, tmp; mutex_lock(&dev_priv->sb_lock); @@ -8794,7 +8769,7 @@ void intel_init_pch_refclk(struct drm_device *dev) static void ironlake_set_pipeconf(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; uint32_t val; @@ -8836,7 +8811,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc) static void haswell_set_pipeconf(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; u32 val = 0; @@ -8855,7 +8830,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc) static void haswell_set_pipemisc(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) { @@ -8908,37 +8883,13 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, { struct drm_crtc *crtc = &intel_crtc->base; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_atomic_state *state = crtc_state->base.state; - struct drm_connector *connector; - struct drm_connector_state *connector_state; - struct intel_encoder *encoder; + struct drm_i915_private *dev_priv = to_i915(dev); u32 dpll, fp, fp2; - int factor, i; - bool is_lvds = false, is_sdvo = false; - - for_each_connector_in_state(state, connector, connector_state, i) { - if (connector_state->crtc != 
crtc_state->base.crtc) - continue; - - encoder = to_intel_encoder(connector_state->best_encoder); - - switch (encoder->type) { - case INTEL_OUTPUT_LVDS: - is_lvds = true; - break; - case INTEL_OUTPUT_SDVO: - case INTEL_OUTPUT_HDMI: - is_sdvo = true; - break; - default: - break; - } - } + int factor; /* Enable autotuning of the PLL clock (if permissible) */ factor = 21; - if (is_lvds) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if ((intel_panel_use_ssc(dev_priv) && dev_priv->vbt.lvds_ssc_freq == 100000) || (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev))) @@ -8962,7 +8913,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, dpll = 0; - if (is_lvds) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) dpll |= DPLLB_MODE_LVDS; else dpll |= DPLLB_MODE_DAC_SERIAL; @@ -8970,9 +8921,11 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, dpll |= (crtc_state->pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; - if (is_sdvo) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) dpll |= DPLL_SDVO_HIGH_SPEED; - if (crtc_state->has_dp_encoder) + + if (intel_crtc_has_dp_encoder(crtc_state)) dpll |= DPLL_SDVO_HIGH_SPEED; /* compute bitmask from p1 value */ @@ -8995,7 +8948,8 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, break; } - if (is_lvds && intel_panel_use_ssc(dev_priv)) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && + intel_panel_use_ssc(dev_priv)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; @@ -9011,7 +8965,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct dpll reduced_clock; bool has_reduced_clock = false; struct intel_shared_dpll *pll; @@ -9027,7 +8981,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, if (!crtc_state->has_pch_encoder) return 0; - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", dev_priv->vbt.lvds_ssc_freq); @@ -9066,7 +9020,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, return -EINVAL; } - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && has_reduced_clock) crtc->lowfreq_avail = true; @@ -9077,7 +9031,7 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, struct intel_link_m_n *m_n) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); @@ -9095,7 +9049,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, struct intel_link_m_n *m2_n2) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; if (INTEL_INFO(dev)->gen >= 5) { @@ -9153,7 +9107,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv 
= to_i915(dev); struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; uint32_t ps_ctrl = 0; int id = -1; @@ -9184,7 +9138,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val, base, offset, stride_mult, tiling; int pipe = crtc->pipe; int fourcc, pixel_format; @@ -9267,7 +9221,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t tmp; tmp = I915_READ(PF_CTL(crtc->pipe)); @@ -9292,7 +9246,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val, base, offset; int pipe = crtc->pipe; int fourcc, pixel_format; @@ -9360,7 +9314,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; uint32_t tmp; bool ret; @@ -9455,7 +9409,7 @@ out: static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct intel_crtc *crtc; for_each_intel_crtc(dev, crtc) @@ -9489,7 +9443,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; if (IS_HASWELL(dev)) return I915_READ(D_COMP_HSW); @@ -9499,7 +9453,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; if (IS_HASWELL(dev)) { mutex_lock(&dev_priv->rps.hw_lock); @@ -9534,8 +9488,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, val |= LCPLL_CD_SOURCE_FCLK; I915_WRITE(LCPLL_CTL, val); - if (wait_for_atomic_us(I915_READ(LCPLL_CTL) & - LCPLL_CD_SOURCE_FCLK_DONE, 1)) + if (wait_for_us(I915_READ(LCPLL_CTL) & + LCPLL_CD_SOURCE_FCLK_DONE, 1)) DRM_ERROR("Switching to FCLK failed\n"); val = I915_READ(LCPLL_CTL); @@ -9545,7 +9499,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, I915_WRITE(LCPLL_CTL, val); POSTING_READ(LCPLL_CTL); - if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1)) + if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1)) DRM_ERROR("LCPLL still locked\n"); val = hsw_read_dcomp(dev_priv); @@ -9600,7 +9554,9 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) val &= ~LCPLL_PLL_DISABLE; I915_WRITE(LCPLL_CTL, val); - if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5)) + if (intel_wait_for_register(dev_priv, + LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, + 5)) DRM_ERROR("LCPLL not locked yet\n"); if (val & LCPLL_CD_SOURCE_FCLK) { @@ -9608,13 +9564,13 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) val &= 
~LCPLL_CD_SOURCE_FCLK; I915_WRITE(LCPLL_CTL, val); - if (wait_for_atomic_us((I915_READ(LCPLL_CTL) & - LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) + if (wait_for_us((I915_READ(LCPLL_CTL) & + LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) DRM_ERROR("Switching back to LCPLL failed\n"); } intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_update_cdclk(dev_priv->dev); + intel_update_cdclk(&dev_priv->drm); } /* @@ -9642,7 +9598,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) */ void hsw_enable_pc8(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; uint32_t val; DRM_DEBUG_KMS("Enabling package C8+\n"); @@ -9659,7 +9615,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv) void hsw_disable_pc8(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; uint32_t val; DRM_DEBUG_KMS("Disabling package C8+\n"); @@ -9688,7 +9644,7 @@ static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state) static int ilk_max_pixel_rate(struct drm_atomic_state *state) { struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_i915_private *dev_priv = state->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(state->dev); struct drm_crtc *crtc; struct drm_crtc_state *cstate; struct intel_crtc_state *crtc_state; @@ -9724,7 +9680,7 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state) static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t val, data; int ret; @@ -9893,10 +9849,7 @@ static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state) static int haswell_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { - struct intel_encoder *intel_encoder = - intel_ddi_get_crtc_new_encoder(crtc_state); - - if (intel_encoder->type != INTEL_OUTPUT_DSI) { + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) { if (!intel_ddi_pll_select(crtc, crtc_state)) return -EINVAL; } @@ -10006,7 +9959,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, unsigned long *power_domain_mask) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; u32 tmp; @@ -10057,14 +10010,12 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, unsigned long *power_domain_mask) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; enum port port; enum transcoder cpu_transcoder; u32 tmp; - pipe_config->has_dsi_encoder = false; - for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { if (port == PORT_A) cpu_transcoder = TRANSCODER_DSI_A; @@ -10096,18 +10047,17 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, continue; pipe_config->cpu_transcoder = cpu_transcoder; - pipe_config->has_dsi_encoder = true; break; } - return pipe_config->has_dsi_encoder; + return transcoder_is_dsi(pipe_config->cpu_transcoder); } static void haswell_get_ddi_port_state(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = 
to_i915(dev); struct intel_shared_dpll *pll; enum port port; uint32_t tmp; @@ -10150,7 +10100,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; unsigned long power_domain_mask; bool active; @@ -10164,18 +10114,16 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask); - if (IS_BROXTON(dev_priv)) { - bxt_get_dsi_transcoder_state(crtc, pipe_config, - &power_domain_mask); - WARN_ON(active && pipe_config->has_dsi_encoder); - if (pipe_config->has_dsi_encoder) - active = true; + if (IS_BROXTON(dev_priv) && + bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) { + WARN_ON(active); + active = true; } if (!active) goto out; - if (!pipe_config->has_dsi_encoder) { + if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { haswell_get_ddi_port_state(crtc, pipe_config); intel_get_pipe_timings(crtc, pipe_config); } @@ -10226,7 +10174,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base, const struct intel_plane_state *plane_state) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t cntl = 0, size = 0; @@ -10289,7 +10237,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, const struct intel_plane_state *plane_state) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; uint32_t cntl = 0; @@ -10337,7 +10285,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, const struct intel_plane_state *plane_state) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; u32 base = intel_crtc->cursor_addr; @@ -10504,7 +10452,7 @@ mode_fits_in_fbdev(struct drm_device *dev, struct drm_display_mode *mode) { #ifdef CONFIG_DRM_FBDEV_EMULATION - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; struct drm_framebuffer *fb; @@ -10774,7 +10722,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, static int i9xx_pll_refclk(struct drm_device *dev, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 dpll = pipe_config->dpll_hw_state.dpll; if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) @@ -10792,7 +10740,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe = pipe_config->cpu_transcoder; u32 dpll = pipe_config->dpll_hw_state.dpll; u32 fp; @@ -10918,7 +10866,7 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc, struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc) { - 
struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; struct drm_display_mode *mode; @@ -10970,31 +10918,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, return mode; } -void intel_mark_busy(struct drm_i915_private *dev_priv) -{ - if (dev_priv->mm.busy) - return; - - intel_runtime_pm_get(dev_priv); - i915_update_gfx_val(dev_priv); - if (INTEL_GEN(dev_priv) >= 6) - gen6_rps_busy(dev_priv); - dev_priv->mm.busy = true; -} - -void intel_mark_idle(struct drm_i915_private *dev_priv) -{ - if (!dev_priv->mm.busy) - return; - - dev_priv->mm.busy = false; - - if (INTEL_GEN(dev_priv) >= 6) - gen6_rps_idle(dev_priv); - - intel_runtime_pm_put(dev_priv); -} - static void intel_crtc_destroy(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -11056,7 +10979,7 @@ static bool __pageflip_finished_cs(struct intel_crtc *crtc, struct intel_flip_work *work) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned reset_counter; reset_counter = i915_reset_counter(&dev_priv->gpu_error); @@ -11132,7 +11055,7 @@ static bool pageflip_finished(struct intel_crtc *crtc, void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_flip_work *work; @@ -11159,7 +11082,7 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe) void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_flip_work *work; @@ -11267,7 +11190,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, uint32_t flags) { struct intel_engine_cs *engine = req->engine; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pf, pipesrc; int ret; @@ -11305,7 +11228,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, uint32_t flags) { struct intel_engine_cs *engine = req->engine; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pf, pipesrc; int ret; @@ -11464,7 +11387,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, struct intel_flip_work *work) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_framebuffer *fb = intel_crtc->base.primary->fb; const enum pipe pipe = intel_crtc->pipe; u32 ctl, stride, tile_height; @@ -11516,7 +11439,7 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, struct intel_flip_work *work) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_framebuffer *intel_fb = to_intel_framebuffer(intel_crtc->base.primary->fb); struct 
drm_i915_gem_object *obj = intel_fb->obj; @@ -11593,7 +11516,7 @@ static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv, vblank = intel_crtc_get_vblank_counter(intel_crtc); if (work->flip_ready_vblank == 0) { if (work->flip_queued_req && - !i915_gem_request_completed(work->flip_queued_req, true)) + !i915_gem_request_completed(work->flip_queued_req)) return false; work->flip_ready_vblank = vblank; @@ -11618,7 +11541,7 @@ static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv, void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_flip_work *work; @@ -11646,14 +11569,13 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe) spin_unlock(&dev->event_lock); } -__maybe_unused static int intel_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_framebuffer *old_fb = crtc->primary->fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -11949,8 +11871,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, struct drm_framebuffer *fb = plane_state->fb; int ret; - if (crtc_state && INTEL_INFO(dev)->gen >= 9 && - plane->type != DRM_PLANE_TYPE_CURSOR) { + if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) { ret = skl_update_scaler_plane( to_intel_crtc_state(crtc_state), to_intel_plane_state(plane_state)); @@ -12067,31 +11988,11 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state, return true; } -static bool check_encoder_cloning(struct drm_atomic_state *state, - struct intel_crtc *crtc) -{ - struct intel_encoder *encoder; - struct drm_connector *connector; - struct drm_connector_state *connector_state; - int i; - - for_each_connector_in_state(state, connector, connector_state, i) { - if (connector_state->crtc != &crtc->base) - continue; - - encoder = to_intel_encoder(connector_state->best_encoder); - if (!check_single_encoder_cloning(state, crtc, encoder)) - return false; - } - - return true; -} - static int intel_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state); @@ -12099,11 +12000,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, int ret; bool mode_changed = needs_modeset(crtc_state); - if (mode_changed && !check_encoder_cloning(state, intel_crtc)) { - DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); - return -EINVAL; - } - if (mode_changed && !crtc_state->active) pipe_config->update_wm_post = true; @@ -12299,14 +12195,14 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n, pipe_config->fdi_m_n.tu); DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", - pipe_config->has_dp_encoder, + intel_crtc_has_dp_encoder(pipe_config), pipe_config->lane_count, 
pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n, pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n, pipe_config->dp_m_n.tu); DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n", - pipe_config->has_dp_encoder, + intel_crtc_has_dp_encoder(pipe_config), pipe_config->lane_count, pipe_config->dp_m2_n2.gmch_m, pipe_config->dp_m2_n2.gmch_n, @@ -12439,7 +12335,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state) case INTEL_OUTPUT_UNKNOWN: if (WARN_ON(!HAS_DDI(dev))) break; - case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_EDP: port_mask = 1 << enc_to_dig_port(&encoder->base)->port; @@ -12536,6 +12432,24 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, &pipe_config->pipe_src_w, &pipe_config->pipe_src_h); + for_each_connector_in_state(state, connector, connector_state, i) { + if (connector_state->crtc != crtc) + continue; + + encoder = to_intel_encoder(connector_state->best_encoder); + + if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) { + DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); + goto fail; + } + + /* + * Determine output_types before calling the .compute_config() + * hooks so that the hooks can use this information safely. + */ + pipe_config->output_types |= 1 << encoder->type; + } + encoder_retry: /* Ensure the port clock defaults are reset when retrying. */ pipe_config->port_clock = 0; @@ -12821,7 +12735,6 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(fdi_lanes); PIPE_CONF_CHECK_M_N(fdi_m_n); - PIPE_CONF_CHECK_I(has_dp_encoder); PIPE_CONF_CHECK_I(lane_count); PIPE_CONF_CHECK_X(lane_lat_optim_mask); @@ -12833,7 +12746,7 @@ intel_pipe_config_compare(struct drm_device *dev, } else PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); - PIPE_CONF_CHECK_I(has_dsi_encoder); + PIPE_CONF_CHECK_X(output_types); PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); @@ -12952,7 +12865,7 @@ static void verify_wm_state(struct drm_crtc *crtc, struct drm_crtc_state *new_state) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct skl_ddb_allocation hw_ddb, *sw_ddb; struct skl_ddb_entry *hw_entry, *sw_entry; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -13058,7 +12971,7 @@ verify_crtc_state(struct drm_crtc *crtc, struct drm_crtc_state *new_crtc_state) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *pipe_config, *sw_config; @@ -13101,8 +13014,10 @@ verify_crtc_state(struct drm_crtc *crtc, "Encoder connected to wrong pipe %c\n", pipe_name(pipe)); - if (active) + if (active) { + pipe_config->output_types |= 1 << encoder->type; encoder->get_config(encoder, pipe_config); + } } if (!new_crtc_state->active) @@ -13181,7 +13096,7 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state, struct drm_crtc_state *new_crtc_state) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state); struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state); @@ -13220,7 +13135,7 @@ 
intel_modeset_verify_crtc(struct drm_crtc *crtc, static void verify_disabled_dpll_state(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; for (i = 0; i < dev_priv->num_shared_dpll; i++) @@ -13267,7 +13182,7 @@ static void update_scanline_offset(struct intel_crtc *crtc) crtc->scanline_offset = vtotal - 1; } else if (HAS_DDI(dev) && - intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) { + intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) { crtc->scanline_offset = 2; } else crtc->scanline_offset = 1; @@ -13402,7 +13317,7 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state) static int intel_modeset_checks(struct drm_atomic_state *state) { struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_i915_private *dev_priv = state->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(state->dev); struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; int ret = 0, i; @@ -13568,7 +13483,7 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_plane_state *plane_state; struct drm_crtc_state *crtc_state; struct drm_plane *plane; @@ -13697,7 +13612,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc_state *old_crtc_state; struct drm_crtc *crtc; struct intel_crtc_state *intel_cstate; @@ -13929,7 +13844,7 @@ static int intel_atomic_commit(struct drm_device *dev, bool nonblock) { struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; if (intel_state->modeset && nonblock) { @@ -14008,7 +13923,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .set_property = drm_atomic_helper_crtc_set_property, .destroy = intel_crtc_destroy, - .page_flip = drm_atomic_helper_page_flip, + .page_flip = intel_crtc_page_flip, .atomic_duplicate_state = intel_crtc_duplicate_state, .atomic_destroy_state = intel_crtc_destroy_state, }; @@ -14136,15 +14051,11 @@ int skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state) { int max_scale; - struct drm_device *dev; - struct drm_i915_private *dev_priv; int crtc_clock, cdclk; if (!intel_crtc || !crtc_state->base.enable) return DRM_PLANE_HELPER_NO_SCALING; - dev = intel_crtc->base.dev; - dev_priv = dev->dev_private; crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk; @@ -14534,7 +14445,7 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr static void intel_crtc_init(struct drm_device *dev, int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc; struct intel_crtc_state *crtc_state = NULL; struct drm_plane *primary = NULL; @@ -14633,11 +14544,8 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct intel_crtc *crtc; drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); - - if 
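Note the drm_crtc_funcs change above: with the flip-work infrastructure in place, the legacy page-flip ioctl is pointed back at the driver's own intel_crtc_page_flip() (the CS flip path whose __maybe_unused annotation was dropped earlier in this diff) instead of drm_atomic_helper_page_flip. The relevant wiring, condensed from the hunk:

static const struct drm_crtc_funcs intel_crtc_funcs = {
	/* ... */
	.page_flip = intel_crtc_page_flip,	/* driver CS flip path, not the atomic helper */
	/* ... */
};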
(!drmmode_crtc) { - DRM_ERROR("no such CRTC id\n"); + if (!drmmode_crtc) return -ENOENT; - } crtc = to_intel_crtc(drmmode_crtc); pipe_from_crtc_id->pipe = crtc->pipe; @@ -14664,7 +14572,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder) static bool has_edp_a(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!IS_MOBILE(dev)) return false; @@ -14680,7 +14588,7 @@ static bool has_edp_a(struct drm_device *dev) static bool intel_crt_present(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (INTEL_INFO(dev)->gen >= 9) return false; @@ -14706,10 +14614,15 @@ static bool intel_crt_present(struct drm_device *dev) static void intel_setup_outputs(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; bool dpd_is_edp = false; + /* + * intel_edp_init_connector() depends on this completing first, to + * prevent the registration of both eDP and LVDS and the incorrect + * sharing of the PPS. + */ intel_lvds_init(dev); if (intel_crt_present(dev)) @@ -15354,7 +15267,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) */ static void quirk_pipea_force(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->quirks |= QUIRK_PIPEA_FORCE; DRM_INFO("applying pipe a force quirk\n"); @@ -15362,7 +15275,7 @@ static void quirk_pipea_force(struct drm_device *dev) static void quirk_pipeb_force(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->quirks |= QUIRK_PIPEB_FORCE; DRM_INFO("applying pipe b force quirk\n"); @@ -15373,7 +15286,7 @@ static void quirk_pipeb_force(struct drm_device *dev) */ static void quirk_ssc_force_disable(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; DRM_INFO("applying lvds SSC disable quirk\n"); } @@ -15384,7 +15297,7 @@ static void quirk_ssc_force_disable(struct drm_device *dev) */ static void quirk_invert_brightness(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; DRM_INFO("applying inverted panel brightness quirk\n"); } @@ -15392,7 +15305,7 @@ static void quirk_invert_brightness(struct drm_device *dev) /* Some VBT's incorrectly indicate no backlight is present */ static void quirk_backlight_present(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT; DRM_INFO("applying backlight present quirk\n"); } @@ -15518,7 +15431,7 @@ static void intel_init_quirks(struct drm_device *dev) /* Disable the VGA plane that we never use */ static void i915_disable_vga(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u8 sr1; i915_reg_t vga_reg = i915_vgacntrl_reg(dev); @@ -15536,7 +15449,7 @@ static void i915_disable_vga(struct drm_device *dev) void intel_modeset_init_hw(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; +
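The quirk_* helpers above each just OR a QUIRK_* flag into dev_priv->quirks; intel_init_quirks() (whose hunk header appears here) applies them by walking a PCI-ID match table. Rough shape of that table for context — the entry shown is hypothetical, with placeholder IDs:

struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

static struct intel_quirk intel_quirks[] = {
	/* hypothetical entry; real IDs come from known-broken machines */
	{ 0x1234, 0x5678, 0x9abc, quirk_backlight_present },
};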
struct drm_i915_private *dev_priv = to_i915(dev); intel_update_cdclk(dev); @@ -15784,7 +15697,7 @@ static bool intel_check_plane_mapping(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val; if (INTEL_INFO(dev)->num_pipes == 1) @@ -15824,7 +15737,7 @@ static bool intel_encoder_has_connectors(struct intel_encoder *encoder) static void intel_sanitize_crtc(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; /* Clear any frame start delays used for debugging left by the BIOS */ @@ -15949,7 +15862,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) void i915_redisable_vga_power_on(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t vga_reg = i915_vgacntrl_reg(dev); if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { @@ -15960,7 +15873,7 @@ void i915_redisable_vga_power_on(struct drm_device *dev) void i915_redisable_vga(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* This function can be called both from intel_modeset_setup_hw_state or * at a very early point in our resume sequence, where the power well @@ -16000,7 +15913,7 @@ static void readout_plane_state(struct intel_crtc *crtc) static void intel_modeset_readout_hw_state(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; struct intel_crtc *crtc; struct intel_encoder *encoder; @@ -16069,6 +15982,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) if (encoder->get_hw_state(encoder, &pipe)) { crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); encoder->base.crtc = &crtc->base; + crtc->config->output_types |= 1 << encoder->type; encoder->get_config(encoder, crtc->config); } else { encoder->base.crtc = NULL; @@ -16153,7 +16067,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) static void intel_modeset_setup_hw_state(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; struct intel_crtc *crtc; struct intel_encoder *encoder; @@ -16309,8 +16223,21 @@ void intel_modeset_gem_init(struct drm_device *dev) c->state->plane_mask &= ~(1 << drm_plane_index(c->primary)); } } +} + +int intel_connector_register(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + int ret; + + ret = intel_backlight_device_register(intel_connector); + if (ret) + goto err; - intel_backlight_register(dev); + return 0; + +err: + return ret; } void intel_connector_unregister(struct drm_connector *connector) @@ -16323,7 +16250,7 @@ void intel_connector_unregister(struct drm_connector *connector) void intel_modeset_cleanup(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); intel_disable_gt_powersave(dev_priv); @@ -16347,8 +16274,6 @@ void intel_modeset_cleanup(struct drm_device *dev) /* flush any delayed tasks or pending work */ flush_scheduled_work(); - drm_connector_unregister_all(dev); - 
drm_mode_config_cleanup(dev); intel_cleanup_overlay(dev_priv); @@ -16371,7 +16296,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector, */ int intel_modeset_vga_set_state(struct drm_device *dev, bool state) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; u16 gmch_ctrl; @@ -16527,7 +16452,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, struct drm_device *dev, struct intel_display_error_state *error) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; if (!error) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index ffa43eca14d3..0c5ba3410a1e 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -262,7 +262,7 @@ static void pps_lock(struct intel_dp *intel_dp) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &intel_dig_port->base; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; /* @@ -280,7 +280,7 @@ static void pps_unlock(struct intel_dp *intel_dp) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &intel_dig_port->base; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; mutex_unlock(&dev_priv->pps_mutex); @@ -294,7 +294,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = intel_dp->pps_pipe; bool pll_enabled, release_cl_override = false; enum dpio_phy phy = DPIO_PHY(pipe); @@ -368,7 +368,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B); enum pipe pipe; @@ -426,6 +426,37 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) return intel_dp->pps_pipe; } +static int +bxt_power_sequencer_idx(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + + lockdep_assert_held(&dev_priv->pps_mutex); + + /* We should never land here with regular DP ports */ + WARN_ON(!is_edp(intel_dp)); + + /* + * TODO: BXT has 2 PPS instances. The correct port->PPS instance + * mapping needs to be retrieved from VBT, for now just hard-code to + * use instance #0 always. + */ + if (!intel_dp->pps_reset) + return 0; + + intel_dp->pps_reset = false; + + /* + * Only the HW needs to be reprogrammed, the SW state is fixed and + * has been setup during connector init. 
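pps_lock()/pps_unlock() at the top of intel_dp.c pair the pps_mutex with a display power domain reference, so the panel power sequencer registers cannot lose power while someone is inside a PPS critical section. The pattern, abridged (the power-domain selection is simplified here; the real code derives it from the encoder's AUX channel):

static void pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* keep the PPS powered for the whole critical section */
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_A); /* simplified */
	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A); /* simplified */
}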
+ */ + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); + + return 0; +} + typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, enum pipe pipe); @@ -475,7 +506,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dig_port->port; lockdep_assert_held(&dev_priv->pps_mutex); @@ -507,12 +538,13 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); } -void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv) +void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct intel_encoder *encoder; - if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))) + if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && + !IS_BROXTON(dev))) return; /* @@ -532,34 +564,71 @@ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv) continue; intel_dp = enc_to_intel_dp(&encoder->base); - intel_dp->pps_pipe = INVALID_PIPE; + if (IS_BROXTON(dev)) + intel_dp->pps_reset = true; + else + intel_dp->pps_pipe = INVALID_PIPE; + } +} + +struct pps_registers { + i915_reg_t pp_ctrl; + i915_reg_t pp_stat; + i915_reg_t pp_on; + i915_reg_t pp_off; + i915_reg_t pp_div; +}; + +static void intel_pps_get_registers(struct drm_i915_private *dev_priv, + struct intel_dp *intel_dp, + struct pps_registers *regs) +{ + memset(regs, 0, sizeof(*regs)); + + if (IS_BROXTON(dev_priv)) { + int idx = bxt_power_sequencer_idx(intel_dp); + + regs->pp_ctrl = BXT_PP_CONTROL(idx); + regs->pp_stat = BXT_PP_STATUS(idx); + regs->pp_on = BXT_PP_ON_DELAYS(idx); + regs->pp_off = BXT_PP_OFF_DELAYS(idx); + } else if (HAS_PCH_SPLIT(dev_priv)) { + regs->pp_ctrl = PCH_PP_CONTROL; + regs->pp_stat = PCH_PP_STATUS; + regs->pp_on = PCH_PP_ON_DELAYS; + regs->pp_off = PCH_PP_OFF_DELAYS; + regs->pp_div = PCH_PP_DIVISOR; + } else { + enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); + + regs->pp_ctrl = VLV_PIPE_PP_CONTROL(pipe); + regs->pp_stat = VLV_PIPE_PP_STATUS(pipe); + regs->pp_on = VLV_PIPE_PP_ON_DELAYS(pipe); + regs->pp_off = VLV_PIPE_PP_OFF_DELAYS(pipe); + regs->pp_div = VLV_PIPE_PP_DIVISOR(pipe); } } static i915_reg_t _pp_ctrl_reg(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct pps_registers regs; - if (IS_BROXTON(dev)) - return BXT_PP_CONTROL(0); - else if (HAS_PCH_SPLIT(dev)) - return PCH_PP_CONTROL; - else - return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp)); + intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp, + ®s); + + return regs.pp_ctrl; } static i915_reg_t _pp_stat_reg(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct pps_registers regs; - if (IS_BROXTON(dev)) - return BXT_PP_STATUS(0); - else if (HAS_PCH_SPLIT(dev)) - return PCH_PP_STATUS; - else - return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); + intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp, + ®s); + + return regs.pp_stat; } /* Reboot notifier handler to shutdown panel power to guarantee T12 timing @@ -570,7 +639,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp), 
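struct pps_registers plus intel_pps_get_registers() above centralize the BXT/PCH-split/VLV register selection that used to be open-coded in every accessor; _pp_ctrl_reg() and _pp_stat_reg() shrink to a single lookup each, and the BXT instance choice (bxt_power_sequencer_idx()) now lives in exactly one place. Any further accessor follows the same shape — a hypothetical example:

static i915_reg_t _pp_div_reg(struct intel_dp *intel_dp)	/* hypothetical */
{
	struct pps_registers regs;

	intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
				&regs);

	/* left zeroed by the memset() on BXT, which has no divisor register */
	return regs.pp_div;
}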
edp_notifier); struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!is_edp(intel_dp) || code != SYS_RESTART) return 0; @@ -601,7 +670,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, static bool edp_have_panel_power(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); lockdep_assert_held(&dev_priv->pps_mutex); @@ -615,7 +684,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp) static bool edp_have_panel_vdd(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); lockdep_assert_held(&dev_priv->pps_mutex); @@ -630,7 +699,7 @@ static void intel_dp_check_edp(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (!is_edp(intel_dp)) return; @@ -648,7 +717,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg; uint32_t status; bool done; @@ -658,7 +727,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, msecs_to_jiffies_timeout(10)); else - done = wait_for_atomic(C, 10) == 0; + done = wait_for(C, 10) == 0; if (!done) DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n", has_aux_irq); @@ -781,7 +850,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg; uint32_t aux_clock_divider; int i, ret, recv_bytes; @@ -1180,35 +1249,18 @@ intel_dp_aux_fini(struct intel_dp *intel_dp) kfree(intel_dp->aux.name); } -static int +static void intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->port; - int ret; intel_aux_reg_init(intel_dp); + drm_dp_aux_init(&intel_dp->aux); + /* Failure to allocate our preferred name is not critical */ intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port)); - if (!intel_dp->aux.name) - return -ENOMEM; - - intel_dp->aux.dev = connector->base.kdev; intel_dp->aux.transfer = intel_dp_aux_transfer; - - DRM_DEBUG_KMS("registering %s bus for %s\n", - intel_dp->aux.name, - connector->base.kdev->kobj.name); - - ret = drm_dp_aux_register(&intel_dp->aux); - if (ret < 0) { - DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n", - intel_dp->aux.name, ret); - kfree(intel_dp->aux.name); - return ret; - } - - return 0; } static int @@ -1421,7 +1473,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; 
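The wait_for_atomic() -> wait_for() switch in intel_dp_aux_wait_done() above matters because the no-interrupt fallback runs in ordinary process context: wait_for() may sleep between polls, while wait_for_atomic() busy-spins and is reserved for atomic context. Illustrative semantics only — the real macros in intel_drv.h are this merge's tuned versions with slack handling and cpu_relax():

/* sleeping poll, process context only (simplified sketch) */
#define wait_for(COND, MS) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
	int ret__ = 0; \
	while (!(COND)) { \
		if (time_after(jiffies, timeout__)) { \
			ret__ = (COND) ? 0 : -ETIMEDOUT; \
			break; \
		} \
		usleep_range(1000, 2000); /* would be a busy spin in the atomic flavour */ \
	} \
	ret__; \
})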
+ struct drm_i915_private *dev_priv = to_i915(dev); struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = dp_to_dig_port(intel_dp)->port; @@ -1449,7 +1501,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) pipe_config->has_pch_encoder = true; - pipe_config->has_dp_encoder = true; pipe_config->has_drrs = false; pipe_config->has_audio = intel_dp->has_audio && port != PORT_A; @@ -1605,7 +1656,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp, static void intel_dp_prepare(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = dp_to_dig_port(intel_dp)->port; struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); @@ -1693,16 +1744,21 @@ static void intel_dp_prepare(struct intel_encoder *encoder) #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) +static void intel_pps_verify_state(struct drm_i915_private *dev_priv, + struct intel_dp *intel_dp); + static void wait_panel_status(struct intel_dp *intel_dp, u32 mask, u32 value) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t pp_stat_reg, pp_ctrl_reg; lockdep_assert_held(&dev_priv->pps_mutex); + intel_pps_verify_state(dev_priv, intel_dp); + pp_stat_reg = _pp_stat_reg(intel_dp); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); @@ -1711,8 +1767,9 @@ static void wait_panel_status(struct intel_dp *intel_dp, I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); - if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, - 5 * USEC_PER_SEC, 10 * USEC_PER_MSEC)) + if (intel_wait_for_register(dev_priv, + pp_stat_reg, mask, value, + 5000)) DRM_ERROR("Panel status timeout: status %08x control %08x\n", I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); @@ -1772,7 +1829,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp) static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 control; lockdep_assert_held(&dev_priv->pps_mutex); @@ -1795,7 +1852,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) struct drm_device *dev = intel_dp_to_dev(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *intel_encoder = &intel_dig_port->base; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; u32 pp; i915_reg_t pp_stat_reg, pp_ctrl_reg; @@ -1868,7 +1925,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *intel_encoder = &intel_dig_port->base; @@ -1937,8 +1994,7 @@ static void 
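wait_panel_status() above is one of many conversions in this series to the new intel_wait_for_register() helper: poll reg until (read & mask) == value or the millisecond timeout expires, returning 0 on success and an error on timeout. Functionally it matches the open-coded _wait_for() it replaces; a sketch of the contract (the real implementation in the uncore code also tries a short busy-wait before falling back to a sleeping poll):

int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg, u32 mask, u32 value,
			    unsigned int timeout_ms)
{
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}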
edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) */ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) { - struct drm_i915_private *dev_priv = - intel_dp_to_dev(intel_dp)->dev_private; + struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); lockdep_assert_held(&dev_priv->pps_mutex); @@ -1959,7 +2015,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) static void edp_panel_on(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 pp; i915_reg_t pp_ctrl_reg; @@ -2020,7 +2076,7 @@ static void edp_panel_off(struct intel_dp *intel_dp) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; u32 pp; i915_reg_t pp_ctrl_reg; @@ -2072,7 +2128,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 pp; i915_reg_t pp_ctrl_reg; @@ -2113,7 +2169,7 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp) static void _intel_edp_backlight_off(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 pp; i915_reg_t pp_ctrl_reg; @@ -2229,7 +2285,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp) * 2. 
Program DP PLL enable */ if (IS_GEN5(dev_priv)) - intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe); + intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe); intel_dp->DP |= DP_PLL_ENABLE; @@ -2294,7 +2350,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = dp_to_dig_port(intel_dp)->port; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; u32 tmp; bool ret; @@ -2347,7 +2403,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); u32 tmp, flags = 0; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = dp_to_dig_port(intel_dp)->port; struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); @@ -2385,8 +2441,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder, !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235) pipe_config->limited_color_range = true; - pipe_config->has_dp_encoder = true; - pipe_config->lane_count = ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; @@ -2471,7 +2525,7 @@ static void chv_post_disable_dp(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); intel_dp_link_down(intel_dp); @@ -2490,7 +2544,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dig_port->port; if (HAS_DDI(dev)) { @@ -2570,7 +2624,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, static void intel_dp_enable_port(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc); @@ -2599,7 +2653,7 @@ static void intel_enable_dp(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); uint32_t dp_reg = I915_READ(intel_dp->output_reg); enum pipe pipe = crtc->pipe; @@ -2672,7 +2726,7 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder) static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); enum pipe pipe = intel_dp->pps_pipe; i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); @@ -2698,7 +2752,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) static void vlv_steal_power_sequencer(struct drm_device *dev, enum pipe pipe) { - struct drm_i915_private *dev_priv = 
dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; lockdep_assert_held(&dev_priv->pps_mutex); @@ -2736,7 +2790,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &intel_dig_port->base; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); lockdep_assert_held(&dev_priv->pps_mutex); @@ -2824,7 +2878,7 @@ uint8_t intel_dp_voltage_max(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = dp_to_dig_port(intel_dp)->port; if (IS_BROXTON(dev)) @@ -3242,7 +3296,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dig_port->port; uint32_t val; @@ -3264,8 +3318,10 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) if (port == PORT_A) return; - if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE), - 1)) + if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port), + DP_TP_STATUS_IDLE_DONE, + DP_TP_STATUS_IDLE_DONE, + 1)) DRM_ERROR("Timed out waiting for DP idle patterns\n"); } @@ -3276,7 +3332,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); enum port port = intel_dig_port->port; struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t DP = intel_dp->DP; if (WARN_ON(HAS_DDI(dev))) @@ -3328,7 +3384,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) I915_WRITE(intel_dp->output_reg, DP); POSTING_READ(intel_dp->output_reg); - intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A); + intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A); intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } @@ -3343,7 +3399,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, sizeof(intel_dp->dpcd)) < 0) @@ -4194,7 +4250,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) } if (intel_encoder->type != INTEL_OUTPUT_EDP) - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + intel_encoder->type = INTEL_OUTPUT_DP; intel_dp_probe_oui(intel_dp); @@ -4270,7 +4326,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) /* MST devices are disconnected from a monitor POV */ intel_dp_unset_edid(intel_dp); if (intel_encoder->type != INTEL_OUTPUT_EDP) - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + intel_encoder->type = INTEL_OUTPUT_DP; return connector_status_disconnected; } @@ -4309,7 +4365,7 @@ intel_dp_force(struct drm_connector *connector) intel_display_power_put(dev_priv, power_domain); if 
(intel_encoder->type != INTEL_OUTPUT_EDP) - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + intel_encoder->type = INTEL_OUTPUT_DP; } static int intel_dp_get_modes(struct drm_connector *connector) @@ -4358,7 +4414,7 @@ intel_dp_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_encoder *intel_encoder = intel_attached_encoder(connector); struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); @@ -4446,6 +4502,25 @@ done: return 0; } +static int +intel_dp_connector_register(struct drm_connector *connector) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + int ret; + + ret = intel_connector_register(connector); + if (ret) + return ret; + + i915_debugfs_connector_add(connector); + + DRM_DEBUG_KMS("registering %s bus for %s\n", + intel_dp->aux.name, connector->kdev->kobj.name); + + intel_dp->aux.dev = connector->kdev; + return drm_dp_aux_register(&intel_dp->aux); +} + static void intel_dp_connector_unregister(struct drm_connector *connector) { @@ -4521,7 +4596,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; lockdep_assert_held(&dev_priv->pps_mutex); @@ -4544,13 +4619,15 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) void intel_dp_encoder_reset(struct drm_encoder *encoder) { - struct intel_dp *intel_dp; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (!HAS_DDI(dev_priv)) + intel_dp->DP = I915_READ(intel_dp->output_reg); if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP) return; - intel_dp = enc_to_intel_dp(encoder); - pps_lock(intel_dp); /* @@ -4572,6 +4649,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_dp_set_property, .atomic_get_property = intel_connector_atomic_get_property, + .late_register = intel_dp_connector_register, .early_unregister = intel_dp_connector_unregister, .destroy = intel_dp_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, @@ -4594,13 +4672,13 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) struct intel_dp *intel_dp = &intel_dig_port->dp; struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; enum irqreturn ret = IRQ_NONE; if (intel_dig_port->base.type != INTEL_OUTPUT_EDP && intel_dig_port->base.type != INTEL_OUTPUT_HDMI) - intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; + intel_dig_port->base.type = INTEL_OUTPUT_DP; if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { /* @@ -4662,7 +4740,7 @@ put_power: /* check the VBT to see whether the eDP is on another port */ bool intel_dp_is_edp(struct drm_device *dev, enum port port) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct 
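intel_dp_connector_register() above is the other half of moving userspace-visible registration out of connector init: the debugfs entry and the DP AUX device are created from the connector's ->late_register hook, which the DRM core invokes from drm_connector_register(), and torn down from ->early_unregister. The pairing these hunks set up, condensed:

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	/* ... */
	.late_register = intel_dp_connector_register,	/* debugfs + drm_dp_aux_register() */
	.early_unregister = intel_dp_connector_unregister, /* undo what late_register did */
	/* ... */
};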
drm_i915_private *dev_priv = to_i915(dev); /* * eDP not supported on g4x. so bail out early just @@ -4704,82 +4782,93 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) } static void -intel_dp_init_panel_power_sequencer(struct drm_device *dev, - struct intel_dp *intel_dp) +intel_pps_readout_hw_state(struct drm_i915_private *dev_priv, + struct intel_dp *intel_dp, struct edp_power_seq *seq) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct edp_power_seq cur, vbt, spec, - *final = &intel_dp->pps_delays; u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0; - i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg; + struct pps_registers regs; - lockdep_assert_held(&dev_priv->pps_mutex); - - /* already initialized? */ - if (final->t11_t12 != 0) - return; - - if (IS_BROXTON(dev)) { - /* - * TODO: BXT has 2 sets of PPS registers. - * Correct Register for Broxton need to be identified - * using VBT. hardcoding for now - */ - pp_ctrl_reg = BXT_PP_CONTROL(0); - pp_on_reg = BXT_PP_ON_DELAYS(0); - pp_off_reg = BXT_PP_OFF_DELAYS(0); - } else if (HAS_PCH_SPLIT(dev)) { - pp_ctrl_reg = PCH_PP_CONTROL; - pp_on_reg = PCH_PP_ON_DELAYS; - pp_off_reg = PCH_PP_OFF_DELAYS; - pp_div_reg = PCH_PP_DIVISOR; - } else { - enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); - - pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); - pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); - pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe); - pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); - } + intel_pps_get_registers(dev_priv, intel_dp, ®s); /* Workaround: Need to write PP_CONTROL with the unlock key as * the very first thing. */ pp_ctl = ironlake_get_pp_control(intel_dp); - pp_on = I915_READ(pp_on_reg); - pp_off = I915_READ(pp_off_reg); - if (!IS_BROXTON(dev)) { - I915_WRITE(pp_ctrl_reg, pp_ctl); - pp_div = I915_READ(pp_div_reg); + pp_on = I915_READ(regs.pp_on); + pp_off = I915_READ(regs.pp_off); + if (!IS_BROXTON(dev_priv)) { + I915_WRITE(regs.pp_ctrl, pp_ctl); + pp_div = I915_READ(regs.pp_div); } /* Pull timing values out of registers */ - cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> - PANEL_POWER_UP_DELAY_SHIFT; + seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> + PANEL_POWER_UP_DELAY_SHIFT; - cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> - PANEL_LIGHT_ON_DELAY_SHIFT; + seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> + PANEL_LIGHT_ON_DELAY_SHIFT; - cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> - PANEL_LIGHT_OFF_DELAY_SHIFT; + seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> + PANEL_LIGHT_OFF_DELAY_SHIFT; - cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> - PANEL_POWER_DOWN_DELAY_SHIFT; + seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> + PANEL_POWER_DOWN_DELAY_SHIFT; - if (IS_BROXTON(dev)) { + if (IS_BROXTON(dev_priv)) { u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >> BXT_POWER_CYCLE_DELAY_SHIFT; if (tmp > 0) - cur.t11_t12 = (tmp - 1) * 1000; + seq->t11_t12 = (tmp - 1) * 1000; else - cur.t11_t12 = 0; + seq->t11_t12 = 0; } else { - cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> + seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; } +} + +static void +intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq) +{ + DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", + state_name, + seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); +} + +static void +intel_pps_verify_state(struct drm_i915_private *dev_priv, + struct intel_dp *intel_dp) +{ + struct edp_power_seq hw; + struct edp_power_seq *sw = 
&intel_dp->pps_delays; + + intel_pps_readout_hw_state(dev_priv, intel_dp, &hw); + + if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 || + hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) { + DRM_ERROR("PPS state mismatch\n"); + intel_pps_dump_state("sw", sw); + intel_pps_dump_state("hw", &hw); + } +} + +static void +intel_dp_init_panel_power_sequencer(struct drm_device *dev, + struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct edp_power_seq cur, vbt, spec, + *final = &intel_dp->pps_delays; + + lockdep_assert_held(&dev_priv->pps_mutex); - DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", - cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); + /* already initialized? */ + if (final->t11_t12 != 0) + return; + + intel_pps_readout_hw_state(dev_priv, intel_dp, &cur); + + intel_pps_dump_state("cur", &cur); vbt = dev_priv->vbt.edp.pps; @@ -4795,8 +4884,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, * too. */ spec.t11_t12 = (510 + 100) * 10; - DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", - vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); + intel_pps_dump_state("vbt", &vbt); /* Use the max of the register settings and vbt. If both are * unset, fall back to the spec limits. */ @@ -4824,59 +4912,41 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); + + /* + * We override the HW backlight delays to 1 because we do manual waits + * on them. For T8, even BSpec recommends doing it. For T9, if we + * don't do this, we'll end up waiting for the backlight off delay + * twice: once when we do the manual sleep, and once when we disable + * the panel and wait for the PP_STATUS bit to become zero. + */ + final->t8 = 1; + final->t9 = 1; } static void intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 pp_on, pp_off, pp_div, port_sel = 0; int div = dev_priv->rawclk_freq / 1000; - i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg; + struct pps_registers regs; enum port port = dp_to_dig_port(intel_dp)->port; const struct edp_power_seq *seq = &intel_dp->pps_delays; lockdep_assert_held(&dev_priv->pps_mutex); - if (IS_BROXTON(dev)) { - /* - * TODO: BXT has 2 sets of PPS registers. - * Correct Register for Broxton need to be identified - * using VBT. hardcoding for now - */ - pp_ctrl_reg = BXT_PP_CONTROL(0); - pp_on_reg = BXT_PP_ON_DELAYS(0); - pp_off_reg = BXT_PP_OFF_DELAYS(0); - - } else if (HAS_PCH_SPLIT(dev)) { - pp_on_reg = PCH_PP_ON_DELAYS; - pp_off_reg = PCH_PP_OFF_DELAYS; - pp_div_reg = PCH_PP_DIVISOR; - } else { - enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); + intel_pps_get_registers(dev_priv, intel_dp, ®s); - pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); - pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe); - pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); - } - - /* - * And finally store the new values in the power sequencer. The - * backlight delays are set to 1 because we do manual waits on them. For - * T8, even BSpec recommends doing it. For T9, if we don't do this, - * we'll end up waiting for the backlight off delay twice: once when we - * do the manual sleep, and once when we disable the panel and wait for - * the PP_STATUS bit to become zero. 
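The new intel_pps_verify_state() is wired into wait_panel_status() (see the hunk further up in this file), so every panel power wait now cross-checks the cached edp_power_seq against a fresh hardware readout and complains on drift. This round-trips cleanly only because the registers are programmed from the same final delays, including the t8/t9 = 1 overrides added below. Sketch of the hook point:

static void wait_panel_status(struct intel_dp *intel_dp, u32 mask, u32 value)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* catch sw/hw divergence before acting on the cached delays */
	intel_pps_verify_state(dev_priv, intel_dp);
	/* ... then poll the status register as before ... */
}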
- */ pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | - (1 << PANEL_LIGHT_ON_DELAY_SHIFT); - pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) | + (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); + pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); /* Compute the divisor for the pp clock, simply match the Bspec * formula. */ if (IS_BROXTON(dev)) { - pp_div = I915_READ(pp_ctrl_reg); + pp_div = I915_READ(regs.pp_ctrl); pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK; pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000) << BXT_POWER_CYCLE_DELAY_SHIFT); @@ -4899,19 +4969,19 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, pp_on |= port_sel; - I915_WRITE(pp_on_reg, pp_on); - I915_WRITE(pp_off_reg, pp_off); + I915_WRITE(regs.pp_on, pp_on); + I915_WRITE(regs.pp_off, pp_off); if (IS_BROXTON(dev)) - I915_WRITE(pp_ctrl_reg, pp_div); + I915_WRITE(regs.pp_ctrl, pp_div); else - I915_WRITE(pp_div_reg, pp_div); + I915_WRITE(regs.pp_div, pp_div); DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", - I915_READ(pp_on_reg), - I915_READ(pp_off_reg), + I915_READ(regs.pp_on), + I915_READ(regs.pp_off), IS_BROXTON(dev) ? - (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) : - I915_READ(pp_div_reg)); + (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) : + I915_READ(regs.pp_div)); } /** @@ -4928,7 +4998,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, */ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; struct intel_digital_port *dig_port = NULL; struct intel_dp *intel_dp = dev_priv->drrs.dp; @@ -5027,7 +5097,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) void intel_edp_drrs_enable(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_crtc *crtc = dig_port->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -5059,7 +5129,7 @@ unlock: void intel_edp_drrs_disable(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_crtc *crtc = dig_port->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -5074,9 +5144,9 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp) } if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) - intel_dp_set_drrs_state(dev_priv->dev, - intel_dp->attached_connector->panel. - fixed_mode->vrefresh); + intel_dp_set_drrs_state(&dev_priv->drm, + intel_dp->attached_connector->panel. + fixed_mode->vrefresh); dev_priv->drrs.dp = NULL; mutex_unlock(&dev_priv->drrs.mutex); @@ -5106,9 +5176,9 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work) goto unlock; if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) - intel_dp_set_drrs_state(dev_priv->dev, - intel_dp->attached_connector->panel. - downclock_mode->vrefresh); + intel_dp_set_drrs_state(&dev_priv->drm, + intel_dp->attached_connector->panel. 
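On BXT the T11/T12 power-cycle delay lives in the control register rather than a divisor register, encoded in whole seconds with a +1 bias; the DIV_ROUND_UP() above and the (tmp - 1) * 1000 in the readout path are the two halves of that conversion. Round-trip sketch, milliseconds on the software side as in the hunks:

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static u32 bxt_t11_t12_to_hw(u32 ms)
{
	return DIV_ROUND_UP(ms + 1, 1000);	/* e.g. 5000 ms -> field value 6 */
}

static u32 bxt_t11_t12_from_hw(u32 field)
{
	return field ? (field - 1) * 1000 : 0;	/* 6 -> 5000 ms */
}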
+ downclock_mode->vrefresh); unlock: mutex_unlock(&dev_priv->drrs.mutex); @@ -5127,7 +5197,7 @@ unlock: void intel_edp_drrs_invalidate(struct drm_device *dev, unsigned frontbuffer_bits) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; enum pipe pipe; @@ -5150,9 +5220,9 @@ void intel_edp_drrs_invalidate(struct drm_device *dev, /* invalidate means busy screen hence upclock */ if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) - intel_dp_set_drrs_state(dev_priv->dev, - dev_priv->drrs.dp->attached_connector->panel. - fixed_mode->vrefresh); + intel_dp_set_drrs_state(&dev_priv->drm, + dev_priv->drrs.dp->attached_connector->panel. + fixed_mode->vrefresh); mutex_unlock(&dev_priv->drrs.mutex); } @@ -5172,7 +5242,7 @@ void intel_edp_drrs_invalidate(struct drm_device *dev, void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; enum pipe pipe; @@ -5195,9 +5265,9 @@ void intel_edp_drrs_flush(struct drm_device *dev, /* flush means busy screen hence upclock */ if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) - intel_dp_set_drrs_state(dev_priv->dev, - dev_priv->drrs.dp->attached_connector->panel. - fixed_mode->vrefresh); + intel_dp_set_drrs_state(&dev_priv->drm, + dev_priv->drrs.dp->attached_connector->panel. + fixed_mode->vrefresh); /* * flush also means no more activity hence schedule downclock, if all @@ -5265,7 +5335,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector, { struct drm_connector *connector = &intel_connector->base; struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_display_mode *downclock_mode = NULL; INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); @@ -5303,7 +5373,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_device *dev = intel_encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_display_mode *fixed_mode = NULL; struct drm_display_mode *downclock_mode = NULL; bool has_dpcd; @@ -5314,8 +5384,32 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (!is_edp(intel_dp)) return true; + /* + * On IBX/CPT we may get here with LVDS already registered. Since the + * driver uses the only internal power sequencer available for both + * eDP and LVDS bail out early in this case to prevent interfering + * with an already powered-on LVDS power sequencer. + */ + if (intel_get_lvds_encoder(dev)) { + WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); + DRM_INFO("LVDS was detected, not registering eDP\n"); + + return false; + } + pps_lock(intel_dp); + + intel_dp_init_panel_power_timestamps(intel_dp); + + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + vlv_initial_power_sequencer_setup(intel_dp); + } else { + intel_dp_init_panel_power_sequencer(dev, intel_dp); + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); + } + intel_edp_panel_vdd_sanitize(intel_dp); + pps_unlock(intel_dp); /* Cache DPCD and EDID for edp. 
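intel_edp_drrs_invalidate() and intel_edp_drrs_flush() share one rule visible in these hunks: any frontbuffer activity on the DRRS pipe forces the panel back to its fixed (full) refresh rate, and only the flush path re-arms the downclock work afterwards. The fragment common to both paths, condensed:

/* fragment, not a standalone function; names as in the hunks above */
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

/* busy screen -> upclock immediately */
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
	intel_dp_set_drrs_state(&dev_priv->drm,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);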
*/ @@ -5329,14 +5423,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, } else { /* if this fails, presume the device is a ghost */ DRM_INFO("failed to retrieve link info, disabling eDP\n"); - return false; + goto out_vdd_off; } - /* We now know it's not a ghost, init power sequence regs. */ - pps_lock(intel_dp); - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); - pps_unlock(intel_dp); - mutex_lock(&dev->mode_config.mutex); edid = drm_get_edid(connector, &intel_dp->aux.ddc); if (edid) { @@ -5404,6 +5493,18 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, intel_panel_setup_backlight(connector, pipe); return true; + +out_vdd_off: + cancel_delayed_work_sync(&intel_dp->panel_vdd_work); + /* + * vdd might still be enabled due to the delayed vdd off. + * Make sure vdd is actually turned off here. + */ + pps_lock(intel_dp); + edp_panel_vdd_off_sync(intel_dp); + pps_unlock(intel_dp); + + return false; } bool @@ -5414,9 +5515,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, struct intel_dp *intel_dp = &intel_dig_port->dp; struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_device *dev = intel_encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dig_port->port; - int type, ret; + int type; if (WARN(intel_dig_port->max_lanes < 1, "Not enough lanes (%d) for DP on port %c\n", @@ -5475,11 +5576,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, connector->interlace_allowed = true; connector->doublescan_allowed = 0; + intel_dp_aux_init(intel_dp, intel_connector); + INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work); intel_connector_attach_encoder(intel_connector, intel_encoder); - drm_connector_register(connector); if (HAS_DDI(dev)) intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; @@ -5509,22 +5611,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, BUG(); } - if (is_edp(intel_dp)) { - pps_lock(intel_dp); - intel_dp_init_panel_power_timestamps(intel_dp); - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) - vlv_initial_power_sequencer_setup(intel_dp); - else - intel_dp_init_panel_power_sequencer(dev, intel_dp); - pps_unlock(intel_dp); - } - - ret = intel_dp_aux_init(intel_dp, intel_connector); - if (ret) - goto fail; - /* init MST on ports that can support it */ - if (HAS_DP_MST(dev) && + if (HAS_DP_MST(dev) && !is_edp(intel_dp) && (port == PORT_B || port == PORT_C || port == PORT_D)) intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id); @@ -5546,22 +5634,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); } - i915_debugfs_connector_add(connector); - return true; fail: - if (is_edp(intel_dp)) { - cancel_delayed_work_sync(&intel_dp->panel_vdd_work); - /* - * vdd might still be enabled do to the delayed vdd off. - * Make sure vdd is actually turned off here.
- */ - pps_lock(intel_dp); - edp_panel_vdd_off_sync(intel_dp); - pps_unlock(intel_dp); - } - drm_connector_unregister(connector); drm_connector_cleanup(connector); return false; @@ -5571,7 +5646,7 @@ bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; @@ -5619,7 +5694,7 @@ bool intel_dp_init(struct drm_device *dev, intel_dig_port->dp.output_reg = output_reg; intel_dig_port->max_lanes = 4; - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + intel_encoder->type = INTEL_OUTPUT_DP; if (IS_CHERRYVIEW(dev)) { if (port == PORT_D) intel_encoder->crtc_mask = 1 << 2; @@ -5649,43 +5724,35 @@ err_connector_alloc: void intel_dp_mst_suspend(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; /* disable MST */ for (i = 0; i < I915_MAX_PORTS; i++) { struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i]; - if (!intel_dig_port) + + if (!intel_dig_port || !intel_dig_port->dp.can_mst) continue; - if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) { - if (!intel_dig_port->dp.can_mst) - continue; - if (intel_dig_port->dp.is_mst) - drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr); - } + if (intel_dig_port->dp.is_mst) + drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr); } } void intel_dp_mst_resume(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int i; for (i = 0; i < I915_MAX_PORTS; i++) { struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i]; - if (!intel_dig_port) - continue; - if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) { - int ret; + int ret; - if (!intel_dig_port->dp.can_mst) - continue; + if (!intel_dig_port || !intel_dig_port->dp.can_mst) + continue; - ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr); - if (ret != 0) { - intel_dp_check_mst_status(&intel_dig_port->dp); - } - } + ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr); + if (ret) + intel_dp_check_mst_status(&intel_dig_port->dp); } } diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 9646816604be..68a005d729e9 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -47,7 +47,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, pipe_config->dp_encoder_is_mst = true; pipe_config->has_pch_encoder = false; - pipe_config->has_dp_encoder = true; bpp = 24; /* * for MST we always configure max link bw - the spec doesn't @@ -140,7 +139,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder) struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dig_port->port; int ret; uint32_t temp; @@ -207,14 +206,17 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder) struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = 
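The MST suspend/resume rewrite above flattens two levels of nesting into a single guard: dp.can_mst is only ever set on DP-capable digital ports, so testing it directly makes the old encoder-type check redundant. The resulting loop shape (suspend side shown; resume mirrors it with drm_dp_mst_topology_mgr_resume()):

for (i = 0; i < I915_MAX_PORTS; i++) {
	struct intel_digital_port *intel_dig_port =
		dev_priv->hotplug.irq_port[i];

	if (!intel_dig_port || !intel_dig_port->dp.can_mst)
		continue;

	if (intel_dig_port->dp.is_mst)
		drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
}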
dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dig_port->port; int ret; DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); - if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT), - 1)) + if (intel_wait_for_register(dev_priv, + DP_TP_STATUS(port), + DP_TP_STATUS_ACT_SENT, + DP_TP_STATUS_ACT_SENT, + 1)) DRM_ERROR("Timed out waiting for ACT sent\n"); ret = drm_dp_check_act_status(&intel_dp->mst_mgr); @@ -239,12 +241,10 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; u32 temp, flags = 0; - pipe_config->has_dp_encoder = true; - temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); if (temp & TRANS_DDI_PHSYNC) flags |= DRM_MODE_FLAG_PHSYNC; @@ -336,6 +336,7 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_dp_mst_set_property, .atomic_get_property = intel_connector_atomic_get_property, + .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_dp_mst_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, @@ -477,9 +478,11 @@ static void intel_dp_register_mst_connector(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_device *dev = connector->dev; + drm_modeset_lock_all(dev); intel_connector_add_to_fbdev(intel_connector); drm_modeset_unlock_all(dev); + drm_connector_register(&intel_connector->base); } diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c index 288da35572b4..047f48748944 100644 --- a/drivers/gpu/drm/i915/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/intel_dpio_phy.c @@ -168,7 +168,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder) { struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); enum dpio_channel ch = vlv_dport_to_channel(dport); @@ -250,7 +250,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder) struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_digital_port *dport = dp_to_dig_port(intel_dp); struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); enum dpio_channel ch = vlv_dport_to_channel(dport); @@ -400,7 +400,7 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder) { struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); enum dpio_channel port = vlv_dport_to_channel(dport); @@ -429,7 +429,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder) struct intel_dp *intel_dp = 
enc_to_intel_dp(&encoder->base); struct intel_digital_port *dport = dp_to_dig_port(intel_dp); struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); enum dpio_channel port = vlv_dport_to_channel(dport); int pipe = intel_crtc->pipe; @@ -457,7 +457,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder) void vlv_phy_reset_lanes(struct intel_encoder *encoder) { struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); enum dpio_channel port = vlv_dport_to_channel(dport); diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index c0eff1571731..5c1f2d235ffa 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -83,7 +83,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, void intel_prepare_shared_dpll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_shared_dpll *pll = crtc->config->shared_dpll; if (WARN_ON(pll == NULL)) @@ -112,7 +112,7 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc) void intel_enable_shared_dpll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_shared_dpll *pll = crtc->config->shared_dpll; unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base); unsigned old_mask; @@ -151,7 +151,7 @@ out: void intel_disable_shared_dpll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_shared_dpll *pll = crtc->config->shared_dpll; unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base); @@ -191,7 +191,7 @@ intel_find_shared_dpll(struct intel_crtc *crtc, enum intel_dpll_id range_min, enum intel_dpll_id range_max) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; struct intel_shared_dpll_config *shared_dpll; enum intel_dpll_id i; @@ -331,7 +331,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct intel_crtc *crtc; /* Make sure no transcoder isn't still depending on us. 
*/ @@ -713,7 +713,7 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, pll = intel_find_shared_dpll(crtc, crtc_state, DPLL_ID_WRPLL1, DPLL_ID_WRPLL2); - } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || + } else if (encoder->type == INTEL_OUTPUT_DP || encoder->type == INTEL_OUTPUT_DP_MST || encoder->type == INTEL_OUTPUT_EDP) { enum intel_dpll_id pll_id; @@ -856,7 +856,11 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, I915_WRITE(regs[pll->id].ctl, I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE); - if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(pll->id), 5)) + if (intel_wait_for_register(dev_priv, + DPLL_STATUS, + DPLL_LOCK(pll->id), + DPLL_LOCK(pll->id), + 5)) DRM_ERROR("DPLL %d not locked\n", pll->id); } @@ -1222,7 +1226,7 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | wrpll_params.central_freq; - } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || + } else if (encoder->type == INTEL_OUTPUT_DP || encoder->type == INTEL_OUTPUT_DP_MST || encoder->type == INTEL_OUTPUT_EDP) { switch (crtc_state->port_clock / 2) { @@ -1374,8 +1378,8 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); POSTING_READ(BXT_PORT_PLL_ENABLE(port)); - if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & - PORT_PLL_LOCK), 200)) + if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK), + 200)) DRM_ERROR("PLL %d not locked\n", port); /* @@ -1530,7 +1534,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, clk_div.m2_frac_en = clk_div.m2_frac != 0; vco = best_clock.vco; - } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || + } else if (encoder->type == INTEL_OUTPUT_DP || encoder->type == INTEL_OUTPUT_EDP) { int i; @@ -1632,7 +1636,7 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = { static void intel_ddi_pll_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (INTEL_GEN(dev_priv) < 9) { uint32_t val = I915_READ(LCPLL_CTL); @@ -1719,7 +1723,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = { void intel_shared_dpll_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const struct intel_dpll_mgr *dpll_mgr = NULL; const struct dpll_info *dpll_info; int i; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 089a42577ea3..55aeaf041749 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -69,39 +69,63 @@ }) #define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000) -#define wait_for_us(COND, US) _wait_for((COND), (US), 1) /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. 
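Several hunks here convert open-coded wait_for(I915_READ(reg) & mask, timeout) polls to the new intel_wait_for_register(dev_priv, reg, mask, value, timeout) helper, which succeeds once (read(reg) & mask) == value. A rough user-space sketch of such a helper, assuming a caller-supplied register-read callback (the callback type and plain -1 return are illustrative, not the driver's API):

#include <stdint.h>
#include <time.h>

static uint64_t monotonic_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000ull + ts.tv_nsec / 1000000;
}

/* Poll until (rd(reg) & mask) == value or the timeout expires. */
static int wait_for_register(uint32_t (*rd)(void *ctx, uint32_t reg),
			     void *ctx, uint32_t reg,
			     uint32_t mask, uint32_t value,
			     unsigned int timeout_ms)
{
	uint64_t deadline = monotonic_ms() + timeout_ms;

	do {
		if ((rd(ctx, reg) & mask) == value)
			return 0;
	} while (monotonic_ms() <= deadline);

	return -1;	/* the kernel helper returns -ETIMEDOUT */
}

Passing mask == value waits for bits to be set, and value == 0 waits for them to clear, which is how the DP_TP_STATUS, DPLL_STATUS and MIPI conversions in this series use it.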
*/ #if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) -# define _WAIT_FOR_ATOMIC_CHECK WARN_ON_ONCE(!in_atomic()) +# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic()) #else -# define _WAIT_FOR_ATOMIC_CHECK do { } while (0) +# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0) #endif -#define _wait_for_atomic(COND, US) ({ \ - unsigned long end__; \ - int ret__ = 0; \ - _WAIT_FOR_ATOMIC_CHECK; \ +#define _wait_for_atomic(COND, US, ATOMIC) \ +({ \ + int cpu, ret, timeout = (US) * 1000; \ + u64 base; \ + _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \ BUILD_BUG_ON((US) > 50000); \ - end__ = (local_clock() >> 10) + (US) + 1; \ - while (!(COND)) { \ - if (time_after((unsigned long)(local_clock() >> 10), end__)) { \ - /* Unlike the regular wait_for(), this atomic variant \ - * cannot be preempted (and we'll just ignore the issue\ - * of irq interruptions) and so we know that no time \ - * has passed since the last check of COND and can \ - * immediately report the timeout. \ - */ \ - ret__ = -ETIMEDOUT; \ + if (!(ATOMIC)) { \ + preempt_disable(); \ + cpu = smp_processor_id(); \ + } \ + base = local_clock(); \ + for (;;) { \ + u64 now = local_clock(); \ + if (!(ATOMIC)) \ + preempt_enable(); \ + if (COND) { \ + ret = 0; \ + break; \ + } \ + if (now - base >= timeout) { \ + ret = -ETIMEDOUT; \ break; \ } \ cpu_relax(); \ + if (!(ATOMIC)) { \ + preempt_disable(); \ + if (unlikely(cpu != smp_processor_id())) { \ + timeout -= now - base; \ + cpu = smp_processor_id(); \ + base = local_clock(); \ + } \ + } \ } \ + ret; \ +}) + +#define wait_for_us(COND, US) \ +({ \ + int ret__; \ + BUILD_BUG_ON(!__builtin_constant_p(US)); \ + if ((US) > 10) \ + ret__ = _wait_for((COND), (US), 10); \ + else \ + ret__ = _wait_for_atomic((COND), (US), 0); \ ret__; \ }) -#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000) -#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US)) +#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000, 1) +#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US), 1) #define KHz(x) (1000 * (x)) #define MHz(x) KHz(1000 * (x)) @@ -135,7 +159,7 @@ enum intel_output_type { INTEL_OUTPUT_LVDS = 4, INTEL_OUTPUT_TVOUT = 5, INTEL_OUTPUT_HDMI = 6, - INTEL_OUTPUT_DISPLAYPORT = 7, + INTEL_OUTPUT_DP = 7, INTEL_OUTPUT_EDP = 8, INTEL_OUTPUT_DSI = 9, INTEL_OUTPUT_UNKNOWN = 10, @@ -159,6 +183,7 @@ struct intel_framebuffer { struct intel_fbdev { struct drm_fb_helper helper; struct intel_framebuffer *fb; + async_cookie_t cookie; int preferred_bpp; }; @@ -497,12 +522,10 @@ struct intel_crtc_state { */ bool limited_color_range; - /* DP has a bunch of special case unfortunately, so mark the pipe - * accordingly. */ - bool has_dp_encoder; - - /* DSI has special cases */ - bool has_dsi_encoder; + /* Bitmask of encoder types (enum intel_output_type) + * driven by the pipe. + */ + unsigned int output_types; /* Whether we should send NULL infoframes. Required for audio. */ bool has_hdmi_sink; @@ -861,6 +884,11 @@ struct intel_dp { * this port. Only relevant on VLV/CHV. */ enum pipe pps_pipe; + /* + * Set if the sequencer may be reset due to a power transition, + * requiring a reinitialization. Only relevant on BXT. 
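The rewritten _wait_for_atomic() no longer trusts a single local_clock() timebase: local_clock() is per-CPU, so in the preemptible case the macro samples it with preemption disabled and, when it detects that the task migrated between samples, bills the time already spent against the timeout and rebases the clock on the new CPU. A stand-alone sketch of that bookkeeping, with this_cpu()/cpu_clock() as stand-ins for smp_processor_id()/local_clock() and the preemption control omitted:

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for the kernel's smp_processor_id()/local_clock(). */
extern int this_cpu(void);
extern uint64_t cpu_clock(int cpu);

static int busy_wait(bool (*cond)(void), uint64_t timeout_ns)
{
	int cpu = this_cpu();
	uint64_t base = cpu_clock(cpu);

	for (;;) {
		uint64_t now = cpu_clock(cpu);

		if (cond())
			return 0;
		if (now - base >= timeout_ns)
			return -1;		/* timed out */
		if (this_cpu() != cpu) {
			/*
			 * Migrated: clocks from different CPUs are not
			 * comparable, so charge the elapsed time and
			 * restart the timebase on the new CPU.
			 */
			timeout_ns -= now - base;
			cpu = this_cpu();
			base = cpu_clock(cpu);
		}
	}
}

The split wait_for_us() then routes waits longer than 10us to the sleeping variant and keeps only the short ones spinning.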
+ */ + bool pps_reset; struct edp_power_seq pps_delays; bool can_mst; /* this port supports mst */ @@ -957,14 +985,14 @@ vlv_pipe_to_channel(enum pipe pipe) static inline struct drm_crtc * intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); return dev_priv->pipe_to_crtc_mapping[pipe]; } static inline struct drm_crtc * intel_get_crtc_for_plane(struct drm_device *dev, int plane) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); return dev_priv->plane_to_crtc_mapping[plane]; } @@ -1157,7 +1185,20 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file_priv); enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe); -bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type); +static inline bool +intel_crtc_has_type(const struct intel_crtc_state *crtc_state, + enum intel_output_type type) +{ + return crtc_state->output_types & (1 << type); +} +static inline bool +intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->output_types & + ((1 << INTEL_OUTPUT_DP) | + (1 << INTEL_OUTPUT_DP_MST) | + (1 << INTEL_OUTPUT_EDP)); +} static inline void intel_wait_for_vblank(struct drm_device *dev, int pipe) { @@ -1338,7 +1379,7 @@ void intel_dp_mst_resume(struct drm_device *dev); int intel_dp_max_link_rate(struct intel_dp *intel_dp); int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); void intel_dp_hot_plug(struct intel_encoder *intel_encoder); -void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv); +void intel_power_sequencer_reset(struct drm_i915_private *dev_priv); uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); void intel_plane_destroy(struct drm_plane *plane); void intel_edp_drrs_enable(struct intel_dp *intel_dp); @@ -1451,6 +1492,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); /* intel_lvds.c */ void intel_lvds_init(struct drm_device *dev); +struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev); bool intel_is_dual_link_lvds(struct drm_device *dev); @@ -1489,7 +1531,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc, int fitting_mode); void intel_panel_set_backlight_acpi(struct intel_connector *connector, u32 level, u32 max); -int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe); +int intel_panel_setup_backlight(struct drm_connector *connector, + enum pipe pipe); void intel_panel_enable_backlight(struct intel_connector *connector); void intel_panel_disable_backlight(struct intel_connector *connector); void intel_panel_destroy_backlight(struct drm_connector *connector); @@ -1498,11 +1541,15 @@ extern struct drm_display_mode *intel_find_panel_downclock( struct drm_device *dev, struct drm_display_mode *fixed_mode, struct drm_connector *connector); -void intel_backlight_register(struct drm_device *dev); #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) +int intel_backlight_device_register(struct intel_connector *connector); void intel_backlight_device_unregister(struct intel_connector *connector); #else /* CONFIG_BACKLIGHT_CLASS_DEVICE */ +static int intel_backlight_device_register(struct intel_connector *connector) +{ + return 0; +} static inline void intel_backlight_device_unregister(struct intel_connector *connector) { } diff --git a/drivers/gpu/drm/i915/intel_dsi.c 
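The per-type booleans (has_dp_encoder, has_dsi_encoder) that later hunks delete are replaced by the output_types bitmask plus the two inlines above: one bit per enum intel_output_type, so a combined DP/MST/eDP test becomes a single mask check. Reduced to a toy (enum values arbitrary, not the driver's):

enum output_type { OUT_DP, OUT_EDP, OUT_DP_MST, OUT_DSI };

struct crtc_state {
	unsigned int output_types;	/* bitmask of enum output_type */
};

static inline int has_type(const struct crtc_state *s, enum output_type t)
{
	return s->output_types & (1u << t);
}

static inline int has_any_dp(const struct crtc_state *s)
{
	return s->output_types &
	       ((1u << OUT_DP) | (1u << OUT_DP_MST) | (1u << OUT_EDP));
}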
b/drivers/gpu/drm/i915/intel_dsi.c index b444d0e35a98..de8e9fb51595 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -84,13 +84,15 @@ static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port) { struct drm_encoder *encoder = &intel_dsi->base.base; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 mask; mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY | LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY; - if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100)) + if (intel_wait_for_register(dev_priv, + MIPI_GEN_FIFO_STAT(port), mask, mask, + 100)) DRM_ERROR("DPI FIFOs are not empty\n"); } @@ -129,7 +131,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, { struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host); struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dsi_host->port; struct mipi_dsi_packet packet; ssize_t ret; @@ -158,8 +160,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, /* note: this is never true for reads */ if (packet.payload_length) { - - if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & data_mask) == 0, 50)) + if (intel_wait_for_register(dev_priv, + MIPI_GEN_FIFO_STAT(port), + data_mask, 0, + 50)) DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n"); write_data(dev_priv, data_reg, packet.payload, @@ -170,7 +174,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL); } - if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & ctrl_mask) == 0, 50)) { + if (intel_wait_for_register(dev_priv, + MIPI_GEN_FIFO_STAT(port), + ctrl_mask, 0, + 50)) { DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n"); } @@ -179,7 +186,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, /* ->rx_len is set only for reads */ if (msg->rx_len) { data_mask = GEN_READ_DATA_AVAIL; - if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & data_mask) == data_mask, 50)) + if (intel_wait_for_register(dev_priv, + MIPI_INTR_STAT(port), + data_mask, data_mask, + 50)) DRM_ERROR("Timeout waiting for read data.\n"); read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len); @@ -250,7 +260,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs, { struct drm_encoder *encoder = &intel_dsi->base.base; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 mask; /* XXX: pipe, hs */ @@ -269,7 +279,9 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs, I915_WRITE(MIPI_DPI_CONTROL(port), cmd); mask = SPL_PKT_SENT_INTERRUPT; - if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 100)) + if (intel_wait_for_register(dev_priv, + MIPI_INTR_STAT(port), mask, mask, + 100)) DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd); return 0; @@ -302,7 +314,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi) static bool intel_dsi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, base); 
struct intel_connector *intel_connector = intel_dsi->attached_connector; @@ -313,8 +325,6 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, DRM_DEBUG_KMS("\n"); - pipe_config->has_dsi_encoder = true; - if (fixed_mode) { intel_fixed_panel_mode(fixed_mode, adjusted_mode); @@ -348,7 +358,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, static void bxt_dsi_device_ready(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; u32 val; @@ -387,7 +397,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder) static void vlv_dsi_device_ready(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; u32 val; @@ -437,7 +447,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder) static void intel_dsi_port_enable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; @@ -478,7 +488,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder) static void intel_dsi_port_disable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; @@ -497,7 +507,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder) static void intel_dsi_enable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; @@ -528,7 +538,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder); static void intel_dsi_pre_enable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); enum port port; @@ -602,7 +612,7 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder) static void intel_dsi_disable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; u32 temp; @@ -641,7 +651,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder) static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; @@ -667,8 +677,9 @@ static void 
intel_dsi_clear_device_ready(struct intel_encoder *encoder) /* Wait till Clock lanes are in LP-00 state for MIPI Port A * only. MIPI Port C has no similar bit for checking */ - if (wait_for(((I915_READ(port_ctrl) & AFE_LATCHOUT) - == 0x00000), 30)) + if (intel_wait_for_register(dev_priv, + port_ctrl, AFE_LATCHOUT, 0, + 30)) DRM_ERROR("DSI LP not going Low\n"); /* Disable MIPI PHY transparent latch */ @@ -685,7 +696,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) static void intel_dsi_post_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); DRM_DEBUG_KMS("\n"); @@ -720,7 +731,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder) static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct drm_device *dev = encoder->base.dev; enum intel_display_power_domain power_domain; @@ -794,7 +805,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; struct drm_display_mode *adjusted_mode_sw; @@ -954,8 +965,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, u32 pclk; DRM_DEBUG_KMS("\n"); - pipe_config->has_dsi_encoder = true; - if (IS_BROXTON(dev)) bxt_dsi_get_pipe_config(encoder, pipe_config); @@ -1013,7 +1022,7 @@ static void set_dsi_timings(struct drm_encoder *encoder, const struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); @@ -1099,7 +1108,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) { struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; @@ -1390,6 +1399,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs static const struct drm_connector_funcs intel_dsi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = intel_dsi_detect, + .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_dsi_connector_destroy, .fill_modes = drm_helper_probe_single_connector_modes, @@ -1420,7 +1430,7 @@ void intel_dsi_init(struct drm_device *dev) struct intel_connector *intel_connector; struct drm_connector *connector; struct drm_display_mode *scan, *fixed_mode = NULL; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port; unsigned int i; @@ 
-1587,13 +1597,10 @@ void intel_dsi_init(struct drm_device *dev) connector->display_info.height_mm = fixed_mode->height_mm; intel_panel_init(&intel_connector->panel, fixed_mode, NULL); + intel_panel_setup_backlight(connector, INVALID_PIPE); intel_dsi_add_properties(intel_connector); - drm_connector_register(connector); - - intel_panel_setup_backlight(connector, INVALID_PIPE); - return; err: diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c index f0dc427743f8..ac7c6020c443 100644 --- a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c @@ -159,7 +159,7 @@ static int dcs_setup_backlight(struct intel_connector *connector, int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector) { struct drm_device *dev = intel_connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder = intel_connector->encoder; struct intel_panel *panel = &intel_connector->panel; diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index f122484bedfc..cd154ce6b6c1 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -303,7 +303,7 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv, static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) { struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u8 gpio_source, gpio_index; bool value; @@ -469,7 +469,7 @@ static int vbt_panel_get_modes(struct drm_panel *panel) struct vbt_panel *vbt_panel = to_vbt_panel(panel); struct intel_dsi *intel_dsi = vbt_panel->intel_dsi; struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_display_mode *mode; if (!panel->connector) @@ -497,7 +497,7 @@ static const struct drm_panel_funcs vbt_panel_funcs = { struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) { struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps; struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode; @@ -649,14 +649,13 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) ); /* - * Exit zero is unified val ths_zero and ths_exit + * Exit zero is unified val ths_zero and ths_exit * minimum value for ths_exit = 110ns * min (exit_zero_cnt * 2) = 110/UI * exit_zero_cnt = 55/UI */ - if (exit_zero_cnt < (55 * ui_den / ui_num)) - if ((55 * ui_den) % ui_num) - exit_zero_cnt += 1; + if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num) + exit_zero_cnt += 1; /* clk zero count */ clk_zero_cnt = DIV_ROUND_UP( diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index 1765e6e18f2c..6ab58a01b18e 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c @@ -55,12 +55,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, struct intel_crtc_state *config, int target_dsi_clk) { - unsigned int calc_m = 0, calc_p = 0; unsigned int m_min, m_max, p_min = 2, p_max = 6; 
unsigned int m, n, p; - int ref_clk; - int delta = target_dsi_clk; - u32 m_seed; + unsigned int calc_m, calc_p; + int delta, ref_clk; /* target_dsi_clk is expected in kHz */ if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) { @@ -80,6 +78,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, m_max = 92; } + calc_p = p_min; + calc_m = m_min; + delta = abs(target_dsi_clk - (m_min * ref_clk) / (p_min * n)); + for (m = m_min; m <= m_max && delta; m++) { for (p = p_min; p <= p_max && delta; p++) { /* @@ -97,11 +99,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, } /* register has log2(N1), this works fine for powers of two */ - n = ffs(n) - 1; - m_seed = lfsr_converts[calc_m - 62]; config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2); - config->dsi_pll.div = n << DSI_PLL_N1_DIV_SHIFT | - m_seed << DSI_PLL_M1_DIV_SHIFT; + config->dsi_pll.div = + (ffs(n) - 1) << DSI_PLL_N1_DIV_SHIFT | + (u32)lfsr_converts[calc_m - 62] << DSI_PLL_M1_DIV_SHIFT; return 0; } @@ -113,7 +114,7 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, static int vlv_compute_dsi_pll(struct intel_encoder *encoder, struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); int ret; u32 dsi_clk; @@ -234,8 +235,11 @@ static void bxt_disable_dsi_pll(struct intel_encoder *encoder) * PLL lock should deassert within 200us. * Wait up to 1ms before timing out. */ - if (wait_for((I915_READ(BXT_DSI_PLL_ENABLE) - & BXT_DSI_PLL_LOCKED) == 0, 1)) + if (intel_wait_for_register(dev_priv, + BXT_DSI_PLL_ENABLE, + BXT_DSI_PLL_LOCKED, + 0, + 1)) DRM_ERROR("Timeout waiting for PLL lock deassertion\n"); } @@ -321,7 +325,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, u32 dsi_clk; u32 dsi_ratio; struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); /* Divide by zero */ if (!pipe_bpp) { @@ -356,7 +360,7 @@ u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { u32 temp; - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); temp = I915_READ(MIPI_CTRL(port)); @@ -370,7 +374,7 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port, const struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp; u32 dsi_rate = 0; u32 pll_ratio = 0; @@ -465,7 +469,7 @@ static int bxt_compute_dsi_pll(struct intel_encoder *encoder, static void bxt_enable_dsi_pll(struct intel_encoder *encoder, const struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; u32 val; @@ -486,7 +490,11 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder, I915_WRITE(BXT_DSI_PLL_ENABLE, val); /* Timeout and fail if PLL not locked */ - if 
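The dsi_calc_mnp() rework above also fixes the search seed: delta and the best (calc_m, calc_p) pair are now initialised from the first real candidate (m_min, p_min) before the loop, rather than starting delta at target_dsi_clk with calc_m/calc_p zero, so the pre-loop state is already a valid candidate instead of a placeholder. The essential search, as a self-contained sketch with a fixed n:

#include <stdlib.h>

/*
 * Brute-force the (m, p) divider pair whose m * ref / (p * n) lands
 * closest to target; stops early on an exact match, as the driver does.
 */
static void calc_mp(int target, int ref, int n,
		    int m_min, int m_max, int p_min, int p_max,
		    int *best_m, int *best_p)
{
	int m, p;
	int delta = abs(target - (m_min * ref) / (p_min * n));

	*best_m = m_min;	/* seed with the first candidate */
	*best_p = p_min;

	for (m = m_min; m <= m_max && delta; m++) {
		for (p = p_min; p <= p_max && delta; p++) {
			int d = abs(target - (m * ref) / (p * n));

			if (d < delta) {
				delta = d;
				*best_m = m;
				*best_p = p;
			}
		}
	}
}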
(wait_for(I915_READ(BXT_DSI_PLL_ENABLE) & BXT_DSI_PLL_LOCKED, 1)) { + if (intel_wait_for_register(dev_priv, + BXT_DSI_PLL_ENABLE, + BXT_DSI_PLL_LOCKED, + BXT_DSI_PLL_LOCKED, + 1)) { DRM_ERROR("Timed out waiting for DSI PLL to lock\n"); return; } @@ -542,7 +550,7 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { u32 tmp; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* Clear old configurations */ tmp = I915_READ(BXT_MIPI_CLOCK_CTL); diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 60e4ddf2ec6d..47bdf9dad0d3 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -122,7 +122,7 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) { struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base); u32 tmp; @@ -138,7 +138,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); u32 tmp; @@ -155,7 +155,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder, static void intel_dvo_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); u32 tmp, flags = 0; @@ -176,7 +176,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder, static void intel_disable_dvo(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg; u32 temp = I915_READ(dvo_reg); @@ -188,7 +188,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder) static void intel_enable_dvo(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg; @@ -256,7 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder, static void intel_dvo_pre_enable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; struct intel_dvo *intel_dvo = enc_to_dvo(encoder); @@ -305,7 +305,7 @@ intel_dvo_detect(struct drm_connector *connector, bool force) static int intel_dvo_get_modes(struct drm_connector *connector) { - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); const struct drm_display_mode 
*fixed_mode = to_intel_connector(connector)->panel.fixed_mode; @@ -341,6 +341,7 @@ static void intel_dvo_destroy(struct drm_connector *connector) static const struct drm_connector_funcs intel_dvo_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = intel_dvo_detect, + .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_dvo_destroy, .fill_modes = drm_helper_probe_single_connector_modes, @@ -378,7 +379,7 @@ static struct drm_display_mode * intel_dvo_get_current_mode(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dvo *intel_dvo = intel_attached_dvo(connector); uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg); struct drm_display_mode *mode = NULL; @@ -420,7 +421,7 @@ static char intel_dvo_port_name(i915_reg_t dvo_reg) void intel_dvo_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *intel_encoder; struct intel_dvo *intel_dvo; struct intel_connector *intel_connector; @@ -550,7 +551,6 @@ void intel_dvo_init(struct drm_device *dev) intel_dvo->panel_wants_dither = true; } - drm_connector_register(connector); return; } diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index a19944b6dc25..6a7ad3ed1463 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -124,7 +124,9 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) I915_WRITE(FBC_CONTROL, fbc_ctl); /* Wait for compressing bit to clear */ - if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { + if (intel_wait_for_register(dev_priv, + FBC_STATUS, FBC_STAT_COMPRESSING, 0, + 10)) { DRM_DEBUG_KMS("FBC idle timed out\n"); return; } @@ -390,7 +392,7 @@ static void intel_fbc_work_fn(struct work_struct *__work) struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_work *work = &fbc->work; struct intel_crtc *crtc = fbc->crtc; - struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe]; + struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe]; if (drm_crtc_vblank_get(&crtc->base)) { DRM_ERROR("vblank not available for FBC on pipe %c\n", @@ -443,7 +445,7 @@ out: static void intel_fbc_schedule_activation(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_work *work = &fbc->work; @@ -553,7 +555,7 @@ again: static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; struct drm_mm_node *uninitialized_var(compressed_llb); int size, fb_cpp, ret; @@ -684,7 +686,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, */ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; unsigned int effective_w, effective_h, max_w, max_h; @@ -711,7 +713,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct intel_plane_state 
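A theme running through these hunks: the explicit drm_connector_register() calls at init time (and the matching unregisters in failure paths) go away, and connectors instead point drm_connector_funcs.late_register at intel_connector_register, as the DVO and DSI connector funcs above now do. The DRM core invokes the hook while registering the whole device, so userspace-visible registration happens only once the device is ready. The shape of the hookup, assuming the 4.7-era header layout and hypothetical example_* names:

#include <drm/drm_crtc.h>

static int example_connector_late_register(struct drm_connector *connector)
{
	/* create userspace interfaces: sysfs/debugfs, backlight, ... */
	return 0;
}

static void example_connector_early_unregister(struct drm_connector *connector)
{
	/* tear the same interfaces down before the device goes away */
}

static const struct drm_connector_funcs example_connector_funcs = {
	/* ...detect, fill_modes, destroy, etc... */
	.late_register = example_connector_late_register,
	.early_unregister = example_connector_early_unregister,
};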
*plane_state) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_state_cache *cache = &fbc->state_cache; struct drm_framebuffer *fb = plane_state->base.fb; @@ -744,7 +746,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, static bool intel_fbc_can_activate(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_state_cache *cache = &fbc->state_cache; @@ -816,22 +818,16 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) static bool intel_fbc_can_choose(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; - bool enable_by_default = IS_BROADWELL(dev_priv); if (intel_vgpu_active(dev_priv)) { fbc->no_fbc_reason = "VGPU is active"; return false; } - if (i915.enable_fbc < 0 && !enable_by_default) { - fbc->no_fbc_reason = "disabled per chip default"; - return false; - } - if (!i915.enable_fbc) { - fbc->no_fbc_reason = "disabled per module param"; + fbc->no_fbc_reason = "disabled per module param or by default"; return false; } @@ -851,7 +847,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc) static void intel_fbc_get_reg_params(struct intel_crtc *crtc, struct intel_fbc_reg_params *params) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_state_cache *cache = &fbc->state_cache; @@ -884,7 +880,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; if (!fbc_supported(dev_priv)) @@ -910,7 +906,7 @@ unlock: static void __intel_fbc_post_update(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_reg_params old_params; @@ -943,7 +939,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc) void intel_fbc_post_update(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; if (!fbc_supported(dev_priv)) @@ -992,13 +988,13 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv, if (!fbc_supported(dev_priv)) return; - if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) - return; - mutex_lock(&fbc->lock); fbc->busy_bits &= ~frontbuffer_bits; + if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) + goto out; + if (!fbc->busy_bits && fbc->enabled && (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { if (fbc->active) @@ -1007,6 +1003,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv, __intel_fbc_post_update(fbc->crtc); } +out: mutex_unlock(&fbc->lock); } @@ -1088,7 +1085,7 @@ void intel_fbc_enable(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = 
crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; if (!fbc_supported(dev_priv)) @@ -1159,7 +1156,7 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv) */ void intel_fbc_disable(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; if (!fbc_supported(dev_priv)) @@ -1213,12 +1210,32 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv) if (!no_fbc_on_multiple_pipes(dev_priv)) return; - for_each_intel_crtc(dev_priv->dev, crtc) + for_each_intel_crtc(&dev_priv->drm, crtc) if (intel_crtc_active(&crtc->base) && to_intel_plane_state(crtc->base.primary->state)->visible) dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe); } +/* + * The DDX driver changes its behavior depending on the value it reads from + * i915.enable_fbc, so sanitize it by translating the default value into either + * 0 or 1 in order to allow it to know what's going on. + * + * Notice that this is done at driver initialization and we still allow user + * space to change the value during runtime without sanitizing it again. IGT + * relies on being able to change i915.enable_fbc at runtime. + */ +static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) +{ + if (i915.enable_fbc >= 0) + return !!i915.enable_fbc; + + if (IS_BROADWELL(dev_priv)) + return 1; + + return 0; +} + /** * intel_fbc_init - Initialize FBC * @dev_priv: the i915 device @@ -1236,6 +1253,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) fbc->active = false; fbc->work.scheduled = false; + i915.enable_fbc = intel_sanitize_fbc_option(dev_priv); + DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc); + if (!HAS_FBC(dev_priv)) { fbc->no_fbc_reason = "unsupported by this chipset"; return; diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 4babefc51eb2..86b00c6db1a6 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -362,23 +362,24 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, bool *enabled, int width, int height) { struct drm_device *dev = fb_helper->dev; + unsigned long conn_configured, mask; + unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); int i, j; bool *save_enabled; bool fallback = true; int num_connectors_enabled = 0; int num_connectors_detected = 0; - uint64_t conn_configured = 0, mask; int pass = 0; - save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool), - GFP_KERNEL); + save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL); if (!save_enabled) return false; - memcpy(save_enabled, enabled, fb_helper->connector_count); - mask = (1 << fb_helper->connector_count) - 1; + memcpy(save_enabled, enabled, count); + mask = BIT(count) - 1; + conn_configured = 0; retry: - for (i = 0; i < fb_helper->connector_count; i++) { + for (i = 0; i < count; i++) { struct drm_fb_helper_connector *fb_conn; struct drm_connector *connector; struct drm_encoder *encoder; @@ -388,7 +389,7 @@ retry: fb_conn = fb_helper->connector_info[i]; connector = fb_conn->connector; - if (conn_configured & (1 << i)) + if (conn_configured & BIT(i)) continue; if (pass == 0 && !connector->has_tile) @@ -400,7 +401,7 @@ retry: if (!enabled[i]) { DRM_DEBUG_KMS("connector %s not enabled, skipping\n", connector->name); - conn_configured |= (1 << i); + conn_configured |= BIT(i); 
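intel_sanitize_fbc_option() above collapses the tristate module parameter once, at init: a negative (auto) value becomes the per-platform default, currently 1 only on Broadwell, and explicit values are normalised with !! so the DDX only ever observes 0 or 1 in i915.enable_fbc. The same normalisation in isolation:

/* Fold a "-1 means auto" tristate into an effective boolean at init. */
static int sanitize_option(int param, int platform_default)
{
	if (param >= 0)
		return !!param;		/* clamp user input to 0/1 */

	return platform_default;	/* auto: per-platform policy */
}

Runtime writes are deliberately left unsanitised, per the comment above, since IGT depends on toggling the parameter on the fly.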
continue; } @@ -419,7 +420,7 @@ retry: DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n", connector->name); enabled[i] = false; - conn_configured |= (1 << i); + conn_configured |= BIT(i); continue; } @@ -432,14 +433,15 @@ retry: intel_crtc->lut_b[j] = j; } - new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc); + new_crtc = intel_fb_helper_crtc(fb_helper, + connector->state->crtc); /* * Make sure we're not trying to drive multiple connectors * with a single CRTC, since our cloning support may not * match the BIOS. */ - for (j = 0; j < fb_helper->connector_count; j++) { + for (j = 0; j < count; j++) { if (crtcs[j] == new_crtc) { DRM_DEBUG_KMS("fallback: cloned configuration\n"); goto bail; @@ -498,7 +500,7 @@ retry: modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :""); fallback = false; - conn_configured |= (1 << i); + conn_configured |= BIT(i); } if ((conn_configured & mask) != mask) { @@ -522,7 +524,7 @@ retry: if (fallback) { bail: DRM_DEBUG_KMS("Not using firmware configuration\n"); - memcpy(enabled, save_enabled, fb_helper->connector_count); + memcpy(enabled, save_enabled, count); kfree(save_enabled); return false; } @@ -538,8 +540,7 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = { .fb_probe = intelfb_create, }; -static void intel_fbdev_destroy(struct drm_device *dev, - struct intel_fbdev *ifbdev) +static void intel_fbdev_destroy(struct intel_fbdev *ifbdev) { /* We rely on the object-free to release the VMA pinning for * the info->screen_base mmaping. Leaking the VMA is simpler than @@ -552,12 +553,14 @@ static void intel_fbdev_destroy(struct drm_device *dev, drm_fb_helper_fini(&ifbdev->helper); if (ifbdev->fb) { - mutex_lock(&dev->struct_mutex); + mutex_lock(&ifbdev->helper.dev->struct_mutex); intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0)); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&ifbdev->helper.dev->struct_mutex); drm_framebuffer_remove(&ifbdev->fb->base); } + + kfree(ifbdev); } /* @@ -690,9 +693,9 @@ out: static void intel_fbdev_suspend_worker(struct work_struct *work) { - intel_fbdev_set_suspend(container_of(work, - struct drm_i915_private, - fbdev_suspend_work)->dev, + intel_fbdev_set_suspend(&container_of(work, + struct drm_i915_private, + fbdev_suspend_work)->drm, FBINFO_STATE_RUNNING, true); } @@ -700,7 +703,7 @@ static void intel_fbdev_suspend_worker(struct work_struct *work) int intel_fbdev_init(struct drm_device *dev) { struct intel_fbdev *ifbdev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0)) @@ -732,38 +735,50 @@ int intel_fbdev_init(struct drm_device *dev) static void intel_fbdev_initial_config(void *data, async_cookie_t cookie) { - struct drm_i915_private *dev_priv = data; - struct intel_fbdev *ifbdev = dev_priv->fbdev; + struct intel_fbdev *ifbdev = data; /* Due to peculiar init order wrt to hpd handling this is separate. 
*/ if (drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp)) - intel_fbdev_fini(dev_priv->dev); + intel_fbdev_fini(ifbdev->helper.dev); } void intel_fbdev_initial_config_async(struct drm_device *dev) { - async_schedule(intel_fbdev_initial_config, to_i915(dev)); + struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; + + ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev); +} + +static void intel_fbdev_sync(struct intel_fbdev *ifbdev) +{ + if (!ifbdev->cookie) + return; + + /* Only serialises with all preceding async calls, hence +1 */ + async_synchronize_cookie(ifbdev->cookie + 1); + ifbdev->cookie = 0; } void intel_fbdev_fini(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (!dev_priv->fbdev) + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_fbdev *ifbdev = dev_priv->fbdev; + + if (!ifbdev) return; flush_work(&dev_priv->fbdev_suspend_work); - if (!current_is_async()) - async_synchronize_full(); - intel_fbdev_destroy(dev, dev_priv->fbdev); - kfree(dev_priv->fbdev); + intel_fbdev_sync(ifbdev); + + intel_fbdev_destroy(ifbdev); dev_priv->fbdev = NULL; } void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_fbdev *ifbdev = dev_priv->fbdev; struct fb_info *info; @@ -812,7 +827,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous void intel_fbdev_output_poll_changed(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (dev_priv->fbdev) drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); } @@ -820,13 +835,15 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) void intel_fbdev_restore_mode(struct drm_device *dev) { int ret; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_fbdev *ifbdev = dev_priv->fbdev; struct drm_fb_helper *fb_helper; if (!ifbdev) return; + intel_fbdev_sync(ifbdev); + fb_helper = &ifbdev->helper; ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c index 9be839a242f9..2aa744081f09 100644 --- a/drivers/gpu/drm/i915/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c @@ -50,7 +50,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc; enum pipe pipe; @@ -68,7 +68,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev) static bool cpt_can_enable_serr_int(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; struct intel_crtc *crtc; @@ -105,7 +105,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable, bool old) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t reg = PIPESTAT(pipe); u32 pipestat = I915_READ(reg) & 0xffff0000; @@ -123,7 +123,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { - struct drm_i915_private *dev_priv = dev->dev_private; 
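intel_fbdev_sync() above replaces the global async_synchronize_full() with cookie-based waiting: async_schedule() returns a cookie, and async_synchronize_cookie() waits for all async work scheduled before the given cookie, hence the +1 to make the initial-config call itself part of the waited-on set. A toy rendering of that contract, with wait_before() standing in for async_synchronize_cookie():

struct fbdev_state {
	unsigned long long cookie;	/* 0 means nothing scheduled */
};

static void fbdev_sync(struct fbdev_state *f,
		       void (*wait_before)(unsigned long long cookie))
{
	if (!f->cookie)
		return;

	/*
	 * wait_before() only covers work scheduled before the ticket,
	 * so +1 includes our own call in the set being waited on.
	 */
	wait_before(f->cookie + 1);
	f->cookie = 0;
}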
+ struct drm_i915_private *dev_priv = to_i915(dev); uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN; @@ -154,7 +154,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable, bool old) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (enable) { I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); @@ -176,7 +176,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (enable) bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); @@ -188,7 +188,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, enum transcoder pch_transcoder, bool enable) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t bit = (pch_transcoder == TRANSCODER_A) ? SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; @@ -220,7 +220,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, enum transcoder pch_transcoder, bool enable, bool old) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (enable) { I915_WRITE(SERR_INT, @@ -244,7 +244,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); bool old; @@ -289,7 +289,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv, bool ret; spin_lock_irqsave(&dev_priv->irq_lock, flags); - ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe, + ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe, enable); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -334,10 +334,12 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, intel_crtc->pch_fifo_underrun_disabled = !enable; if (HAS_PCH_IBX(dev_priv)) - ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, + ibx_set_fifo_underrun_reporting(&dev_priv->drm, + pch_transcoder, enable); else - cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, + cpt_set_fifo_underrun_reporting(&dev_priv->drm, + pch_transcoder, enable, old); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -405,7 +407,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv) spin_lock_irq(&dev_priv->irq_lock); - for_each_intel_crtc(dev_priv->dev, crtc) { + for_each_intel_crtc(&dev_priv->drm, crtc) { if (crtc->cpu_fifo_underrun_disabled) continue; @@ -432,7 +434,7 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv) spin_lock_irq(&dev_priv->irq_lock); - for_each_intel_crtc(dev_priv->dev, crtc) { + for_each_intel_crtc(&dev_priv->drm, crtc) { if (crtc->pch_fifo_underrun_disabled) continue; diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 4df80cc9a291..3e3e743740c0 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -26,6 +26,7 @@ #include "intel_guc_fwif.h" #include 
"i915_guc_reg.h" +#include "intel_ringbuffer.h" struct drm_i915_gem_request; @@ -86,7 +87,7 @@ struct i915_guc_client { int retcode; /* Per-engine counts of GuC submissions */ - uint64_t submissions[GUC_MAX_ENGINES_NUM]; + uint64_t submissions[I915_NUM_ENGINES]; }; enum intel_guc_fw_status { @@ -143,8 +144,8 @@ struct intel_guc { uint32_t action_fail; /* Total number of failures */ int32_t action_err; /* Last error code */ - uint64_t submissions[GUC_MAX_ENGINES_NUM]; - uint32_t last_seqno[GUC_MAX_ENGINES_NUM]; + uint64_t submissions[I915_NUM_ENGINES]; + uint32_t last_seqno[I915_NUM_ENGINES]; }; /* intel_guc_loader.c */ diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 8fe96a2d989e..605c69658d2c 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -65,6 +65,9 @@ MODULE_FIRMWARE(I915_SKL_GUC_UCODE); #define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin" MODULE_FIRMWARE(I915_BXT_GUC_UCODE); +#define I915_KBL_GUC_UCODE "i915/kbl_guc_ver9_14.bin" +MODULE_FIRMWARE(I915_KBL_GUC_UCODE); + /* User-friendly representation of an enum */ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status) { @@ -87,7 +90,7 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv) struct intel_engine_cs *engine; int irqs; - /* tell all command streamers NOT to forward interrupts and vblank to GuC */ + /* tell all command streamers NOT to forward interrupts or vblank to GuC */ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); for_each_engine(engine, dev_priv) @@ -105,9 +108,8 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv) int irqs; u32 tmp; - /* tell all command streamers to forward interrupts and vblank to GuC */ - irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS); - irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); + /* tell all command streamers to forward interrupts (but not vblank) to GuC */ + irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); for_each_engine(engine, dev_priv) I915_WRITE(RING_MODE_GEN7(engine), irqs); @@ -312,7 +314,7 @@ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv) static int guc_ucode_xfer(struct drm_i915_private *dev_priv) { struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; int ret; ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false); @@ -411,7 +413,7 @@ static int i915_reset_guc(struct drm_i915_private *dev_priv) */ int intel_guc_setup(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; const char *fw_path = guc_fw->guc_fw_path; int retries, ret, err; @@ -606,7 +608,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) /* Header and uCode will be loaded to WOPCM. Size of the two. 
*/ size = guc_fw->header_size + guc_fw->ucode_size; - if (size > guc_wopcm_size(dev->dev_private)) { + if (size > guc_wopcm_size(to_i915(dev))) { DRM_ERROR("Firmware is too large to fit in WOPCM\n"); goto fail; } @@ -679,7 +681,7 @@ fail: */ void intel_guc_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; const char *fw_path; @@ -699,6 +701,10 @@ void intel_guc_init(struct drm_device *dev) fw_path = I915_BXT_GUC_UCODE; guc_fw->guc_fw_major_wanted = 8; guc_fw->guc_fw_minor_wanted = 7; + } else if (IS_KABYLAKE(dev)) { + fw_path = I915_KBL_GUC_UCODE; + guc_fw->guc_fw_major_wanted = 9; + guc_fw->guc_fw_minor_wanted = 14; } else { fw_path = ""; /* unknown device */ } @@ -728,7 +734,7 @@ void intel_guc_init(struct drm_device *dev) */ void intel_guc_fini(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 9fa458ce40a6..434f4d5c553d 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -63,7 +63,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) if (!is_supported_device(dev_priv)) { DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n"); - return 0; + goto bail; } /* @@ -72,16 +72,20 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) ret = intel_gvt_init_host(); if (ret) { DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n"); - return 0; + goto bail; } ret = intel_gvt_init_device(dev_priv); if (ret) { DRM_DEBUG_DRIVER("Fail to init GVT device\n"); - return 0; + goto bail; } return 0; + +bail: + i915.enable_gvt = 0; + return 0; } /** diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index fb21626ada64..4df9f384910c 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -47,7 +47,7 @@ static void assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) { struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t enabled_bits; enabled_bits = HAS_DDI(dev) ? 
DDI_BUF_CTL_ENABLE : SDVO_ENABLE; @@ -138,7 +138,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, { const uint32_t *data = frame; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 val = I915_READ(VIDEO_DIP_CTL); int i; @@ -192,7 +192,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, { const uint32_t *data = frame; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -251,7 +251,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, { const uint32_t *data = frame; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -308,7 +308,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, { const uint32_t *data = frame; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -366,7 +366,7 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, { const uint32_t *data = frame; struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); @@ -508,7 +508,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, bool enable, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; i915_reg_t reg = VIDEO_DIP_CTL; @@ -629,7 +629,7 @@ static bool gcp_default_phase_possible(int pipe_bpp, static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); i915_reg_t reg; u32 val = 0; @@ -661,7 +661,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, bool enable, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; @@ -713,7 +713,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, bool enable, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 
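/*
 * [Editorial sketch -- not part of the diff.]  The repeated
 * dev->dev_private -> to_i915(dev) conversions throughout this series
 * rely on struct drm_device now being embedded in drm_i915_private
 * (hence the &dev_priv->drm references above), so the lookup becomes a
 * constant-offset container_of() rather than a pointer chase.  A
 * simplified shape, with surrounding members omitted:
 */

#include <linux/kernel.h>
#include <drm/drmP.h>

struct drm_i915_private {
	struct drm_device drm;	/* embedded; &i915->drm replaces i915->dev */
	/* ... the rest of the driver state ... */
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}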
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); @@ -755,7 +755,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, bool enable, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); @@ -807,7 +807,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, bool enable, const struct drm_display_mode *adjusted_mode) { - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); @@ -855,7 +855,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) static void intel_hdmi_prepare(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; @@ -894,7 +894,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); enum intel_display_power_domain power_domain; u32 tmp; @@ -931,7 +931,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp, flags = 0; int dotclock; @@ -988,7 +988,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder) static void g4x_enable_hdmi(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); u32 temp; @@ -1009,7 +1009,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder) static void ibx_enable_hdmi(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); u32 temp; @@ -1058,7 +1058,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder) static void cpt_enable_hdmi(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_hdmi *intel_hdmi = 
enc_to_intel_hdmi(&encoder->base); enum pipe pipe = crtc->pipe; @@ -1115,7 +1115,7 @@ static void vlv_enable_hdmi(struct intel_encoder *encoder) static void intel_disable_hdmi(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); u32 temp; @@ -1154,7 +1154,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) I915_WRITE(intel_hdmi->hdmi_reg, temp); POSTING_READ(intel_hdmi->hdmi_reg); - intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A); + intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A); intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } @@ -1273,33 +1273,15 @@ intel_hdmi_mode_valid(struct drm_connector *connector, static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc_state->base.crtc->dev; - struct drm_atomic_state *state; - struct intel_encoder *encoder; - struct drm_connector *connector; - struct drm_connector_state *connector_state; - int count = 0, count_hdmi = 0; - int i; if (HAS_GMCH_DISPLAY(dev)) return false; - state = crtc_state->base.state; - - for_each_connector_in_state(state, connector, connector_state, i) { - if (connector_state->crtc != crtc_state->base.crtc) - continue; - - encoder = to_intel_encoder(connector_state->best_encoder); - - count_hdmi += encoder->type == INTEL_OUTPUT_HDMI; - count++; - } - /* * HDMI 12bpc affects the clocks, so it's only possible * when not cloning with other encoder types. */ - return count_hdmi > 0 && count_hdmi == count; + return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI; } bool intel_hdmi_compute_config(struct intel_encoder *encoder, @@ -1575,7 +1557,7 @@ intel_hdmi_set_property(struct drm_connector *connector, struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi); - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); int ret; ret = drm_object_property_set_value(&connector->base, property, val); @@ -1674,7 +1656,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder) struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); struct intel_hdmi *intel_hdmi = &dport->hdmi; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; @@ -1722,7 +1704,7 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder) static void chv_hdmi_post_disable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); mutex_lock(&dev_priv->sb_lock); @@ -1737,7 +1719,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); struct intel_hdmi *intel_hdmi = &dport->hdmi; struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct 
intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; @@ -1774,6 +1756,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_hdmi_set_property, .atomic_get_property = intel_connector_atomic_get_property, + .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_hdmi_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, @@ -1806,7 +1789,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_device *dev = intel_encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dig_port->port; uint8_t alternate_ddc_pin; @@ -1914,7 +1897,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_hdmi_add_properties(intel_hdmi, connector); intel_connector_attach_encoder(intel_connector, intel_encoder); - drm_connector_register(connector); intel_hdmi->attached_connector = intel_connector; /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 38eeca7a6e72..51434ec871f2 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -144,7 +144,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_connector *intel_connector; struct intel_encoder *intel_encoder; @@ -191,7 +191,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), hotplug.reenable_work.work); - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_mode_config *mode_config = &dev->mode_config; int i; @@ -302,7 +302,7 @@ static void i915_hotplug_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, hotplug.hotplug_work); - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_connector *intel_connector; struct intel_encoder *intel_encoder; @@ -455,7 +455,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, */ void intel_hpd_init(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; int i; diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 81de23098be7..1f266d7df2ec 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -113,7 +113,7 @@ to_intel_gmbus(struct i2c_adapter *i2c) void intel_i2c_reset(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(GMBUS0, 0); I915_WRITE(GMBUS4, 0); @@ -138,7 +138,7 @@ 
static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) static u32 get_reserved(struct intel_gmbus *bus) { struct drm_i915_private *dev_priv = bus->dev_priv; - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; u32 reserved = 0; /* On most chips, these bits must be preserved in software. */ @@ -212,7 +212,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter) adapter); struct drm_i915_private *dev_priv = bus->dev_priv; - intel_i2c_reset(dev_priv->dev); + intel_i2c_reset(&dev_priv->drm); intel_i2c_quirk_set(dev_priv, true); set_data(bus, 1); set_clock(bus, 1); @@ -298,15 +298,16 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) { int ret; -#define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0) - if (!HAS_GMBUS_IRQ(dev_priv)) - return wait_for(C, 10); + return intel_wait_for_register(dev_priv, + GMBUS2, GMBUS_ACTIVE, 0, + 10); /* Important: The hw handles only the first bit, so set only one! */ I915_WRITE(GMBUS4, GMBUS_IDLE_EN); - ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, + ret = wait_event_timeout(dev_priv->gmbus_wait_queue, + (I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0, msecs_to_jiffies_timeout(10)); I915_WRITE(GMBUS4, 0); @@ -315,7 +316,6 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) return 0; else return -ETIMEDOUT; -#undef C } static int @@ -632,7 +632,7 @@ static const struct i2c_algorithm gmbus_algorithm = { */ int intel_setup_gmbus(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_gmbus *bus; unsigned int pin; int ret; @@ -688,7 +688,7 @@ int intel_setup_gmbus(struct drm_device *dev) goto err; } - intel_i2c_reset(dev_priv->dev); + intel_i2c_reset(&dev_priv->drm); return 0; @@ -736,7 +736,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) void intel_teardown_gmbus(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_gmbus *bus; unsigned int pin; diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index debed011a958..70c699043d0e 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -789,9 +789,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) intel_logical_ring_emit(ringbuf, MI_NOOP); intel_logical_ring_advance(ringbuf); - if (intel_engine_stopped(engine)) - return 0; - /* We keep the previous context alive until we retire the following * request. 
This ensures that any the context object is still pinned * for any residual writes the HW makes into it on the context switch @@ -826,7 +823,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, { struct drm_device *dev = params->dev; struct intel_engine_cs *engine = params->engine; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf; u64 exec_start; int instp_mode; @@ -902,7 +899,7 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine) struct drm_i915_gem_request *req, *tmp; LIST_HEAD(cancel_list); - WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex)); + WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex)); spin_lock_bh(&engine->execlist_lock); list_replace_init(&engine->execlist_queue, &cancel_list); @@ -929,7 +926,10 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine) /* TODO: Is this correct with Execlists enabled? */ I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); - if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { + if (intel_wait_for_register(dev_priv, + RING_MI_MODE(engine->mmio_base), + MODE_IDLE, MODE_IDLE, + 1000)) { DRM_ERROR("%s :timed out trying to stop ring\n", engine->name); return; } @@ -961,7 +961,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx, u32 *lrc_reg_state; int ret; - lockdep_assert_held(&ctx->i915->dev->struct_mutex); + lockdep_assert_held(&ctx->i915->drm.struct_mutex); if (ce->pin_count++) return 0; @@ -1011,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx, { struct intel_context *ce = &ctx->engine[engine->id]; - lockdep_assert_held(&ctx->i915->dev->struct_mutex); + lockdep_assert_held(&ctx->i915->drm.struct_mutex); GEM_BUG_ON(ce->pin_count == 0); if (--ce->pin_count) @@ -1296,6 +1296,31 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, wa_ctx_emit(batch, index, 0); wa_ctx_emit(batch, index, 0); } + + /* WaMediaPoolStateCmdInWABB:bxt */ + if (HAS_POOLED_EU(engine->i915)) { + /* + * EU pool configuration is setup along with golden context + * during context initialization. This value depends on + * device type (2x6 or 3x6) and needs to be updated based + * on which subslice is disabled especially for 2x6 + * devices, however it is safe to load default + * configuration of 3x6 device instead of masking off + * corresponding bits because HW ignores bits of a disabled + * subslice and drops down to appropriate config. Please + * see render_state_setup() in i915_gem_render_state.c for + * possible configurations, to avoid duplication they are + * not shown here again. 
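/*
 * [Editorial sketch -- not part of the diff.]  Another recurring
 * conversion in this section (gmbus_wait_idle and the ring-stop wait
 * above, the LVDS panel-power waits below):
 * intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms)
 * polls until (read(reg) & mask) == value, returning 0 on success and
 * -ETIMEDOUT otherwise, replacing open-coded wait_for() loops.  E.g.
 * the LVDS power-off wait becomes:
 */

	/* before: */
	if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
		DRM_ERROR("timed out waiting for panel to power off\n");

	/* after: */
	if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000))
		DRM_ERROR("timed out waiting for panel to power off\n");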
+ */ + u32 eu_pool_config = 0x00777000; + wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE); + wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE); + wa_ctx_emit(batch, index, eu_pool_config); + wa_ctx_emit(batch, index, 0); + wa_ctx_emit(batch, index, 0); + wa_ctx_emit(batch, index, 0); + } + /* Pad to end of cacheline */ while (index % CACHELINE_DWORDS) wa_ctx_emit(batch, index, MI_NOOP); @@ -1353,8 +1378,8 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size) { int ret; - engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev, - PAGE_ALIGN(size)); + engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm, + PAGE_ALIGN(size)); if (IS_ERR(engine->wa_ctx.obj)) { DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); ret = PTR_ERR(engine->wa_ctx.obj); @@ -1614,36 +1639,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, return 0; } -static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine) +static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - unsigned long flags; - - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return false; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - I915_WRITE_IMR(engine, - ~(engine->irq_enable_mask | engine->irq_keep_mask)); - POSTING_READ(RING_IMR(engine->mmio_base)); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - - return true; + I915_WRITE_IMR(engine, + ~(engine->irq_enable_mask | engine->irq_keep_mask)); + POSTING_READ_FW(RING_IMR(engine->mmio_base)); } -static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine) +static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - unsigned long flags; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - I915_WRITE_IMR(engine, ~engine->irq_keep_mask); - POSTING_READ(RING_IMR(engine->mmio_base)); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + I915_WRITE_IMR(engine, ~engine->irq_keep_mask); } static int gen8_emit_flush(struct drm_i915_gem_request *request, @@ -1780,16 +1787,6 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, return 0; } -static u32 gen8_get_seqno(struct intel_engine_cs *engine) -{ - return intel_read_status_page(engine, I915_GEM_HWS_INDEX); -} - -static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno) -{ - intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); -} - static void bxt_a_seqno_barrier(struct intel_engine_cs *engine) { /* @@ -1805,14 +1802,6 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine) intel_flush_status_page(engine, I915_GEM_HWS_INDEX); } -static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno) -{ - intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); - - /* See bxt_a_get_seqno() explaining the reason for the clflush. 
*/ - intel_flush_status_page(engine, I915_GEM_HWS_INDEX); -} - /* * Reserve space for 2 NOOPs at the end of each request to be * used as a workaround for not being allowed to do lite @@ -1838,7 +1827,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request) intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT); intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); + intel_logical_ring_emit(ringbuf, request->seqno); intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); intel_logical_ring_emit(ringbuf, MI_NOOP); return intel_logical_ring_advance_and_submit(request); @@ -1958,6 +1947,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) i915_cmd_parser_fini_ring(engine); i915_gem_batch_pool_fini(&engine->batch_pool); + intel_engine_fini_breadcrumbs(engine); + if (engine->status_page.obj) { i915_gem_object_unpin_map(engine->status_page.obj); engine->status_page.obj = NULL; @@ -1979,15 +1970,11 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) engine->init_hw = gen8_init_common_ring; engine->emit_request = gen8_emit_request; engine->emit_flush = gen8_emit_flush; - engine->irq_get = gen8_logical_ring_get_irq; - engine->irq_put = gen8_logical_ring_put_irq; + engine->irq_enable = gen8_logical_ring_enable_irq; + engine->irq_disable = gen8_logical_ring_disable_irq; engine->emit_bb_start = gen8_emit_bb_start; - engine->get_seqno = gen8_get_seqno; - engine->set_seqno = gen8_set_seqno; - if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) engine->irq_seqno_barrier = bxt_a_seqno_barrier; - engine->set_seqno = bxt_a_set_seqno; - } } static inline void @@ -1995,7 +1982,6 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift) { engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; - init_waitqueue_head(&engine->irq_queue); } static int @@ -2016,12 +2002,94 @@ lrc_setup_hws(struct intel_engine_cs *engine, return 0; } +static int +logical_ring_init(struct intel_engine_cs *engine) +{ + struct i915_gem_context *dctx = engine->i915->kernel_context; + int ret; + + ret = intel_engine_init_breadcrumbs(engine); + if (ret) + goto error; + + ret = i915_cmd_parser_init_ring(engine); + if (ret) + goto error; + + ret = execlists_context_deferred_alloc(dctx, engine); + if (ret) + goto error; + + /* As this is the default context, always pin it */ + ret = intel_lr_context_pin(dctx, engine); + if (ret) { + DRM_ERROR("Failed to pin context for %s: %d\n", + engine->name, ret); + goto error; + } + + /* And setup the hardware status page. */ + ret = lrc_setup_hws(engine, dctx->engine[engine->id].state); + if (ret) { + DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret); + goto error; + } + + return 0; + +error: + intel_logical_ring_cleanup(engine); + return ret; +} + +static int logical_render_ring_init(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + int ret; + + if (HAS_L3_DPF(dev_priv)) + engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + + /* Override some for render ring. 
*/ + if (INTEL_GEN(dev_priv) >= 9) + engine->init_hw = gen9_init_render_ring; + else + engine->init_hw = gen8_init_render_ring; + engine->init_context = gen8_init_rcs_context; + engine->cleanup = intel_fini_pipe_control; + engine->emit_flush = gen8_emit_flush_render; + engine->emit_request = gen8_emit_request_render; + + ret = intel_init_pipe_control(engine, 4096); + if (ret) + return ret; + + ret = intel_init_workaround_bb(engine); + if (ret) { + /* + * We continue even if we fail to initialize WA batch + * because we only expect rare glitches but nothing + * critical to prevent us from using GPU + */ + DRM_ERROR("WA batch buffer initialization failed: %d\n", + ret); + } + + ret = logical_ring_init(engine); + if (ret) { + lrc_destroy_wa_ctx_obj(engine); + } + + return ret; +} + static const struct logical_ring_info { const char *name; unsigned exec_id; unsigned guc_id; u32 mmio_base; unsigned irq_shift; + int (*init)(struct intel_engine_cs *engine); } logical_rings[] = { [RCS] = { .name = "render ring", @@ -2029,6 +2097,7 @@ static const struct logical_ring_info { .guc_id = GUC_RENDER_ENGINE, .mmio_base = RENDER_RING_BASE, .irq_shift = GEN8_RCS_IRQ_SHIFT, + .init = logical_render_ring_init, }, [BCS] = { .name = "blitter ring", @@ -2036,6 +2105,7 @@ static const struct logical_ring_info { .guc_id = GUC_BLITTER_ENGINE, .mmio_base = BLT_RING_BASE, .irq_shift = GEN8_BCS_IRQ_SHIFT, + .init = logical_ring_init, }, [VCS] = { .name = "bsd ring", @@ -2043,6 +2113,7 @@ static const struct logical_ring_info { .guc_id = GUC_VIDEO_ENGINE, .mmio_base = GEN6_BSD_RING_BASE, .irq_shift = GEN8_VCS1_IRQ_SHIFT, + .init = logical_ring_init, }, [VCS2] = { .name = "bsd2 ring", @@ -2050,6 +2121,7 @@ static const struct logical_ring_info { .guc_id = GUC_VIDEO_ENGINE2, .mmio_base = GEN8_BSD2_RING_BASE, .irq_shift = GEN8_VCS2_IRQ_SHIFT, + .init = logical_ring_init, }, [VECS] = { .name = "video enhancement ring", @@ -2057,14 +2129,14 @@ static const struct logical_ring_info { .guc_id = GUC_VIDEOENHANCE_ENGINE, .mmio_base = VEBOX_RING_BASE, .irq_shift = GEN8_VECS_IRQ_SHIFT, + .init = logical_ring_init, }, }; static struct intel_engine_cs * -logical_ring_setup(struct drm_device *dev, enum intel_engine_id id) +logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id) { const struct logical_ring_info *info = &logical_rings[id]; - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine = &dev_priv->engine[id]; enum forcewake_domains fw_domains; @@ -2107,169 +2179,62 @@ logical_ring_setup(struct drm_device *dev, enum intel_engine_id id) logical_ring_default_irqs(engine, info->irq_shift); intel_engine_init_hangcheck(engine); - i915_gem_batch_pool_init(dev, &engine->batch_pool); + i915_gem_batch_pool_init(&dev_priv->drm, &engine->batch_pool); return engine; } -static int -logical_ring_init(struct intel_engine_cs *engine) -{ - struct i915_gem_context *dctx = engine->i915->kernel_context; - int ret; - - ret = i915_cmd_parser_init_ring(engine); - if (ret) - goto error; - - ret = execlists_context_deferred_alloc(dctx, engine); - if (ret) - goto error; - - /* As this is the default context, always pin it */ - ret = intel_lr_context_pin(dctx, engine); - if (ret) { - DRM_ERROR("Failed to pin context for %s: %d\n", - engine->name, ret); - goto error; - } - - /* And setup the hardware status page. 
*/ - ret = lrc_setup_hws(engine, dctx->engine[engine->id].state); - if (ret) { - DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret); - goto error; - } - - return 0; - -error: - intel_logical_ring_cleanup(engine); - return ret; -} - -static int logical_render_ring_init(struct drm_device *dev) -{ - struct intel_engine_cs *engine = logical_ring_setup(dev, RCS); - int ret; - - if (HAS_L3_DPF(dev)) - engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; - - /* Override some for render ring. */ - if (INTEL_INFO(dev)->gen >= 9) - engine->init_hw = gen9_init_render_ring; - else - engine->init_hw = gen8_init_render_ring; - engine->init_context = gen8_init_rcs_context; - engine->cleanup = intel_fini_pipe_control; - engine->emit_flush = gen8_emit_flush_render; - engine->emit_request = gen8_emit_request_render; - - ret = intel_init_pipe_control(engine); - if (ret) - return ret; - - ret = intel_init_workaround_bb(engine); - if (ret) { - /* - * We continue even if we fail to initialize WA batch - * because we only expect rare glitches but nothing - * critical to prevent us from using GPU - */ - DRM_ERROR("WA batch buffer initialization failed: %d\n", - ret); - } - - ret = logical_ring_init(engine); - if (ret) { - lrc_destroy_wa_ctx_obj(engine); - } - - return ret; -} - -static int logical_bsd_ring_init(struct drm_device *dev) -{ - struct intel_engine_cs *engine = logical_ring_setup(dev, VCS); - - return logical_ring_init(engine); -} - -static int logical_bsd2_ring_init(struct drm_device *dev) -{ - struct intel_engine_cs *engine = logical_ring_setup(dev, VCS2); - - return logical_ring_init(engine); -} - -static int logical_blt_ring_init(struct drm_device *dev) -{ - struct intel_engine_cs *engine = logical_ring_setup(dev, BCS); - - return logical_ring_init(engine); -} - -static int logical_vebox_ring_init(struct drm_device *dev) -{ - struct intel_engine_cs *engine = logical_ring_setup(dev, VECS); - - return logical_ring_init(engine); -} - /** * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers * @dev: DRM device. * - * This function inits the engines for an Execlists submission style (the equivalent in the - * legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for - * those engines that are present in the hardware. + * This function inits the engines for an Execlists submission style (the + * equivalent in the legacy ringbuffer submission world would be + * i915_gem_init_engines). It does it only for those engines that are present in + * the hardware. * * Return: non-zero if the initialization failed. 
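/*
 * [Editorial sketch -- not part of the diff.]  The rewritten
 * intel_logical_rings_init() below walks the logical_rings[] table and
 * sanity-checks it with GENMASK(h, l), which builds a mask with bits
 * l..h (inclusive) set.  Assuming I915_NUM_ENGINES == 5 at this point
 * in the series and a 32-bit mask, GENMASK(31, 5) == 0xffffffe0, so the
 * WARN fires if ring_mask claims an engine id the driver has no table
 * slot for:
 */

	WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));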
*/ int intel_logical_rings_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + unsigned int mask = 0; + unsigned int i; int ret; - ret = logical_render_ring_init(dev); - if (ret) - return ret; + WARN_ON(INTEL_INFO(dev_priv)->ring_mask & + GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES)); - if (HAS_BSD(dev)) { - ret = logical_bsd_ring_init(dev); - if (ret) - goto cleanup_render_ring; - } + for (i = 0; i < ARRAY_SIZE(logical_rings); i++) { + if (!HAS_ENGINE(dev_priv, i)) + continue; - if (HAS_BLT(dev)) { - ret = logical_blt_ring_init(dev); - if (ret) - goto cleanup_bsd_ring; - } + if (!logical_rings[i].init) + continue; - if (HAS_VEBOX(dev)) { - ret = logical_vebox_ring_init(dev); + ret = logical_rings[i].init(logical_ring_setup(dev_priv, i)); if (ret) - goto cleanup_blt_ring; + goto cleanup; + + mask |= ENGINE_MASK(i); } - if (HAS_BSD2(dev)) { - ret = logical_bsd2_ring_init(dev); - if (ret) - goto cleanup_vebox_ring; + /* + * Catch failures to update logical_rings table when the new engines + * are added to the driver by a warning and disabling the forgotten + * engines. + */ + if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) { + struct intel_device_info *info = + (struct intel_device_info *)&dev_priv->info; + info->ring_mask = mask; } return 0; -cleanup_vebox_ring: - intel_logical_ring_cleanup(&dev_priv->engine[VECS]); -cleanup_blt_ring: - intel_logical_ring_cleanup(&dev_priv->engine[BCS]); -cleanup_bsd_ring: - intel_logical_ring_cleanup(&dev_priv->engine[VCS]); -cleanup_render_ring: - intel_logical_ring_cleanup(&dev_priv->engine[RCS]); +cleanup: + for (i = 0; i < I915_NUM_ENGINES; i++) + intel_logical_ring_cleanup(&dev_priv->engine[i]); return ret; } @@ -2546,7 +2511,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, /* One extra page as the sharing data between driver and GuC */ context_size += PAGE_SIZE * LRC_PPHWSP_PN; - ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size); + ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size); if (IS_ERR(ctx_obj)) { DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); return PTR_ERR(ctx_obj); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index e9082185a375..49550470483e 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -72,7 +72,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); enum intel_display_power_domain power_domain; u32 tmp; @@ -106,7 +106,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); u32 tmp, flags = 0; @@ -140,7 +140,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder) { struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = 
to_intel_crtc(encoder->base.crtc); const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; int pipe = crtc->pipe; @@ -184,8 +184,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder) * panels behave in the two modes. For now, let's just maintain the * value we got from the BIOS. */ - temp &= ~LVDS_A3_POWER_MASK; - temp |= lvds_encoder->a3_power; + temp &= ~LVDS_A3_POWER_MASK; + temp |= lvds_encoder->a3_power; /* Set the dithering flag on LVDS as needed, note that there is no * special lvds dither control bit on pch-split platforms, dithering is @@ -216,7 +216,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder) struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct intel_connector *intel_connector = &lvds_encoder->attached_connector->base; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t ctl_reg, stat_reg; if (HAS_PCH_SPLIT(dev)) { @@ -231,7 +231,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder) I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); POSTING_READ(lvds_encoder->reg); - if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) + if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000)) DRM_ERROR("timed out waiting for panel to power on\n"); intel_panel_enable_backlight(intel_connector); @@ -241,7 +241,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t ctl_reg, stat_reg; if (HAS_PCH_SPLIT(dev)) { @@ -253,7 +253,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder) } I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); - if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) + if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000)) DRM_ERROR("timed out waiting for panel to power off\n"); I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN); @@ -442,7 +442,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, container_of(nb, struct intel_lvds_connector, lid_notifier); struct drm_connector *connector = &lvds_connector->base.base; struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (dev->switch_power_state != DRM_SWITCH_POWER_ON) return NOTIFY_OK; @@ -555,6 +555,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_lvds_set_property, .atomic_get_property = intel_connector_atomic_get_property, + .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_lvds_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, @@ -810,27 +811,29 @@ static const struct dmi_system_id intel_dual_link_lvds[] = { { } /* terminating entry */ }; -bool intel_is_dual_link_lvds(struct drm_device *dev) +struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev) { - struct intel_encoder *encoder; - struct intel_lvds_encoder *lvds_encoder; + struct intel_encoder *intel_encoder; - for_each_intel_encoder(dev, encoder) { - if (encoder->type == INTEL_OUTPUT_LVDS) { - lvds_encoder = to_lvds_encoder(&encoder->base); + 
for_each_intel_encoder(dev, intel_encoder) + if (intel_encoder->type == INTEL_OUTPUT_LVDS) + return intel_encoder; - return lvds_encoder->is_dual_link; - } - } + return NULL; +} - return false; +bool intel_is_dual_link_lvds(struct drm_device *dev) +{ + struct intel_encoder *encoder = intel_get_lvds_encoder(dev); + + return encoder && to_lvds_encoder(&encoder->base)->is_dual_link; } static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) { struct drm_device *dev = lvds_encoder->base.base.dev; unsigned int val; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* use the module option value if specified */ if (i915.lvds_channel_mode > 0) @@ -880,7 +883,7 @@ static bool intel_lvds_supported(struct drm_device *dev) */ void intel_lvds_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_lvds_encoder *lvds_encoder; struct intel_encoder *intel_encoder; struct intel_lvds_connector *lvds_connector; @@ -1118,6 +1121,7 @@ out: mutex_unlock(&dev->mode_config.mutex); intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); + intel_panel_setup_backlight(connector, INVALID_PIPE); lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); DRM_DEBUG_KMS("detected %s-link lvds configuration\n", @@ -1130,9 +1134,6 @@ out: DRM_DEBUG_KMS("lid notifier registration failed\n"); lvds_connector->lid_notifier.notifier_call = NULL; } - drm_connector_register(connector); - - intel_panel_setup_backlight(connector, INVALID_PIPE); return; diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 38a4c8ce7e63..f2584d0a01ab 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -82,7 +82,7 @@ void intel_attach_force_audio_property(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_property *prop; prop = dev_priv->force_audio_property; @@ -109,7 +109,7 @@ void intel_attach_broadcast_rgb_property(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_property *prop; prop = dev_priv->broadcast_rgb_property; diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index f6d8a21d2c49..c27d5eb063d0 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -232,11 +232,28 @@ struct opregion_asle_ext { #define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19) #define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21) -#define ACPI_OTHER_OUTPUT (0<<8) -#define ACPI_VGA_OUTPUT (1<<8) -#define ACPI_TV_OUTPUT (2<<8) -#define ACPI_DIGITAL_OUTPUT (3<<8) -#define ACPI_LVDS_OUTPUT (4<<8) +/* + * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices + * Attached to the Display Adapter). 
+ */ +#define ACPI_DISPLAY_INDEX_SHIFT 0 +#define ACPI_DISPLAY_INDEX_MASK (0xf << 0) +#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT 4 +#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK (0xf << 4) +#define ACPI_DISPLAY_TYPE_SHIFT 8 +#define ACPI_DISPLAY_TYPE_MASK (0xf << 8) +#define ACPI_DISPLAY_TYPE_OTHER (0 << 8) +#define ACPI_DISPLAY_TYPE_VGA (1 << 8) +#define ACPI_DISPLAY_TYPE_TV (2 << 8) +#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8) +#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8) +#define ACPI_VENDOR_SPECIFIC_SHIFT 12 +#define ACPI_VENDOR_SPECIFIC_MASK (0xf << 12) +#define ACPI_BIOS_CAN_DETECT (1 << 16) +#define ACPI_DEPENDS_ON_VGA (1 << 17) +#define ACPI_PIPE_ID_SHIFT 18 +#define ACPI_PIPE_ID_MASK (7 << 18) +#define ACPI_DEVICE_ID_SCHEME (1 << 31) #define MAX_DSLP 1500 @@ -244,7 +261,7 @@ static int swsci(struct drm_i915_private *dev_priv, u32 function, u32 parm, u32 *parm_out) { struct opregion_swsci *swsci = dev_priv->opregion.swsci; - struct pci_dev *pdev = dev_priv->dev->pdev; + struct pci_dev *pdev = dev_priv->drm.pdev; u32 main_function, sub_function, scic; u16 swsci_val; u32 dslp; @@ -366,7 +383,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, type = DISPLAY_TYPE_CRT; break; case INTEL_OUTPUT_UNKNOWN: - case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_DP_MST: type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL; @@ -418,7 +435,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp) { struct intel_connector *connector; struct opregion_asle *asle = dev_priv->opregion.asle; - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); @@ -657,10 +674,51 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val) } } +static u32 acpi_display_type(struct drm_connector *connector) +{ + u32 display_type; + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_VGA: + case DRM_MODE_CONNECTOR_DVIA: + display_type = ACPI_DISPLAY_TYPE_VGA; + break; + case DRM_MODE_CONNECTOR_Composite: + case DRM_MODE_CONNECTOR_SVIDEO: + case DRM_MODE_CONNECTOR_Component: + case DRM_MODE_CONNECTOR_9PinDIN: + case DRM_MODE_CONNECTOR_TV: + display_type = ACPI_DISPLAY_TYPE_TV; + break; + case DRM_MODE_CONNECTOR_DVII: + case DRM_MODE_CONNECTOR_DVID: + case DRM_MODE_CONNECTOR_DisplayPort: + case DRM_MODE_CONNECTOR_HDMIA: + case DRM_MODE_CONNECTOR_HDMIB: + display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL; + break; + case DRM_MODE_CONNECTOR_LVDS: + case DRM_MODE_CONNECTOR_eDP: + case DRM_MODE_CONNECTOR_DSI: + display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL; + break; + case DRM_MODE_CONNECTOR_Unknown: + case DRM_MODE_CONNECTOR_VIRTUAL: + display_type = ACPI_DISPLAY_TYPE_OTHER; + break; + default: + MISSING_CASE(connector->connector_type); + display_type = ACPI_DISPLAY_TYPE_OTHER; + break; + } + + return display_type; +} + static void intel_didl_outputs(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = &dev_priv->opregion; - struct pci_dev *pdev = dev_priv->dev->pdev; + struct pci_dev *pdev = dev_priv->drm.pdev; struct drm_connector *connector; acpi_handle handle; struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; @@ -724,37 +782,18 @@ end: blind_set: i = 0; - list_for_each_entry(connector, &dev_priv->dev->mode_config.connector_list, head) { - int output_type = ACPI_OTHER_OUTPUT; + list_for_each_entry(connector, + &dev_priv->drm.mode_config.connector_list, head) { + int display_type = 
acpi_display_type(connector); + if (i >= max_outputs) { DRM_DEBUG_KMS("More than %u outputs in connector list\n", max_outputs); return; } - switch (connector->connector_type) { - case DRM_MODE_CONNECTOR_VGA: - case DRM_MODE_CONNECTOR_DVIA: - output_type = ACPI_VGA_OUTPUT; - break; - case DRM_MODE_CONNECTOR_Composite: - case DRM_MODE_CONNECTOR_SVIDEO: - case DRM_MODE_CONNECTOR_Component: - case DRM_MODE_CONNECTOR_9PinDIN: - output_type = ACPI_TV_OUTPUT; - break; - case DRM_MODE_CONNECTOR_DVII: - case DRM_MODE_CONNECTOR_DVID: - case DRM_MODE_CONNECTOR_DisplayPort: - case DRM_MODE_CONNECTOR_HDMIA: - case DRM_MODE_CONNECTOR_HDMIB: - output_type = ACPI_DIGITAL_OUTPUT; - break; - case DRM_MODE_CONNECTOR_LVDS: - output_type = ACPI_LVDS_OUTPUT; - break; - } + temp = get_did(opregion, i); - set_did(opregion, i, temp | (1 << 31) | output_type | i); + set_did(opregion, i, temp | (1 << 31) | display_type | i); i++; } goto end; @@ -916,7 +955,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = { int intel_opregion_setup(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = &dev_priv->opregion; - struct pci_dev *pdev = dev_priv->dev->pdev; + struct pci_dev *pdev = dev_priv->drm.pdev; u32 asls, mboxes; char buf[sizeof(OPREGION_SIGNATURE)]; int err = 0; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index eb93f90bb74d..3212d8806b5a 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -409,7 +409,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) struct intel_engine_cs *engine = &dev_priv->engine[RCS]; int ret; - lockdep_assert_held(&dev_priv->dev->struct_mutex); + lockdep_assert_held(&dev_priv->drm.struct_mutex); /* Only wait if there is actually an old frame to release to * guarantee forward progress. 
 */
@@ -741,8 +741,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, u32 swidth, swidthsw, sheight, ostride; enum pipe pipe = overlay->crtc->pipe; - lockdep_assert_held(&dev_priv->dev->struct_mutex); - WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex)); + lockdep_assert_held(&dev_priv->drm.struct_mutex); + WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); ret = intel_overlay_release_old_vid(overlay); if (ret != 0) @@ -836,7 +836,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, overlay->old_vid_bo = overlay->vid_bo; overlay->vid_bo = new_bo; - intel_frontbuffer_flip(dev_priv->dev, INTEL_FRONTBUFFER_OVERLAY(pipe)); + intel_frontbuffer_flip(&dev_priv->drm, + INTEL_FRONTBUFFER_OVERLAY(pipe)); return 0; @@ -851,8 +852,8 @@ int intel_overlay_switch_off(struct intel_overlay *overlay) struct overlay_registers __iomem *regs; int ret; - lockdep_assert_held(&dev_priv->dev->struct_mutex); - WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex)); + lockdep_assert_held(&dev_priv->drm.struct_mutex); + WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); ret = intel_overlay_recover_from_interrupt(overlay); if (ret != 0) @@ -1084,7 +1085,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_intel_overlay_put_image *put_image_rec = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_overlay *overlay; struct drm_crtc *drmmode_crtc; struct intel_crtc *crtc; @@ -1282,7 +1283,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_intel_overlay_attrs *attrs = data; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_overlay *overlay; struct overlay_registers __iomem *regs; int ret; @@ -1379,7 +1380,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) if (!overlay) return; - mutex_lock(&dev_priv->dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); if (WARN_ON(dev_priv->overlay)) goto out_free; @@ -1387,9 +1388,10 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) reg_bo = NULL; if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) - reg_bo = i915_gem_object_create_stolen(dev_priv->dev, PAGE_SIZE); + reg_bo = i915_gem_object_create_stolen(&dev_priv->drm, + PAGE_SIZE); if (reg_bo == NULL) - reg_bo = i915_gem_object_create(dev_priv->dev, PAGE_SIZE); + reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE); if (IS_ERR(reg_bo)) goto out_free; overlay->reg_bo = reg_bo; @@ -1434,7 +1436,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) intel_overlay_unmap_regs(overlay, regs); dev_priv->overlay = overlay; - mutex_unlock(&dev_priv->dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); DRM_INFO("initialized overlay support\n"); return; @@ -1444,7 +1446,7 @@ out_unpin_bo: out_free_bo: drm_gem_object_unreference(®_bo->base); out_free: - mutex_unlock(&dev_priv->dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); kfree(overlay); return; } diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index bf721781c259..96c65d77e886 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -377,7 +377,7 @@ out: enum drm_connector_status intel_panel_detect(struct drm_device *dev) { - struct drm_i915_private 
*dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* Assume that the BIOS does not lie through the OpRegion... */ if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) { @@ -504,7 +504,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector) if (panel->backlight.combination_mode) { u8 lbpc; - pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc); + pci_read_config_byte(dev_priv->drm.pdev, LBPC, &lbpc); val *= lbpc; } @@ -592,7 +592,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level) lbpc = level * 0xfe / panel->backlight.max + 1; level /= lbpc; - pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc); + pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc); } if (IS_GEN4(dev_priv)) { @@ -822,7 +822,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector) * backlight. This will leave the backlight on unnecessarily when * another client is not activated. */ - if (dev_priv->dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) { + if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) { DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n"); return; } @@ -1142,7 +1142,7 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd) { struct intel_connector *connector = bl_get_data(bd); struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 hw_level; int ret; @@ -1163,7 +1163,7 @@ static const struct backlight_ops intel_backlight_device_ops = { .get_brightness = intel_backlight_device_get_brightness, }; -static int intel_backlight_device_register(struct intel_connector *connector) +int intel_backlight_device_register(struct intel_connector *connector) { struct intel_panel *panel = &connector->panel; struct backlight_properties props; @@ -1225,11 +1225,6 @@ void intel_backlight_device_unregister(struct intel_connector *connector) panel->backlight.device = NULL; } } -#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */ -static int intel_backlight_device_register(struct intel_connector *connector) -{ - return 0; -} #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ /* @@ -1321,7 +1316,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int clock; if (IS_G4X(dev_priv)) @@ -1736,7 +1731,8 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) panel->backlight.set = bxt_set_backlight; panel->backlight.get = bxt_get_backlight; panel->backlight.hz_to_pwm = bxt_hz_to_pwm; - } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) { + } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) || + HAS_PCH_KBP(dev_priv)) { panel->backlight.setup = lpt_setup_backlight; panel->backlight.enable = lpt_enable_backlight; panel->backlight.disable = lpt_disable_backlight; @@ -1809,11 +1805,3 @@ void intel_panel_fini(struct intel_panel *panel) drm_mode_destroy(intel_connector->base.dev, panel->downclock_mode); } - -void intel_backlight_register(struct drm_device *dev) -{ - struct intel_connector *connector; - - for_each_intel_connector(dev, connector) - intel_backlight_device_register(connector); -} diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 658a75659657..5a8ee0c76593 100644 --- 
a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -57,7 +57,7 @@ static void gen9_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */ I915_WRITE(CHICKEN_PAR1_1, @@ -83,7 +83,7 @@ static void gen9_init_clock_gating(struct drm_device *dev) static void bxt_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); gen9_init_clock_gating(dev); @@ -109,7 +109,7 @@ static void bxt_init_clock_gating(struct drm_device *dev) static void i915_pineview_get_mem_freq(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp; tmp = I915_READ(CLKCFG); @@ -148,7 +148,7 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev) static void i915_ironlake_get_mem_freq(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u16 ddrpll, csipll; ddrpll = I915_READ16(DDRMPLL1); @@ -319,7 +319,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; u32 val; if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { @@ -375,7 +375,7 @@ static const int pessimal_latency_ns = 5000; static int vlv_get_fifo_size(struct drm_device *dev, enum pipe pipe, int plane) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int sprite0_start, sprite1_start, size; switch (pipe) { @@ -426,7 +426,7 @@ static int vlv_get_fifo_size(struct drm_device *dev, static int i9xx_get_fifo_size(struct drm_device *dev, int plane) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t dsparb = I915_READ(DSPARB); int size; @@ -442,7 +442,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane) static int i830_get_fifo_size(struct drm_device *dev, int plane) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t dsparb = I915_READ(DSPARB); int size; @@ -459,7 +459,7 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) static int i845_get_fifo_size(struct drm_device *dev, int plane) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t dsparb = I915_READ(DSPARB); int size; @@ -637,7 +637,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) static void pineview_update_wm(struct drm_crtc *unused_crtc) { struct drm_device *dev = unused_crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; const struct cxsr_latency *latency; u32 reg; @@ -934,7 +934,7 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate, static void vlv_setup_wm_latency(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* all latencies in usec */ dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; @@ -1325,7 +1325,7 @@ static void vlv_merge_wm(struct drm_device *dev, static void vlv_update_wm(struct 
drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum pipe pipe = intel_crtc->pipe; struct vlv_wm_values wm = {}; @@ -1381,7 +1381,7 @@ static void g4x_update_wm(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; static const int sr_latency_ns = 12000; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int planea_wm, planeb_wm, cursora_wm, cursorb_wm; int plane_sr, cursor_sr; unsigned int enabled = 0; @@ -1438,7 +1438,7 @@ static void g4x_update_wm(struct drm_crtc *crtc) static void i965_update_wm(struct drm_crtc *unused_crtc) { struct drm_device *dev = unused_crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; int srwm = 1; int cursor_sr = 16; @@ -1512,7 +1512,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) static void i9xx_update_wm(struct drm_crtc *unused_crtc) { struct drm_device *dev = unused_crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); const struct intel_watermark_params *wm_info; uint32_t fwater_lo; uint32_t fwater_hi; @@ -1642,7 +1642,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) static void i845_update_wm(struct drm_crtc *unused_crtc) { struct drm_device *dev = unused_crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; const struct drm_display_mode *adjusted_mode; uint32_t fwater_lo; @@ -2070,7 +2070,7 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate) static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8]) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (IS_GEN9(dev)) { uint32_t val; @@ -2236,7 +2236,7 @@ static void intel_print_wm_latency(struct drm_device *dev, static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, uint16_t wm[5], uint16_t min) { - int level, max_level = ilk_wm_max_level(dev_priv->dev); + int level, max_level = ilk_wm_max_level(&dev_priv->drm); if (wm[0] >= min) return false; @@ -2250,7 +2250,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, static void snb_wm_latency_quirk(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); bool changed; /* @@ -2272,7 +2272,7 @@ static void snb_wm_latency_quirk(struct drm_device *dev) static void ilk_setup_wm_latency(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); intel_read_wm_latency(dev, dev_priv->wm.pri_latency); @@ -2294,7 +2294,7 @@ static void ilk_setup_wm_latency(struct drm_device *dev) static void skl_setup_wm_latency(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); intel_read_wm_latency(dev, dev_priv->wm.skl_latency); intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency); @@ -2330,7 +2330,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); struct intel_pipe_wm *pipe_wm; struct drm_device *dev = state->dev; - const struct 
drm_i915_private *dev_priv = dev->dev_private; + const struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane; struct intel_plane_state *pristate = NULL; struct intel_plane_state *sprstate = NULL; @@ -2505,7 +2505,7 @@ static void ilk_wm_merge(struct drm_device *dev, const struct ilk_wm_maximums *max, struct intel_pipe_wm *merged) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int level, max_level = ilk_wm_max_level(dev); int last_enabled_level = max_level; @@ -2565,7 +2565,7 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) /* The value we need to program into the WM_LPx latency field */ static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (IS_HASWELL(dev) || IS_BROADWELL(dev)) return 2 * level; @@ -2765,7 +2765,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, static void ilk_write_wm_values(struct drm_i915_private *dev_priv, struct ilk_wm_values *results) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct ilk_wm_values *previous = &dev_priv->wm.hw; unsigned int dirty; uint32_t val; @@ -2840,7 +2840,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv, bool ilk_disable_lp_wm(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); } @@ -3498,7 +3498,6 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv, int level, struct skl_wm_level *result) { - struct drm_device *dev = dev_priv->dev; struct drm_atomic_state *state = cstate->base.state; struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); struct drm_plane *plane; @@ -3514,7 +3513,9 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv, */ memset(result, 0, sizeof(*result)); - for_each_intel_plane_mask(dev, intel_plane, cstate->base.plane_mask) { + for_each_intel_plane_mask(&dev_priv->drm, + intel_plane, + cstate->base.plane_mask) { int i = skl_wm_plane_id(intel_plane); plane = &intel_plane->base; @@ -3595,7 +3596,7 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate, struct skl_pipe_wm *pipe_wm) { struct drm_device *dev = cstate->base.crtc->dev; - const struct drm_i915_private *dev_priv = dev->dev_private; + const struct drm_i915_private *dev_priv = to_i915(dev); int level, max_level = ilk_wm_max_level(dev); int ret; @@ -3682,7 +3683,7 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, static void skl_write_wm_values(struct drm_i915_private *dev_priv, const struct skl_wm_values *new) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct intel_crtc *crtc; for_each_intel_crtc(dev, crtc) { @@ -3779,7 +3780,7 @@ skl_ddb_allocation_included(const struct skl_ddb_allocation *old, static void skl_flush_wm_values(struct drm_i915_private *dev_priv, struct skl_wm_values *new_values) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct skl_ddb_allocation *cur_ddb, *new_ddb; bool reallocated[I915_MAX_PIPES] = {}; struct intel_crtc *crtc; @@ -3879,6 +3880,19 @@ static int skl_update_pipe_wm(struct drm_crtc_state *cstate, return 0; } +static uint32_t +pipes_modified(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state 
*cstate; + uint32_t i, ret = 0; + + for_each_crtc_in_state(state, crtc, cstate, i) + ret |= drm_crtc_mask(crtc); + + return ret; +} + static int skl_compute_ddb(struct drm_atomic_state *state) { @@ -3887,7 +3901,7 @@ skl_compute_ddb(struct drm_atomic_state *state) struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct intel_crtc *intel_crtc; struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb; - unsigned realloc_pipes = dev_priv->active_crtcs; + uint32_t realloc_pipes = pipes_modified(state); int ret; /* @@ -4002,7 +4016,7 @@ static void skl_update_wm(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct skl_wm_values *results = &dev_priv->wm.skl_results; struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; @@ -4043,7 +4057,7 @@ static void ilk_compute_wm_config(struct drm_device *dev, static void ilk_program_watermarks(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; struct ilk_wm_maximums max; struct intel_wm_config config = {}; @@ -4145,7 +4159,7 @@ static void skl_pipe_wm_active_state(uint32_t val, static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct skl_wm_values *hw = &dev_priv->wm.skl_hw; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); @@ -4199,7 +4213,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) void skl_wm_get_hw_state(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb; struct drm_crtc *crtc; @@ -4219,7 +4233,7 @@ void skl_wm_get_hw_state(struct drm_device *dev) static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct ilk_wm_values *hw = &dev_priv->wm.hw; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); @@ -4423,7 +4437,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev) void ilk_wm_get_hw_state(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct ilk_wm_values *hw = &dev_priv->wm.hw; struct drm_crtc *crtc; @@ -4485,7 +4499,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev) */ void intel_update_watermarks(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); if (dev_priv->display.update_wm) dev_priv->display.update_wm(crtc); @@ -4654,19 +4668,23 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) new_power = dev_priv->rps.power; switch (dev_priv->rps.power) { case LOW_POWER: - if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq) + if (val > dev_priv->rps.efficient_freq + 1 && + val > dev_priv->rps.cur_freq) new_power = BETWEEN; 
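/* Illustrative note, not part of the patch: the LOW_POWER/BETWEEN/HIGH_POWER cases form a hysteresis band. For example, with efficient_freq = 11 and rp0_freq = 22, a request of val = 13 leaves LOW_POWER for BETWEEN only if 13 also exceeds cur_freq, so repeated requests at the current frequency cannot oscillate between power states. */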
break; case BETWEEN: - if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq) + if (val <= dev_priv->rps.efficient_freq && + val < dev_priv->rps.cur_freq) new_power = LOW_POWER; - else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq) + else if (val >= dev_priv->rps.rp0_freq && + val > dev_priv->rps.cur_freq) new_power = HIGH_POWER; break; case HIGH_POWER: - if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq) + if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && + val < dev_priv->rps.cur_freq) new_power = BETWEEN; break; } @@ -4712,22 +4730,24 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) } I915_WRITE(GEN6_RP_UP_EI, - GT_INTERVAL_FROM_US(dev_priv, ei_up)); + GT_INTERVAL_FROM_US(dev_priv, ei_up)); I915_WRITE(GEN6_RP_UP_THRESHOLD, - GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100))); + GT_INTERVAL_FROM_US(dev_priv, + ei_up * threshold_up / 100)); I915_WRITE(GEN6_RP_DOWN_EI, - GT_INTERVAL_FROM_US(dev_priv, ei_down)); + GT_INTERVAL_FROM_US(dev_priv, ei_down)); I915_WRITE(GEN6_RP_DOWN_THRESHOLD, - GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100))); + GT_INTERVAL_FROM_US(dev_priv, + ei_down * threshold_down / 100)); - I915_WRITE(GEN6_RP_CONTROL, - GEN6_RP_MEDIA_TURBO | - GEN6_RP_MEDIA_HW_NORMAL_MODE | - GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_AVG); + I915_WRITE(GEN6_RP_CONTROL, + GEN6_RP_MEDIA_TURBO | + GEN6_RP_MEDIA_HW_NORMAL_MODE | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_AVG); dev_priv->rps.power = new_power; dev_priv->rps.up_threshold = threshold_up; @@ -4844,12 +4864,27 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv) gen6_rps_reset_ei(dev_priv); I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); + + gen6_enable_rps_interrupts(dev_priv); + + /* Ensure we start at the user's desired frequency */ + intel_set_rps(dev_priv, + clamp(dev_priv->rps.cur_freq, + dev_priv->rps.min_freq_softlimit, + dev_priv->rps.max_freq_softlimit)); } mutex_unlock(&dev_priv->rps.hw_lock); } void gen6_rps_idle(struct drm_i915_private *dev_priv) { + /* Flush our bottom-half so that it does not race with us + * setting the idle frequency and so that it is bounded by + * our rpm wakeref. And then disable the interrupts to stop any + * further RPS reclocking whilst we are asleep. + */ + gen6_disable_rps_interrupts(dev_priv); + mutex_lock(&dev_priv->rps.hw_lock); if (dev_priv->rps.enabled) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) @@ -4874,7 +4909,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv, /* This is intentionally racy! We peek at the state here, then * validate inside the RPS worker.
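 * (Illustrative aside, not in the patch: this is a lock-free peek-then-validate pattern; a stale peek can at worst skip or queue an unneeded boost, because the RPS worker re-checks the state under dev_priv->rps.hw_lock before touching the hardware, so no incorrect frequency is ever written.)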
*/ - if (!(dev_priv->mm.busy && + if (!(dev_priv->gt.awake && dev_priv->rps.enabled && dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)) return; @@ -4890,7 +4925,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv, spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->rps.interrupts_enabled) { dev_priv->rps.client_boost = true; - queue_work(dev_priv->wq, &dev_priv->rps.work); + schedule_work(&dev_priv->rps.work); } spin_unlock_irq(&dev_priv->irq_lock); @@ -4954,14 +4989,15 @@ static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode) mode = 0; } if (HAS_RC6p(dev_priv)) - DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n", - onoff(mode & GEN6_RC_CTL_RC6_ENABLE), - onoff(mode & GEN6_RC_CTL_RC6p_ENABLE), - onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE)); + DRM_DEBUG_DRIVER("Enabling RC6 states: " + "RC6 %s RC6p %s RC6pp %s\n", + onoff(mode & GEN6_RC_CTL_RC6_ENABLE), + onoff(mode & GEN6_RC_CTL_RC6p_ENABLE), + onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE)); else - DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n", - onoff(mode & GEN6_RC_CTL_RC6_ENABLE)); + DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n", + onoff(mode & GEN6_RC_CTL_RC6_ENABLE)); } static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv) @@ -4969,9 +5005,20 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv) struct i915_ggtt *ggtt = &dev_priv->ggtt; bool enable_rc6 = true; unsigned long rc6_ctx_base; + u32 rc_ctl; + int rc_sw_target; + + rc_ctl = I915_READ(GEN6_RC_CONTROL); + rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >> + RC_SW_TARGET_STATE_SHIFT; + DRM_DEBUG_DRIVER("BIOS enabled RC states: " + "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n", + onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE), + onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE), + rc_sw_target); if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) { - DRM_DEBUG_KMS("RC6 Base location not set properly.\n"); + DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n"); enable_rc6 = false; } @@ -4983,7 +5030,7 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv) if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) && (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base + ggtt->stolen_reserved_size))) { - DRM_DEBUG_KMS("RC6 Base address not as expected.\n"); + DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n"); enable_rc6 = false; } @@ -4991,15 +5038,24 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv) ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) && ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) && ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) { - DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n"); + DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n"); enable_rc6 = false; } - if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE | - GEN6_RC_CTL_HW_ENABLE)) && - ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) || - !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) { - DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n"); + if (!I915_READ(GEN8_PUSHBUS_CONTROL) || + !I915_READ(GEN8_PUSHBUS_ENABLE) || + !I915_READ(GEN8_PUSHBUS_SHIFT)) { + DRM_DEBUG_DRIVER("Pushbus not setup properly.\n"); + enable_rc6 = false; + } + + if (!I915_READ(GEN6_GFXPAUSE)) { + DRM_DEBUG_DRIVER("GFX pause not setup properly.\n"); + enable_rc6 = false; + } + + if (!I915_READ(GEN8_MISC_CTRL0)) { + DRM_DEBUG_DRIVER("GPM control not setup properly.\n"); enable_rc6 = false; } @@ -5031,8 +5087,9 @@ int sanitize_rc6_option(struct 
drm_i915_private *dev_priv, int enable_rc6) mask = INTEL_RC6_ENABLE; if ((enable_rc6 & mask) != enable_rc6) - DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n", - enable_rc6 & mask, enable_rc6, mask); + DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d " + "(requested %d, valid %d)\n", + enable_rc6 & mask, enable_rc6, mask); return enable_rc6 & mask; } @@ -5643,7 +5700,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) u32 pcbr; int pctx_size = 24*1024; - mutex_lock(&dev_priv->dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); pcbr = I915_READ(VLV_PCBR); if (pcbr) { @@ -5651,7 +5708,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) int pcbr_offset; pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base; - pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev, + pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm, pcbr_offset, I915_GTT_OFFSET_NONE, pctx_size); @@ -5668,7 +5725,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) * overlap with other ranges, such as the frame buffer, protected * memory, or any other relevant ranges. */ - pctx = i915_gem_object_create_stolen(dev_priv->dev, pctx_size); + pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size); if (!pctx) { DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); goto out; @@ -5680,7 +5737,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) out: DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); dev_priv->vlv_pctx = pctx; - mutex_unlock(&dev_priv->dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); } static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) @@ -6624,9 +6681,9 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) if (IS_IRONLAKE_M(dev_priv)) { ironlake_enable_drps(dev_priv); - mutex_lock(&dev_priv->dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); intel_init_emon(dev_priv); - mutex_unlock(&dev_priv->dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); } else if (INTEL_INFO(dev_priv)->gen >= 6) { /* * PCU communication is slow and this doesn't need to be @@ -6657,7 +6714,7 @@ void intel_reset_gt_powersave(struct drm_i915_private *dev_priv) static void ibx_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* * On Ibex Peak and Cougar Point, we need to disable clock @@ -6669,7 +6726,7 @@ static void ibx_init_clock_gating(struct drm_device *dev) static void g4x_disable_trickle_feed(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; for_each_pipe(dev_priv, pipe) { @@ -6684,7 +6741,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev) static void ilk_init_lp_watermarks(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); @@ -6698,7 +6755,7 @@ static void ilk_init_lp_watermarks(struct drm_device *dev) static void ironlake_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; /* @@ -6772,7 +6829,7 @@ static void 
ironlake_init_clock_gating(struct drm_device *dev) static void cpt_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); int pipe; uint32_t val; @@ -6809,7 +6866,7 @@ static void cpt_init_clock_gating(struct drm_device *dev) static void gen6_check_mch_setup(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t tmp; tmp = I915_READ(MCH_SSKPD); @@ -6820,7 +6877,7 @@ static void gen6_check_mch_setup(struct drm_device *dev) static void gen6_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); @@ -6935,7 +6992,7 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) static void lpt_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* * TODO: this bit should only be enabled when really needed, then @@ -6954,7 +7011,7 @@ static void lpt_init_clock_gating(struct drm_device *dev) static void lpt_suspend_hw(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (HAS_PCH_LPT_LP(dev)) { uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D); @@ -6989,7 +7046,7 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, static void kabylake_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); gen9_init_clock_gating(dev); @@ -7010,7 +7067,7 @@ static void kabylake_init_clock_gating(struct drm_device *dev) static void skylake_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); gen9_init_clock_gating(dev); @@ -7025,7 +7082,7 @@ static void skylake_init_clock_gating(struct drm_device *dev) static void broadwell_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; ilk_init_lp_watermarks(dev); @@ -7076,7 +7133,7 @@ static void broadwell_init_clock_gating(struct drm_device *dev) static void haswell_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); ilk_init_lp_watermarks(dev); @@ -7132,7 +7189,7 @@ static void haswell_init_clock_gating(struct drm_device *dev) static void ivybridge_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t snpcr; ilk_init_lp_watermarks(dev); @@ -7230,7 +7287,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) static void valleyview_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* WaDisableEarlyCull:vlv */ I915_WRITE(_3D_CHICKEN3, @@ -7312,7 +7369,7 @@ static void valleyview_init_clock_gating(struct drm_device *dev) static void cherryview_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private 
*dev_priv = to_i915(dev); /* WaVSRefCountFullforceMissDisable:chv */ /* WaDSRefCountFullforceMissDisable:chv */ @@ -7348,7 +7405,7 @@ static void cherryview_init_clock_gating(struct drm_device *dev) static void g4x_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t dspclk_gate; I915_WRITE(RENCLK_GATE_D1, 0); @@ -7375,7 +7432,7 @@ static void g4x_init_clock_gating(struct drm_device *dev) static void crestline_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); I915_WRITE(RENCLK_GATE_D2, 0); @@ -7391,7 +7448,7 @@ static void crestline_init_clock_gating(struct drm_device *dev) static void broadwater_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | I965_RCC_CLOCK_GATE_DISABLE | @@ -7408,7 +7465,7 @@ static void broadwater_init_clock_gating(struct drm_device *dev) static void gen3_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 dstate = I915_READ(D_STATE); dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | @@ -7433,7 +7490,7 @@ static void gen3_init_clock_gating(struct drm_device *dev) static void i85x_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); @@ -7447,7 +7504,7 @@ static void i85x_init_clock_gating(struct drm_device *dev) static void i830_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); @@ -7458,7 +7515,7 @@ static void i830_init_clock_gating(struct drm_device *dev) void intel_init_clock_gating(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->display.init_clock_gating(dev); } @@ -7526,7 +7583,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) /* Set up chip specific power management-related functions */ void intel_init_pm(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); intel_fbc_init(dev_priv); @@ -7604,46 +7661,59 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val { WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { + /* GEN6_PCODE_* are outside of the forcewake domain, we can + * use the fw I915_READ variants to reduce the amount of work + * required when reading/writing.
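+ * (Illustrative sketch, an assumption about these accessors rather than their actual definition: the _FW variants are presumed to skip the forcewake get/put bookkeeping, conceptually I915_READ(reg) ~ fw_get(); raw_read(reg); fw_put(); versus I915_READ_FW(reg) ~ raw_read(reg); which is only safe here because the pcode mailbox registers sit outside every forcewake domain.)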
+ */ + + if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n"); return -EAGAIN; } - I915_WRITE(GEN6_PCODE_DATA, *val); - I915_WRITE(GEN6_PCODE_DATA1, 0); - I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); + I915_WRITE_FW(GEN6_PCODE_DATA, *val); + I915_WRITE_FW(GEN6_PCODE_DATA1, 0); + I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 500)) { + if (intel_wait_for_register_fw(dev_priv, + GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0, + 500)) { DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox); return -ETIMEDOUT; } - *val = I915_READ(GEN6_PCODE_DATA); - I915_WRITE(GEN6_PCODE_DATA, 0); + *val = I915_READ_FW(GEN6_PCODE_DATA); + I915_WRITE_FW(GEN6_PCODE_DATA, 0); return 0; } -int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val) +int sandybridge_pcode_write(struct drm_i915_private *dev_priv, + u32 mbox, u32 val) { WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { + /* GEN6_PCODE_* are outside of the forcewake domain, we can + * use the fw I915_READ variants to reduce the amount of work + * required when reading/writing. + */ + + if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n"); return -EAGAIN; } - I915_WRITE(GEN6_PCODE_DATA, val); - I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); + I915_WRITE_FW(GEN6_PCODE_DATA, val); + I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 500)) { + if (intel_wait_for_register_fw(dev_priv, + GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0, + 500)) { DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox); return -ETIMEDOUT; } - I915_WRITE(GEN6_PCODE_DATA, 0); + I915_WRITE_FW(GEN6_PCODE_DATA, 0); return 0; } @@ -7713,7 +7783,7 @@ static void __intel_rps_boost_work(struct work_struct *work) struct request_boost *boost = container_of(work, struct request_boost, work); struct drm_i915_gem_request *req = boost->req; - if (!i915_gem_request_completed(req, true)) + if (!i915_gem_request_completed(req)) gen6_rps_boost(req->i915, NULL, req->emitted_jiffies); i915_gem_request_unreference(req); @@ -7727,7 +7797,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req) if (req == NULL || INTEL_GEN(req->i915) < 6) return; - if (i915_gem_request_completed(req, true)) + if (i915_gem_request_completed(req)) return; boost = kmalloc(sizeof(*boost), GFP_ATOMIC); @@ -7743,7 +7813,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req) void intel_pm_setup(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); mutex_init(&dev_priv->rps.hw_lock); spin_lock_init(&dev_priv->rps.client_lock); diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 29a09bf6bd18..68bd0bb34817 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -63,7 +63,7 @@ static bool is_edp_psr(struct intel_dp *intel_dp) static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t val; val = I915_READ(VLV_PSRSTAT(pipe)) & @@ -77,7 +77,7 @@ static void intel_psr_write_vsc(struct intel_dp
*intel_dp, { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); @@ -107,7 +107,7 @@ static void vlv_psr_setup_vsc(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc = intel_dig_port->base.base.crtc; enum pipe pipe = to_intel_crtc(crtc)->pipe; uint32_t val; @@ -173,7 +173,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t aux_clock_divider; i915_reg_t aux_ctl_reg; static const uint8_t aux_msg[] = { @@ -220,7 +220,7 @@ static void vlv_psr_enable_source(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc = dig_port->base.base.crtc; enum pipe pipe = to_intel_crtc(crtc)->pipe; @@ -235,7 +235,7 @@ static void vlv_psr_activate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc = dig_port->base.base.crtc; enum pipe pipe = to_intel_crtc(crtc)->pipe; @@ -252,7 +252,7 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); uint32_t max_sleep_time = 0x1f; /* Lately it was identified that depending on panel idle frame count @@ -324,7 +324,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc = dig_port->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -378,7 +378,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); WARN_ON(dev_priv->psr.active); @@ -407,7 +407,7 @@ void intel_psr_enable(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); 
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); if (!HAS_PSR(dev)) { @@ -494,15 +494,18 @@ static void vlv_psr_disable(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(intel_dig_port->base.base.crtc); uint32_t val; if (dev_priv->psr.active) { /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */ - if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) & - VLV_EDP_PSR_IN_TRANS) == 0, 1)) + if (intel_wait_for_register(dev_priv, + VLV_PSRSTAT(intel_crtc->pipe), + VLV_EDP_PSR_IN_TRANS, + 0, + 1)) WARN(1, "PSR transition took longer than expected\n"); val = I915_READ(VLV_PSRCTL(intel_crtc->pipe)); @@ -521,16 +524,18 @@ static void hsw_psr_disable(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); if (dev_priv->psr.active) { I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE); /* Wait till PSR is idle */ - if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) & - EDP_PSR_STATUS_STATE_MASK) == 0, - 2 * USEC_PER_SEC, 10 * USEC_PER_MSEC)) + if (intel_wait_for_register(dev_priv, + EDP_PSR_STATUS_CTL, + EDP_PSR_STATUS_STATE_MASK, + 0, + 2000)) DRM_ERROR("Timed out waiting for PSR Idle State\n"); dev_priv->psr.active = false; @@ -549,7 +554,7 @@ void intel_psr_disable(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); mutex_lock(&dev_priv->psr.lock); if (!dev_priv->psr.enabled) { @@ -586,14 +591,20 @@ static void intel_psr_work(struct work_struct *work) * and be ready for re-enable. 
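 * (Illustrative gloss, stated as an assumption: intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms) is taken to poll until (I915_READ(reg) & mask) == value or the timeout in milliseconds expires, returning 0 on success and -ETIMEDOUT otherwise, making it a drop-in replacement for the open-coded wait_for() forms it supersedes throughout this patch.)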
*/ if (HAS_DDI(dev_priv)) { - if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) & - EDP_PSR_STATUS_STATE_MASK) == 0, 50)) { + if (intel_wait_for_register(dev_priv, + EDP_PSR_STATUS_CTL, + EDP_PSR_STATUS_STATE_MASK, + 0, + 50)) { DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); return; } } else { - if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) & - VLV_EDP_PSR_IN_TRANS) == 0, 1)) { + if (intel_wait_for_register(dev_priv, + VLV_PSRSTAT(pipe), + VLV_EDP_PSR_IN_TRANS, + 0, + 1)) { DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); return; } @@ -619,7 +630,7 @@ unlock: static void intel_psr_exit(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dp *intel_dp = dev_priv->psr.enabled; struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; enum pipe pipe = to_intel_crtc(crtc)->pipe; @@ -674,7 +685,7 @@ static void intel_psr_exit(struct drm_device *dev) void intel_psr_single_frame_update(struct drm_device *dev, unsigned frontbuffer_bits) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; enum pipe pipe; u32 val; @@ -722,7 +733,7 @@ void intel_psr_single_frame_update(struct drm_device *dev, void intel_psr_invalidate(struct drm_device *dev, unsigned frontbuffer_bits) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; enum pipe pipe; @@ -760,7 +771,7 @@ void intel_psr_invalidate(struct drm_device *dev, void intel_psr_flush(struct drm_device *dev, unsigned frontbuffer_bits, enum fb_op_origin origin) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; enum pipe pipe; @@ -796,7 +807,7 @@ void intel_psr_flush(struct drm_device *dev, */ void intel_psr_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? 
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index fedd27049814..61e00bf9e87f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -58,18 +58,10 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf) ringbuf->tail, ringbuf->size); } -bool intel_engine_stopped(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine); -} - static void __intel_ring_advance(struct intel_engine_cs *engine) { struct intel_ringbuffer *ringbuf = engine->buffer; ringbuf->tail &= ringbuf->size - 1; - if (intel_engine_stopped(engine)) - return; engine->write_tail(engine, ringbuf->tail); } @@ -515,8 +507,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) I915_WRITE(reg, _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | INSTPM_SYNC_FLUSH)); - if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, - 1000)) + if (intel_wait_for_register(dev_priv, + reg, INSTPM_SYNC_FLUSH, 0, + 1000)) DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", engine->name); } @@ -528,7 +521,11 @@ static bool stop_ring(struct intel_engine_cs *engine) if (!IS_GEN2(dev_priv)) { I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); - if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { + if (intel_wait_for_register(dev_priv, + RING_MI_MODE(engine->mmio_base), + MODE_IDLE, + MODE_IDLE, + 1000)) { DRM_ERROR("%s : timed out trying to stop ring\n", engine->name); /* Sometimes we observe that the idle flag is not @@ -643,58 +640,42 @@ out: return ret; } -void -intel_fini_pipe_control(struct intel_engine_cs *engine) +void intel_fini_pipe_control(struct intel_engine_cs *engine) { if (engine->scratch.obj == NULL) return; - if (INTEL_GEN(engine->i915) >= 5) { - kunmap(sg_page(engine->scratch.obj->pages->sgl)); - i915_gem_object_ggtt_unpin(engine->scratch.obj); - } - + i915_gem_object_ggtt_unpin(engine->scratch.obj); drm_gem_object_unreference(&engine->scratch.obj->base); engine->scratch.obj = NULL; } -int -intel_init_pipe_control(struct intel_engine_cs *engine) +int intel_init_pipe_control(struct intel_engine_cs *engine, int size) { + struct drm_i915_gem_object *obj; int ret; WARN_ON(engine->scratch.obj); - engine->scratch.obj = i915_gem_object_create(engine->i915->dev, 4096); - if (IS_ERR(engine->scratch.obj)) { - DRM_ERROR("Failed to allocate seqno page\n"); - ret = PTR_ERR(engine->scratch.obj); - engine->scratch.obj = NULL; + obj = i915_gem_object_create_stolen(&engine->i915->drm, size); + if (!obj) + obj = i915_gem_object_create(&engine->i915->drm, size); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate scratch page\n"); + ret = PTR_ERR(obj); goto err; } - ret = i915_gem_object_set_cache_level(engine->scratch.obj, - I915_CACHE_LLC); + ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH); if (ret) goto err_unref; - ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0); - if (ret) - goto err_unref; - - engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj); - engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl)); - if (engine->scratch.cpu_page == NULL) { - ret = -ENOMEM; - goto err_unpin; - } - + engine->scratch.obj = obj; + engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", engine->name, engine->scratch.gtt_offset); return 0; -err_unpin: - 
i915_gem_object_ggtt_unpin(engine->scratch.obj); err_unref: drm_gem_object_unreference(&engine->scratch.obj->base); err: @@ -1324,8 +1305,7 @@ static int init_render_ring(struct intel_engine_cs *engine) if (IS_GEN(dev_priv, 6, 7)) I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); - if (HAS_L3_DPF(dev_priv)) - I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv)); + I915_WRITE_IMR(engine, ~engine->irq_keep_mask); return init_workarounds_ring(engine); } @@ -1362,19 +1342,17 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, return ret; for_each_engine_id(waiter, dev_priv, id) { - u32 seqno; u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) continue; - seqno = i915_gem_request_get_seqno(signaller_req); intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL); intel_ring_emit(signaller, lower_32_bits(gtt_offset)); intel_ring_emit(signaller, upper_32_bits(gtt_offset)); - intel_ring_emit(signaller, seqno); + intel_ring_emit(signaller, signaller_req->seqno); intel_ring_emit(signaller, 0); intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | MI_SEMAPHORE_TARGET(waiter->hw_id)); @@ -1403,18 +1381,16 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, return ret; for_each_engine_id(waiter, dev_priv, id) { - u32 seqno; u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) continue; - seqno = i915_gem_request_get_seqno(signaller_req); intel_ring_emit(signaller, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); intel_ring_emit(signaller, lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT); intel_ring_emit(signaller, upper_32_bits(gtt_offset)); - intel_ring_emit(signaller, seqno); + intel_ring_emit(signaller, signaller_req->seqno); intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | MI_SEMAPHORE_TARGET(waiter->hw_id)); intel_ring_emit(signaller, 0); @@ -1445,11 +1421,9 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req, i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id]; if (i915_mmio_reg_valid(mbox_reg)) { - u32 seqno = i915_gem_request_get_seqno(signaller_req); - intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit_reg(signaller, mbox_reg); - intel_ring_emit(signaller, seqno); + intel_ring_emit(signaller, signaller_req->seqno); } } @@ -1485,7 +1459,7 @@ gen6_add_request(struct drm_i915_gem_request *req) intel_ring_emit(engine, MI_STORE_DWORD_INDEX); intel_ring_emit(engine, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(engine, i915_gem_request_get_seqno(req)); + intel_ring_emit(engine, req->seqno); intel_ring_emit(engine, MI_USER_INTERRUPT); __intel_ring_advance(engine); @@ -1542,6 +1516,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req, { struct intel_engine_cs *waiter = waiter_req->engine; struct drm_i915_private *dev_priv = waiter_req->i915; + u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id); struct i915_hw_ppgtt *ppgtt; int ret; @@ -1553,10 +1528,8 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req, MI_SEMAPHORE_GLOBAL_GTT | MI_SEMAPHORE_SAD_GTE_SDD); intel_ring_emit(waiter, seqno); - intel_ring_emit(waiter, - lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); - intel_ring_emit(waiter, - upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); + intel_ring_emit(waiter, lower_32_bits(offset)); + intel_ring_emit(waiter, upper_32_bits(offset)); intel_ring_advance(waiter); /* When 
the !RCS engines idle waiting upon a semaphore, they lose their @@ -1611,66 +1584,22 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req, return 0; } -#define PIPE_CONTROL_FLUSH(ring__, addr__) \ -do { \ - intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ - PIPE_CONTROL_DEPTH_STALL); \ - intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ - intel_ring_emit(ring__, 0); \ - intel_ring_emit(ring__, 0); \ -} while (0) - -static int -pc_render_add_request(struct drm_i915_gem_request *req) +static void +gen5_seqno_barrier(struct intel_engine_cs *ring) { - struct intel_engine_cs *engine = req->engine; - u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; - int ret; - - /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently - * incoherent with writes to memory, i.e. completely fubar, - * so we need to use PIPE_NOTIFY instead. + /* MI_STORE are internally buffered by the GPU and not flushed + * either by MI_FLUSH or SyncFlush or any other combination of + * MI commands. * - * However, we also need to workaround the qword write - * incoherence by flushing the 6 PIPE_NOTIFY buffers out to - * memory before requesting an interrupt. + * "Only the submission of the store operation is guaranteed. + * The write result will be complete (coherent) some time later + * (this is practically a finite period but there is no guaranteed + * latency)." + * + * Empirically, we observe that we need a delay of at least 75us to + * be sure that the seqno write is visible by the CPU. */ - ret = intel_ring_begin(req, 32); - if (ret) - return ret; - - intel_ring_emit(engine, - GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WRITE_FLUSH | - PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); - intel_ring_emit(engine, - engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(engine, i915_gem_request_get_seqno(req)); - intel_ring_emit(engine, 0); - PIPE_CONTROL_FLUSH(engine, scratch_addr); - scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */ - PIPE_CONTROL_FLUSH(engine, scratch_addr); - scratch_addr += 2 * CACHELINE_BYTES; - PIPE_CONTROL_FLUSH(engine, scratch_addr); - scratch_addr += 2 * CACHELINE_BYTES; - PIPE_CONTROL_FLUSH(engine, scratch_addr); - scratch_addr += 2 * CACHELINE_BYTES; - PIPE_CONTROL_FLUSH(engine, scratch_addr); - scratch_addr += 2 * CACHELINE_BYTES; - PIPE_CONTROL_FLUSH(engine, scratch_addr); - - intel_ring_emit(engine, - GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WRITE_FLUSH | - PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | - PIPE_CONTROL_NOTIFY); - intel_ring_emit(engine, - engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(engine, i915_gem_request_get_seqno(req)); - intel_ring_emit(engine, 0); - __intel_ring_advance(engine); - - return 0; + usleep_range(125, 250); } static void @@ -1698,127 +1627,54 @@ gen6_seqno_barrier(struct intel_engine_cs *engine) spin_unlock_irq(&dev_priv->uncore.lock); } -static u32 -ring_get_seqno(struct intel_engine_cs *engine) -{ - return intel_read_status_page(engine, I915_GEM_HWS_INDEX); -} - static void -ring_set_seqno(struct intel_engine_cs *engine, u32 seqno) -{ - intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); -} - -static u32 -pc_render_get_seqno(struct intel_engine_cs *engine) +gen5_irq_enable(struct intel_engine_cs *engine) { - return engine->scratch.cpu_page[0]; + gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask); } static void -pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno) 
+gen5_irq_disable(struct intel_engine_cs *engine) { - engine->scratch.cpu_page[0] = seqno; -} - -static bool -gen5_ring_get_irq(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - unsigned long flags; - - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return false; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) - gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - - return true; + gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask); } static void -gen5_ring_put_irq(struct intel_engine_cs *engine) +i9xx_irq_enable(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - unsigned long flags; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) - gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); -} - -static bool -i9xx_ring_get_irq(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - unsigned long flags; - - if (!intel_irqs_enabled(dev_priv)) - return false; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - dev_priv->irq_mask &= ~engine->irq_enable_mask; - I915_WRITE(IMR, dev_priv->irq_mask); - POSTING_READ(IMR); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - - return true; + dev_priv->irq_mask &= ~engine->irq_enable_mask; + I915_WRITE(IMR, dev_priv->irq_mask); + POSTING_READ_FW(RING_IMR(engine->mmio_base)); } static void -i9xx_ring_put_irq(struct intel_engine_cs *engine) +i9xx_irq_disable(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - unsigned long flags; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - dev_priv->irq_mask |= engine->irq_enable_mask; - I915_WRITE(IMR, dev_priv->irq_mask); - POSTING_READ(IMR); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + dev_priv->irq_mask |= engine->irq_enable_mask; + I915_WRITE(IMR, dev_priv->irq_mask); } -static bool -i8xx_ring_get_irq(struct intel_engine_cs *engine) +static void +i8xx_irq_enable(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - unsigned long flags; - - if (!intel_irqs_enabled(dev_priv)) - return false; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - dev_priv->irq_mask &= ~engine->irq_enable_mask; - I915_WRITE16(IMR, dev_priv->irq_mask); - POSTING_READ16(IMR); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - - return true; + dev_priv->irq_mask &= ~engine->irq_enable_mask; + I915_WRITE16(IMR, dev_priv->irq_mask); + POSTING_READ16(RING_IMR(engine->mmio_base)); } static void -i8xx_ring_put_irq(struct intel_engine_cs *engine) +i8xx_irq_disable(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - unsigned long flags; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - dev_priv->irq_mask |= engine->irq_enable_mask; - I915_WRITE16(IMR, dev_priv->irq_mask); - POSTING_READ16(IMR); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + dev_priv->irq_mask |= engine->irq_enable_mask; + I915_WRITE16(IMR, dev_priv->irq_mask); } static int @@ -1852,129 +1708,68 @@ i9xx_add_request(struct drm_i915_gem_request *req) intel_ring_emit(engine, MI_STORE_DWORD_INDEX); intel_ring_emit(engine, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(engine, 
i915_gem_request_get_seqno(req)); + intel_ring_emit(engine, req->seqno); intel_ring_emit(engine, MI_USER_INTERRUPT); __intel_ring_advance(engine); return 0; } -static bool -gen6_ring_get_irq(struct intel_engine_cs *engine) +static void +gen6_irq_enable(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - unsigned long flags; - - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return false; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - if (HAS_L3_DPF(dev_priv) && engine->id == RCS) - I915_WRITE_IMR(engine, - ~(engine->irq_enable_mask | - GT_PARITY_ERROR(dev_priv))); - else - I915_WRITE_IMR(engine, ~engine->irq_enable_mask); - gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - return true; + I915_WRITE_IMR(engine, + ~(engine->irq_enable_mask | + engine->irq_keep_mask)); + gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); } static void -gen6_ring_put_irq(struct intel_engine_cs *engine) +gen6_irq_disable(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - unsigned long flags; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - if (HAS_L3_DPF(dev_priv) && engine->id == RCS) - I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv)); - else - I915_WRITE_IMR(engine, ~0); - gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + I915_WRITE_IMR(engine, ~engine->irq_keep_mask); + gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); } -static bool -hsw_vebox_get_irq(struct intel_engine_cs *engine) +static void +hsw_vebox_irq_enable(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - unsigned long flags; - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return false; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - I915_WRITE_IMR(engine, ~engine->irq_enable_mask); - gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - - return true; + I915_WRITE_IMR(engine, ~engine->irq_enable_mask); + gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask); } static void -hsw_vebox_put_irq(struct intel_engine_cs *engine) +hsw_vebox_irq_disable(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - unsigned long flags; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - I915_WRITE_IMR(engine, ~0); - gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + I915_WRITE_IMR(engine, ~0); + gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask); } -static bool -gen8_ring_get_irq(struct intel_engine_cs *engine) +static void +gen8_irq_enable(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - unsigned long flags; - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return false; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - if (HAS_L3_DPF(dev_priv) && engine->id == RCS) { - I915_WRITE_IMR(engine, - ~(engine->irq_enable_mask | - GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); - } else { - I915_WRITE_IMR(engine, ~engine->irq_enable_mask); - } - POSTING_READ(RING_IMR(engine->mmio_base)); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - - return true; + I915_WRITE_IMR(engine, + ~(engine->irq_enable_mask | + engine->irq_keep_mask)); + 
POSTING_READ_FW(RING_IMR(engine->mmio_base)); } static void -gen8_ring_put_irq(struct intel_engine_cs *engine) +gen8_irq_disable(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - unsigned long flags; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - if (HAS_L3_DPF(dev_priv) && engine->id == RCS) { - I915_WRITE_IMR(engine, - ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); - } else { - I915_WRITE_IMR(engine, ~0); - } - POSTING_READ(RING_IMR(engine->mmio_base)); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + I915_WRITE_IMR(engine, ~engine->irq_keep_mask); } static int @@ -2093,7 +1888,7 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine) if (!dev_priv->status_page_dmah) return; - drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah); + drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah); engine->status_page.page_addr = NULL; } @@ -2119,7 +1914,7 @@ static int init_status_page(struct intel_engine_cs *engine) unsigned flags; int ret; - obj = i915_gem_object_create(engine->i915->dev, 4096); + obj = i915_gem_object_create(&engine->i915->drm, 4096); if (IS_ERR(obj)) { DRM_ERROR("Failed to allocate status page\n"); return PTR_ERR(obj); @@ -2168,7 +1963,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine) if (!dev_priv->status_page_dmah) { dev_priv->status_page_dmah = - drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE); + drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); if (!dev_priv->status_page_dmah) return -ENOMEM; } @@ -2301,7 +2096,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size) ring->last_retired_head = -1; intel_ring_update_space(ring); - ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring); + ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring); if (ret) { DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n", engine->name, ret); @@ -2321,6 +2116,57 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring) kfree(ring); } +static int intel_ring_context_pin(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + struct intel_context *ce = &ctx->engine[engine->id]; + int ret; + + lockdep_assert_held(&ctx->i915->drm.struct_mutex); + + if (ce->pin_count++) + return 0; + + if (ce->state) { + ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0); + if (ret) + goto error; + } + + /* The kernel context is only used as a placeholder for flushing the + * active context. It is never used for submitting user rendering and + * as such never requires the golden render context, and so we can skip + * emitting it when we switch to the kernel context. This is required + * as during eviction we cannot allocate and pin the renderstate in + * order to initialise the context. 
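
A rough sketch of how intel_ring_context_pin() and its unpin counterpart below are meant to pair up, assuming a hypothetical caller that already holds struct_mutex (the lockdep assertions enforce this). Since ce->pin_count is a simple refcount, only the first pin binds the state into the GGTT and only the last unpin releases it:

    ret = intel_ring_context_pin(ctx, engine);
    if (ret)
        return ret;

    /* ... emit commands that rely on ce->state staying bound ... */

    intel_ring_context_unpin(ctx, engine);
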
+ */ + if (ctx == ctx->i915->kernel_context) + ce->initialised = true; + + i915_gem_context_reference(ctx); + return 0; + +error: + ce->pin_count = 0; + return ret; +} + +static void intel_ring_context_unpin(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + struct intel_context *ce = &ctx->engine[engine->id]; + + lockdep_assert_held(&ctx->i915->drm.struct_mutex); + + if (--ce->pin_count) + return; + + if (ce->state) + i915_gem_object_ggtt_unpin(ce->state); + + i915_gem_context_unreference(ctx); +} + static int intel_init_ring_buffer(struct drm_device *dev, struct intel_engine_cs *engine) { @@ -2339,7 +2185,20 @@ static int intel_init_ring_buffer(struct drm_device *dev, memset(engine->semaphore.sync_seqno, 0, sizeof(engine->semaphore.sync_seqno)); - init_waitqueue_head(&engine->irq_queue); + ret = intel_engine_init_breadcrumbs(engine); + if (ret) + goto error; + + /* We may need to do things with the shrinker which + * require us to immediately switch back to the default + * context. This can cause a problem as pinning the + * default context also requires GTT space which may not + * be available. To avoid this we always pin the default + * context. + */ + ret = intel_ring_context_pin(dev_priv->kernel_context, engine); + if (ret) + goto error; ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE); if (IS_ERR(ringbuf)) { @@ -2408,6 +2267,10 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) i915_cmd_parser_fini_ring(engine); i915_gem_batch_pool_fini(&engine->batch_pool); + intel_engine_fini_breadcrumbs(engine); + + intel_ring_context_unpin(dev_priv->kernel_context, engine); + engine->i915 = NULL; } @@ -2603,10 +2466,19 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) memset(engine->semaphore.sync_seqno, 0, sizeof(engine->semaphore.sync_seqno)); - engine->set_seqno(engine, seqno); + intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); + if (engine->irq_seqno_barrier) + engine->irq_seqno_barrier(engine); engine->last_submitted_seqno = seqno; engine->hangcheck.seqno = seqno; + + /* After manually advancing the seqno, fake the interrupt in case + * there are any waiters for that seqno. + */ + rcu_read_lock(); + intel_engine_wakeup(engine); + rcu_read_unlock(); } static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, @@ -2614,32 +2486,38 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, { struct drm_i915_private *dev_priv = engine->i915; + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + /* Every tail move must follow the sequence below */ /* Disable notification that the ring is IDLE. The GT * will then assume that it is busy and bring it out of rc6. */ - I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, - _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, + _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); /* Clear the context id. Here be magic! */ - I915_WRITE64(GEN6_BSD_RNCID, 0x0); + I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0); /* Wait for the ring not to be idle, i.e. for it to wake up. 
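
With the rewrite of gen6_bsd_ring_write_tail() every access goes through the raw _FW register accessors, so the whole sequence has to sit inside one explicit forcewake bracket instead of paying the forcewake cost per access; the general shape, with reg and val as placeholders:

    intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

    /* raw accessors skip the implicit per-register forcewake handling */
    I915_WRITE_FW(reg, val);
    val = I915_READ_FW(reg);

    intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
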
*/ - if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & - GEN6_BSD_SLEEP_INDICATOR) == 0, - 50)) + if (intel_wait_for_register_fw(dev_priv, + GEN6_BSD_SLEEP_PSMI_CONTROL, + GEN6_BSD_SLEEP_INDICATOR, + 0, + 50)) DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); /* Now that the ring is fully powered up, update the tail */ - I915_WRITE_TAIL(engine, value); - POSTING_READ(RING_TAIL(engine->mmio_base)); + I915_WRITE_FW(RING_TAIL(engine->mmio_base), value); + POSTING_READ_FW(RING_TAIL(engine->mmio_base)); /* Let the ring send IDLE messages to the GT again, * and so let it sleep to conserve power when idle. */ - I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, - _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, + _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, @@ -2808,11 +2686,159 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, return 0; } +static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, + struct intel_engine_cs *engine) +{ + struct drm_i915_gem_object *obj; + int ret, i; + + if (!i915_semaphore_is_enabled(dev_priv)) + return; + + if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) { + obj = i915_gem_object_create(&dev_priv->drm, 4096); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); + i915.semaphores = 0; + } else { + i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK); + if (ret != 0) { + drm_gem_object_unreference(&obj->base); + DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n"); + i915.semaphores = 0; + } else { + dev_priv->semaphore_obj = obj; + } + } + } + + if (!i915_semaphore_is_enabled(dev_priv)) + return; + + if (INTEL_GEN(dev_priv) >= 8) { + u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); + + engine->semaphore.sync_to = gen8_ring_sync; + engine->semaphore.signal = gen8_xcs_signal; + + for (i = 0; i < I915_NUM_ENGINES; i++) { + u64 ring_offset; + + if (i != engine->id) + ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i); + else + ring_offset = MI_SEMAPHORE_SYNC_INVALID; + + engine->semaphore.signal_ggtt[i] = ring_offset; + } + } else if (INTEL_GEN(dev_priv) >= 6) { + engine->semaphore.sync_to = gen6_ring_sync; + engine->semaphore.signal = gen6_signal; + + /* + * The current semaphore is only applied on pre-gen8 + * platform. And there is no VCS2 ring on the pre-gen8 + * platform. So the semaphore between RCS and VCS2 is + * initialized as INVALID. Gen8 will initialize the + * sema between VCS2 and RCS later. 
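
As a worked instance of the gen8 branch above: with a single shared semaphore bo, every (signaller, target) engine pair owns one slot, so for the render engine the loop effectively computes:

    /* sketch: gen8 per-pair semaphore slots inside the one bo */
    u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);

    /* engine->id == RCS */
    engine->semaphore.signal_ggtt[VCS] = offset + GEN8_SEMAPHORE_OFFSET(RCS, VCS);
    engine->semaphore.signal_ggtt[RCS] = MI_SEMAPHORE_SYNC_INVALID; /* self */
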
+ */ + for (i = 0; i < I915_NUM_ENGINES; i++) { + static const struct { + u32 wait_mbox; + i915_reg_t mbox_reg; + } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = { + [RCS] = { + [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC }, + [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC }, + [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC }, + }, + [VCS] = { + [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC }, + [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC }, + [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC }, + }, + [BCS] = { + [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC }, + [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC }, + [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC }, + }, + [VECS] = { + [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC }, + [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC }, + [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC }, + }, + }; + u32 wait_mbox; + i915_reg_t mbox_reg; + + if (i == engine->id || i == VCS2) { + wait_mbox = MI_SEMAPHORE_SYNC_INVALID; + mbox_reg = GEN6_NOSYNC; + } else { + wait_mbox = sem_data[engine->id][i].wait_mbox; + mbox_reg = sem_data[engine->id][i].mbox_reg; + } + + engine->semaphore.mbox.wait[i] = wait_mbox; + engine->semaphore.mbox.signal[i] = mbox_reg; + } + } +} + +static void intel_ring_init_irq(struct drm_i915_private *dev_priv, + struct intel_engine_cs *engine) +{ + if (INTEL_GEN(dev_priv) >= 8) { + engine->irq_enable = gen8_irq_enable; + engine->irq_disable = gen8_irq_disable; + engine->irq_seqno_barrier = gen6_seqno_barrier; + } else if (INTEL_GEN(dev_priv) >= 6) { + engine->irq_enable = gen6_irq_enable; + engine->irq_disable = gen6_irq_disable; + engine->irq_seqno_barrier = gen6_seqno_barrier; + } else if (INTEL_GEN(dev_priv) >= 5) { + engine->irq_enable = gen5_irq_enable; + engine->irq_disable = gen5_irq_disable; + engine->irq_seqno_barrier = gen5_seqno_barrier; + } else if (INTEL_GEN(dev_priv) >= 3) { + engine->irq_enable = i9xx_irq_enable; + engine->irq_disable = i9xx_irq_disable; + } else { + engine->irq_enable = i8xx_irq_enable; + engine->irq_disable = i8xx_irq_disable; + } +} + +static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, + struct intel_engine_cs *engine) +{ + engine->init_hw = init_ring_common; + engine->write_tail = ring_write_tail; + + engine->add_request = i9xx_add_request; + if (INTEL_GEN(dev_priv) >= 6) + engine->add_request = gen6_add_request; + + if (INTEL_GEN(dev_priv) >= 8) + engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; + else if (INTEL_GEN(dev_priv) >= 6) + engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + else if (INTEL_GEN(dev_priv) >= 4) + engine->dispatch_execbuffer = i965_dispatch_execbuffer; + else if (IS_I830(dev_priv) || IS_845G(dev_priv)) + engine->dispatch_execbuffer = i830_dispatch_execbuffer; + else + engine->dispatch_execbuffer = i915_dispatch_execbuffer; + + intel_ring_init_irq(dev_priv, engine); + intel_ring_init_semaphores(dev_priv, engine); +} + int intel_init_render_ring_buffer(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine = &dev_priv->engine[RCS]; - struct drm_i915_gem_object *obj; int ret; engine->name = "render ring"; @@ -2821,139 +2847,49 @@ int 
intel_init_render_ring_buffer(struct drm_device *dev) engine->hw_id = 0; engine->mmio_base = RENDER_RING_BASE; - if (INTEL_GEN(dev_priv) >= 8) { - if (i915_semaphore_is_enabled(dev_priv)) { - obj = i915_gem_object_create(dev, 4096); - if (IS_ERR(obj)) { - DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); - i915.semaphores = 0; - } else { - i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK); - if (ret != 0) { - drm_gem_object_unreference(&obj->base); - DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n"); - i915.semaphores = 0; - } else - dev_priv->semaphore_obj = obj; - } - } + intel_ring_default_vfuncs(dev_priv, engine); + + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; + if (HAS_L3_DPF(dev_priv)) + engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + if (INTEL_GEN(dev_priv) >= 8) { engine->init_context = intel_rcs_ctx_init; engine->add_request = gen8_render_add_request; engine->flush = gen8_render_ring_flush; - engine->irq_get = gen8_ring_get_irq; - engine->irq_put = gen8_ring_put_irq; - engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (i915_semaphore_is_enabled(dev_priv)) { - WARN_ON(!dev_priv->semaphore_obj); - engine->semaphore.sync_to = gen8_ring_sync; + if (i915_semaphore_is_enabled(dev_priv)) engine->semaphore.signal = gen8_rcs_signal; - GEN8_RING_SEMAPHORE_INIT(engine); - } } else if (INTEL_GEN(dev_priv) >= 6) { engine->init_context = intel_rcs_ctx_init; - engine->add_request = gen6_add_request; engine->flush = gen7_render_ring_flush; if (IS_GEN6(dev_priv)) engine->flush = gen6_render_ring_flush; - engine->irq_get = gen6_ring_get_irq; - engine->irq_put = gen6_ring_put_irq; - engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (i915_semaphore_is_enabled(dev_priv)) { - engine->semaphore.sync_to = gen6_ring_sync; - engine->semaphore.signal = gen6_signal; - /* - * The current semaphore is only applied on pre-gen8 - * platform. And there is no VCS2 ring on the pre-gen8 - * platform. So the semaphore between RCS and VCS2 is - * initialized as INVALID. Gen8 will initialize the - * sema between VCS2 and RCS later. 
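
The net effect of this hunk is that render-engine setup now follows the same shape as the other engines: install the common vfuncs first, then layer the render-specific overrides on top. Condensed:

    intel_ring_default_vfuncs(dev_priv, engine);    /* common defaults */

    /* render-only overrides, chosen per generation */
    engine->init_context = intel_rcs_ctx_init;
    engine->flush = gen8_render_ring_flush;         /* e.g. on gen8+ */
    engine->init_hw = init_render_ring;
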
- */ - engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV; - engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB; - engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE; - engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; - engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC; - engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC; - engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; - engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; - } } else if (IS_GEN5(dev_priv)) { - engine->add_request = pc_render_add_request; engine->flush = gen4_render_ring_flush; - engine->get_seqno = pc_render_get_seqno; - engine->set_seqno = pc_render_set_seqno; - engine->irq_get = gen5_ring_get_irq; - engine->irq_put = gen5_ring_put_irq; - engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT | - GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; } else { - engine->add_request = i9xx_add_request; if (INTEL_GEN(dev_priv) < 4) engine->flush = gen2_render_ring_flush; else engine->flush = gen4_render_ring_flush; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (IS_GEN2(dev_priv)) { - engine->irq_get = i8xx_ring_get_irq; - engine->irq_put = i8xx_ring_put_irq; - } else { - engine->irq_get = i9xx_ring_get_irq; - engine->irq_put = i9xx_ring_put_irq; - } engine->irq_enable_mask = I915_USER_INTERRUPT; } - engine->write_tail = ring_write_tail; if (IS_HASWELL(dev_priv)) engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; - else if (IS_GEN8(dev_priv)) - engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - else if (INTEL_GEN(dev_priv) >= 6) - engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - else if (INTEL_GEN(dev_priv) >= 4) - engine->dispatch_execbuffer = i965_dispatch_execbuffer; - else if (IS_I830(dev_priv) || IS_845G(dev_priv)) - engine->dispatch_execbuffer = i830_dispatch_execbuffer; - else - engine->dispatch_execbuffer = i915_dispatch_execbuffer; + engine->init_hw = init_render_ring; engine->cleanup = render_ring_cleanup; - /* Workaround batchbuffer to combat CS tlb bug. 
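
Both scratch users are now funnelled through intel_init_pipe_control() with an explicit size (see the calls just below): 4096 bytes for the gen6+ scratch page, I830_WA_SIZE for this CS TLB workaround batch. Consumers keep addressing it through the engine; roughly, with the offset purely illustrative:

    /* sketch: a post-sync write targeting the per-engine scratch space */
    u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
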
*/ - if (HAS_BROKEN_CS_TLB(dev_priv)) { - obj = i915_gem_object_create(dev, I830_WA_SIZE); - if (IS_ERR(obj)) { - DRM_ERROR("Failed to allocate batch bo\n"); - return PTR_ERR(obj); - } - - ret = i915_gem_obj_ggtt_pin(obj, 0, 0); - if (ret != 0) { - drm_gem_object_unreference(&obj->base); - DRM_ERROR("Failed to ping batch bo\n"); - return ret; - } - - engine->scratch.obj = obj; - engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); - } - ret = intel_init_ring_buffer(dev, engine); if (ret) return ret; - if (INTEL_GEN(dev_priv) >= 5) { - ret = intel_init_pipe_control(engine); + if (INTEL_GEN(dev_priv) >= 6) { + ret = intel_init_pipe_control(engine, 4096); + if (ret) + return ret; + } else if (HAS_BROKEN_CS_TLB(dev_priv)) { + ret = intel_init_pipe_control(engine, I830_WA_SIZE); if (ret) return ret; } @@ -2963,7 +2899,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) int intel_init_bsd_ring_buffer(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine = &dev_priv->engine[VCS]; engine->name = "bsd ring"; @@ -2971,68 +2907,27 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) engine->exec_id = I915_EXEC_BSD; engine->hw_id = 1; - engine->write_tail = ring_write_tail; + intel_ring_default_vfuncs(dev_priv, engine); + if (INTEL_GEN(dev_priv) >= 6) { engine->mmio_base = GEN6_BSD_RING_BASE; /* gen6 bsd needs a special wa for tail updates */ if (IS_GEN6(dev_priv)) engine->write_tail = gen6_bsd_ring_write_tail; engine->flush = gen6_bsd_ring_flush; - engine->add_request = gen6_add_request; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (INTEL_GEN(dev_priv) >= 8) { + if (INTEL_GEN(dev_priv) >= 8) engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; - engine->irq_get = gen8_ring_get_irq; - engine->irq_put = gen8_ring_put_irq; - engine->dispatch_execbuffer = - gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev_priv)) { - engine->semaphore.sync_to = gen8_ring_sync; - engine->semaphore.signal = gen8_xcs_signal; - GEN8_RING_SEMAPHORE_INIT(engine); - } - } else { + else engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; - engine->irq_get = gen6_ring_get_irq; - engine->irq_put = gen6_ring_put_irq; - engine->dispatch_execbuffer = - gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev_priv)) { - engine->semaphore.sync_to = gen6_ring_sync; - engine->semaphore.signal = gen6_signal; - engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; - engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB; - engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE; - engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC; - engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC; - engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC; - engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC; - engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; - } - } } else { engine->mmio_base = BSD_RING_BASE; engine->flush = bsd_ring_flush; - engine->add_request = i9xx_add_request; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (IS_GEN5(dev_priv)) { + if (IS_GEN5(dev_priv)) engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; - engine->irq_get = gen5_ring_get_irq; - engine->irq_put = gen5_ring_put_irq; - } else { + else 
engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; - engine->irq_get = i9xx_ring_get_irq; - engine->irq_put = i9xx_ring_put_irq; - } - engine->dispatch_execbuffer = i965_dispatch_execbuffer; } - engine->init_hw = init_ring_common; return intel_init_ring_buffer(dev, engine); } @@ -3042,147 +2937,70 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) */ int intel_init_bsd2_ring_buffer(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; engine->name = "bsd2 ring"; engine->id = VCS2; engine->exec_id = I915_EXEC_BSD; engine->hw_id = 4; - - engine->write_tail = ring_write_tail; engine->mmio_base = GEN8_BSD2_RING_BASE; + + intel_ring_default_vfuncs(dev_priv, engine); + engine->flush = gen6_bsd_ring_flush; - engine->add_request = gen6_add_request; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; - engine->irq_get = gen8_ring_get_irq; - engine->irq_put = gen8_ring_put_irq; - engine->dispatch_execbuffer = - gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev_priv)) { - engine->semaphore.sync_to = gen8_ring_sync; - engine->semaphore.signal = gen8_xcs_signal; - GEN8_RING_SEMAPHORE_INIT(engine); - } - engine->init_hw = init_ring_common; return intel_init_ring_buffer(dev, engine); } int intel_init_blt_ring_buffer(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine = &dev_priv->engine[BCS]; engine->name = "blitter ring"; engine->id = BCS; engine->exec_id = I915_EXEC_BLT; engine->hw_id = 2; - engine->mmio_base = BLT_RING_BASE; - engine->write_tail = ring_write_tail; + + intel_ring_default_vfuncs(dev_priv, engine); + engine->flush = gen6_ring_flush; - engine->add_request = gen6_add_request; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (INTEL_GEN(dev_priv) >= 8) { + if (INTEL_GEN(dev_priv) >= 8) engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; - engine->irq_get = gen8_ring_get_irq; - engine->irq_put = gen8_ring_put_irq; - engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev_priv)) { - engine->semaphore.sync_to = gen8_ring_sync; - engine->semaphore.signal = gen8_xcs_signal; - GEN8_RING_SEMAPHORE_INIT(engine); - } - } else { + else engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; - engine->irq_get = gen6_ring_get_irq; - engine->irq_put = gen6_ring_put_irq; - engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev_priv)) { - engine->semaphore.signal = gen6_signal; - engine->semaphore.sync_to = gen6_ring_sync; - /* - * The current semaphore is only applied on pre-gen8 - * platform. And there is no VCS2 ring on the pre-gen8 - * platform. So the semaphore between BCS and VCS2 is - * initialized as INVALID. Gen8 will initialize the - * sema between BCS and VCS2 later. 
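
Each of these removed per-engine mbox assignments corresponds to one entry of the sem_data[][] table in intel_ring_init_semaphores(); for the blitter/render pairing deleted just below, for example:

    /* sem_data[BCS][RCS] == { MI_SEMAPHORE_SYNC_BR, GEN6_RBSYNC }, so: */
    engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
    engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
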
- */ - engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR; - engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV; - engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE; - engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC; - engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC; - engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC; - engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC; - engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; - } - } - engine->init_hw = init_ring_common; return intel_init_ring_buffer(dev, engine); } int intel_init_vebox_ring_buffer(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine = &dev_priv->engine[VECS]; engine->name = "video enhancement ring"; engine->id = VECS; engine->exec_id = I915_EXEC_VEBOX; engine->hw_id = 3; - engine->mmio_base = VEBOX_RING_BASE; - engine->write_tail = ring_write_tail; + + intel_ring_default_vfuncs(dev_priv, engine); + engine->flush = gen6_ring_flush; - engine->add_request = gen6_add_request; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; if (INTEL_GEN(dev_priv) >= 8) { engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; - engine->irq_get = gen8_ring_get_irq; - engine->irq_put = gen8_ring_put_irq; - engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev_priv)) { - engine->semaphore.sync_to = gen8_ring_sync; - engine->semaphore.signal = gen8_xcs_signal; - GEN8_RING_SEMAPHORE_INIT(engine); - } } else { engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; - engine->irq_get = hsw_vebox_get_irq; - engine->irq_put = hsw_vebox_put_irq; - engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev_priv)) { - engine->semaphore.sync_to = gen6_ring_sync; - engine->semaphore.signal = gen6_signal; - engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; - engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV; - engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB; - engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC; - engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC; - engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC; - engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC; - engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; - } + engine->irq_enable = hsw_vebox_irq_enable; + engine->irq_disable = hsw_vebox_irq_disable; } - engine->init_hw = init_ring_common; return intel_init_ring_buffer(dev, engine); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index b33c876fed20..12cb7ed90014 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -62,18 +62,6 @@ struct intel_hw_status_page { (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \ GEN8_SEMAPHORE_OFFSET(from, (__ring)->id)) -#define GEN8_RING_SEMAPHORE_INIT(e) do { \ - if (!dev_priv->semaphore_obj) { \ - break; \ - } \ - (e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \ - (e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \ - (e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \ - 
(e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \ - (e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \ - (e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \ - } while(0) - enum intel_ring_hangcheck_action { HANGCHECK_IDLE = 0, HANGCHECK_WAIT, @@ -86,8 +74,8 @@ enum intel_ring_hangcheck_action { struct intel_ring_hangcheck { u64 acthd; + unsigned long user_interrupts; u32 seqno; - unsigned user_interrupts; int score; enum intel_ring_hangcheck_action action; int deadlock; @@ -141,6 +129,8 @@ struct i915_ctx_workarounds { struct drm_i915_gem_object *obj; }; +struct drm_i915_gem_request; + struct intel_engine_cs { struct drm_i915_private *i915; const char *name; @@ -160,6 +150,39 @@ struct intel_engine_cs { struct intel_ringbuffer *buffer; struct list_head buffers; + /* Rather than have every client wait upon all user interrupts, + * with the herd waking after every interrupt and each doing the + * heavyweight seqno dance, we delegate the task (of being the + * bottom-half of the user interrupt) to the first client. After + * every interrupt, we wake up one client, who does the heavyweight + * coherent seqno read and either goes back to sleep (if incomplete), + * or wakes up all the completed clients in parallel, before then + * transferring the bottom-half status to the next client in the queue. + * + * Compared to walking the entire list of waiters in a single dedicated + * bottom-half, we reduce the latency of the first waiter by avoiding + * a context switch, but incur additional coherent seqno reads when + * following the chain of request breadcrumbs. Since it is most likely + * that we have a single client waiting on each seqno, then reducing + * the overhead of waking that client is much preferred. + */ + struct intel_breadcrumbs { + struct task_struct *irq_seqno_bh; /* bh for user interrupts */ + unsigned long irq_wakeups; + bool irq_posted; + + spinlock_t lock; /* protects the lists of requests */ + struct rb_root waiters; /* sorted by retirement, priority */ + struct rb_root signals; /* sorted by retirement */ + struct intel_wait *first_wait; /* oldest waiter by retirement */ + struct task_struct *signaler; /* used for fence signalling */ + struct drm_i915_gem_request *first_signal; + struct timer_list fake_irq; /* used after a missed interrupt */ + + bool irq_enabled : 1; + bool rpm_wakelock : 1; + } breadcrumbs; + /* * A pool of objects to use as shadow copies of client batch buffers * when the command parser is enabled. Prevents the client from @@ -170,11 +193,10 @@ struct intel_engine_cs { struct intel_hw_status_page status_page; struct i915_ctx_workarounds wa_ctx; - unsigned irq_refcount; /* protected by dev_priv->irq_lock */ - u32 irq_enable_mask; /* bitmask to enable ring interrupt */ - struct drm_i915_gem_request *trace_irq_req; - bool __must_check (*irq_get)(struct intel_engine_cs *ring); - void (*irq_put)(struct intel_engine_cs *ring); + u32 irq_keep_mask; /* always keep these interrupts */ + u32 irq_enable_mask; /* bitmask to enable ring interrupt */ + void (*irq_enable)(struct intel_engine_cs *ring); + void (*irq_disable)(struct intel_engine_cs *ring); int (*init_hw)(struct intel_engine_cs *ring); @@ -193,9 +215,6 @@ struct intel_engine_cs { * monotonic, even if not coherent. 
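
For the breadcrumbs machinery documented above, the waiter side (using the helpers declared at the bottom of this header) runs roughly as follows; the task state must be set before re-checking the seqno so a wakeup from the bottom-half cannot be lost. This is a simplified sketch only: the real request-wait path adds signal handling and hang detection on top.

    struct intel_wait wait;

    intel_wait_init(&wait, seqno);
    intel_engine_add_wait(engine, &wait);
    for (;;) {
        set_current_state(TASK_UNINTERRUPTIBLE);
        if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
            break;
        schedule();    /* woken by the current bottom-half */
    }
    __set_current_state(TASK_RUNNING);
    intel_engine_remove_wait(engine, &wait);
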
*/ void (*irq_seqno_barrier)(struct intel_engine_cs *ring); - u32 (*get_seqno)(struct intel_engine_cs *ring); - void (*set_seqno)(struct intel_engine_cs *ring, - u32 seqno); int (*dispatch_execbuffer)(struct drm_i915_gem_request *req, u64 offset, u32 length, unsigned dispatch_flags); @@ -272,7 +291,6 @@ struct intel_engine_cs { unsigned int idle_lite_restore_wa; bool disable_lite_restore_wa; u32 ctx_desc_template; - u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */ int (*emit_request)(struct drm_i915_gem_request *request); int (*emit_flush)(struct drm_i915_gem_request *request, u32 invalidate_domains, @@ -304,12 +322,9 @@ struct intel_engine_cs { * inspecting request list. */ u32 last_submitted_seqno; - unsigned user_interrupts; bool gpu_caches_dirty; - wait_queue_head_t irq_queue; - struct i915_gem_context *last_context; struct intel_ring_hangcheck hangcheck; @@ -317,7 +332,6 @@ struct intel_engine_cs { struct { struct drm_i915_gem_object *obj; u32 gtt_offset; - volatile u32 *cpu_page; } scratch; bool needs_cmd_parser; @@ -348,13 +362,13 @@ struct intel_engine_cs { }; static inline bool -intel_engine_initialized(struct intel_engine_cs *engine) +intel_engine_initialized(const struct intel_engine_cs *engine) { return engine->i915 != NULL; } static inline unsigned -intel_engine_flag(struct intel_engine_cs *engine) +intel_engine_flag(const struct intel_engine_cs *engine) { return 1 << engine->id; } @@ -456,15 +470,14 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine) } int __intel_ring_space(int head, int tail, int size); void intel_ring_update_space(struct intel_ringbuffer *ringbuf); -bool intel_engine_stopped(struct intel_engine_cs *engine); int __must_check intel_engine_idle(struct intel_engine_cs *engine); void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno); int intel_ring_flush_all_caches(struct drm_i915_gem_request *req); int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req); +int intel_init_pipe_control(struct intel_engine_cs *engine, int size); void intel_fini_pipe_control(struct intel_engine_cs *engine); -int intel_init_pipe_control(struct intel_engine_cs *engine); int intel_init_render_ring_buffer(struct drm_device *dev); int intel_init_bsd_ring_buffer(struct drm_device *dev); @@ -473,6 +486,10 @@ int intel_init_blt_ring_buffer(struct drm_device *dev); int intel_init_vebox_ring_buffer(struct drm_device *dev); u64 intel_ring_get_active_head(struct intel_engine_cs *engine); +static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine) +{ + return intel_read_status_page(engine, I915_GEM_HWS_INDEX); +} int init_workarounds_ring(struct intel_engine_cs *engine); @@ -495,4 +512,62 @@ static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine) return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR; } +/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */ +struct intel_wait { + struct rb_node node; + struct task_struct *tsk; + u32 seqno; +}; + +struct intel_signal_node { + struct rb_node node; + struct intel_wait wait; +}; + +int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); + +static inline void intel_wait_init(struct intel_wait *wait, u32 seqno) +{ + wait->tsk = current; + wait->seqno = seqno; +} + +static inline bool intel_wait_complete(const struct intel_wait *wait) +{ + return RB_EMPTY_NODE(&wait->node); +} + +bool intel_engine_add_wait(struct intel_engine_cs *engine, + struct intel_wait *wait); +void intel_engine_remove_wait(struct 
intel_engine_cs *engine, + struct intel_wait *wait); +void intel_engine_enable_signaling(struct drm_i915_gem_request *request); + +static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine) +{ + return READ_ONCE(engine->breadcrumbs.irq_seqno_bh); +} + +static inline bool intel_engine_wakeup(struct intel_engine_cs *engine) +{ + bool wakeup = false; + struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh); + /* Note that for this not to dangerously chase a dangling pointer, + * the caller is responsible for ensuring that the task remains valid for + * wake_up_process(), i.e. that the RCU grace period cannot expire. + * + * Also note that tsk is likely to be in !TASK_RUNNING state so an + * early test for tsk->state != TASK_RUNNING before wake_up_process() + * is unlikely to be beneficial. + */ + if (tsk) + wakeup = wake_up_process(tsk); + return wakeup; +} + +void intel_engine_enable_fake_irq(struct intel_engine_cs *engine); +void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); +unsigned int intel_kick_waiters(struct drm_i915_private *i915); +unsigned int intel_kick_signalers(struct drm_i915_private *i915); + #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index e856d49d6dc3..6b78295f53db 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -287,7 +287,7 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv, */ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; /* * After we re-enable the power well, if we touch VGA register 0x3d5 @@ -318,7 +318,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv) static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; /* * After we re-enable the power well, if we touch VGA register 0x3d5 @@ -365,8 +365,11 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, if (!is_enabled) { DRM_DEBUG_KMS("Enabling power well\n"); - if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & - HSW_PWR_WELL_STATE_ENABLED), 20)) + if (intel_wait_for_register(dev_priv, + HSW_PWR_WELL_DRIVER, + HSW_PWR_WELL_STATE_ENABLED, + HSW_PWR_WELL_STATE_ENABLED, + 20)) DRM_ERROR("Timeout enabling power well\n"); hsw_power_well_post_enable(dev_priv); } @@ -578,6 +581,7 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("Enabling DC9\n"); + intel_power_sequencer_reset(dev_priv); gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); } @@ -699,8 +703,11 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, switch (power_well->data) { case SKL_DISP_PW_1: - if (wait_for((I915_READ(SKL_FUSE_STATUS) & - SKL_FUSE_PG0_DIST_STATUS), 1)) { + if (intel_wait_for_register(dev_priv, + SKL_FUSE_STATUS, + SKL_FUSE_PG0_DIST_STATUS, + SKL_FUSE_PG0_DIST_STATUS, + 1)) { DRM_ERROR("PG0 not enabled\n"); return; } @@ -761,12 +768,18 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, if (check_fuse_status) { if (power_well->data == SKL_DISP_PW_1) { - if (wait_for((I915_READ(SKL_FUSE_STATUS) & - SKL_FUSE_PG1_DIST_STATUS), 1)) + if (intel_wait_for_register(dev_priv, + SKL_FUSE_STATUS, + SKL_FUSE_PG1_DIST_STATUS, + SKL_FUSE_PG1_DIST_STATUS, + 1)) DRM_ERROR("PG1 distributing status timeout\n"); } else if
(power_well->data == SKL_DISP_PW_2) { - if (wait_for((I915_READ(SKL_FUSE_STATUS) & - SKL_FUSE_PG2_DIST_STATUS), 1)) + if (intel_wait_for_register(dev_priv, + SKL_FUSE_STATUS, + SKL_FUSE_PG2_DIST_STATUS, + SKL_FUSE_PG2_DIST_STATUS, + 1)) DRM_ERROR("PG2 distributing status timeout\n"); } } @@ -917,7 +930,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); WARN_ON(dev_priv->cdclk_freq != - dev_priv->display.get_display_clock_speed(dev_priv->dev)); + dev_priv->display.get_display_clock_speed(&dev_priv->drm)); gen9_assert_dbuf_enabled(dev_priv); @@ -1075,7 +1088,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) * * CHV DPLL B/C have some issues if VGA mode is enabled. */ - for_each_pipe(dev_priv->dev, pipe) { + for_each_pipe(&dev_priv->drm, pipe) { u32 val = I915_READ(DPLL(pipe)); val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; @@ -1100,7 +1113,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) intel_hpd_init(dev_priv); - i915_redisable_vga_power_on(dev_priv->dev); + i915_redisable_vga_power_on(&dev_priv->drm); } static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) @@ -1110,9 +1123,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); /* make sure we're done processing display irqs */ - synchronize_irq(dev_priv->dev->irq); + synchronize_irq(dev_priv->drm.irq); - vlv_power_sequencer_reset(dev_priv); + intel_power_sequencer_reset(dev_priv); } static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, @@ -1205,7 +1218,6 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) u32 phy_control = dev_priv->chv_phy_control; u32 phy_status = 0; u32 phy_status_mask = 0xffffffff; - u32 tmp; /* * The BIOS can leave the PHY is some weird state @@ -1293,10 +1305,14 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) * The PHY may be busy with some initial calibration and whatnot, * so the power state can take a while to actually change. 
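
All of these conversions rely on the same contract: intel_wait_for_register() polls until (I915_READ(reg) & mask) == value, or fails with a timeout. With reg, BITS and timeout_ms as placeholders, the two common polarities read:

    /* wait for bits to clear */
    err = intel_wait_for_register(dev_priv, reg, BITS, 0, timeout_ms);

    /* wait for bits to become set: pass the mask as the value too */
    err = intel_wait_for_register(dev_priv, reg, BITS, BITS, timeout_ms);
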
*/ - if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10)) - WARN(phy_status != tmp, - "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", - tmp, phy_status, dev_priv->chv_phy_control); + if (intel_wait_for_register(dev_priv, + DISPLAY_PHY_STATUS, + phy_status_mask, + phy_status, + 10)) + DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", + I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask, + phy_status, dev_priv->chv_phy_control); } #undef BITS_SET @@ -1324,7 +1340,11 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, vlv_set_power_well(dev_priv, power_well, true); /* Poll for phypwrgood signal */ - if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1)) + if (intel_wait_for_register(dev_priv, + DISPLAY_PHY_STATUS, + PHY_POWERGOOD(phy), + PHY_POWERGOOD(phy), + 1)) DRM_ERROR("Display PHY %d is not power up\n", phy); mutex_lock(&dev_priv->sb_lock); @@ -2255,7 +2275,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) */ void intel_power_domains_fini(struct drm_i915_private *dev_priv) { - struct device *device = &dev_priv->dev->pdev->dev; + struct device *device = &dev_priv->drm.pdev->dev; /* * The i915.ko module is still not prepared to be loaded when @@ -2556,7 +2576,7 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) */ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct i915_power_domains *power_domains = &dev_priv->power_domains; power_domains->initializing = true; @@ -2618,7 +2638,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv) */ void intel_runtime_pm_get(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct device *device = &dev->pdev->dev; pm_runtime_get_sync(device); @@ -2639,7 +2659,7 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv) */ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct device *device = &dev->pdev->dev; if (IS_ENABLED(CONFIG_PM)) { @@ -2681,7 +2701,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) */ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct device *device = &dev->pdev->dev; assert_rpm_wakelock_held(dev_priv); @@ -2700,7 +2720,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv) */ void intel_runtime_pm_put(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct device *device = &dev->pdev->dev; assert_rpm_wakelock_held(dev_priv); @@ -2723,7 +2743,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv) */ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct device *device = &dev->pdev->dev; pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 02b4a6695528..e378f35365a2 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -240,7 +240,7 @@ intel_sdvo_create_enhance_property(struct 
intel_sdvo *intel_sdvo, static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) { struct drm_device *dev = intel_sdvo->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 bval = val, cval = val; int i; @@ -1195,7 +1195,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder) { struct drm_device *dev = intel_encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc); const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; struct drm_display_mode *mode = &crtc->config->base.mode; @@ -1330,7 +1330,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); u16 active_outputs = 0; u32 tmp; @@ -1353,7 +1353,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_sdvo_dtd dtd; int encoder_pixel_multiplier = 0; @@ -1436,7 +1436,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, static void intel_disable_sdvo(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); u32 temp; @@ -1471,7 +1471,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder) temp &= ~SDVO_ENABLE; intel_sdvo_write_sdvox(intel_sdvo, temp); - intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A); + intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A); intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } @@ -1489,7 +1489,7 @@ static void pch_post_disable_sdvo(struct intel_encoder *encoder) static void intel_enable_sdvo(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); u32 temp; @@ -1633,7 +1633,7 @@ intel_sdvo_get_edid(struct drm_connector *connector) static struct edid * intel_sdvo_get_analog_edid(struct drm_connector *connector) { - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); return drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, @@ -1916,7 +1916,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); struct drm_display_mode *newmode; 
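
These mechanical dev->dev_private to to_i915(dev) conversions are the flip side of embedding the drm_device inside drm_i915_private (visible in the &dev_priv->drm usage throughout this merge); with that embedding, to_i915() no longer needs the opaque pointer and is presumably little more than a container_of():

    /* sketch, assuming drm_i915_private embeds 'struct drm_device drm' */
    static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
    {
        return container_of(dev, struct drm_i915_private, drm);
    }
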
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", @@ -2001,7 +2001,7 @@ intel_sdvo_set_property(struct drm_connector *connector, { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); - struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); uint16_t temp_value; uint8_t cmd; int ret; @@ -2177,6 +2177,21 @@ done: #undef CHECK_PROPERTY } +static int +intel_sdvo_connector_register(struct drm_connector *connector) +{ + struct intel_sdvo *sdvo = intel_attached_sdvo(connector); + int ret; + + ret = intel_connector_register(connector); + if (ret) + return ret; + + return sysfs_create_link(&connector->kdev->kobj, + &sdvo->ddc.dev.kobj, + sdvo->ddc.dev.kobj.name); +} + static void intel_sdvo_connector_unregister(struct drm_connector *connector) { @@ -2193,6 +2208,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_sdvo_set_property, .atomic_get_property = intel_connector_atomic_get_property, + .late_register = intel_sdvo_connector_register, .early_unregister = intel_sdvo_connector_unregister, .destroy = intel_sdvo_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, @@ -2322,7 +2338,7 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) static u8 intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct sdvo_device_mapping *my_mapping, *other_mapping; if (sdvo->port == PORT_B) { @@ -2380,24 +2396,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector, connector->base.get_hw_state = intel_sdvo_connector_get_hw_state; intel_connector_attach_encoder(&connector->base, &encoder->base); - ret = drm_connector_register(drm_connector); - if (ret < 0) - goto err1; - - ret = sysfs_create_link(&drm_connector->kdev->kobj, - &encoder->ddc.dev.kobj, - encoder->ddc.dev.kobj.name); - if (ret < 0) - goto err2; return 0; - -err2: - drm_connector_unregister(drm_connector); -err1: - drm_connector_cleanup(drm_connector); - - return ret; } static void @@ -2524,7 +2524,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) return true; err: - drm_connector_unregister(connector); intel_sdvo_destroy(connector); return false; } @@ -2603,7 +2602,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) return true; err: - drm_connector_unregister(connector); intel_sdvo_destroy(connector); return false; } @@ -2954,7 +2952,7 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv, bool intel_sdvo_init(struct drm_device *dev, i915_reg_t sdvo_reg, enum port port) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *intel_encoder; struct intel_sdvo *intel_sdvo; int i; diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index c3998188cf35..1a840bf92eea 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -51,7 +51,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn, WARN_ON(!mutex_is_locked(&dev_priv->sb_lock)); - if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) { + if (intel_wait_for_register(dev_priv, + VLV_IOSF_DOORBELL_REQ, 
IOSF_SB_BUSY, 0, + 5)) { DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n", is_read ? "read" : "write"); return -EAGAIN; @@ -62,7 +64,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn, I915_WRITE(VLV_IOSF_DATA, *val); I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd); - if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) { + if (intel_wait_for_register(dev_priv, + VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0, + 5)) { DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n", is_read ? "read" : "write"); return -ETIMEDOUT; @@ -202,8 +206,9 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, u32 value = 0; WARN_ON(!mutex_is_locked(&dev_priv->sb_lock)); - if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, - 100)) { + if (intel_wait_for_register(dev_priv, + SBI_CTL_STAT, SBI_BUSY, 0, + 100)) { DRM_ERROR("timeout waiting for SBI to become ready\n"); return 0; } @@ -216,8 +221,11 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD; I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY); - if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, - 100)) { + if (intel_wait_for_register(dev_priv, + SBI_CTL_STAT, + SBI_BUSY | SBI_RESPONSE_FAIL, + 0, + 100)) { DRM_ERROR("timeout waiting for SBI to complete read transaction\n"); return 0; } @@ -232,8 +240,9 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, WARN_ON(!mutex_is_locked(&dev_priv->sb_lock)); - if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, - 100)) { + if (intel_wait_for_register(dev_priv, + SBI_CTL_STAT, SBI_BUSY, 0, + 100)) { DRM_ERROR("timeout waiting for SBI to become ready\n"); return; } @@ -247,8 +256,11 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR; I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp); - if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, - 100)) { + if (intel_wait_for_register(dev_priv, + SBI_CTL_STAT, + SBI_BUSY | SBI_RESPONSE_FAIL, + 0, + 100)) { DRM_ERROR("timeout waiting for SBI to complete write transaction\n"); return; } diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index fc654173c491..0de935ad01c2 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -199,7 +199,7 @@ skl_update_plane(struct drm_plane *drm_plane, const struct intel_plane_state *plane_state) { struct drm_device *dev = drm_plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(drm_plane); struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); @@ -303,7 +303,7 @@ static void skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) { struct drm_device *dev = dplane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(dplane); const int pipe = intel_plane->pipe; const int plane = intel_plane->plane + 1; @@ -317,7 +317,7 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) static void chv_update_csc(struct intel_plane *intel_plane, uint32_t format) { - struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); int plane = intel_plane->plane; 
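
The sideband changes above are all one mechanical rewrite; with reg, mask, value and timeout_ms as placeholders, the open-coded poll and the helper are equivalent:

    /* before: open-coded */
    err = wait_for((I915_READ(reg) & mask) == value, timeout_ms);

    /* after: shared helper */
    err = intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms);
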
/* Seems RGB data bypasses the CSC always */ @@ -359,7 +359,7 @@ vlv_update_plane(struct drm_plane *dplane, const struct intel_plane_state *plane_state) { struct drm_device *dev = dplane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(dplane); struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); @@ -485,7 +485,7 @@ static void vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) { struct drm_device *dev = dplane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(dplane); int pipe = intel_plane->pipe; int plane = intel_plane->plane; @@ -502,7 +502,7 @@ ivb_update_plane(struct drm_plane *plane, const struct intel_plane_state *plane_state) { struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); @@ -624,7 +624,7 @@ static void ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) { struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(plane); int pipe = intel_plane->pipe; @@ -643,7 +643,7 @@ ilk_update_plane(struct drm_plane *plane, const struct intel_plane_state *plane_state) { struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); @@ -753,7 +753,7 @@ static void ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) { struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(plane); int pipe = intel_plane->pipe; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 4ce70a9f9df2..49136ad5473e 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -826,7 +826,7 @@ static bool intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp = I915_READ(TV_CTL); if (!(tmp & TV_ENC_ENABLE)) @@ -841,7 +841,7 @@ static void intel_enable_tv(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); /* Prevents vblank waits from timing out in intel_tv_detect_type() */ intel_wait_for_vblank(encoder->base.dev, @@ -854,7 +854,7 @@ static void intel_disable_tv(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE); } @@ -1013,7 +1013,7 @@ static void set_color_conversion(struct drm_i915_private *dev_priv, 
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 4ce70a9f9df2..49136ad5473e 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -826,7 +826,7 @@ static bool
 intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 tmp = I915_READ(TV_CTL);
 
 	if (!(tmp & TV_ENC_ENABLE))
@@ -841,7 +841,7 @@ static void
 intel_enable_tv(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* Prevents vblank waits from timing out in intel_tv_detect_type() */
 	intel_wait_for_vblank(encoder->base.dev,
@@ -854,7 +854,7 @@ static void
 intel_disable_tv(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
 }
@@ -1013,7 +1013,7 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
 static void intel_tv_pre_enable(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_tv *intel_tv = enc_to_tv(encoder);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1173,7 +1173,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 	struct drm_crtc *crtc = connector->state->crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 tv_ctl, save_tv_ctl;
 	u32 tv_dac, save_tv_dac;
 	int type;
@@ -1501,6 +1501,7 @@ out:
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_tv_detect,
+	.late_register = intel_connector_register,
 	.early_unregister = intel_connector_unregister,
 	.destroy = intel_tv_destroy,
 	.set_property = intel_tv_set_property,
@@ -1522,7 +1523,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
 void
 intel_tv_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_connector *connector;
 	struct intel_tv *intel_tv;
 	struct intel_encoder *intel_encoder;
@@ -1641,5 +1642,4 @@ intel_tv_init(struct drm_device *dev)
 	drm_object_attach_property(&connector->base,
 			dev->mode_config.tv_bottom_margin_property,
 			intel_tv->margin[TV_MARGIN_BOTTOM]);
-	drm_connector_register(connector);
 }
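Note: besides the to_i915() conversions, the intel_tv.c hunks move connector registration out of intel_tv_init(): the explicit drm_connector_register() call is dropped and a ->late_register hook is wired up instead, paired with the existing ->early_unregister, so the DRM core registers the connector only once the whole device is registered. A simplified sketch of the core-side flow this assumes; the real drm_connector_register() also sets up sysfs and debugfs, which is elided here:

/* Simplified sketch, not the verbatim DRM code: drm_dev_register()
 * walks all connectors and registers each one, invoking the driver's
 * late_register hook last so userspace never observes a
 * half-initialised connector.  Teardown mirrors this through
 * early_unregister before the sysfs/debugfs nodes disappear. */
int drm_connector_register(struct drm_connector *connector)
{
	int ret = 0;

	/* ... sysfs and debugfs registration elided ... */

	if (connector->funcs->late_register)
		ret = connector->funcs->late_register(connector);

	return ret;
}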
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index c1ca458d688e..ff80a81b1a84 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1299,9 +1299,11 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
 
+		spin_lock_irq(&dev_priv->uncore.lock);
 		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
 		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
 		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
+		spin_unlock_irq(&dev_priv->uncore.lock);
 
 		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
 			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
@@ -1407,7 +1409,7 @@ static const struct register_whitelist {
 int i915_reg_read_ioctl(struct drm_device *dev,
 			void *data, struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_reg_read *reg = data;
 	struct register_whitelist const *entry = whitelist;
 	unsigned size;
@@ -1469,7 +1471,7 @@ static int i915_reset_complete(struct pci_dev *pdev)
 
 static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 
 	/* assert reset for at least 20 usec */
 	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
@@ -1488,14 +1490,14 @@ static int g4x_reset_complete(struct pci_dev *pdev)
 static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 
 	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
 	return wait_for(g4x_reset_complete(pdev), 500);
 }
 
 static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	int ret;
 
 	pci_write_config_byte(pdev, I915_GDRST,
@@ -1530,15 +1532,17 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
 	I915_WRITE(ILK_GDSR,
 		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
-	ret = wait_for((I915_READ(ILK_GDSR) &
-		ILK_GRDOM_RESET_ENABLE) == 0, 500);
+	ret = intel_wait_for_register(dev_priv,
+				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
+				      500);
 	if (ret)
 		return ret;
 
 	I915_WRITE(ILK_GDSR,
 		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
-	ret = wait_for((I915_READ(ILK_GDSR) &
-		ILK_GRDOM_RESET_ENABLE) == 0, 500);
+	ret = intel_wait_for_register(dev_priv,
+				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
+				      500);
 	if (ret)
 		return ret;
@@ -1551,20 +1555,16 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
 static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
 				u32 hw_domain_mask)
 {
-	int ret;
-
 	/* GEN6_GDRST is not in the gt power well, no need to check
 	 * for fifo space for the write or forcewake the chip for
 	 * the read
 	 */
 	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
 
-#define ACKED ((__raw_i915_read32(dev_priv, GEN6_GDRST) & hw_domain_mask) == 0)
 	/* Spin waiting for the device to ack the reset requests */
-	ret = wait_for(ACKED, 500);
-#undef ACKED
-
-	return ret;
+	return intel_wait_for_register_fw(dev_priv,
+					  GEN6_GDRST, hw_domain_mask, 0,
+					  500);
 }
 
 /**
@@ -1609,13 +1609,74 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
 	return ret;
 }
 
-static int wait_for_register_fw(struct drm_i915_private *dev_priv,
-				i915_reg_t reg,
-				const u32 mask,
-				const u32 value,
-				const unsigned long timeout_ms)
+/**
+ * intel_wait_for_register_fw - wait until register matches expected state
+ * @dev_priv: the i915 device
+ * @reg: the register to read
+ * @mask: mask to apply to register value
+ * @value: expected value
+ * @timeout_ms: timeout in millisecond
+ *
+ * This routine waits until the target register @reg contains the expected
+ * @value after applying the @mask, i.e. it waits until
+ *  (I915_READ_FW(@reg) & @mask) == @value
+ * Otherwise, the wait will timeout after @timeout_ms milliseconds.
+ *
+ * Note that this routine assumes the caller holds forcewake asserted, it is
+ * not suitable for very long waits. See intel_wait_for_register() if you
+ * wish to wait without holding forcewake for the duration (i.e. you expect
+ * the wait to be slow).
+ *
+ * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
+ */
+int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+			       i915_reg_t reg,
+			       const u32 mask,
+			       const u32 value,
+			       const unsigned long timeout_ms)
+{
+#define done ((I915_READ_FW(reg) & mask) == value)
+	int ret = wait_for_us(done, 2);
+	if (ret)
+		ret = wait_for(done, timeout_ms);
+	return ret;
+#undef done
+}
+
+/**
+ * intel_wait_for_register - wait until register matches expected state
+ * @dev_priv: the i915 device
+ * @reg: the register to read
+ * @mask: mask to apply to register value
+ * @value: expected value
+ * @timeout_ms: timeout in millisecond
+ *
+ * This routine waits until the target register @reg contains the expected
+ * @value after applying the @mask, i.e. it waits until
+ *  (I915_READ(@reg) & @mask) == @value
+ * Otherwise, the wait will timeout after @timeout_ms milliseconds.
+ *
+ * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
+ */
+int intel_wait_for_register(struct drm_i915_private *dev_priv,
+			    i915_reg_t reg,
+			    const u32 mask,
+			    const u32 value,
+			    const unsigned long timeout_ms)
 {
-	return wait_for((I915_READ_FW(reg) & mask) == value, timeout_ms);
+
+	unsigned fw =
+		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+	int ret;
+
+	intel_uncore_forcewake_get(dev_priv, fw);
+	ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
+	intel_uncore_forcewake_put(dev_priv, fw);
+	if (ret)
+		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
+			       timeout_ms);
+
+	return ret;
 }
 
 static int gen8_request_engine_reset(struct intel_engine_cs *engine)
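Note: the open-coded wait_for_register_fw() is promoted here to two exported helpers. intel_wait_for_register_fw() requires the caller to hold forcewake and first spins atomically for up to 2 usec before falling back to the sleeping wait_for(), keeping the common fast-ack case cheap; intel_wait_for_register() instead grabs the needed forcewake domains itself around the fast path. A hypothetical caller, for illustration only; MY_STATUS_REG and MY_READY_BIT are made-up names, not real i915 registers:

/* Illustration only: MY_STATUS_REG and MY_READY_BIT are made-up.
 * The pattern follows the helpers added above. */
static int example_wait_ready(struct drm_i915_private *dev_priv)
{
	int err;

	/* _fw variant: caller must already hold the relevant forcewake */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	err = intel_wait_for_register_fw(dev_priv,
					 MY_STATUS_REG,
					 MY_READY_BIT, MY_READY_BIT,
					 50); /* timeout in ms */
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	if (err) /* wait_for() reports timeouts as -ETIMEDOUT */
		DRM_ERROR("device never signalled ready\n");
	return err;
}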
@@ -1626,11 +1687,11 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
 		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
 
-	ret = wait_for_register_fw(dev_priv,
-				   RING_RESET_CTL(engine->mmio_base),
-				   RESET_CTL_READY_TO_RESET,
-				   RESET_CTL_READY_TO_RESET,
-				   700);
+	ret = intel_wait_for_register_fw(dev_priv,
+					 RING_RESET_CTL(engine->mmio_base),
+					 RESET_CTL_READY_TO_RESET,
+					 RESET_CTL_READY_TO_RESET,
+					 700);
 	if (ret)
 		DRM_ERROR("%s: reset request timeout\n", engine->name);
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 9094599a1150..33466bfc6440 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -309,6 +309,7 @@
 	INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
 	INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
 	INTEL_VGA_DEVICE(0x5902, info), /* DT  GT1 */ \
+	INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
 	INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
 	INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
@@ -322,15 +323,12 @@
 	INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
 
 #define INTEL_KBL_GT3_IDS(info) \
+	INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \
 	INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
-	INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \
-	INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */
+	INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */
 
 #define INTEL_KBL_GT4_IDS(info) \
-	INTEL_VGA_DEVICE(0x5932, info), /* DT  GT4 */ \
-	INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \
-	INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \
-	INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */
+	INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
 
 #define INTEL_KBL_IDS(info) \
 	INTEL_KBL_GT1_IDS(info), \
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index c17d63d8b543..d7e81a3886fd 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -361,6 +361,8 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_GPU_RESET	 35
 #define I915_PARAM_HAS_RESOURCE_STREAMER 36
 #define I915_PARAM_HAS_EXEC_SOFTPIN	 37
+#define I915_PARAM_HAS_POOLED_EU	 38
+#define I915_PARAM_MIN_EU_IN_POOL	 39
 
 typedef struct drm_i915_getparam {
 	__s32 param;
@@ -1171,6 +1173,7 @@ struct drm_i915_gem_context_param {
 #define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
 #define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
 #define I915_CONTEXT_PARAM_GTT_SIZE	0x3
+#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
 	__u64 value;
 };
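Note: the two new getparams let userspace detect pooled-EU support (Broxton) and the minimum number of EUs in a pool before configuring a context. A minimal userspace probe, assuming uapi/libdrm headers that carry these defines; the include path and /dev/dri/card0 node are illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>	/* via kernel uapi or libdrm headers */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* illustrative node */
	int has_pooled = 0, min_eu = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_POOLED_EU,
		.value = &has_pooled,
	};

	if (fd < 0)
		return 1;

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && has_pooled) {
		gp.param = I915_PARAM_MIN_EU_IN_POOL;
		gp.value = &min_eu;
		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
			printf("pooled EU: yes, min EUs in pool: %d\n", min_eu);
	} else {
		printf("pooled EU: no\n");
	}

	close(fd);
	return 0;
}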