-rw-r--r--  Documentation/DocBook/drm.tmpl | 2835
-rw-r--r--  drivers/acpi/video.c | 8
-rw-r--r--  drivers/char/agp/intel-gtt.c | 62
-rw-r--r--  drivers/gpu/drm/Kconfig | 19
-rw-r--r--  drivers/gpu/drm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 1
-rw-r--r--  drivers/gpu/drm/drm_cache.c | 28
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 139
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 22
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 208
-rw-r--r--  drivers/gpu/drm/drm_edid_load.c | 29
-rw-r--r--  drivers/gpu/drm/drm_edid_modes.h | 42
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 406
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 10
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 251
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 2
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 11
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 13
-rw-r--r--  drivers/gpu/drm/gma500/Makefile | 5
-rw-r--r--  drivers/gpu/drm/gma500/backlight.c | 45
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c | 72
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 236
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 1951
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 12
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 7
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 9
-rw-r--r--  drivers/gpu/drm/gma500/gem_glue.c | 90
-rw-r--r--  drivers/gpu/drm/gma500/gem_glue.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.c | 101
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.h | 46
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c | 13
-rw-r--r--  drivers/gpu/drm/gma500/mid_bios.c | 8
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/opregion.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 20
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_drv.h | 28
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 13
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_reg.h | 197
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 4
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c | 16
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/dvo.h | 16
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 21
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 17
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 23
-rw-r--r--  drivers/gpu/drm/i915/dvo_ns2501.c | 588
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c | 20
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 251
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 73
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 61
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 239
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 1514
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 65
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 174
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 44
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 391
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 144
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 194
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 328
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 220
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 165
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 144
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2129
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 401
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 147
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 115
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 221
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 99
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 60
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 420
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 152
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 210
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 70
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 663
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 367
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 282
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 61
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 7
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 134
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 96
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 37
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit.c | 115
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 52
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_shaders.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 192
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.c | 607
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.h | 445
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 93
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 411
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 43
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 83
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 602
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ioc32.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 43
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 65
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 49
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 104
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 49
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 113
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 15
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c | 2
-rw-r--r--  drivers/gpu/drm/shmobile/Kconfig | 10
-rw-r--r--  drivers/gpu/drm/shmobile/Makefile | 7
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_backlight.c | 90
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_backlight.h | 23
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 763
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.h | 60
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 361
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.h | 47
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_kms.c | 160
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_kms.h | 34
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_plane.c | 268
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_plane.h | 22
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_regs.h | 311
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 16
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 3
-rw-r--r--  drivers/gpu/drm/udl/udl_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 26
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 7
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 7
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 1
-rw-r--r--  drivers/staging/omapdrm/omap_connector.c | 5
-rw-r--r--  drivers/staging/omapdrm/omap_fbdev.c | 4
-rw-r--r--  include/drm/drmP.h | 1
-rw-r--r--  include/drm/drm_crtc.h | 24
-rw-r--r--  include/drm/drm_fb_cma_helper.h | 27
-rw-r--r--  include/drm/drm_fourcc.h | 2
-rw-r--r--  include/drm/drm_gem_cma_helper.h | 44
-rw-r--r--  include/drm/drm_sarea.h | 2
-rw-r--r--  include/drm/i915_drm.h | 36
-rw-r--r--  include/drm/intel-gtt.h | 10
-rw-r--r--  include/linux/platform_data/shmob_drm.h | 99
180 files changed, 17658 insertions, 5701 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 196b8b9dba11..b0300529ab13 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -6,11 +6,36 @@
<bookinfo>
<title>Linux DRM Developer's Guide</title>
+ <authorgroup>
+ <author>
+ <firstname>Jesse</firstname>
+ <surname>Barnes</surname>
+ <contrib>Initial version</contrib>
+ <affiliation>
+ <orgname>Intel Corporation</orgname>
+ <address>
+ <email>jesse.barnes@intel.com</email>
+ </address>
+ </affiliation>
+ </author>
+ <author>
+ <firstname>Laurent</firstname>
+ <surname>Pinchart</surname>
+ <contrib>Driver internals</contrib>
+ <affiliation>
+ <orgname>Ideas on board SPRL</orgname>
+ <address>
+ <email>laurent.pinchart@ideasonboard.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
<copyright>
<year>2008-2009</year>
- <holder>
- Intel Corporation (Jesse Barnes &lt;jesse.barnes@intel.com&gt;)
- </holder>
+ <year>2012</year>
+ <holder>Intel Corporation</holder>
+ <holder>Laurent Pinchart</holder>
</copyright>
<legalnotice>
@@ -20,6 +45,17 @@
the kernel source COPYING file.
</para>
</legalnotice>
+
+ <revhistory>
+ <!-- Put document revisions here, newest first. -->
+ <revision>
+ <revnumber>1.0</revnumber>
+ <date>2012-07-13</date>
+ <authorinitials>LP</authorinitials>
+ <revremark>Added extensive documentation about driver internals.
+ </revremark>
+ </revision>
+ </revhistory>
</bookinfo>
<toc></toc>
@@ -72,342 +108,361 @@
submission &amp; fencing, suspend/resume support, and DMA
services.
</para>
- <para>
- The core of every DRM driver is struct drm_driver. Drivers
- typically statically initialize a drm_driver structure,
- then pass it to drm_init() at load time.
- </para>
<!-- Internals: driver init -->
<sect1>
- <title>Driver initialization</title>
- <para>
- Before calling the DRM initialization routines, the driver must
- first create and fill out a struct drm_driver structure.
- </para>
- <programlisting>
- static struct drm_driver driver = {
- /* Don't use MTRRs here; the Xserver or userspace app should
- * deal with them for Intel hardware.
- */
- .driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET,
- .load = i915_driver_load,
- .unload = i915_driver_unload,
- .firstopen = i915_driver_firstopen,
- .lastclose = i915_driver_lastclose,
- .preclose = i915_driver_preclose,
- .save = i915_save,
- .restore = i915_restore,
- .device_is_agp = i915_driver_device_is_agp,
- .get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i915_enable_vblank,
- .disable_vblank = i915_disable_vblank,
- .irq_preinstall = i915_driver_irq_preinstall,
- .irq_postinstall = i915_driver_irq_postinstall,
- .irq_uninstall = i915_driver_irq_uninstall,
- .irq_handler = i915_driver_irq_handler,
- .reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
- .fb_probe = intelfb_probe,
- .fb_remove = intelfb_remove,
- .fb_resize = intelfb_resize,
- .master_create = i915_master_create,
- .master_destroy = i915_master_destroy,
-#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = i915_debugfs_init,
- .debugfs_cleanup = i915_debugfs_cleanup,
-#endif
- .gem_init_object = i915_gem_init_object,
- .gem_free_object = i915_gem_free_object,
- .gem_vm_ops = &amp;i915_gem_vm_ops,
- .ioctls = i915_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = i915_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = probe,
- .remove = __devexit_p(drm_cleanup_pci),
- },
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
- };
- </programlisting>
- <para>
- In the example above, taken from the i915 DRM driver, the driver
- sets several flags indicating what core features it supports;
- we go over the individual callbacks in later sections. Since
- flags indicate which features your driver supports to the DRM
- core, you need to set most of them prior to calling drm_init(). Some,
- like DRIVER_MODESET can be set later based on user supplied parameters,
- but that's the exception rather than the rule.
- </para>
- <variablelist>
- <title>Driver flags</title>
- <varlistentry>
- <term>DRIVER_USE_AGP</term>
- <listitem><para>
- Driver uses AGP interface
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_REQUIRE_AGP</term>
- <listitem><para>
- Driver needs AGP interface to function.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_USE_MTRR</term>
- <listitem>
- <para>
- Driver uses MTRR interface for mapping memory. Deprecated.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_PCI_DMA</term>
- <listitem><para>
- Driver is capable of PCI DMA. Deprecated.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_SG</term>
- <listitem><para>
- Driver can perform scatter/gather DMA. Deprecated.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_HAVE_DMA</term>
- <listitem><para>Driver supports DMA. Deprecated.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
- <listitem>
- <para>
- DRIVER_HAVE_IRQ indicates whether the driver has an IRQ
- handler. DRIVER_IRQ_SHARED indicates whether the device &amp;
- handler support shared IRQs (note that this is required of
- PCI drivers).
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_DMA_QUEUE</term>
- <listitem>
- <para>
- Should be set if the driver queues DMA requests and completes them
- asynchronously. Deprecated.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_FB_DMA</term>
- <listitem>
- <para>
- Driver supports DMA to/from the framebuffer. Deprecated.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_MODESET</term>
- <listitem>
- <para>
- Driver supports mode setting interfaces.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>
- In this specific case, the driver requires AGP and supports
- IRQs. DMA, as discussed later, is handled by device-specific ioctls
- in this case. It also supports the kernel mode setting APIs, though
- unlike in the actual i915 driver source, this example unconditionally
- exports KMS capability.
+ <title>Driver Initialization</title>
+ <para>
+ At the core of every DRM driver is a <structname>drm_driver</structname>
+ structure. Drivers typically statically initialize a drm_driver structure,
+ and then pass it to one of the <function>drm_*_init()</function> functions
+ to register it with the DRM subsystem.
</para>
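+    <para>
+      As an illustration only, a minimal KMS driver using GEM could statically
+      initialize its <structname>drm_driver</structname> instance as sketched
+      below; the <function>foo_*</function> symbols and the
+      <structname>foo_pci_driver</structname> structure are hypothetical
+      driver-specific implementations, not part of the DRM API:
+    </para>
+    <programlisting>
+static struct drm_driver foo_driver = {
+	.driver_features = DRIVER_GEM | DRIVER_MODESET,
+	.load = foo_driver_load,
+	.unload = foo_driver_unload,
+	.gem_free_object = foo_gem_free_object,
+	.gem_vm_ops = &amp;foo_gem_vm_ops,
+	.fops = &amp;foo_driver_fops,
+	.name = "foo",
+	.desc = "Foo Graphics",
+	.date = "20120713",
+	.major = 1,
+	.minor = 0,
+};
+
+static int __init foo_init(void)
+{
+	/* Register the driver with the DRM core; a PCI driver passes its
+	 * struct pci_driver here.
+	 */
+	return drm_pci_init(&amp;foo_driver, &amp;foo_pci_driver);
+}
+</programlisting>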
- </sect1>
-
- <!-- Internals: driver load -->
-
- <sect1>
- <title>Driver load</title>
- <para>
- In the previous section, we saw what a typical drm_driver
- structure might look like. One of the more important fields in
- the structure is the hook for the load function.
- </para>
- <programlisting>
- static struct drm_driver driver = {
- ...
- .load = i915_driver_load,
- ...
- };
- </programlisting>
- <para>
- The load function has many responsibilities: allocating a driver
- private structure, specifying supported performance counters,
- configuring the device (e.g. mapping registers &amp; command
- buffers), initializing the memory manager, and setting up the
- initial output configuration.
- </para>
- <para>
- If compatibility is a concern (e.g. with drivers converted over
- to the new interfaces from the old ones), care must be taken to
- prevent device initialization and control that is incompatible with
- currently active userspace drivers. For instance, if user
- level mode setting drivers are in use, it would be problematic
- to perform output discovery &amp; configuration at load time.
- Likewise, if user-level drivers unaware of memory management are
- in use, memory management and command buffer setup may need to
- be omitted. These requirements are driver-specific, and care
- needs to be taken to keep both old and new applications and
- libraries working. The i915 driver supports the "modeset"
- module parameter to control whether advanced features are
- enabled at load time or in legacy fashion.
+ <para>
+ The <structname>drm_driver</structname> structure contains static
+ information that describes the driver and features it supports, and
+ pointers to methods that the DRM core will call to implement the DRM API.
+ We will first go through the <structname>drm_driver</structname> static
+ information fields, and will then describe individual operations in
+ detail as they get used in later sections.
</para>
-
<sect2>
- <title>Driver private &amp; performance counters</title>
- <para>
- The driver private hangs off the main drm_device structure and
- can be used for tracking various device-specific bits of
- information, like register offsets, command buffer status,
- register state for suspend/resume, etc. At load time, a
- driver may simply allocate one and set drm_device.dev_priv
- appropriately; it should be freed and drm_device.dev_priv set
- to NULL when the driver is unloaded.
- </para>
+ <title>Driver Information</title>
+ <sect3>
+ <title>Driver Features</title>
+ <para>
+ Drivers inform the DRM core about their requirements and supported
+ features by setting appropriate flags in the
+ <structfield>driver_features</structfield> field. Since those flags
+ influence the DRM core behaviour from registration time onwards, most of
+ them must be set before registering the
+ <structname>drm_driver</structname> instance.
+ </para>
+ <synopsis>u32 driver_features;</synopsis>
+ <variablelist>
+ <title>Driver Feature Flags</title>
+ <varlistentry>
+ <term>DRIVER_USE_AGP</term>
+ <listitem><para>
+ Driver uses AGP interface, the DRM core will manage AGP resources.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_REQUIRE_AGP</term>
+ <listitem><para>
+ Driver needs AGP interface to function. AGP initialization failure
+ will become a fatal error.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_USE_MTRR</term>
+ <listitem><para>
+ Driver uses MTRR interface for mapping memory, the DRM core will
+ manage MTRR resources. Deprecated.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_PCI_DMA</term>
+ <listitem><para>
+ Driver is capable of PCI DMA, mapping of PCI DMA buffers to
+ userspace will be enabled. Deprecated.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_SG</term>
+ <listitem><para>
+ Driver can perform scatter/gather DMA, allocation and mapping of
+ scatter/gather buffers will be enabled. Deprecated.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_HAVE_DMA</term>
+ <listitem><para>
+ Driver supports DMA, the userspace DMA API will be supported.
+ Deprecated.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
+ <listitem><para>
+ DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler. The
+ DRM core will automatically register an interrupt handler when the
+ flag is set. DRIVER_IRQ_SHARED indicates whether the device &amp;
+ handler support shared IRQs (note that this is required of PCI
+ drivers).
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_IRQ_VBL</term>
+ <listitem><para>Unused. Deprecated.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_DMA_QUEUE</term>
+ <listitem><para>
+ Should be set if the driver queues DMA requests and completes them
+ asynchronously. Deprecated.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_FB_DMA</term>
+ <listitem><para>
+ Driver supports DMA to/from the framebuffer, mapping of framebuffer
+ DMA buffers to userspace will be supported. Deprecated.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_IRQ_VBL2</term>
+ <listitem><para>Unused. Deprecated.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_GEM</term>
+ <listitem><para>
+ Driver uses the GEM memory manager.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_MODESET</term>
+ <listitem><para>
+ Driver supports mode setting interfaces (KMS).
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_PRIME</term>
+ <listitem><para>
+ Driver implements DRM PRIME buffer sharing.
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+ </sect3>
+ <sect3>
+ <title>Major, Minor and Patchlevel</title>
+ <synopsis>int major;
+int minor;
+int patchlevel;</synopsis>
+ <para>
+ The DRM core identifies driver versions by a major, minor and patch
+ level triplet. The information is printed to the kernel log at
+ initialization time and passed to userspace through the
+ DRM_IOCTL_VERSION ioctl.
+ </para>
+ <para>
+ The major and minor numbers are also used to verify the requested driver
+ API version passed to DRM_IOCTL_SET_VERSION. When the driver API changes
+ between minor versions, applications can call DRM_IOCTL_SET_VERSION to
+ select a specific version of the API. If the requested major isn't equal
+ to the driver major, or the requested minor is larger than the driver
+ minor, the DRM_IOCTL_SET_VERSION call will return an error. Otherwise
+ the driver's set_version() method will be called with the requested
+ version.
+ </para>
+ </sect3>
+ <sect3>
+ <title>Name, Description and Date</title>
+ <synopsis>char *name;
+char *desc;
+char *date;</synopsis>
+ <para>
+ The driver name is printed to the kernel log at initialization time,
+ used for IRQ registration and passed to userspace through
+ DRM_IOCTL_VERSION.
+ </para>
+ <para>
+ The driver description is a purely informative string passed to
+ userspace through the DRM_IOCTL_VERSION ioctl and otherwise unused by
+ the kernel.
+ </para>
+ <para>
+ The driver date, formatted as YYYYMMDD, is meant to identify the date of
+ the latest modification to the driver. However, as most drivers fail to
+ update it, its value is mostly useless. The DRM core prints it to the
+ kernel log at initialization time and passes it to userspace through the
+ DRM_IOCTL_VERSION ioctl.
+ </para>
+ </sect3>
+ </sect2>
+ <sect2>
+ <title>Driver Load</title>
<para>
- The DRM supports several counters which may be used for rough
- performance characterization. Note that the DRM stat counter
- system is not often used by applications, and supporting
- additional counters is completely optional.
+ The <methodname>load</methodname> method is the driver and device
+ initialization entry point. The method is responsible for allocating and
+ initializing driver private data, specifying supported performance
+ counters, performing resource allocation and mapping (e.g. acquiring
+ clocks, mapping registers or allocating command buffers), initializing
+ the memory manager (<xref linkend="drm-memory-management"/>), installing
+ the IRQ handler (<xref linkend="drm-irq-registration"/>), setting up
+ vertical blanking handling (<xref linkend="drm-vertical-blank"/>), mode
+ setting (<xref linkend="drm-mode-setting"/>) and initial output
+ configuration (<xref linkend="drm-kms-init"/>).
</para>
+ <note><para>
+ If compatibility is a concern (e.g. with drivers converted over from
+ User Mode Setting to Kernel Mode Setting), care must be taken to prevent
+ device initialization and control that is incompatible with currently
+ active userspace drivers. For instance, if user level mode setting
+ drivers are in use, it would be problematic to perform output discovery
+ &amp; configuration at load time. Likewise, if user-level drivers
+ unaware of memory management are in use, memory management and command
+ buffer setup may need to be omitted. These requirements are
+ driver-specific, and care needs to be taken to keep both old and new
+ applications and libraries working.
+ </para></note>
+ <synopsis>int (*load) (struct drm_device *, unsigned long flags);</synopsis>
<para>
- These interfaces are deprecated and should not be used. If performance
- monitoring is desired, the developer should investigate and
- potentially enhance the kernel perf and tracing infrastructure to export
- GPU related performance information for consumption by performance
- monitoring tools and applications.
+ The method takes two arguments, a pointer to the newly created
+ <structname>drm_device</structname> and flags. The flags are used to
+ pass the <structfield>driver_data</structfield> field of the device id
+ corresponding to the device passed to <function>drm_*_init()</function>.
+ Only PCI devices currently use this; USB and platform DRM drivers have
+ their <methodname>load</methodname> method called with flags set to 0.
</para>
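+      <para>
+        The skeleton below sketches such a <methodname>load</methodname>
+        implementation for the hypothetical foo driver; the
+        <structname>foo_device</structname> private structure is assumed for
+        illustration:
+      </para>
+      <programlisting>
+static int foo_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct foo_device *fdev;
+
+	/* Allocate the driver private data and hang it off the device. */
+	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
+	if (fdev == NULL)
+		return -ENOMEM;
+
+	dev->dev_private = fdev;
+
+	/* Resource mapping, memory manager, IRQ, vertical blanking and
+	 * mode setting initialization, as described in the following
+	 * sections, would go here.
+	 */
+
+	return 0;
+}
+</programlisting>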
+ <sect3>
+ <title>Driver Private &amp; Performance Counters</title>
+ <para>
+ The driver private hangs off the main
+ <structname>drm_device</structname> structure and can be used for
+ tracking various device-specific bits of information, like register
+ offsets, command buffer status, register state for suspend/resume, etc.
+ At load time, a driver may simply allocate one and set
+ <structname>drm_device</structname>.<structfield>dev_priv</structfield>
+ appropriately; it should be freed and
+ <structname>drm_device</structname>.<structfield>dev_priv</structfield>
+ set to NULL when the driver is unloaded.
+ </para>
+ <para>
+ DRM supports several counters which were used for rough performance
+ characterization. This stat counter system is deprecated and should not
+ be used. If performance monitoring is desired, the developer should
+ investigate and potentially enhance the kernel perf and tracing
+ infrastructure to export GPU related performance information for
+ consumption by performance monitoring tools and applications.
+ </para>
+ </sect3>
+ <sect3 id="drm-irq-registration">
+ <title>IRQ Registration</title>
+ <para>
+ The DRM core tries to facilitate IRQ handler registration and
+ unregistration by providing <function>drm_irq_install</function> and
+ <function>drm_irq_uninstall</function> functions. Those functions only
+ support a single interrupt per device.
+ </para>
+ <!--!Fdrivers/char/drm/drm_irq.c drm_irq_install-->
+ <para>
+ Both functions get the device IRQ by calling
+ <function>drm_dev_to_irq</function>. This inline function will call a
+ bus-specific operation to retrieve the IRQ number. For platform devices,
+ <function>platform_get_irq</function>(..., 0) is used to retrieve the
+ IRQ number.
+ </para>
+ <para>
+ <function>drm_irq_install</function> starts by calling the
+ <methodname>irq_preinstall</methodname> driver operation. The operation
+ is optional; when implemented, it must make sure that the interrupt will
+ not get fired, by clearing all pending interrupt flags or by disabling
+ the interrupt.
+ </para>
+ <para>
+ The IRQ will then be requested by a call to
+ <function>request_irq</function>. If the DRIVER_IRQ_SHARED driver
+ feature flag is set, a shared (IRQF_SHARED) IRQ handler will be
+ requested.
+ </para>
+ <para>
+ The IRQ handler function must be provided as the mandatory irq_handler
+ driver operation. It will get passed directly to
+ <function>request_irq</function> and thus has the same prototype as all
+ IRQ handlers. It will get called with a pointer to the DRM device as the
+ second argument.
+ </para>
+ <para>
+ Finally the function calls the optional
+ <methodname>irq_postinstall</methodname> driver operation. The operation
+ usually enables interrupts (excluding the vblank interrupt, which is
+ enabled separately), but drivers may choose to enable/disable interrupts
+ at a different time.
+ </para>
+ <para>
+ <function>drm_irq_uninstall</function> is similarly used to uninstall an
+ IRQ handler. It starts by waking up all processes waiting on a vblank
+ interrupt to make sure they don't hang, and then calls the optional
+ <methodname>irq_uninstall</methodname> driver operation. The operation
+ must disable all hardware interrupts. Finally the function frees the IRQ
+ by calling <function>free_irq</function>.
+ </para>
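+        <para>
+          Putting this together, a driver sets the IRQ operations in its
+          <structname>drm_driver</structname> instance and installs the
+          handler at load time, for instance as follows (the
+          <function>foo_irq_*</function> operations are hypothetical):
+        </para>
+        <programlisting>
+static struct drm_driver foo_driver = {
+	...
+	.irq_preinstall = foo_irq_preinstall,
+	.irq_postinstall = foo_irq_postinstall,
+	.irq_uninstall = foo_irq_uninstall,
+	.irq_handler = foo_irq_handler,
+	...
+};
+
+/* In the load method, once interrupts can safely be enabled: */
+ret = drm_irq_install(dev);
+if (ret &lt; 0)
+	return ret;
+</programlisting>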
+ </sect3>
+ <sect3>
+ <title>Memory Manager Initialization</title>
+ <para>
+ Every DRM driver requires a memory manager which must be initialized at
+ load time. DRM currently contains two memory managers, the Translation
+ Table Manager (TTM) and the Graphics Execution Manager (GEM).
+ This document describes the use of the GEM memory manager only. See
+ <xref linkend="drm-memory-management"/> for details.
+ </para>
+ </sect3>
+ <sect3>
+ <title>Miscellaneous Device Configuration</title>
+ <para>
+ Another task that may be necessary for PCI devices during configuration
+ is mapping the video BIOS. On many devices, the VBIOS describes device
+ configuration, LCD panel timings (if any), and contains flags indicating
+ device state. Mapping the BIOS can be done using the pci_map_rom() call,
+ a convenience function that takes care of mapping the actual ROM,
+ whether it has been shadowed into memory (typically at address 0xc0000)
+ or exists on the PCI device in the ROM BAR. Note that after the ROM has
+ been mapped and any necessary information has been extracted, it should
+ be unmapped; on many devices, the ROM address decoder is shared with
+ other BARs, so leaving it mapped could cause undesired behaviour like
+ hangs or memory corruption.
+ <!--!Fdrivers/pci/rom.c pci_map_rom-->
+ </para>
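+        <para>
+          A sketch of the corresponding load-time code, assuming
+          <parameter>pdev</parameter> points to the PCI device:
+        </para>
+        <programlisting>
+size_t size;
+void __iomem *rom;
+
+rom = pci_map_rom(pdev, &amp;size);
+if (rom) {
+	/* Parse the VBIOS and extract the required information here. */
+
+	/* Unmap the ROM as soon as possible, as the address decoder may
+	 * be shared with other BARs.
+	 */
+	pci_unmap_rom(pdev, rom);
+}
+</programlisting>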
+ </sect3>
</sect2>
+ </sect1>
- <sect2>
- <title>Configuring the device</title>
- <para>
- Obviously, device configuration is device-specific.
- However, there are several common operations: finding a
- device's PCI resources, mapping them, and potentially setting
- up an IRQ handler.
- </para>
- <para>
- Finding &amp; mapping resources is fairly straightforward. The
- DRM wrapper functions, drm_get_resource_start() and
- drm_get_resource_len(), may be used to find BARs on the given
- drm_device struct. Once those values have been retrieved, the
- driver load function can call drm_addmap() to create a new
- mapping for the BAR in question. Note that you probably want a
- drm_local_map_t in your driver private structure to track any
- mappings you create.
-<!-- !Fdrivers/gpu/drm/drm_bufs.c drm_get_resource_* -->
-<!-- !Finclude/drm/drmP.h drm_local_map_t -->
- </para>
- <para>
- if compatibility with other operating systems isn't a concern
- (DRM drivers can run under various BSD variants and OpenSolaris),
- native Linux calls may be used for the above, e.g. pci_resource_*
- and iomap*/iounmap. See the Linux device driver book for more
- info.
- </para>
- <para>
- Once you have a register map, you may use the DRM_READn() and
- DRM_WRITEn() macros to access the registers on your device, or
- use driver-specific versions to offset into your MMIO space
- relative to a driver-specific base pointer (see I915_READ for
- an example).
- </para>
- <para>
- If your device supports interrupt generation, you may want to
- set up an interrupt handler when the driver is loaded. This
- is done using the drm_irq_install() function. If your device
- supports vertical blank interrupts, it should call
- drm_vblank_init() to initialize the core vblank handling code before
- enabling interrupts on your device. This ensures the vblank related
- structures are allocated and allows the core to handle vblank events.
- </para>
-<!--!Fdrivers/char/drm/drm_irq.c drm_irq_install-->
- <para>
- Once your interrupt handler is registered (it uses your
- drm_driver.irq_handler as the actual interrupt handling
- function), you can safely enable interrupts on your device,
- assuming any other state your interrupt handler uses is also
- initialized.
- </para>
- <para>
- Another task that may be necessary during configuration is
- mapping the video BIOS. On many devices, the VBIOS describes
- device configuration, LCD panel timings (if any), and contains
- flags indicating device state. Mapping the BIOS can be done
- using the pci_map_rom() call, a convenience function that
- takes care of mapping the actual ROM, whether it has been
- shadowed into memory (typically at address 0xc0000) or exists
- on the PCI device in the ROM BAR. Note that after the ROM
- has been mapped and any necessary information has been extracted,
- it should be unmapped; on many devices, the ROM address decoder is
- shared with other BARs, so leaving it mapped could cause
- undesired behavior like hangs or memory corruption.
-<!--!Fdrivers/pci/rom.c pci_map_rom-->
- </para>
- </sect2>
+ <!-- Internals: memory management -->
+ <sect1 id="drm-memory-management">
+ <title>Memory management</title>
+ <para>
+ Modern Linux systems require large amounts of graphics memory to store
+ frame buffers, textures, vertices and other graphics-related data. Given
+ the very dynamic nature of much of that data, managing graphics memory
+ efficiently is crucial for the graphics stack and plays a central
+ role in the DRM infrastructure.
+ </para>
+ <para>
+ The DRM core includes two memory managers, namely Translation Table Maps
+ (TTM) and Graphics Execution Manager (GEM). TTM was the first DRM memory
+ manager to be developed and tried to be a one-size-fits-all
+ solution. It provides a single userspace API to accommodate the needs of
+ all hardware, supporting both Unified Memory Architecture (UMA) devices
+ and devices with dedicated video RAM (i.e. most discrete video cards).
+ This resulted in a large, complex piece of code that turned out to be
+ hard to use for driver development.
+ </para>
+ <para>
+ GEM started as an Intel-sponsored project in reaction to TTM's
+ complexity. Its design philosophy is completely different: instead of
+ providing a solution to every graphics memory-related problem, GEM
+ identified common code between drivers and created a support library to
+ share it. GEM has simpler initialization and execution requirements than
+ TTM, but has no video RAM management capabilities and is thus limited to
+ UMA devices.
+ </para>
<sect2>
- <title>Memory manager initialization</title>
- <para>
- In order to allocate command buffers, cursor memory, scanout
- buffers, etc., as well as support the latest features provided
- by packages like Mesa and the X.Org X server, your driver
- should support a memory manager.
- </para>
+ <title>The Translation Table Manager (TTM)</title>
<para>
- If your driver supports memory management (it should!), you
- need to set that up at load time as well. How you initialize
- it depends on which memory manager you're using: TTM or GEM.
+ TTM design background and information belongs here.
</para>
<sect3>
<title>TTM initialization</title>
- <para>
- TTM (for Translation Table Manager) manages video memory and
- aperture space for graphics devices. TTM supports both UMA devices
- and devices with dedicated video RAM (VRAM), i.e. most discrete
- graphics devices. If your device has dedicated RAM, supporting
- TTM is desirable. TTM also integrates tightly with your
- driver-specific buffer execution function. See the radeon
- driver for examples.
- </para>
- <para>
- The core TTM structure is the ttm_bo_driver struct. It contains
- several fields with function pointers for initializing the TTM,
- allocating and freeing memory, waiting for command completion
- and fence synchronization, and memory migration. See the
- radeon_ttm.c file for an example of usage.
+ <warning><para>This section is outdated.</para></warning>
+ <para>
+ Drivers wishing to support TTM must fill out a ttm_bo_driver
+ structure. The structure contains several fields with function
+ pointers for initializing the TTM, allocating and freeing memory,
+ waiting for command completion and fence synchronization, and memory
+ migration. See the radeon_ttm.c file for an example of usage.
</para>
<para>
The ttm_global_reference structure is made up of several fields:
@@ -445,82 +500,1081 @@
count for the TTM, which will call your initialization function.
</para>
</sect3>
+ </sect2>
+ <sect2 id="drm-gem">
+ <title>The Graphics Execution Manager (GEM)</title>
+ <para>
+ The GEM design approach has resulted in a memory manager that doesn't
+ provide full coverage of all (or even all common) use cases in its
+ userspace or kernel API. GEM exposes a set of standard memory-related
+ operations to userspace and a set of helper functions to drivers, and lets
+ drivers implement hardware-specific operations with their own private API.
+ </para>
+ <para>
+ The GEM userspace API is described in the
+ <ulink url="http://lwn.net/Articles/283798/"><citetitle>GEM - the Graphics
+ Execution Manager</citetitle></ulink> article on LWN. While slightly
+ outdated, the document provides a good overview of the GEM API principles.
+ Buffer allocation and read and write operations, described as part of the
+ common GEM API, are currently implemented using driver-specific ioctls.
+ </para>
+ <para>
+ GEM is data-agnostic. It manages abstract buffer objects without knowing
+ what individual buffers contain. APIs that require knowledge of buffer
+ contents or purpose, such as buffer allocation or synchronization
+ primitives, are thus outside of the scope of GEM and must be implemented
+ using driver-specific ioctls.
+ </para>
+ <para>
+ On a fundamental level, GEM involves several operations:
+ <itemizedlist>
+ <listitem>Memory allocation and freeing</listitem>
+ <listitem>Command execution</listitem>
+ <listitem>Aperture management at command execution time</listitem>
+ </itemizedlist>
+ Buffer object allocation is relatively straightforward and largely
+ handled by Linux's shmem layer, which provides memory to back each
+ object.
+ </para>
+ <para>
+ Device-specific operations, such as command execution, pinning, buffer
+ read &amp; write, mapping, and domain ownership transfers are left to
+ driver-specific ioctls.
+ </para>
+ <sect3>
+ <title>GEM Initialization</title>
+ <para>
+ Drivers that use GEM must set the DRIVER_GEM bit in the struct
+ <structname>drm_driver</structname>
+ <structfield>driver_features</structfield> field. The DRM core will
+ then automatically initialize the GEM core before calling the
+ <methodname>load</methodname> operation. Behind the scenes, this will
+ create a DRM Memory Manager object which provides an address space
+ pool for object allocation.
+ </para>
+ <para>
+ In a KMS configuration, drivers need to allocate and initialize a
+ command ring buffer following core GEM initialization if required by
+ the hardware. UMA devices usually have what is called a "stolen"
+ memory region, which provides space for the initial framebuffer and
+ large, contiguous memory regions required by the device. This space is
+ typically not managed by GEM, and must be initialized separately into
+ its own DRM MM object.
+ </para>
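+      <para>
+        For instance, a driver could manage such a stolen memory region with a
+        separate DRM MM object; the <structfield>stolen_mm</structfield> field
+        and the <parameter>stolen_size</parameter> value below are
+        hypothetical:
+      </para>
+      <programlisting>
+/* Manage the stolen memory region with its own DRM MM allocator. */
+drm_mm_init(&amp;fdev->stolen_mm, 0, stolen_size);
+</programlisting>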
+ </sect3>
<sect3>
- <title>GEM initialization</title>
- <para>
- GEM is an alternative to TTM, designed specifically for UMA
- devices. It has simpler initialization and execution requirements
- than TTM, but has no VRAM management capability. Core GEM
- is initialized by calling drm_mm_init() to create
- a GTT DRM MM object, which provides an address space pool for
- object allocation. In a KMS configuration, the driver
- needs to allocate and initialize a command ring buffer following
- core GEM initialization. A UMA device usually has what is called a
- "stolen" memory region, which provides space for the initial
- framebuffer and large, contiguous memory regions required by the
- device. This space is not typically managed by GEM, and it must
- be initialized separately into its own DRM MM object.
- </para>
- <para>
- Initialization is driver-specific. In the case of Intel
- integrated graphics chips like 965GM, GEM initialization can
- be done by calling the internal GEM init function,
- i915_gem_do_init(). Since the 965GM is a UMA device
- (i.e. it doesn't have dedicated VRAM), GEM manages
- making regular RAM available for GPU operations. Memory set
- aside by the BIOS (called "stolen" memory by the i915
- driver) is managed by the DRM memrange allocator; the
- rest of the aperture is managed by GEM.
- <programlisting>
- /* Basic memrange allocator for stolen space (aka vram) */
- drm_memrange_init(&amp;dev_priv->vram, 0, prealloc_size);
- /* Let GEM Manage from end of prealloc space to end of aperture */
- i915_gem_do_init(dev, prealloc_size, agp_size);
- </programlisting>
-<!--!Edrivers/char/drm/drm_memrange.c-->
- </para>
- <para>
- Once the memory manager has been set up, we may allocate the
- command buffer. In the i915 case, this is also done with a
- GEM function, i915_gem_init_ringbuffer().
- </para>
+ <title>GEM Objects Creation</title>
+ <para>
+ GEM splits creation of GEM objects and allocation of the memory that
+ backs them into two distinct operations.
+ </para>
+ <para>
+ GEM objects are represented by an instance of struct
+ <structname>drm_gem_object</structname>. Drivers usually need to extend
+ GEM objects with private information and thus create a driver-specific
+ GEM object structure type that embeds an instance of struct
+ <structname>drm_gem_object</structname>.
+ </para>
+ <para>
+ To create a GEM object, a driver allocates memory for an instance of its
+ specific GEM object type and initializes the embedded struct
+ <structname>drm_gem_object</structname> with a call to
+ <function>drm_gem_object_init</function>. The function takes a pointer to
+ the DRM device, a pointer to the GEM object and the buffer object size
+ in bytes.
+ </para>
+ <para>
+ GEM uses shmem to allocate anonymous pageable memory.
+ <function>drm_gem_object_init</function> will create an shmfs file of
+ the requested size and store it into the struct
+ <structname>drm_gem_object</structname> <structfield>filp</structfield>
+ field. The memory is used as either main storage for the object when the
+ graphics hardware uses system memory directly or as a backing store
+ otherwise.
+ </para>
+ <para>
+ Drivers are responsible for the actual physical page allocation by
+ calling <function>shmem_read_mapping_page_gfp</function> for each page.
+ Note that they can decide to allocate pages when initializing the GEM
+ object, or to delay allocation until the memory is needed (for instance
+ when a page fault occurs as a result of a userspace memory access or
+ when the driver needs to start a DMA transfer involving the memory).
+ </para>
+ <para>
+ Anonymous pageable memory allocation is not always desired, for instance
+ when the hardware requires physically contiguous system memory as is
+ often the case in embedded devices. Drivers can create GEM objects with
+ no shmfs backing (called private GEM objects) by initializing them with
+ a call to <function>drm_gem_private_object_init</function> instead of
+ <function>drm_gem_object_init</function>. Storage for private GEM
+ objects must be managed by drivers.
+ </para>
+ <para>
+ Drivers that do not need to extend GEM objects with private information
+ can call the <function>drm_gem_object_alloc</function> function to
+ allocate and initialize a struct <structname>drm_gem_object</structname>
+ instance. The GEM core will call the optional driver
+ <methodname>gem_init_object</methodname> operation after initializing
+ the GEM object with <function>drm_gem_object_init</function>.
+ <synopsis>int (*gem_init_object) (struct drm_gem_object *obj);</synopsis>
+ </para>
+ <para>
+ No alloc-and-init function exists for private GEM objects.
+ </para>
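+      <para>
+        The following sketch shows a driver-specific GEM object type and a
+        creation helper built on top of
+        <function>drm_gem_object_init</function>; all foo names are
+        illustrative:
+      </para>
+      <programlisting>
+struct foo_gem_object {
+	struct drm_gem_object base;
+	/* Driver-specific fields (page list, GPU address, ...). */
+};
+
+static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
+					     size_t size)
+{
+	struct foo_gem_object *obj;
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (obj == NULL)
+		return NULL;
+
+	/* Create the shmfs backing store for the object. */
+	if (drm_gem_object_init(dev, &amp;obj->base, size) != 0) {
+		kfree(obj);
+		return NULL;
+	}
+
+	return obj;
+}
+</programlisting>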
+ </sect3>
+ <sect3>
+ <title>GEM Objects Lifetime</title>
+ <para>
+ All GEM objects are reference-counted by the GEM core. References can be
+ acquired and released by calling <function>drm_gem_object_reference</function>
+ and <function>drm_gem_object_unreference</function> respectively. The
+ caller must hold the <structname>drm_device</structname>
+ <structfield>struct_mutex</structfield> lock. As a convenience, GEM
+ provides the <function>drm_gem_object_reference_unlocked</function> and
+ <function>drm_gem_object_unreference_unlocked</function> functions that
+ can be called without holding the lock.
+ </para>
+ <para>
+ When the last reference to a GEM object is released the GEM core calls
+ the <structname>drm_driver</structname>
+ <methodname>gem_free_object</methodname> operation. That operation is
+ mandatory for GEM-enabled drivers and must free the GEM object and all
+ associated resources.
+ </para>
+ <para>
+ <synopsis>void (*gem_free_object) (struct drm_gem_object *obj);</synopsis>
+ Drivers are responsible for freeing all GEM object resources, including
+ the resources created by the GEM core. If an mmap offset has been
+ created for the object (in which case
+ <structname>drm_gem_object</structname>::<structfield>map_list</structfield>::<structfield>map</structfield>
+ is not NULL) it must be freed by a call to
+ <function>drm_gem_free_mmap_offset</function>. The shmfs backing store
+ must be released by calling <function>drm_gem_object_release</function>
+ (that function can safely be called if no shmfs backing store has been
+ created).
+ </para>
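+      <para>
+        For the hypothetical object type sketched above, a
+        <methodname>gem_free_object</methodname> implementation could look
+        like this:
+      </para>
+      <programlisting>
+static void foo_gem_free_object(struct drm_gem_object *gem_obj)
+{
+	struct foo_gem_object *obj =
+		container_of(gem_obj, struct foo_gem_object, base);
+
+	/* Free the mmap offset if one has been created for the object. */
+	if (gem_obj->map_list.map)
+		drm_gem_free_mmap_offset(gem_obj);
+
+	/* Release the shmfs backing store, then the object itself. */
+	drm_gem_object_release(gem_obj);
+	kfree(obj);
+}
+</programlisting>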
+ </sect3>
+ <sect3>
+ <title>GEM Objects Naming</title>
+ <para>
+ Communication between userspace and the kernel refers to GEM objects
+ using local handles, global names or, more recently, file descriptors.
+ All of those are 32-bit integer values; the usual Linux kernel limits
+ apply to the file descriptors.
+ </para>
+ <para>
+ GEM handles are local to a DRM file. Applications get a handle to a GEM
+ object through a driver-specific ioctl, and can use that handle to refer
+ to the GEM object in other standard or driver-specific ioctls. Closing a
+ DRM file handle frees all its GEM handles and dereferences the
+ associated GEM objects.
+ </para>
+ <para>
+ To create a handle for a GEM object drivers call
+ <function>drm_gem_handle_create</function>. The function takes a pointer
+ to the DRM file and the GEM object and returns a locally unique handle.
+ When the handle is no longer needed drivers delete it with a call to
+ <function>drm_gem_handle_delete</function>. Finally the GEM object
+ associated with a handle can be retrieved by a call to
+ <function>drm_gem_object_lookup</function>.
+ </para>
+ <para>
+ Handles don't take ownership of GEM objects, they only take a reference
+ to the object that will be dropped when the handle is destroyed. To
+ avoid leaking GEM objects, drivers must make sure they drop the
+ reference(s) they own (such as the initial reference taken at object
+ creation time) as appropriate, without any special consideration for the
+ handle. For example, in the particular case of combined GEM object and
+ handle creation in the implementation of the
+ <methodname>dumb_create</methodname> operation, drivers must drop the
+ initial reference to the GEM object before returning the handle.
+ </para>
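+      <para>
+        The resulting create-object-and-handle pattern is thus (a sketch,
+        reusing the hypothetical foo object type):
+      </para>
+      <programlisting>
+ret = drm_gem_handle_create(file_priv, &amp;obj->base, &amp;handle);
+/* Drop the initial reference; on success the handle now holds one. */
+drm_gem_object_unreference_unlocked(&amp;obj->base);
+if (ret != 0)
+	return ret;
+</programlisting>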
+ <para>
+ GEM names are similar in purpose to handles but are not local to DRM
+ files. They can be passed between processes to reference a GEM object
+ globally. Names can't be used directly to refer to objects in the DRM
+ API, applications must convert handles to names and names to handles
+ using the DRM_IOCTL_GEM_FLINK and DRM_IOCTL_GEM_OPEN ioctls
+ respectively. The conversion is handled by the DRM core without any
+ driver-specific support.
+ </para>
+ <para>
+ Similar to global names, GEM file descriptors are also used to share GEM
+ objects across processes. They offer additional security: as file
+ descriptors must be explicitly sent over UNIX domain sockets to be shared
+ between applications, they can't be guessed like the globally unique GEM
+ names.
+ </para>
+ <para>
+ Drivers that support GEM file descriptors, also known as the DRM PRIME
+ API, must set the DRIVER_PRIME bit in the struct
+ <structname>drm_driver</structname>
+ <structfield>driver_features</structfield> field, and implement the
+ <methodname>prime_handle_to_fd</methodname> and
+ <methodname>prime_fd_to_handle</methodname> operations.
+ </para>
+ <para>
+ <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags, int *prime_fd);
+ int (*prime_fd_to_handle)(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd,
+ uint32_t *handle);</synopsis>
+ Those two operations convert a handle to a PRIME file descriptor and
+ vice versa. Drivers must use the kernel dma-buf buffer sharing framework
+ to manage the PRIME file descriptors.
+ </para>
+ <para>
+ While non-GEM drivers must implement the operations themselves, GEM
+ drivers must use the <function>drm_gem_prime_handle_to_fd</function>
+ and <function>drm_gem_prime_fd_to_handle</function> helper functions.
+ Those helpers rely on the driver
+ <methodname>gem_prime_export</methodname> and
+ <methodname>gem_prime_import</methodname> operations to create a dma-buf
+ instance from a GEM object (dma-buf exporter role) and to create a GEM
+ object from a dma-buf instance (dma-buf importer role).
+ </para>
+ <para>
+ <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags);
+ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
+ struct dma_buf *dma_buf);</synopsis>
+ These two operations are mandatory for GEM drivers that support DRM
+ PRIME.
+ </para>
+ </sect3>
+ <sect3 id="drm-gem-objects-mapping">
+ <title>GEM Objects Mapping</title>
+ <para>
+ Because mapping operations are fairly heavyweight, GEM favours
+ read/write-like access to buffers, implemented through driver-specific
+ ioctls, over mapping buffers to userspace. However, when random access
+ to the buffer is needed (to perform software rendering for instance),
+ direct access to the object can be more efficient.
+ </para>
+ <para>
+ The mmap system call can't be used directly to map GEM objects, as they
+ don't have their own file handle. Two alternative methods currently
+ co-exist to map GEM objects to userspace. The first method uses a
+ driver-specific ioctl to perform the mapping operation, calling
+ <function>do_mmap</function> under the hood. This is often considered
+ dubious, is discouraged for new GEM-enabled drivers, and will
+ thus not be described here.
+ </para>
+ <para>
+ The second method uses the mmap system call on the DRM file handle.
+ <synopsis>void *mmap(void *addr, size_t length, int prot, int flags, int fd,
+ off_t offset);</synopsis>
+ DRM identifies the GEM object to be mapped by a fake offset passed
+ through the mmap offset argument. Prior to being mapped, a GEM object
+ must thus be associated with a fake offset. To do so, drivers must call
+ <function>drm_gem_create_mmap_offset</function> on the object. The
+ function allocates a fake offset range from a pool and stores the
+ offset divided by PAGE_SIZE in
+ <literal>obj-&gt;map_list.hash.key</literal>. Care must be taken not to
+ call <function>drm_gem_create_mmap_offset</function> if a fake offset
+ has already been allocated for the object. This can be tested by
+ <literal>obj-&gt;map_list.map</literal> being non-NULL.
+ </para>
+ <para>
+ Once allocated, the fake offset value
+ (<literal>obj-&gt;map_list.hash.key &lt;&lt; PAGE_SHIFT</literal>)
+ must be passed to the application in a driver-specific way and can then
+ be used as the mmap offset argument.
+ </para>
+ <para>
+ The GEM core provides a helper method <function>drm_gem_mmap</function>
+ to handle object mapping. The method can be set directly as the mmap
+ file operation handler. It will look up the GEM object based on the
+ offset value and set the VMA operations to the
+ <structname>drm_driver</structname> <structfield>gem_vm_ops</structfield>
+ field. Note that <function>drm_gem_mmap</function> doesn't map memory to
+ userspace, but relies on the driver-provided fault handler to map pages
+ individually.
+ </para>
+ <para>
+ To use <function>drm_gem_mmap</function>, drivers must fill the struct
+ <structname>drm_driver</structname> <structfield>gem_vm_ops</structfield>
+ field with a pointer to VM operations.
+ </para>
+ <para>
+ <synopsis>struct vm_operations_struct *gem_vm_ops
+
+ struct vm_operations_struct {
+ void (*open)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct * area);
+ int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ };</synopsis>
+ </para>
+ <para>
+ The <methodname>open</methodname> and <methodname>close</methodname>
+ operations must update the GEM object reference count. Drivers can use
+ the <function>drm_gem_vm_open</function> and
+ <function>drm_gem_vm_close</function> helper functions directly as open
+ and close handlers.
+ </para>
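+      <para>
+        A typical set of VM operations thus reuses the helpers for open and
+        close and provides a driver fault handler (the
+        <function>foo_gem_fault</function> name is hypothetical):
+      </para>
+      <programlisting>
+static struct vm_operations_struct foo_gem_vm_ops = {
+	.fault = foo_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+</programlisting>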
+ <para>
+ The fault operation handler is responsible for mapping individual pages
+ to userspace when a page fault occurs. Depending on the memory
+ allocation scheme, drivers can allocate pages at fault time, or can
+ decide to allocate memory for the GEM object at the time the object is
+ created.
+ </para>
+ <para>
+ Drivers that want to map the GEM object upfront instead of handling page
+ faults can implement their own mmap file operation handler.
+ </para>
+ </sect3>
+ <sect3>
+ <title>Dumb GEM Objects</title>
+ <para>
+ The GEM API doesn't standardize GEM object creation and leaves it to
+ driver-specific ioctls. While not an issue for full-fledged graphics
+ stacks that include device-specific userspace components (in libdrm for
+ instance), this limitation makes DRM-based early boot graphics unnecessarily
+ complex.
+ </para>
+ <para>
+ Dumb GEM objects partly alleviate the problem by providing a standard
+ API to create dumb buffers suitable for scanout, which can then be used
+ to create KMS frame buffers.
+ </para>
+ <para>
+ To support dumb GEM objects drivers must implement the
+ <methodname>dumb_create</methodname>,
+ <methodname>dumb_destroy</methodname> and
+ <methodname>dumb_map_offset</methodname> operations.
+ </para>
+ <itemizedlist>
+ <listitem>
+ <synopsis>int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);</synopsis>
+ <para>
+ The <methodname>dumb_create</methodname> operation creates a GEM
+ object suitable for scanout based on the width, height and depth
+ from the struct <structname>drm_mode_create_dumb</structname>
+ argument. It fills the argument's <structfield>handle</structfield>,
+ <structfield>pitch</structfield> and <structfield>size</structfield>
+ fields with a handle for the newly created GEM object and its line
+ pitch and size in bytes.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>int (*dumb_destroy)(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle);</synopsis>
+ <para>
+ The <methodname>dumb_destroy</methodname> operation destroys a dumb
+ GEM object created by <methodname>dumb_create</methodname>.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);</synopsis>
+ <para>
+ The <methodname>dumb_map_offset</methodname> operation associates an
+ mmap fake offset with the GEM object given by the handle and returns
+ it. Drivers must use the
+ <function>drm_gem_create_mmap_offset</function> function to
+ associate the fake offset as described in
+ <xref linkend="drm-gem-objects-mapping"/>.
+ </para>
+ </listitem>
+ </itemizedlist>
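+      <para>
+        As a sketch, a <methodname>dumb_create</methodname> implementation
+        reusing the hypothetical <function>foo_gem_create</function> helper
+        shown earlier could be:
+      </para>
+      <programlisting>
+static int foo_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+			   struct drm_mode_create_dumb *args)
+{
+	struct foo_gem_object *obj;
+	int ret;
+
+	/* Compute the pitch and size from the requested width, height and
+	 * bits per pixel, honouring hardware alignment constraints if any.
+	 */
+	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+	args->size = args->pitch * args->height;
+
+	obj = foo_gem_create(dev, args->size);
+	if (obj == NULL)
+		return -ENOMEM;
+
+	ret = drm_gem_handle_create(file_priv, &amp;obj->base, &amp;args->handle);
+	/* Drop the initial reference, the handle holds one. */
+	drm_gem_object_unreference_unlocked(&amp;obj->base);
+	return ret;
+}
+</programlisting>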
+ </sect3>
+ <sect3>
+ <title>Memory Coherency</title>
+ <para>
+ When mapped to the device or used in a command buffer, backing pages
+ for an object are flushed to memory and marked write combined so as to
+ be coherent with the GPU. Likewise, if the CPU accesses an object
+ after the GPU has finished rendering to the object, then the object
+ must be made coherent with the CPU's view of memory, usually involving
+ GPU cache flushing of various kinds. This core CPU&lt;-&gt;GPU
+ coherency management is provided by a device-specific ioctl, which
+ evaluates an object's current domain and performs any necessary
+ flushing or synchronization to put the object into the desired
+ coherency domain (note that the object may be busy, i.e. an active
+ render target; in that case, setting the domain blocks the client and
+ waits for rendering to complete before performing any necessary
+ flushing operations).
+ </para>
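+ <para>
+ As an example, the i915 driver implements this through its GEM
+ set_domain ioctl. A userspace sketch using libdrm's
+ <function>drmIoctl</function> could look as follows; the
+ <varname>fd</varname> and <varname>handle</varname> variables are
+ assumed to hold an open DRM device and a GEM handle.
+ <programlisting><![CDATA[
+ /* Move the object to the CPU read domain before reading it back,
+  * blocking until any GPU rendering to it has completed.
+  */
+ struct drm_i915_gem_set_domain set_domain = {
+     .handle = handle,
+     .read_domains = I915_GEM_DOMAIN_CPU,
+     .write_domain = 0,
+ };
+ int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+ ]]></programlisting>
+ </para>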
+ </sect3>
+ <sect3>
+ <title>Command Execution</title>
+ <para>
+ Perhaps the most important GEM function for GPU devices is providing a
+ command execution interface to clients. Client programs construct
+ command buffers containing references to previously allocated memory
+ objects, and then submit them to GEM. At that point, GEM takes care to
+ bind all the objects into the GTT, execute the buffer, and provide
+ necessary synchronization between clients accessing the same buffers.
+ This often involves evicting some objects from the GTT and re-binding
+ others (a fairly expensive operation), and providing relocation
+ support which hides fixed GTT offsets from clients. Clients must take
+ care not to submit command buffers that reference more objects than
+ can fit in the GTT; otherwise, GEM will reject them and no rendering
+ will occur. Similarly, if several objects in the buffer require fence
+ registers to be allocated for correct rendering (e.g. 2D blits on
+ pre-965 chips), care must be taken not to require more fence registers
+ than are available to the client. Such resource management should be
+ abstracted from the client in libdrm.
+ </para>
</sect3>
</sect2>
+ </sect1>
+
+ <!-- Internals: mode setting -->
+ <sect1 id="drm-mode-setting">
+ <title>Mode Setting</title>
+ <para>
+ Drivers must initialize the mode setting core by calling
+ <function>drm_mode_config_init</function> on the DRM device. The function
+ initializes the <structname>drm_device</structname>
+ <structfield>mode_config</structfield> field and never fails. Once done,
+ mode configuration must be set up by initializing the following fields.
+ A short initialization sketch follows the list.
+ </para>
+ <itemizedlist>
+ <listitem>
+ <synopsis>int min_width, min_height;
+int max_width, max_height;</synopsis>
+ <para>
+ Minimum and maximum width and height of the frame buffers in pixel
+ units.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>struct drm_mode_config_funcs *funcs;</synopsis>
+ <para>Mode setting functions.</para>
+ </listitem>
+ </itemizedlist>
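+ <para>
+ A short initialization sketch; the size limits are illustrative values
+ and <varname>my_mode_config_funcs</varname> is a hypothetical
+ driver-provided structure.
+ <programlisting><![CDATA[
+ drm_mode_config_init(dev);
+
+ /* Illustrative limits; real values are device-specific. */
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+ dev->mode_config.funcs = &my_mode_config_funcs;
+ ]]></programlisting>
+ </para>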
<sect2>
- <title>Output configuration</title>
+ <title>Frame Buffer Creation</title>
+ <synopsis>struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd);</synopsis>
<para>
- The final initialization task is output configuration. This involves:
- <itemizedlist>
- <listitem>
- Finding and initializing the CRTCs, encoders, and connectors
- for the device.
- </listitem>
- <listitem>
- Creating an initial configuration.
- </listitem>
- <listitem>
- Registering a framebuffer console driver.
- </listitem>
- </itemizedlist>
+ Frame buffers are abstract memory objects that provide a source of
+ pixels to scanout to a CRTC. Applications explicitly request the
+ creation of frame buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls and
+ receive an opaque handle that can be passed to the KMS CRTC control,
+ plane configuration and page flip functions.
+ </para>
+ <para>
+ Frame buffers rely on the underlying memory manager for low-level memory
+ operations. When creating a frame buffer, applications pass a memory
+ handle (or a list of memory handles for multi-planar formats) through
+ the <parameter>drm_mode_fb_cmd2</parameter> argument. This document
+ assumes that the driver uses GEM; those handles thus reference GEM
+ objects.
+ </para>
+ <para>
+ Drivers must first validate the requested frame buffer parameters passed
+ through the mode_cmd argument. In particular this is where invalid
+ sizes, pixel formats or pitches can be caught.
+ </para>
+ <para>
+ If the parameters are deemed valid, drivers then create, initialize and
+ return an instance of struct <structname>drm_framebuffer</structname>.
+ If desired the instance can be embedded in a larger driver-specific
+ structure. The new instance is initialized with a call to
+ <function>drm_framebuffer_init</function> which takes a pointer to DRM
+ frame buffer operations (struct
+ <structname>drm_framebuffer_funcs</structname>). Frame buffer operations are
+ <itemizedlist>
+ <listitem>
+ <synopsis>int (*create_handle)(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int *handle);</synopsis>
+ <para>
+ Create a handle to the frame buffer underlying memory object. If
+ the frame buffer uses a multi-plane format, the handle will
+ reference the memory object associated with the first plane.
+ </para>
+ <para>
+ Drivers call <function>drm_gem_handle_create</function> to create
+ the handle.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>void (*destroy)(struct drm_framebuffer *framebuffer);</synopsis>
+ <para>
+ Destroy the frame buffer object and free all associated
+ resources. Drivers must call
+ <function>drm_framebuffer_cleanup</function> to free resources
+ allocated by the DRM core for the frame buffer object, and must
+ make sure to unreference all memory objects associated with the
+ frame buffer. Handles created by the
+ <methodname>create_handle</methodname> operation are released by
+ the DRM core.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>int (*dirty)(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv, unsigned flags, unsigned color,
+ struct drm_clip_rect *clips, unsigned num_clips);</synopsis>
+ <para>
+ This optional operation notifies the driver that a region of the
+ frame buffer has changed in response to a DRM_IOCTL_MODE_DIRTYFB
+ ioctl call.
+ </para>
+ </listitem>
+ </itemizedlist>
+ </para>
+ <para>
+ After initializing the <structname>drm_framebuffer</structname>
+ instance drivers must fill its <structfield>width</structfield>,
+ <structfield>height</structfield>, <structfield>pitches</structfield>,
+ <structfield>offsets</structfield>, <structfield>depth</structfield>,
+ <structfield>bits_per_pixel</structfield> and
+ <structfield>pixel_format</structfield> fields from the values passed
+ through the <parameter>drm_mode_fb_cmd2</parameter> argument. They
+ should call the <function>drm_helper_mode_fill_fb_struct</function>
+ helper function to do so.
+ </para>
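+ <para>
+ Putting the above together, a minimal <methodname>fb_create</methodname>
+ sketch could look as follows. The struct
+ <structname>my_framebuffer</structname> wrapper and the
+ <varname>my_fb_funcs</varname> operations are hypothetical, and
+ parameter validation is reduced to the GEM handle lookup.
+ <programlisting><![CDATA[
+ static struct drm_framebuffer *
+ my_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+              struct drm_mode_fb_cmd2 *mode_cmd)
+ {
+     struct my_framebuffer *my_fb;
+     struct drm_gem_object *obj;
+     int ret;
+
+     /* Look up the GEM object backing the first (and only) plane. */
+     obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+     if (!obj)
+         return ERR_PTR(-ENOENT);
+
+     my_fb = kzalloc(sizeof(*my_fb), GFP_KERNEL);
+     if (!my_fb) {
+         drm_gem_object_unreference_unlocked(obj);
+         return ERR_PTR(-ENOMEM);
+     }
+     my_fb->obj = obj;
+
+     ret = drm_framebuffer_init(dev, &my_fb->base, &my_fb_funcs);
+     if (ret) {
+         drm_gem_object_unreference_unlocked(obj);
+         kfree(my_fb);
+         return ERR_PTR(ret);
+     }
+
+     /* Copy width, height, pitches, offsets and format information. */
+     drm_helper_mode_fill_fb_struct(&my_fb->base, mode_cmd);
+
+     return &my_fb->base;
+ }
+ ]]></programlisting>
+ </para>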
+ </sect2>
+ <sect2>
+ <title>Output Polling</title>
+ <synopsis>void (*output_poll_changed)(struct drm_device *dev);</synopsis>
+ <para>
+ This operation notifies the driver that the status of one or more
+ connectors has changed. Drivers that use the fb helper can just call the
+ <function>drm_fb_helper_hotplug_event</function> function to handle this
+ operation.
+ </para>
+ </sect2>
+ </sect1>
+
+ <!-- Internals: kms initialization and cleanup -->
+
+ <sect1 id="drm-kms-init">
+ <title>KMS Initialization and Cleanup</title>
+ <para>
+ A KMS device is abstracted and exposed as a set of planes, CRTCs, encoders
+ and connectors. KMS drivers must thus create and initialize all those
+ objects at load time after initializing mode setting.
+ </para>
+ <sect2>
+ <title>CRTCs (struct <structname>drm_crtc</structname>)</title>
+ <para>
+ A CRTC is an abstraction representing a part of the chip that contains a
+ pointer to a scanout buffer. Therefore, the number of CRTCs available
+ determines how many independent scanout buffers can be active at any
+ given time. The CRTC structure contains several fields to support this:
+ a pointer to some video memory (abstracted as a frame buffer object), a
+ display mode, and an (x, y) offset into the video memory to support
+ panning or configurations where one piece of video memory spans multiple
+ CRTCs.
</para>
<sect3>
- <title>Output discovery and initialization</title>
- <para>
- Several core functions exist to create CRTCs, encoders, and
- connectors, namely: drm_crtc_init(), drm_connector_init(), and
- drm_encoder_init(), along with several "helper" functions to
- perform common tasks.
- </para>
- <para>
- Connectors should be registered with sysfs once they've been
- detected and initialized, using the
- drm_sysfs_connector_add() function. Likewise, when they're
- removed from the system, they should be destroyed with
- drm_sysfs_connector_remove().
- </para>
- <programlisting>
-<![CDATA[
+ <title>CRTC Initialization</title>
+ <para>
+ A KMS device must create and register at least one struct
+ <structname>drm_crtc</structname> instance. The instance is allocated
+ and zeroed by the driver, possibly as part of a larger structure, and
+ registered with a call to <function>drm_crtc_init</function> with a
+ pointer to CRTC functions.
+ </para>
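+ <para>
+ A minimal sketch, assuming a driver-specific structure that embeds the
+ CRTC and a hypothetical <varname>my_crtc_funcs</varname> structure
+ filled with the operations described below:
+ <programlisting><![CDATA[
+ struct my_crtc {
+     struct drm_crtc base;
+     /* driver-specific state */
+ };
+
+ int my_crtc_create(struct drm_device *dev)
+ {
+     struct my_crtc *crtc;
+
+     crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+     if (!crtc)
+         return -ENOMEM;
+
+     drm_crtc_init(dev, &crtc->base, &my_crtc_funcs);
+     return 0;
+ }
+ ]]></programlisting>
+ </para>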
+ </sect3>
+ <sect3>
+ <title>CRTC Operations</title>
+ <sect4>
+ <title>Set Configuration</title>
+ <synopsis>int (*set_config)(struct drm_mode_set *set);</synopsis>
+ <para>
+ Apply a new CRTC configuration to the device. The configuration
+ specifies a CRTC, a frame buffer to scan out from, an (x,y) position in
+ the frame buffer, a display mode and an array of connectors to drive
+ with the CRTC if possible.
+ </para>
+ <para>
+ If the frame buffer specified in the configuration is NULL, the driver
+ must detach all encoders connected to the CRTC and all connectors
+ attached to those encoders and disable them.
+ </para>
+ <para>
+ This operation is called with the mode config lock held.
+ </para>
+ <note><para>
+ FIXME: How should set_config interact with DPMS? If the CRTC is
+ suspended, should it be resumed?
+ </para></note>
+ </sect4>
+ <sect4>
+ <title>Page Flipping</title>
+ <synopsis>int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event);</synopsis>
+ <para>
+ Schedule a page flip to the given frame buffer for the CRTC. This
+ operation is called with the mode config mutex held.
+ </para>
+ <para>
+ Page flipping is a synchronization mechanism that replaces the frame
+ buffer being scanned out by the CRTC with a new frame buffer during
+ vertical blanking, avoiding tearing. When an application requests a page
+ flip the DRM core verifies that the new frame buffer is large enough to
+ be scanned out by the CRTC in the currently configured mode and then
+ calls the CRTC <methodname>page_flip</methodname> operation with a
+ pointer to the new frame buffer.
+ </para>
+ <para>
+ The <methodname>page_flip</methodname> operation schedules a page flip.
+ Once any pending rendering targeting the new frame buffer has
+ completed, the CRTC will be reprogrammed to display that frame buffer
+ after the next vertical refresh. The operation must return immediately
+ without waiting for rendering or page flip to complete and must block
+ any new rendering to the frame buffer until the page flip completes.
+ </para>
+ <para>
+ If a page flip is already pending, the
+ <methodname>page_flip</methodname> operation must return
+ -<errorname>EBUSY</errorname>.
+ </para>
+ <para>
+ To synchronize page flip to vertical blanking the driver will likely
+ need to enable vertical blanking interrupts. It should call
+ <function>drm_vblank_get</function> for that purpose, and call
+ <function>drm_vblank_put</function> after the page flip completes.
+ </para>
+ <para>
+ If the application has requested to be notified when page flip completes
+ the <methodname>page_flip</methodname> operation will be called with a
+ non-NULL <parameter>event</parameter> argument pointing to a
+ <structname>drm_pending_vblank_event</structname> instance. Upon page
+ flip completion the driver must fill the
+ <parameter>event</parameter>::<structfield>event</structfield>
+ <structfield>sequence</structfield>, <structfield>tv_sec</structfield>
+ and <structfield>tv_usec</structfield> fields with the associated
+ vertical blanking count and timestamp, add the event to the
+ <parameter>drm_file</parameter> list of events to be signaled, and wake
+ up any waiting process. This can be performed with
+ <programlisting><![CDATA[
+ struct timeval now;
+
+ event->event.sequence = drm_vblank_count_and_time(..., &now);
+ event->event.tv_sec = now.tv_sec;
+ event->event.tv_usec = now.tv_usec;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_add_tail(&event->base.link, &event->base.file_priv->event_list);
+ wake_up_interruptible(&event->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ ]]></programlisting>
+ </para>
+ <note><para>
+ FIXME: Could drivers that don't need to wait for rendering to complete
+ just add the event to <literal>dev-&gt;vblank_event_list</literal> and
+ let the DRM core handle everything, as for "normal" vertical blanking
+ events?
+ </para></note>
+ <para>
+ While waiting for the page flip to complete, the
+ <literal>event-&gt;base.link</literal> list head can be used freely by
+ the driver to store the pending event in a driver-specific list.
+ </para>
+ <para>
+ If the file handle is closed before the event is signaled, drivers must
+ take care to destroy the event in their
+ <methodname>preclose</methodname> operation (and, if needed, call
+ <function>drm_vblank_put</function>).
+ </para>
+ </sect4>
+ <sect4>
+ <title>Miscellaneous</title>
+ <itemizedlist>
+ <listitem>
+ <synopsis>void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size);</synopsis>
+ <para>
+ Apply a gamma table to the device. The operation is optional.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>void (*destroy)(struct drm_crtc *crtc);</synopsis>
+ <para>
+ Destroy the CRTC when not needed anymore. See
+ <xref linkend="drm-kms-init"/>.
+ </para>
+ </listitem>
+ </itemizedlist>
+ </sect4>
+ </sect3>
+ </sect2>
+ <sect2>
+ <title>Planes (struct <structname>drm_plane</structname>)</title>
+ <para>
+ A plane represents an image source that can be blended with or overlaid
+ on top of a CRTC during the scanout process. Planes are associated with
+ a frame buffer to crop a portion of the image memory (source) and
+ optionally scale it to a destination size. The result is then blended
+ with or overlaid on top of a CRTC.
+ </para>
+ <sect3>
+ <title>Plane Initialization</title>
+ <para>
+ Planes are optional. To create a plane, a KMS driver allocates and
+ zeroes an instance of struct <structname>drm_plane</structname>
+ (possibly as part of a larger structure) and registers it with a call
+ to <function>drm_plane_init</function>. The function takes a bitmask
+ of the CRTCs that can be associated with the plane, a pointer to the
+ plane functions and a list of supported formats.
+ </para>
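+ <para>
+ A minimal sketch; the format list and the
+ <varname>my_plane_funcs</varname> structure are illustrative
+ assumptions.
+ <programlisting><![CDATA[
+ static const uint32_t my_plane_formats[] = {
+     DRM_FORMAT_XRGB8888,
+     DRM_FORMAT_YUYV,
+ };
+
+ int my_plane_create(struct drm_device *dev)
+ {
+     struct drm_plane *plane;
+
+     plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+     if (!plane)
+         return -ENOMEM;
+
+     /* 1 << 0: the plane can only be bound to the first CRTC. The last
+      * argument marks the plane as public (not a driver-private plane).
+      */
+     return drm_plane_init(dev, plane, 1 << 0, &my_plane_funcs,
+                           my_plane_formats, ARRAY_SIZE(my_plane_formats),
+                           false);
+ }
+ ]]></programlisting>
+ </para>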
+ </sect3>
+ <sect3>
+ <title>Plane Operations</title>
+ <itemizedlist>
+ <listitem>
+ <synopsis>int (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);</synopsis>
+ <para>
+ Enable and configure the plane to use the given CRTC and frame buffer.
+ </para>
+ <para>
+ The source rectangle in frame buffer memory coordinates is given by
+ the <parameter>src_x</parameter>, <parameter>src_y</parameter>,
+ <parameter>src_w</parameter> and <parameter>src_h</parameter>
+ parameters (as 16.16 fixed point values). Devices that don't support
+ subpixel plane coordinates can ignore the fractional part.
+ </para>
+ <para>
+ The destination rectangle in CRTC coordinates is given by the
+ <parameter>crtc_x</parameter>, <parameter>crtc_y</parameter>,
+ <parameter>crtc_w</parameter> and <parameter>crtc_h</parameter>
+ parameters (as integer values). Devices scale the source rectangle to
+ the destination rectangle. If scaling is not supported, and the source
+ rectangle size doesn't match the destination rectangle size, the
+ driver must return a -<errorname>EINVAL</errorname> error.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>int (*disable_plane)(struct drm_plane *plane);</synopsis>
+ <para>
+ Disable the plane. The DRM core calls this method in response to a
+ DRM_IOCTL_MODE_SETPLANE ioctl call with the frame buffer ID set to 0.
+ Disabled planes must not be processed by the CRTC.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>void (*destroy)(struct drm_plane *plane);</synopsis>
+ <para>
+ Destroy the plane when not needed anymore. See
+ <xref linkend="drm-kms-init"/>.
+ </para>
+ </listitem>
+ </itemizedlist>
+ </sect3>
+ </sect2>
+ <sect2>
+ <title>Encoders (struct <structname>drm_encoder</structname>)</title>
+ <para>
+ An encoder takes pixel data from a CRTC and converts it to a format
+ suitable for any attached connectors. On some devices, it may be
+ possible to have a CRTC send data to more than one encoder. In that
+ case, both encoders would receive data from the same scanout buffer,
+ resulting in a "cloned" display configuration across the connectors
+ attached to each encoder.
+ </para>
+ <sect3>
+ <title>Encoder Initialization</title>
+ <para>
+ As for CRTCs, a KMS driver must create, initialize and register at
+ least one struct <structname>drm_encoder</structname> instance. The
+ instance is allocated and zeroed by the driver, possibly as part of a
+ larger structure.
+ </para>
+ <para>
+ Drivers must initialize the struct <structname>drm_encoder</structname>
+ <structfield>possible_crtcs</structfield> and
+ <structfield>possible_clones</structfield> fields before registering the
+ encoder. Both fields are bitmasks of, respectively, the CRTCs that the
+ encoder can be connected to and the sibling encoders that are candidates
+ for cloning.
+ </para>
+ <para>
+ After being initialized, the encoder must be registered with a call to
+ <function>drm_encoder_init</function>. The function takes a pointer to
+ the encoder functions and an encoder type. Supported types are
+ <itemizedlist>
+ <listitem>
+ DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A
+ </listitem>
+ <listitem>
+ DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort
+ </listitem>
+ <listitem>
+ DRM_MODE_ENCODER_LVDS for display panels
+ </listitem>
+ <listitem>
+ DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video, Component,
+ SCART)
+ </listitem>
+ <listitem>
+ DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
+ </listitem>
+ </itemizedlist>
+ </para>
+ <para>
+ Encoders must be attached to a CRTC to be used. DRM drivers leave
+ encoders unattached at initialization time. Applications (or the fbdev
+ compatibility layer when implemented) are responsible for attaching the
+ encoders they want to use to a CRTC.
+ </para>
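+ <para>
+ A minimal sketch, assuming a driver-specific structure that embeds the
+ encoder and a hypothetical <varname>my_encoder_funcs</varname>
+ structure:
+ <programlisting><![CDATA[
+ struct my_encoder {
+     struct drm_encoder base;
+     /* driver-specific state */
+ };
+
+ int my_encoder_create(struct drm_device *dev)
+ {
+     struct my_encoder *enc;
+
+     enc = kzalloc(sizeof(*enc), GFP_KERNEL);
+     if (!enc)
+         return -ENOMEM;
+
+     /* Restrict the encoder to the first CRTC, with no cloning. */
+     enc->base.possible_crtcs = 1 << 0;
+     enc->base.possible_clones = 0;
+
+     drm_encoder_init(dev, &enc->base, &my_encoder_funcs,
+                      DRM_MODE_ENCODER_TMDS);
+     return 0;
+ }
+ ]]></programlisting>
+ </para>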
+ </sect3>
+ <sect3>
+ <title>Encoder Operations</title>
+ <itemizedlist>
+ <listitem>
+ <synopsis>void (*destroy)(struct drm_encoder *encoder);</synopsis>
+ <para>
+ Called to destroy the encoder when not needed anymore. See
+ <xref linkend="drm-kms-init"/>.
+ </para>
+ </listitem>
+ </itemizedlist>
+ </sect3>
+ </sect2>
+ <sect2>
+ <title>Connectors (struct <structname>drm_connector</structname>)</title>
+ <para>
+ A connector is the final destination for pixel data on a device, and
+ usually connects directly to an external display device like a monitor
+ or laptop panel. A connector can only be attached to one encoder at a
+ time. The connector is also the structure where information about the
+ attached display is kept, so it contains fields for display data, EDID
+ data, DPMS &amp; connection status, and information about modes
+ supported on the attached displays.
+ </para>
+ <sect3>
+ <title>Connector Initialization</title>
+ <para>
+ Finally a KMS driver must create, initialize, register and attach at
+ least one struct <structname>drm_connector</structname> instance. The
+ instance is created in the same way as other KMS objects and initialized
+ by setting the
+ following fields.
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term><structfield>interlace_allowed</structfield></term>
+ <listitem><para>
+ Whether the connector can handle interlaced modes.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><structfield>doublescan_allowed</structfield></term>
+ <listitem><para>
+ Whether the connector can handle doublescan.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><structfield>display_info
+ </structfield></term>
+ <listitem><para>
+ Display information is filled from EDID information when a display
+ is detected. For non hot-pluggable displays such as flat panels in
+ embedded systems, the driver should initialize the
+ <structfield>display_info</structfield>.<structfield>width_mm</structfield>
+ and
+ <structfield>display_info</structfield>.<structfield>height_mm</structfield>
+ fields with the physical size of the display.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term id="drm-kms-connector-polled"><structfield>polled</structfield></term>
+ <listitem><para>
+ Connector polling mode, a combination of
+ <variablelist>
+ <varlistentry>
+ <term>DRM_CONNECTOR_POLL_HPD</term>
+ <listitem><para>
+ The connector generates hotplug events and doesn't need to be
+ periodically polled. The CONNECT and DISCONNECT flags must not
+ be set together with the HPD flag.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_CONNECTOR_POLL_CONNECT</term>
+ <listitem><para>
+ Periodically poll the connector for connection.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_CONNECTOR_POLL_DISCONNECT</term>
+ <listitem><para>
+ Periodically poll the connector for disconnection.
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+ Set to 0 for connectors that don't support connection status
+ discovery.
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ The connector is then registered with a call to
+ <function>drm_connector_init</function> with a pointer to the connector
+ functions and a connector type, and exposed through sysfs with a call to
+ <function>drm_sysfs_connector_add</function>.
+ </para>
+ <para>
+ Supported connector types are
+ <itemizedlist>
+ <listitem>DRM_MODE_CONNECTOR_VGA</listitem>
+ <listitem>DRM_MODE_CONNECTOR_DVII</listitem>
+ <listitem>DRM_MODE_CONNECTOR_DVID</listitem>
+ <listitem>DRM_MODE_CONNECTOR_DVIA</listitem>
+ <listitem>DRM_MODE_CONNECTOR_Composite</listitem>
+ <listitem>DRM_MODE_CONNECTOR_SVIDEO</listitem>
+ <listitem>DRM_MODE_CONNECTOR_LVDS</listitem>
+ <listitem>DRM_MODE_CONNECTOR_Component</listitem>
+ <listitem>DRM_MODE_CONNECTOR_9PinDIN</listitem>
+ <listitem>DRM_MODE_CONNECTOR_DisplayPort</listitem>
+ <listitem>DRM_MODE_CONNECTOR_HDMIA</listitem>
+ <listitem>DRM_MODE_CONNECTOR_HDMIB</listitem>
+ <listitem>DRM_MODE_CONNECTOR_TV</listitem>
+ <listitem>DRM_MODE_CONNECTOR_eDP</listitem>
+ <listitem>DRM_MODE_CONNECTOR_VIRTUAL</listitem>
+ </itemizedlist>
+ </para>
+ <para>
+ Connectors must be attached to an encoder to be used. For devices that
+ map connectors to encoders 1:1, the connector should be attached at
+ initialization time with a call to
+ <function>drm_mode_connector_attach_encoder</function>. The driver must
+ also set the <structname>drm_connector</structname>
+ <structfield>encoder</structfield> field to point to the attached
+ encoder.
+ </para>
+ <para>
+ Finally, drivers must initialize the connector state change detection
+ with a call to <function>drm_kms_helper_poll_init</function>. If at
+ least one connector is pollable but can't generate hotplug interrupts
+ (indicated by the DRM_CONNECTOR_POLL_CONNECT and
+ DRM_CONNECTOR_POLL_DISCONNECT connector flags), a delayed work will
+ automatically be queued to periodically poll for changes. Connectors
+ that can generate hotplug interrupts must be marked with the
+ DRM_CONNECTOR_POLL_HPD flag instead, and their interrupt handler must
+ call <function>drm_helper_hpd_irq_event</function>. The function will
+ queue a delayed work to check the state of all connectors, but no
+ periodic polling will be done.
+ </para>
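+ <para>
+ The following sketch summarizes the initialization sequence for a
+ hot-pluggable HDMI connector with a 1:1 encoder mapping. The
+ <varname>my_connector_funcs</varname> structure is hypothetical and the
+ <varname>encoder</varname> variable is assumed to point to a previously
+ registered encoder.
+ <programlisting><![CDATA[
+ struct drm_connector *connector;
+
+ connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+     return -ENOMEM;
+
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_init(dev, connector, &my_connector_funcs,
+                    DRM_MODE_CONNECTOR_HDMIA);
+ drm_sysfs_connector_add(connector);
+
+ /* 1:1 mapping, attach the connector to its encoder at init time. */
+ drm_mode_connector_attach_encoder(connector, encoder);
+ connector->encoder = encoder;
+ ]]></programlisting>
+ </para>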
+ </sect3>
+ <sect3>
+ <title>Connector Operations</title>
+ <note><para>
+ Unless otherwise stated, all operations are mandatory.
+ </para></note>
+ <sect4>
+ <title>DPMS</title>
+ <synopsis>void (*dpms)(struct drm_connector *connector, int mode);</synopsis>
+ <para>
+ The DPMS operation sets the power state of a connector. The mode
+ argument is one of
+ <itemizedlist>
+ <listitem><para>DRM_MODE_DPMS_ON</para></listitem>
+ <listitem><para>DRM_MODE_DPMS_STANDBY</para></listitem>
+ <listitem><para>DRM_MODE_DPMS_SUSPEND</para></listitem>
+ <listitem><para>DRM_MODE_DPMS_OFF</para></listitem>
+ </itemizedlist>
+ </para>
+ <para>
+ In all but DPMS_ON mode the encoder to which the connector is attached
+ should put the display in low-power mode by driving its signals
+ appropriately. If more than one connector is attached to the encoder
+ care should be taken not to change the power state of other displays as
+ a side effect. Low-power mode should be propagated to the encoders and
+ CRTCs when all related connectors are put in low-power mode.
+ </para>
+ </sect4>
+ <sect4>
+ <title>Modes</title>
+ <synopsis>int (*fill_modes)(struct drm_connector *connector, uint32_t max_width,
+ uint32_t max_height);</synopsis>
+ <para>
+ Fill the mode list with all supported modes for the connector. If the
+ <parameter>max_width</parameter> and <parameter>max_height</parameter>
+ arguments are non-zero, the implementation must ignore all modes wider
+ than <parameter>max_width</parameter> or higher than
+ <parameter>max_height</parameter>.
+ </para>
+ <para>
+ In this operation the connector must also fill its
+ <structfield>display_info</structfield>
+ <structfield>width_mm</structfield> and
+ <structfield>height_mm</structfield> fields with the physical size of the
+ connected display in millimeters. The fields should be set to 0 if the
+ value isn't known or is not applicable (for instance for projector
+ devices).
+ </para>
+ </sect4>
+ <sect4>
+ <title>Connection Status</title>
+ <para>
+ The connection status is updated through polling or hotplug events when
+ supported (see <xref linkend="drm-kms-connector-polled"/>). The status
+ value is reported to userspace through ioctls and must not be used
+ inside the driver, as it only gets initialized by a call to
+ <function>drm_mode_getconnector</function> from userspace.
+ </para>
+ <synopsis>enum drm_connector_status (*detect)(struct drm_connector *connector,
+ bool force);</synopsis>
+ <para>
+ Check to see if anything is attached to the connector. The
+ <parameter>force</parameter> parameter is set to false whilst polling or
+ to true when checking the connector due to user request.
+ <parameter>force</parameter> can be used by the driver to avoid
+ expensive, destructive operations during automated probing.
+ </para>
+ <para>
+ Return connector_status_connected if something is connected to the
+ connector, connector_status_disconnected if nothing is connected and
+ connector_status_unknown if the connection state isn't known.
+ </para>
+ <para>
+ Drivers should only return connector_status_connected if the connection
+ status has really been probed as connected. Connectors that can't detect
+ the connection status, or failed connection status probes, should return
+ connector_status_unknown.
+ </para>
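+ <para>
+ A minimal <methodname>detect</methodname> sketch for hardware with a
+ readable hot-plug status; <function>my_probe_hpd</function> is a
+ hypothetical register readout.
+ <programlisting><![CDATA[
+ static enum drm_connector_status
+ my_connector_detect(struct drm_connector *connector, bool force)
+ {
+     /* Cheap, non-destructive probe; the force parameter is thus
+      * ignored here.
+      */
+     if (my_probe_hpd(connector))
+         return connector_status_connected;
+
+     return connector_status_disconnected;
+ }
+ ]]></programlisting>
+ </para>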
+ </sect4>
+ <sect4>
+ <title>Miscellaneous</title>
+ <itemizedlist>
+ <listitem>
+ <synopsis>void (*destroy)(struct drm_connector *connector);</synopsis>
+ <para>
+ Destroy the connector when not needed anymore. See
+ <xref linkend="drm-kms-init"/>.
+ </para>
+ </listitem>
+ </itemizedlist>
+ </sect4>
+ </sect3>
+ </sect2>
+ <sect2>
+ <title>Cleanup</title>
+ <para>
+ The DRM core manages its objects' lifetime. When an object is not needed
+ anymore the core calls its destroy function, which must clean up and
+ free every resource allocated for the object. Every
+ <function>drm_*_init</function> call must be matched with a
+ corresponding <function>drm_*_cleanup</function> call to cleanup CRTCs
+ (<function>drm_crtc_cleanup</function>), planes
+ (<function>drm_plane_cleanup</function>), encoders
+ (<function>drm_encoder_cleanup</function>) and connectors
+ (<function>drm_connector_cleanup</function>). Furthermore, connectors
+ that have been added to sysfs must be removed by a call to
+ <function>drm_sysfs_connector_remove</function> before calling
+ <function>drm_connector_cleanup</function>.
+ </para>
+ <para>
+ Connector state change detection must be cleaned up with a call to
+ <function>drm_kms_helper_poll_fini</function>.
+ </para>
+ </sect2>
+ <sect2>
+ <title>Output discovery and initialization example</title>
+ <programlisting><![CDATA[
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
@@ -556,252 +1610,741 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
-}
-]]>
- </programlisting>
- <para>
- In the example above (again, taken from the i915 driver), a
- CRT connector and encoder combination is created. A device-specific
- i2c bus is also created for fetching EDID data and
- performing monitor detection. Once the process is complete,
- the new connector is registered with sysfs to make its
- properties available to applications.
- </para>
- <sect4>
- <title>Helper functions and core functions</title>
- <para>
- Since many PC-class graphics devices have similar display output
- designs, the DRM provides a set of helper functions to make
- output management easier. The core helper routines handle
- encoder re-routing and the disabling of unused functions following
- mode setting. Using the helpers is optional, but recommended for
- devices with PC-style architectures (i.e. a set of display planes
- for feeding pixels to encoders which are in turn routed to
- connectors). Devices with more complex requirements needing
- finer grained management may opt to use the core callbacks
- directly.
- </para>
- <para>
- [Insert typical diagram here.] [Insert OMAP style config here.]
- </para>
- </sect4>
- <para>
- Each encoder object needs to provide:
- <itemizedlist>
- <listitem>
- A DPMS (basically on/off) function.
- </listitem>
- <listitem>
- A mode-fixup function (for converting requested modes into
- native hardware timings).
- </listitem>
- <listitem>
- Functions (prepare, set, and commit) for use by the core DRM
- helper functions.
- </listitem>
- </itemizedlist>
- Connector helpers need to provide functions (mode-fetch, validity,
- and encoder-matching) for returning an ideal encoder for a given
- connector. The core connector functions include a DPMS callback,
- save/restore routines (deprecated), detection, mode probing,
- property handling, and cleanup functions.
- </para>
-<!--!Edrivers/char/drm/drm_crtc.h-->
-<!--!Edrivers/char/drm/drm_crtc.c-->
-<!--!Edrivers/char/drm/drm_crtc_helper.c-->
- </sect3>
+}]]></programlisting>
+ <para>
+ In the example above (taken from the i915 driver), a CRT connector and
+ encoder combination is created. A device-specific i2c bus is also
+ created for fetching EDID data and performing monitor detection. Once
+ the process is complete, the new connector is registered with sysfs to
+ make its properties available to applications.
+ </para>
</sect2>
</sect1>
- <!-- Internals: vblank handling -->
+ <!-- Internals: mid-layer helper functions -->
<sect1>
- <title>VBlank event handling</title>
+ <title>Mid-layer Helper Functions</title>
<para>
- The DRM core exposes two vertical blank related ioctls:
- <variablelist>
- <varlistentry>
- <term>DRM_IOCTL_WAIT_VBLANK</term>
- <listitem>
- <para>
- This takes a struct drm_wait_vblank structure as its argument,
- and it is used to block or request a signal when a specified
- vblank event occurs.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>DRM_IOCTL_MODESET_CTL</term>
- <listitem>
- <para>
- This should be called by application level drivers before and
- after mode setting, since on many devices the vertical blank
- counter is reset at that time. Internally, the DRM snapshots
- the last vblank count when the ioctl is called with the
- _DRM_PRE_MODESET command, so that the counter won't go backwards
- (which is dealt with when _DRM_POST_MODESET is used).
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
-<!--!Edrivers/char/drm/drm_irq.c-->
+ The CRTC, encoder and connector functions provided by the drivers
+ implement the DRM API. They're called by the DRM core and ioctl handlers
+ to handle device state changes and configuration requests. As implementing
+ those functions often requires logic not specific to drivers, mid-layer
+ helper functions are available to avoid duplicating boilerplate code.
+ </para>
+ <para>
+ The DRM core contains one mid-layer implementation. The mid-layer provides
+ implementations of several CRTC, encoder and connector functions (called
+ from the top of the mid-layer) that pre-process requests and call
+ lower-level functions provided by the driver (at the bottom of the
+ mid-layer). For instance, the
+ <function>drm_crtc_helper_set_config</function> function can be used to
+ fill the struct <structname>drm_crtc_funcs</structname>
+ <structfield>set_config</structfield> field. When called, it will split
+ the <methodname>set_config</methodname> operation into smaller, simpler
+ operations and call the driver to handle them.
</para>
<para>
- To support the functions above, the DRM core provides several
- helper functions for tracking vertical blank counters, and
- requires drivers to provide several callbacks:
- get_vblank_counter(), enable_vblank() and disable_vblank(). The
- core uses get_vblank_counter() to keep the counter accurate
- across interrupt disable periods. It should return the current
- vertical blank event count, which is often tracked in a device
- register. The enable and disable vblank callbacks should enable
- and disable vertical blank interrupts, respectively. In the
- absence of DRM clients waiting on vblank events, the core DRM
- code uses the disable_vblank() function to disable
- interrupts, which saves power. They are re-enabled again when
- a client calls the vblank wait ioctl above.
+ To use the mid-layer, drivers call <function>drm_crtc_helper_add</function>,
+ <function>drm_encoder_helper_add</function> and
+ <function>drm_connector_helper_add</function> functions to install their
+ mid-layer bottom operations handlers, and fill the
+ <structname>drm_crtc_funcs</structname>,
+ <structname>drm_encoder_funcs</structname> and
+ <structname>drm_connector_funcs</structname> structures with pointers to
+ the mid-layer top API functions. Installing the mid-layer bottom operation
+ handlers is best done right after registering the corresponding KMS object.
</para>
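+ <para>
+ For instance, for CRTCs this results in the following pattern;
+ <varname>my_crtc_helper_funcs</varname> is assumed to provide the
+ bottom operations described in the next sections.
+ <programlisting><![CDATA[
+ static struct drm_crtc_funcs my_crtc_funcs = {
+     /* Top of the mid-layer: delegate set_config to the helper. */
+     .set_config = drm_crtc_helper_set_config,
+     .destroy = my_crtc_destroy,
+ };
+
+ /* At initialization time, right after registering the CRTC. */
+ drm_crtc_init(dev, crtc, &my_crtc_funcs);
+ drm_crtc_helper_add(crtc, &my_crtc_helper_funcs);
+ ]]></programlisting>
+ </para>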
<para>
- A device that doesn't provide a count register may simply use an
- internal atomic counter incremented on every vertical blank
- interrupt (and then treat the enable_vblank() and disable_vblank()
- callbacks as no-ops).
+ The mid-layer is not split between CRTC, encoder and connector operations.
+ To use it, a driver must provide bottom functions for all three KMS
+ entities.
</para>
+ <sect2>
+ <title>Helper Functions</title>
+ <itemizedlist>
+ <listitem>
+ <synopsis>int drm_crtc_helper_set_config(struct drm_mode_set *set);</synopsis>
+ <para>
+ The <function>drm_crtc_helper_set_config</function> helper function
+ is a CRTC <methodname>set_config</methodname> implementation. It
+ first tries to locate the best encoder for each connector by calling
+ the connector <methodname>best_encoder</methodname> helper
+ operation.
+ </para>
+ <para>
+ After locating the appropriate encoders, the helper function will
+ call the <methodname>mode_fixup</methodname> encoder and CRTC helper
+ operations to adjust the requested mode, or reject it completely in
+ which case an error will be returned to the application. If the new
+ configuration after mode adjustment is identical to the current
+ configuration the helper function will return without performing any
+ other operation.
+ </para>
+ <para>
+ If the adjusted mode is identical to the current mode but changes to
+ the frame buffer need to be applied, the
+ <function>drm_crtc_helper_set_config</function> function will call
+ the CRTC <methodname>mode_set_base</methodname> helper operation. If
+ the adjusted mode differs from the current mode, or if the
+ <methodname>mode_set_base</methodname> helper operation is not
+ provided, the helper function performs a full mode set sequence by
+ calling the <methodname>prepare</methodname>,
+ <methodname>mode_set</methodname> and
+ <methodname>commit</methodname> CRTC and encoder helper operations,
+ in that order.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>void drm_helper_connector_dpms(struct drm_connector *connector, int mode);</synopsis>
+ <para>
+ The <function>drm_helper_connector_dpms</function> helper function
+ is a connector <methodname>dpms</methodname> implementation that
+ tracks power state of connectors. To use the function, drivers must
+ provide <methodname>dpms</methodname> helper operations for CRTCs
+ and encoders to apply the DPMS state to the device.
+ </para>
+ <para>
+ The mid-layer doesn't track the power state of CRTCs and encoders.
+ The <methodname>dpms</methodname> helper operations can thus be
+ called with a mode identical to the currently active mode.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY);</synopsis>
+ <para>
+ The <function>drm_helper_probe_single_connector_modes</function> helper
+ function is a connector <methodname>fill_modes</methodname>
+ implementation that updates the connection status for the connector
+ and then retrieves a list of modes by calling the connector
+ <methodname>get_modes</methodname> helper operation.
+ </para>
+ <para>
+ The function filters out modes larger than
+ <parameter>maxX</parameter> and <parameter>maxY</parameter>
+ if specified. It then calls the connector
+ <methodname>mode_valid</methodname> helper operation for each mode in
+ the probed list to check whether the mode is valid for the connector.
+ </para>
+ </listitem>
+ </itemizedlist>
+ </sect2>
+ <sect2>
+ <title>CRTC Helper Operations</title>
+ <itemizedlist>
+ <listitem id="drm-helper-crtc-mode-fixup">
+ <synopsis>bool (*mode_fixup)(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);</synopsis>
+ <para>
+ Let CRTCs adjust the requested mode or reject it completely. This
+ operation returns true if the mode is accepted (possibly after being
+ adjusted) or false if it is rejected.
+ </para>
+ <para>
+ The <methodname>mode_fixup</methodname> operation should reject the
+ mode if it can't reasonably use it. The definition of "reasonable"
+ is currently fuzzy in this context. One possible behaviour would be
+ to set the adjusted mode to the panel timings when a fixed-mode
+ panel is used with hardware capable of scaling. Another behaviour
+ would be to accept any input mode and adjust it to the closest mode
+ supported by the hardware (FIXME: This needs to be clarified).
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)</synopsis>
+ <para>
+ Move the CRTC on the current frame buffer (stored in
+ <literal>crtc-&gt;fb</literal>) to position (x,y). Any of the frame
+ buffer, x position or y position may have been modified.
+ </para>
+ <para>
+ This helper operation is optional. If not provided, the
+ <function>drm_crtc_helper_set_config</function> function will fall
+ back to the <methodname>mode_set</methodname> helper operation.
+ </para>
+ <note><para>
+ FIXME: Why are x and y passed as arguments, as they can be accessed
+ through <literal>crtc-&gt;x</literal> and
+ <literal>crtc-&gt;y</literal>?
+ </para></note>
+ </listitem>
+ <listitem>
+ <synopsis>void (*prepare)(struct drm_crtc *crtc);</synopsis>
+ <para>
+ Prepare the CRTC for mode setting. This operation is called after
+ validating the requested mode. Drivers use it to perform
+ device-specific operations required before setting the new mode.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb);</synopsis>
+ <para>
+ Set a new mode, position and frame buffer. Depending on the device
+ requirements, the mode can be stored internally by the driver and
+ applied in the <methodname>commit</methodname> operation, or
+ programmed to the hardware immediately.
+ </para>
+ <para>
+ The <methodname>mode_set</methodname> operation returns 0 on success
+ or a negative error code if an error occurs.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>void (*commit)(struct drm_crtc *crtc);</synopsis>
+ <para>
+ Commit a mode. This operation is called after setting the new mode.
+ Upon return the device must use the new mode and be fully
+ operational.
+ </para>
+ </listitem>
+ </itemizedlist>
+ </sect2>
+ <sect2>
+ <title>Encoder Helper Operations</title>
+ <itemizedlist>
+ <listitem>
+ <synopsis>bool (*mode_fixup)(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);</synopsis>
+ <note><para>
+ FIXME: The mode argument should be const, but the i915 driver modifies
+ mode-&gt;clock in <function>intel_dp_mode_fixup</function>.
+ </para></note>
+ <para>
+ Let encoders adjust the requested mode or reject it completely. This
+ operation returns true if the mode is accepted (possibly after being
+ adjusted) or false if it is rejected. See the
+ <link linkend="drm-helper-crtc-mode-fixup">mode_fixup CRTC helper
+ operation</link> for an explanation of the allowed adjustments.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>void (*prepare)(struct drm_encoder *encoder);</synopsis>
+ <para>
+ Prepare the encoder for mode setting. This operation is called after
+ validating the requested mode. Drivers use it to perform
+ device-specific operations required before setting the new mode.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>void (*mode_set)(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);</synopsis>
+ <para>
+ Set a new mode. Depending on the device requirements, the mode can
+ be stored internally by the driver and applied in the
+ <methodname>commit</methodname> operation, or programmed to the
+ hardware immediately.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>void (*commit)(struct drm_encoder *encoder);</synopsis>
+ <para>
+ Commit a mode. This operation is called after setting the new mode.
+ Upon return the device must use the new mode and be fully
+ operational.
+ </para>
+ </listitem>
+ </itemizedlist>
+ </sect2>
+ <sect2>
+ <title>Connector Helper Operations</title>
+ <itemizedlist>
+ <listitem>
+ <synopsis>struct drm_encoder *(*best_encoder)(struct drm_connector *connector);</synopsis>
+ <para>
+ Return a pointer to the best encoder for the connector. Devices that
+ map connectors to encoders 1:1 simply return the pointer to the
+ associated encoder. This operation is mandatory.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>int (*get_modes)(struct drm_connector *connector);</synopsis>
+ <para>
+ Fill the connector's <structfield>probed_modes</structfield> list
+ by parsing EDID data with <function>drm_add_edid_modes</function> or
+ calling <function>drm_mode_probed_add</function> directly for every
+ supported mode and return the number of modes it has detected. This
+ operation is mandatory.
+ </para>
+ <para>
+ When adding modes manually the driver creates each mode with a call to
+ <function>drm_mode_create</function> and must fill the following fields.
+ <itemizedlist>
+ <listitem>
+ <synopsis>__u32 type;</synopsis>
+ <para>
+ Mode type bitmask, a combination of
+ <variablelist>
+ <varlistentry>
+ <term>DRM_MODE_TYPE_BUILTIN</term>
+ <listitem><para>not used?</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_TYPE_CLOCK_C</term>
+ <listitem><para>not used?</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_TYPE_CRTC_C</term>
+ <listitem><para>not used?</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>
+ DRM_MODE_TYPE_PREFERRED - The preferred mode for the connector
+ </term>
+ <listitem>
+ <para>not used?</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_TYPE_DEFAULT</term>
+ <listitem><para>not used?</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_TYPE_USERDEF</term>
+ <listitem><para>not used?</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_TYPE_DRIVER</term>
+ <listitem>
+ <para>
+ The mode has been created by the driver (as opposed to
+ user-created modes).
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ Drivers must set the DRM_MODE_TYPE_DRIVER bit for all modes they
+ create, and set the DRM_MODE_TYPE_PREFERRED bit for the preferred
+ mode.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>__u32 clock;</synopsis>
+ <para>Pixel clock frequency in kHz</para>
+ </listitem>
+ <listitem>
+ <synopsis>__u16 hdisplay, hsync_start, hsync_end, htotal;
+ __u16 vdisplay, vsync_start, vsync_end, vtotal;</synopsis>
+ <para>Horizontal and vertical timing information</para>
+ <screen><![CDATA[
+              Active                 Front           Sync           Back
+              Region                 Porch                          Porch
+     <-----------------------><----------------><-------------><-------------->
+
+       //////////////////////|
+      ////////////////////// |
+     //////////////////////  |..................               ................
+                                                               _______________
+
+     <----- [hv]display ----->
+     <------------- [hv]sync_start ------------>
+     <--------------------- [hv]sync_end --------------------->
+     <-------------------------------- [hv]total ----------------------------->
+]]></screen>
+ </listitem>
+ <listitem>
+ <synopsis>__u16 hskew;
+ __u16 vscan;</synopsis>
+ <para>Unknown</para>
+ </listitem>
+ <listitem>
+ <synopsis>__u32 flags;</synopsis>
+ <para>
+ Mode flags, a combination of
+ <variablelist>
+ <varlistentry>
+ <term>DRM_MODE_FLAG_PHSYNC</term>
+ <listitem><para>
+ Horizontal sync is active high
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_FLAG_NHSYNC</term>
+ <listitem><para>
+ Horizontal sync is active low
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_FLAG_PVSYNC</term>
+ <listitem><para>
+ Vertical sync is active high
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_FLAG_NVSYNC</term>
+ <listitem><para>
+ Vertical sync is active low
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_FLAG_INTERLACE</term>
+ <listitem><para>
+ Mode is interlaced
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_FLAG_DBLSCAN</term>
+ <listitem><para>
+ Mode uses doublescan
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_FLAG_CSYNC</term>
+ <listitem><para>
+ Mode uses composite sync
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_FLAG_PCSYNC</term>
+ <listitem><para>
+ Composite sync is active high
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_FLAG_NCSYNC</term>
+ <listitem><para>
+ Composite sync is active low
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_FLAG_HSKEW</term>
+ <listitem><para>
+ hskew provided (not used?)
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_FLAG_BCAST</term>
+ <listitem><para>
+ not used?
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_FLAG_PIXMUX</term>
+ <listitem><para>
+ not used?
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_FLAG_DBLCLK</term>
+ <listitem><para>
+ not used?
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_MODE_FLAG_CLKDIV2</term>
+ <listitem><para>
+ ?
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ <para>
+ Note that modes marked with the INTERLACE or DBLSCAN flags will be
+ filtered out by
+ <function>drm_helper_probe_single_connector_modes</function> if
+ the connector's <structfield>interlace_allowed</structfield> or
+ <structfield>doublescan_allowed</structfield> field is set to 0.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>char name[DRM_DISPLAY_MODE_LEN];</synopsis>
+ <para>
+ Mode name. The driver must call
+ <function>drm_mode_set_name</function> to fill the mode name from the
+ <structfield>hdisplay</structfield> and
+ <structfield>vdisplay</structfield> values and the interlace flag after
+ filling the corresponding fields.
+ </para>
+ </listitem>
+ </itemizedlist>
+ </para>
+ <para>
+ The <structfield>vrefresh</structfield> value is computed by
+ <function>drm_helper_probe_single_connector_modes</function>.
+ </para>
+ <para>
+ When parsing EDID data, <function>drm_add_edid_modes</function> fills the
+ connector <structfield>display_info</structfield>
+ <structfield>width_mm</structfield> and
+ <structfield>height_mm</structfield> fields. When creating modes
+ manually the <methodname>get_modes</methodname> helper operation must
+ set the <structfield>display_info</structfield>
+ <structfield>width_mm</structfield> and
+ <structfield>height_mm</structfield> fields if they haven't been set
+ already (for instance at initialization time when a fixed-size panel is
+ attached to the connector). The mode <structfield>width_mm</structfield>
+ and <structfield>height_mm</structfield> fields are only used internally
+ during EDID parsing and should not be set when creating modes manually.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>int (*mode_valid)(struct drm_connector *connector,
+ struct drm_display_mode *mode);</synopsis>
+ <para>
+ Verify whether a mode is valid for the connector. Return MODE_OK for
+ supported modes and one of the enum drm_mode_status values (MODE_*)
+ for unsupported modes. This operation is mandatory.
+ </para>
+ <para>
+ As the mode rejection reason is currently not used except for
+ immediately removing the unsupported mode, an implementation can
+ return MODE_BAD regardless of the exact reason why the mode is not
+ valid.
+ </para>
+ <note><para>
+ Note that the <methodname>mode_valid</methodname> helper operation is
+ only called for modes detected by the device, and
+ <emphasis>not</emphasis> for modes set by the user through the CRTC
+ <methodname>set_config</methodname> operation.
+ </para></note>
+ </listitem>
+ </itemizedlist>
+ </sect2>
</sect1>
- <sect1>
- <title>Memory management</title>
+ <!-- Internals: vertical blanking -->
+
+ <sect1 id="drm-vertical-blank">
+ <title>Vertical Blanking</title>
+ <para>
+ Vertical blanking plays a major role in graphics rendering. To achieve
+ tear-free display, users must synchronize page flips and/or rendering to
+ vertical blanking. The DRM API offers ioctls to perform page flips
+ synchronized to vertical blanking and wait for vertical blanking.
+ </para>
+ <para>
+ The DRM core handles most of the vertical blanking management logic, which
+ involves filtering out spurious interrupts, keeping race-free blanking
+ counters, coping with counter wrap-around and resets and keeping use
+ counts. It relies on the driver to generate vertical blanking interrupts
+ and optionally provide a hardware vertical blanking counter. Drivers must
+ implement the following operations.
+ </para>
+ <itemizedlist>
+ <listitem>
+ <synopsis>int (*enable_vblank) (struct drm_device *dev, int crtc);
+void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis>
+ <para>
+ Enable or disable vertical blanking interrupts for the given CRTC.
+ </para>
+ </listitem>
+ <listitem>
+ <synopsis>u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);</synopsis>
+ <para>
+ Retrieve the value of the vertical blanking counter for the given
+ CRTC. If the hardware maintains a vertical blanking counter its value
+ should be returned. Otherwise drivers can use the
+ <function>drm_vblank_count</function> helper function to handle this
+ operation.
+ </para>
+ </listitem>
+ </itemizedlist>
<para>
- The memory manager lies at the heart of many DRM operations; it
- is required to support advanced client features like OpenGL
- pbuffers. The DRM currently contains two memory managers: TTM
- and GEM.
+ Drivers must initialize the vertical blanking handling core with a call to
+ <function>drm_vblank_init</function> in their
+ <methodname>load</methodname> operation. The function will set the struct
+ <structname>drm_device</structname>
+ <structfield>vblank_disable_allowed</structfield> field to 0. This will
+ keep vertical blanking interrupts enabled permanently until the first mode
+ set operation, where <structfield>vblank_disable_allowed</structfield> is
+ set to 1. The reason behind this is not clear. Drivers can set the field
+ to 1 after calling <function>drm_vblank_init</function> to make vertical
+ blanking interrupts dynamically managed from the beginning.
</para>
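+ <para>
+ A minimal sketch of the corresponding <methodname>load</methodname>
+ fragment, assuming a device with a single CRTC:
+ <programlisting><![CDATA[
+ int ret;
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+     return ret;
+
+ /* Optional: let the core disable vertical blanking interrupts
+  * dynamically right away instead of waiting for the first mode set.
+  */
+ dev->vblank_disable_allowed = 1;
+ ]]></programlisting>
+ </para>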
+ <para>
+ Vertical blanking interrupts can be enabled by the DRM core or by drivers
+ themselves (for instance to handle page flipping operations). The DRM core
+ maintains a vertical blanking use count to ensure that the interrupts are
+ not disabled while a user still needs them. To increment the use count,
+ drivers call <function>drm_vblank_get</function>. Upon return vertical
+ blanking interrupts are guaranteed to be enabled.
+ </para>
+ <para>
+ To decrement the use count drivers call
+ <function>drm_vblank_put</function>. Only when the use count drops to zero
+ will the DRM core disable the vertical blanking interrupts after a delay
+ by scheduling a timer. The delay is accessible through the vblankoffdelay
+ module parameter or the <varname>drm_vblank_offdelay</varname> global
+ variable and expressed in milliseconds. Its default value is 5000 ms.
+ </para>
+ <para>
+ When a vertical blanking interrupt occurs drivers only need to call the
+ <function>drm_handle_vblank</function> function to account for the
+ interrupt.
+ </para>
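+ <para>
+ A minimal interrupt handler sketch;
+ <function>my_read_and_clear_irq_status</function> and
+ <literal>MY_IRQ_VBLANK</literal> are hypothetical device-specific
+ names.
+ <programlisting><![CDATA[
+ static irqreturn_t my_driver_irq_handler(int irq, void *arg)
+ {
+     struct drm_device *dev = arg;
+     u32 status = my_read_and_clear_irq_status(dev);
+
+     if (status & MY_IRQ_VBLANK)
+         drm_handle_vblank(dev, 0);  /* vertical blanking on CRTC 0 */
+
+     return IRQ_HANDLED;
+ }
+ ]]></programlisting>
+ </para>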
+ <para>
+ Resources allocated by <function>drm_vblank_init</function> must be freed
+ with a call to <function>drm_vblank_cleanup</function> in the driver
+ <methodname>unload</methodname> operation handler.
+ </para>
+ </sect1>
+
+ <!-- Internals: open/close, file operations and ioctls -->
+ <sect1>
+ <title>Open/Close, File Operations and IOCTLs</title>
<sect2>
- <title>The Translation Table Manager (TTM)</title>
+ <title>Open and Close</title>
+ <synopsis>int (*firstopen) (struct drm_device *);
+void (*lastclose) (struct drm_device *);
+int (*open) (struct drm_device *, struct drm_file *);
+void (*preclose) (struct drm_device *, struct drm_file *);
+void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
+ <abstract>Open and close handlers. None of those methods are mandatory.
+ </abstract>
<para>
- TTM was developed by Tungsten Graphics, primarily by Thomas
- Hellström, and is intended to be a flexible, high performance
- graphics memory manager.
+ The <methodname>firstopen</methodname> method is called by the DRM core
+ when an application opens a device that has no other opened file handle.
+ Similarly the <methodname>lastclose</methodname> method is called when
+ the last application holding a file handle opened on the device closes
+ it. Both methods are mostly used by UMS (User Mode Setting) drivers to
+ acquire and release device resources; KMS drivers should do so in their
+ <methodname>load</methodname> and <methodname>unload</methodname>
+ methods instead.
</para>
<para>
- Drivers wishing to support TTM must fill out a drm_bo_driver
- structure.
+ Note that the <methodname>lastclose</methodname> method is also called
+ at module unload time or, for hot-pluggable devices, when the device is
+ unplugged. The <methodname>firstopen</methodname> and
+ <methodname>lastclose</methodname> calls can thus be unbalanced.
</para>
<para>
- TTM design background and information belongs here.
+ The <methodname>open</methodname> method is called every time the device
+ is opened by an application. Drivers can allocate per-file private data
+ in this method and store it in the struct
+ <structname>drm_file</structname> <structfield>driver_priv</structfield>
+ field. Note that the <methodname>open</methodname> method is called
+ before <methodname>firstopen</methodname>.
+ </para>
+ <para>
+ The close operation is split into <methodname>preclose</methodname> and
+ <methodname>postclose</methodname> methods. Drivers must stop and
+ clean up all per-file operations in the <methodname>preclose</methodname>
+ method. For instance pending vertical blanking and page flip events must
+ be cancelled. No per-file operation is allowed on the file handle after
+ returning from the <methodname>preclose</methodname> method.
+ </para>
+ <para>
+ Finally the <methodname>postclose</methodname> method is called as the
+ last step of the close operation, right before calling the
+ <methodname>lastclose</methodname> method if no other open file handle
+ exists for the device. Drivers that have allocated per-file private data
+ in the <methodname>open</methodname> method should free it here.
+ </para>
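+ <para>
+ A minimal sketch of matching <methodname>open</methodname> and
+ <methodname>postclose</methodname> handlers, assuming a hypothetical
+ struct <structname>my_file_private</structname> for per-file data:
+ <programlisting><![CDATA[
+ static int my_open(struct drm_device *dev, struct drm_file *file)
+ {
+     struct my_file_private *priv;
+
+     priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+     if (!priv)
+         return -ENOMEM;
+
+     file->driver_priv = priv;
+     return 0;
+ }
+
+ static void my_postclose(struct drm_device *dev, struct drm_file *file)
+ {
+     kfree(file->driver_priv);
+ }
+ ]]></programlisting>
+ </para>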
+ <para>
+ The <methodname>lastclose</methodname> method should restore CRTC and
+ plane properties to their default values, so that a subsequent open of the
+ device will not inherit state from the previous user.
</para>
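+ <para>
+ A minimal sketch of per-file private data management is shown below. The
+ foo_file structure and the foo_ handler names are assumptions made for
+ illustration.
+ <programlisting>
+ struct foo_file {
+ 	/* driver-specific per-file state */
+ };
+
+ static int foo_open(struct drm_device *dev, struct drm_file *file)
+ {
+ 	struct foo_file *priv;
+
+ 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
+
+ 	file->driver_priv = priv;
+ 	return 0;
+ }
+
+ static void foo_preclose(struct drm_device *dev, struct drm_file *file)
+ {
+ 	/* Cancel pending vblank and page flip events owned by this file. */
+ }
+
+ static void foo_postclose(struct drm_device *dev, struct drm_file *file)
+ {
+ 	kfree(file->driver_priv);
+ }
+ </programlisting>
+ </para>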
</sect2>
-
<sect2>
- <title>The Graphics Execution Manager (GEM)</title>
+ <title>File Operations</title>
+ <synopsis>const struct file_operations *fops</synopsis>
+ <abstract>File operations for the DRM device node.</abstract>
<para>
- GEM is an Intel project, authored by Eric Anholt and Keith
- Packard. It provides simpler interfaces than TTM, and is well
- suited for UMA devices.
+ Drivers must define the file operations structure that forms the DRM
+ userspace API entry point, even though most of those operations are
+ implemented in the DRM core. The <methodname>open</methodname>,
+ <methodname>release</methodname> and <methodname>ioctl</methodname>
+ operations are handled by
+ <programlisting>
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ #ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+ #endif
+ </programlisting>
</para>
<para>
- GEM-enabled drivers must provide gem_init_object() and
- gem_free_object() callbacks to support the core memory
- allocation routines. They should also provide several driver-specific
- ioctls to support command execution, pinning, buffer
- read &amp; write, mapping, and domain ownership transfers.
+ Drivers that implement private ioctls that require 32/64-bit
+ compatibility support must provide their own
+ <methodname>compat_ioctl</methodname> handler that processes private
+ ioctls and calls <function>drm_compat_ioctl</function> for core ioctls.
</para>
<para>
- On a fundamental level, GEM involves several operations:
- <itemizedlist>
- <listitem>Memory allocation and freeing</listitem>
- <listitem>Command execution</listitem>
- <listitem>Aperture management at command execution time</listitem>
- </itemizedlist>
- Buffer object allocation is relatively
- straightforward and largely provided by Linux's shmem layer, which
- provides memory to back each object. When mapped into the GTT
- or used in a command buffer, the backing pages for an object are
- flushed to memory and marked write combined so as to be coherent
- with the GPU. Likewise, if the CPU accesses an object after the GPU
- has finished rendering to the object, then the object must be made
- coherent with the CPU's view
- of memory, usually involving GPU cache flushing of various kinds.
- This core CPU&lt;-&gt;GPU coherency management is provided by a
- device-specific ioctl, which evaluates an object's current domain and
- performs any necessary flushing or synchronization to put the object
- into the desired coherency domain (note that the object may be busy,
- i.e. an active render target; in that case, setting the domain
- blocks the client and waits for rendering to complete before
- performing any necessary flushing operations).
- </para>
- <para>
- Perhaps the most important GEM function is providing a command
- execution interface to clients. Client programs construct command
- buffers containing references to previously allocated memory objects,
- and then submit them to GEM. At that point, GEM takes care to bind
- all the objects into the GTT, execute the buffer, and provide
- necessary synchronization between clients accessing the same buffers.
- This often involves evicting some objects from the GTT and re-binding
- others (a fairly expensive operation), and providing relocation
- support which hides fixed GTT offsets from clients. Clients must
- take care not to submit command buffers that reference more objects
- than can fit in the GTT; otherwise, GEM will reject them and no rendering
- will occur. Similarly, if several objects in the buffer require
- fence registers to be allocated for correct rendering (e.g. 2D blits
- on pre-965 chips), care must be taken not to require more fence
- registers than are available to the client. Such resource management
- should be abstracted from the client in libdrm.
+ The <methodname>read</methodname> and <methodname>poll</methodname>
+ operations provide support for reading DRM events and polling for them.
+ They are implemented by
+ <programlisting>
+ .poll = drm_poll,
+ .read = drm_read,
+ .fasync = drm_fasync,
+ .llseek = no_llseek,
+ </programlisting>
+ </para>
+ <para>
+ The memory mapping implementation varies depending on how the driver
+ manages memory. Pre-GEM drivers will use <function>drm_mmap</function>,
+ while GEM-aware drivers will use <function>drm_gem_mmap</function>. See
+ <xref linkend="drm-gem"/>.
+ <programlisting>
+ .mmap = drm_gem_mmap,
+ </programlisting>
+ </para>
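+ <para>
+ Putting those fragments together, the complete file operations structure
+ of a GEM-based driver typically looks like the following sketch. The
+ foo_fops name is an assumption made for illustration.
+ <programlisting>
+ static const struct file_operations foo_fops = {
+ 	.owner		= THIS_MODULE,
+ 	.open		= drm_open,
+ 	.release	= drm_release,
+ 	.unlocked_ioctl	= drm_ioctl,
+ #ifdef CONFIG_COMPAT
+ 	.compat_ioctl	= drm_compat_ioctl,
+ #endif
+ 	.poll		= drm_poll,
+ 	.read		= drm_read,
+ 	.fasync		= drm_fasync,
+ 	.llseek		= no_llseek,
+ 	.mmap		= drm_gem_mmap,
+ };
+ </programlisting>
+ </para>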
+ <para>
+ No other file operation is supported by the DRM API.
+ </para>
+ </sect2>
+ <sect2>
+ <title>IOCTLs</title>
+ <synopsis>struct drm_ioctl_desc *ioctls;
+int num_ioctls;</synopsis>
+ <abstract>Driver-specific ioctl descriptor table.</abstract>
+ <para>
+ Driver-specific ioctl numbers start at DRM_COMMAND_BASE. The ioctl
+ descriptor table is indexed by the ioctl number's offset from that base
+ value. Drivers can use the DRM_IOCTL_DEF_DRV() macro to initialize the
+ table entries, as in the sketch after the flags list below.
+ </para>
+ <para>
+ <programlisting>DRM_IOCTL_DEF_DRV(ioctl, func, flags)</programlisting>
+ <para>
+ <parameter>ioctl</parameter> is the ioctl name. Drivers must define
+ the DRM_##ioctl and DRM_IOCTL_##ioctl macros to the ioctl number
+ offset from DRM_COMMAND_BASE and the ioctl number respectively. The
+ first macro is private to the device while the second must be exposed
+ to userspace in a public header.
+ </para>
+ <para>
+ <parameter>func</parameter> is a pointer to the ioctl handler function
+ compatible with the <type>drm_ioctl_t</type> type.
+ <programlisting>typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);</programlisting>
+ </para>
+ <para>
+ <parameter>flags</parameter> is a bitmask combination of the following
+ values. It restricts how the ioctl is allowed to be called.
+ <itemizedlist>
+ <listitem><para>
+ DRM_AUTH - Only authenticated callers allowed
+ </para></listitem>
+ <listitem><para>
+ DRM_MASTER - The ioctl can only be called on the master file
+ handle
+ </para></listitem>
+ <listitem><para>
+ DRM_ROOT_ONLY - Only callers with the SYSADMIN capability allowed
+ </para></listitem>
+ <listitem><para>
+ DRM_CONTROL_ALLOW - The ioctl can only be called on a control
+ device
+ </para></listitem>
+ <listitem><para>
+ DRM_UNLOCKED - The ioctl handler will be called without locking
+ the DRM global mutex
+ </para></listitem>
+ </itemizedlist>
+ </para>
</para>
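+ <para>
+ The following sketch shows a driver exposing a single private ioctl. The
+ FOO_GET_PARAM name, the drm_foo_get_param structure and the handler are
+ assumptions made for illustration.
+ <programlisting>
+ /* In the public userspace header. */
+ #define DRM_FOO_GET_PARAM	0x00
+ #define DRM_IOCTL_FOO_GET_PARAM	DRM_IOWR(DRM_COMMAND_BASE + \
+ 					 DRM_FOO_GET_PARAM, \
+ 					 struct drm_foo_get_param)
+
+ /* In the driver. */
+ static int foo_get_param_ioctl(struct drm_device *dev, void *data,
+ 			       struct drm_file *file_priv)
+ {
+ 	struct drm_foo_get_param *param = data;
+
+ 	/* ... fill in *param ... */
+ 	return 0;
+ }
+
+ static struct drm_ioctl_desc foo_ioctls[] = {
+ 	DRM_IOCTL_DEF_DRV(FOO_GET_PARAM, foo_get_param_ioctl,
+ 			  DRM_AUTH|DRM_UNLOCKED),
+ };
+ </programlisting>
+ </para>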
</sect2>
-
- </sect1>
-
- <!-- Output management -->
- <sect1>
- <title>Output management</title>
- <para>
- At the core of the DRM output management code is a set of
- structures representing CRTCs, encoders, and connectors.
- </para>
- <para>
- A CRTC is an abstraction representing a part of the chip that
- contains a pointer to a scanout buffer. Therefore, the number
- of CRTCs available determines how many independent scanout
- buffers can be active at any given time. The CRTC structure
- contains several fields to support this: a pointer to some video
- memory, a display mode, and an (x, y) offset into the video
- memory to support panning or configurations where one piece of
- video memory spans multiple CRTCs.
- </para>
- <para>
- An encoder takes pixel data from a CRTC and converts it to a
- format suitable for any attached connectors. On some devices,
- it may be possible to have a CRTC send data to more than one
- encoder. In that case, both encoders would receive data from
- the same scanout buffer, resulting in a "cloned" display
- configuration across the connectors attached to each encoder.
- </para>
- <para>
- A connector is the final destination for pixel data on a device,
- and usually connects directly to an external display device like
- a monitor or laptop panel. A connector can only be attached to
- one encoder at a time. The connector is also the structure
- where information about the attached display is kept, so it
- contains fields for display data, EDID data, DPMS &amp;
- connection status, and information about modes supported on the
- attached displays.
- </para>
-<!--!Edrivers/char/drm/drm_crtc.c-->
- </sect1>
-
- <sect1>
- <title>Framebuffer management</title>
- <para>
- Clients need to provide a framebuffer object which provides a source
- of pixels for a CRTC to deliver to the encoder(s) and ultimately the
- connector(s). A framebuffer is fundamentally a driver-specific memory
- object, made into an opaque handle by the DRM's addfb() function.
- Once a framebuffer has been created this way, it may be passed to the
- KMS mode setting routines for use in a completed configuration.
- </para>
</sect1>
<sect1>
@@ -812,15 +2355,24 @@ void intel_crt_init(struct drm_device *dev)
</para>
</sect1>
+ <!-- Internals: suspend/resume -->
+
<sect1>
- <title>Suspend/resume</title>
+ <title>Suspend/Resume</title>
+ <para>
+ The DRM core provides some suspend/resume code, but drivers wanting full
+ suspend/resume support should provide save() and restore() functions.
+ These are called at suspend, hibernate, or resume time, and should perform
+ any state save or restore required by your device across suspend or
+ hibernate states.
+ </para>
+ <synopsis>int (*suspend) (struct drm_device *, pm_message_t state);
+int (*resume) (struct drm_device *);</synopsis>
<para>
- The DRM core provides some suspend/resume code, but drivers
- wanting full suspend/resume support should provide save() and
- restore() functions. These are called at suspend,
- hibernate, or resume time, and should perform any state save or
- restore required by your device across suspend or hibernate
- states.
+ These are legacy suspend and resume methods. New drivers should use the
+ power management interface provided by their bus type (usually through
+ the struct <structname>device_driver</structname>
+ <structfield>pm</structfield> field, which points to a struct
+ <structname>dev_pm_ops</structname>) and set these methods to NULL.
</para>
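+ <para>
+ For a PCI device, the preferred bus-level approach looks roughly like the
+ sketch below; the foo_ names are assumptions made for illustration.
+ <programlisting>
+ static int foo_pm_suspend(struct device *dev)
+ {
+ 	/* Save device state before the system enters suspend. */
+ 	return 0;
+ }
+
+ static int foo_pm_resume(struct device *dev)
+ {
+ 	/* Restore device state on resume. */
+ 	return 0;
+ }
+
+ static const struct dev_pm_ops foo_pm_ops = {
+ 	.suspend = foo_pm_suspend,
+ 	.resume = foo_pm_resume,
+ };
+
+ static struct pci_driver foo_pci_driver = {
+ 	/* ... */
+ 	.driver.pm = &amp;foo_pm_ops,
+ };
+ </programlisting>
+ </para>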
</sect1>
@@ -833,6 +2385,35 @@ void intel_crt_init(struct drm_device *dev)
</sect1>
</chapter>
+<!-- TODO
+
+- Add a glossary
+- Document the struct_mutex catch-all lock
+- Document connector properties
+
+- Why is the load method optional?
+- What are drivers supposed to set the initial display state to, and how?
+ Connector's DPMS states are not initialized and are thus equal to
+ DRM_MODE_DPMS_ON. The fbcon compatibility layer calls
+ drm_helper_disable_unused_functions(), which disables unused encoders and
+  CRTCs but doesn't touch the connectors' DPMS state, and calls
+  drm_helper_connector_dpms() in reaction to fbdev blanking events. Do drivers
+ that don't implement (or just don't use) fbcon compatibility need to call
+ those functions themselves?
+- KMS drivers must call drm_vblank_pre_modeset() and drm_vblank_post_modeset()
+ around mode setting. Should this be done in the DRM core?
+- vblank_disable_allowed is set to 1 in the first drm_vblank_post_modeset()
+ call and never set back to 0. It seems to be safe to permanently set it to 1
+  in drm_vblank_init() for KMS drivers, and it might be safe for UMS drivers as
+ well. This should be investigated.
+- crtc and connector .save and .restore operations are only used internally in
+ drivers, should they be removed from the core?
+- encoder mid-layer .save and .restore operations are only used internally in
+ drivers, should they be removed from the core?
+- encoder mid-layer .detect operation is only used internally in drivers,
+ should it be removed from the core?
+-->
+
<!-- External interfaces -->
<chapter id="drmExternals">
@@ -853,6 +2434,42 @@ void intel_crt_init(struct drm_device *dev)
Cover generic ioctls and sysfs layout here. We only need high-level
info, since man pages should cover the rest.
</para>
+
+ <!-- External: vblank handling -->
+
+ <sect1>
+ <title>VBlank event handling</title>
+ <para>
+ The DRM core exposes two vertical blank related ioctls:
+ <variablelist>
+ <varlistentry>
+ <term>DRM_IOCTL_WAIT_VBLANK</term>
+ <listitem>
+ <para>
+ This takes a struct drm_wait_vblank structure as its argument,
+ and it is used to block or request a signal when a specified
+ vblank event occurs.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRM_IOCTL_MODESET_CTL</term>
+ <listitem>
+ <para>
+ This should be called by application-level drivers before and
+ after mode setting, since on many devices the vertical blank
+ counter is reset at that time. Internally, the DRM snapshots
+ the last vblank count when the ioctl is called with the
+ _DRM_PRE_MODESET command, so that the counter won't go backwards
+ (which is dealt with when _DRM_POST_MODESET is used).
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+<!--!Edrivers/char/drm/drm_irq.c-->
+ </para>
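+ <para>
+ A minimal userspace fragment that waits for the next vertical blank on
+ the first CRTC using the ioctl directly is sketched below; error handling
+ is elided, and real applications would normally use libdrm's
+ drmWaitVBlank() instead.
+ <programlisting>
+ struct drm_wait_vblank vbl;
+
+ memset(&amp;vbl, 0, sizeof(vbl));
+ vbl.request.type = _DRM_VBLANK_RELATIVE;
+ vbl.request.sequence = 1;	/* one vblank from now */
+
+ ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &amp;vbl);
+ </programlisting>
+ </para>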
+ </sect1>
+
</chapter>
<!-- API reference -->
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 1e0a9e17c31d..f94d4c818fc7 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -1448,8 +1448,7 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
case ACPI_VIDEO_NOTIFY_SWITCH: /* User requested a switch,
* most likely via hotkey. */
acpi_bus_generate_proc_event(device, event, 0);
- if (!acpi_notifier_call_chain(device, event, 0))
- keycode = KEY_SWITCHVIDEOMODE;
+ keycode = KEY_SWITCHVIDEOMODE;
break;
case ACPI_VIDEO_NOTIFY_PROBE: /* User plugged in or removed a video
@@ -1479,8 +1478,9 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
break;
}
- if (event != ACPI_VIDEO_NOTIFY_SWITCH)
- acpi_notifier_call_chain(device, event, 0);
+ if (acpi_notifier_call_chain(device, event, 0))
+ /* Something vetoed the keypress. */
+ keycode = 0;
if (keycode) {
input_report_key(input, keycode, 1);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 58e32f7c3229..e01f5eaaec82 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -84,40 +84,33 @@ static struct _intel_private {
#define IS_IRONLAKE intel_private.driver->is_ironlake
#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
-int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
- struct scatterlist **sg_list, int *num_sg)
+static int intel_gtt_map_memory(struct page **pages,
+ unsigned int num_entries,
+ struct sg_table *st)
{
- struct sg_table st;
struct scatterlist *sg;
int i;
- if (*sg_list)
- return 0; /* already mapped (for e.g. resume */
-
DBG("try mapping %lu pages\n", (unsigned long)num_entries);
- if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
+ if (sg_alloc_table(st, num_entries, GFP_KERNEL))
goto err;
- *sg_list = sg = st.sgl;
-
- for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
+ for_each_sg(st->sgl, sg, num_entries, i)
sg_set_page(sg, pages[i], PAGE_SIZE, 0);
- *num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
- num_entries, PCI_DMA_BIDIRECTIONAL);
- if (unlikely(!*num_sg))
+ if (!pci_map_sg(intel_private.pcidev,
+ st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
goto err;
return 0;
err:
- sg_free_table(&st);
+ sg_free_table(st);
return -ENOMEM;
}
-EXPORT_SYMBOL(intel_gtt_map_memory);
-void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
+static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
{
struct sg_table st;
DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
@@ -130,7 +123,6 @@ void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
sg_free_table(&st);
}
-EXPORT_SYMBOL(intel_gtt_unmap_memory);
static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
@@ -674,9 +666,14 @@ static int intel_gtt_init(void)
gtt_map_size = intel_private.base.gtt_total_entries * 4;
- intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
- gtt_map_size);
- if (!intel_private.gtt) {
+ intel_private.gtt = NULL;
+ if (INTEL_GTT_GEN < 6)
+ intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
+ gtt_map_size);
+ if (intel_private.gtt == NULL)
+ intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
+ gtt_map_size);
+ if (intel_private.gtt == NULL) {
intel_private.driver->cleanup();
iounmap(intel_private.registers);
return -ENOMEM;
@@ -879,8 +876,7 @@ static bool i830_check_flags(unsigned int flags)
return false;
}
-void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
- unsigned int sg_len,
+void intel_gtt_insert_sg_entries(struct sg_table *st,
unsigned int pg_start,
unsigned int flags)
{
@@ -892,12 +888,11 @@ void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
/* sg may merge pages, but we have to separate
* per-page addr for GTT */
- for_each_sg(sg_list, sg, sg_len, i) {
+ for_each_sg(st->sgl, sg, st->nents, i) {
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (m = 0; m < len; m++) {
dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- intel_private.driver->write_entry(addr,
- j, flags);
+ intel_private.driver->write_entry(addr, j, flags);
j++;
}
}
@@ -905,8 +900,10 @@ void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
-void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
- struct page **pages, unsigned int flags)
+static void intel_gtt_insert_pages(unsigned int first_entry,
+ unsigned int num_entries,
+ struct page **pages,
+ unsigned int flags)
{
int i, j;
@@ -917,7 +914,6 @@ void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
}
readl(intel_private.gtt+j-1);
}
-EXPORT_SYMBOL(intel_gtt_insert_pages);
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
off_t pg_start, int type)
@@ -953,13 +949,15 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
global_cache_flush();
if (intel_private.base.needs_dmar) {
- ret = intel_gtt_map_memory(mem->pages, mem->page_count,
- &mem->sg_list, &mem->num_sg);
+ struct sg_table st;
+
+ ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
if (ret != 0)
return ret;
- intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
- pg_start, type);
+ intel_gtt_insert_sg_entries(&st, pg_start, type);
+ mem->sg_list = st.sgl;
+ mem->num_sg = st.nents;
} else
intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
type);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 90e28081712d..18321b68b880 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -22,7 +22,7 @@ menuconfig DRM
config DRM_USB
tristate
depends on DRM
- depends on USB_ARCH_HAS_HCD
+ depends on USB_SUPPORT && USB_ARCH_HAS_HCD
select USB
config DRM_KMS_HELPER
@@ -54,6 +54,21 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver
uses it.
+config DRM_GEM_CMA_HELPER
+ bool
+ depends on DRM
+ help
+ Choose this if you need the GEM CMA helper functions
+
+config DRM_KMS_CMA_HELPER
+ bool
+ select DRM_GEM_CMA_HELPER
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ help
+ Choose this if you need the KMS CMA helper functions
+
config DRM_TDFX
tristate "3dfx Banshee/Voodoo3+"
depends on DRM && PCI
@@ -193,3 +208,5 @@ source "drivers/gpu/drm/ast/Kconfig"
source "drivers/gpu/drm/mgag200/Kconfig"
source "drivers/gpu/drm/cirrus/Kconfig"
+
+source "drivers/gpu/drm/shmobile/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index f65f65ed0ddf..2ff5cefe9ead 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -15,11 +15,13 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_trace_points.o drm_global.o drm_prime.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
+drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-usb-y := drm_usb.o
drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -45,4 +47,5 @@ obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
+obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index d4af9edcbb97..20a437f88780 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -94,7 +94,6 @@ struct ast_private {
struct drm_global_reference mem_global_ref;
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
- atomic_t validate_sequence;
} ttm;
struct drm_gem_object *cursor_cache;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index a712cafcfa1d..e87f1ff223f4 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -582,7 +582,6 @@ static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.mode_set_base = ast_crtc_mode_set_base,
.disable = ast_crtc_disable,
.load_lut = ast_crtc_load_lut,
- .disable = ast_crtc_disable,
.prepare = ast_crtc_prepare,
.commit = ast_crtc_commit,
@@ -737,6 +736,7 @@ static int ast_get_modes(struct drm_connector *connector)
if (edid) {
drm_mode_connector_update_edid_property(&ast_connector->base, edid);
ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
return ret;
} else
drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 64ea597cb6d3..5d045647a3fc 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -143,7 +143,6 @@ struct cirrus_device {
struct drm_global_reference mem_global_ref;
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
- atomic_t validate_sequence;
} ttm;
bool mm_inited;
};
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 08758e061478..4a4274b348b6 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -37,12 +37,13 @@ drm_clflush_page(struct page *page)
{
uint8_t *page_virtual;
unsigned int i;
+ const int size = boot_cpu_data.x86_clflush_size;
if (unlikely(page == NULL))
return;
page_virtual = kmap_atomic(page);
- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+ for (i = 0; i < PAGE_SIZE; i += size)
clflush(page_virtual + i);
kunmap_atomic(page_virtual);
}
@@ -100,6 +101,31 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
EXPORT_SYMBOL(drm_clflush_pages);
void
+drm_clflush_sg(struct sg_table *st)
+{
+#if defined(CONFIG_X86)
+ if (cpu_has_clflush) {
+ struct scatterlist *sg;
+ int i;
+
+ mb();
+ for_each_sg(st->sgl, sg, st->nents, i)
+ drm_clflush_page(sg_page(sg));
+ mb();
+
+ return;
+ }
+
+ if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+ printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+ WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_sg);
+
+void
drm_clflush_virt_range(char *addr, unsigned long length)
{
#if defined(CONFIG_X86)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6fbfc244748f..9c346a50379f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -294,6 +294,8 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
{
int ret;
+ kref_init(&fb->refcount);
+
ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
if (ret)
return ret;
@@ -307,6 +309,38 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
}
EXPORT_SYMBOL(drm_framebuffer_init);
+static void drm_framebuffer_free(struct kref *kref)
+{
+ struct drm_framebuffer *fb =
+ container_of(kref, struct drm_framebuffer, refcount);
+ fb->funcs->destroy(fb);
+}
+
+/**
+ * drm_framebuffer_unreference - unref a framebuffer
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ */
+void drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+ DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ kref_put(&fb->refcount, drm_framebuffer_free);
+}
+EXPORT_SYMBOL(drm_framebuffer_unreference);
+
+/**
+ * drm_framebuffer_reference - incr the fb refcnt
+ */
+void drm_framebuffer_reference(struct drm_framebuffer *fb)
+{
+ DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ kref_get(&fb->refcount);
+}
+EXPORT_SYMBOL(drm_framebuffer_reference);
+
/**
* drm_framebuffer_cleanup - remove a framebuffer object
* @fb: framebuffer to remove
@@ -320,6 +354,32 @@ EXPORT_SYMBOL(drm_framebuffer_init);
void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
+ /*
+ * This could be moved to drm_framebuffer_remove(), but for
+ * debugging is nice to keep around the list of fb's that are
+ * no longer associated w/ a drm_file but are not unreferenced
+ * yet. (i915 and omapdrm have debugfs files which will show
+ * this.)
+ */
+ drm_mode_object_put(dev, &fb->base);
+ list_del(&fb->head);
+ dev->mode_config.num_fb--;
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+/**
+ * drm_framebuffer_remove - remove and unreference a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Scans all the CRTCs and planes in @dev's mode_config. If they're
+ * using @fb, removes it, setting it to NULL.
+ */
+void drm_framebuffer_remove(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
struct drm_crtc *crtc;
struct drm_plane *plane;
struct drm_mode_set set;
@@ -350,11 +410,11 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
}
}
- drm_mode_object_put(dev, &fb->base);
- list_del(&fb->head);
- dev->mode_config.num_fb--;
+ list_del(&fb->filp_head);
+
+ drm_framebuffer_unreference(fb);
}
-EXPORT_SYMBOL(drm_framebuffer_cleanup);
+EXPORT_SYMBOL(drm_framebuffer_remove);
/**
* drm_crtc_init - Initialise a new CRTC object
@@ -377,6 +437,7 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
crtc->dev = dev;
crtc->funcs = funcs;
+ crtc->invert_dimensions = false;
mutex_lock(&dev->mode_config.mutex);
@@ -1031,11 +1092,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
}
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
- fb->funcs->destroy(fb);
- }
-
- list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
- crtc->funcs->destroy(crtc);
+ drm_framebuffer_remove(fb);
}
list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
@@ -1043,6 +1100,10 @@ void drm_mode_config_cleanup(struct drm_device *dev)
plane->funcs->destroy(plane);
}
+ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+ crtc->funcs->destroy(crtc);
+ }
+
idr_remove_all(&dev->mode_config.crtc_idr);
idr_destroy(&dev->mode_config.crtc_idr);
}
@@ -1852,6 +1913,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (crtc_req->mode_valid) {
+ int hdisplay, vdisplay;
/* If we have a mode we need a framebuffer. */
/* If we pass -1, set the mode with the currently bound fb */
if (crtc_req->fb_id == -1) {
@@ -1887,14 +1949,20 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- if (mode->hdisplay > fb->width ||
- mode->vdisplay > fb->height ||
- crtc_req->x > fb->width - mode->hdisplay ||
- crtc_req->y > fb->height - mode->vdisplay) {
- DRM_DEBUG_KMS("Invalid CRTC viewport %ux%u+%u+%u for fb size %ux%u.\n",
- mode->hdisplay, mode->vdisplay,
- crtc_req->x, crtc_req->y,
- fb->width, fb->height);
+ hdisplay = mode->hdisplay;
+ vdisplay = mode->vdisplay;
+
+ if (crtc->invert_dimensions)
+ swap(hdisplay, vdisplay);
+
+ if (hdisplay > fb->width ||
+ vdisplay > fb->height ||
+ crtc_req->x > fb->width - hdisplay ||
+ crtc_req->y > fb->height - vdisplay) {
+ DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+ fb->width, fb->height,
+ hdisplay, vdisplay, crtc_req->x, crtc_req->y,
+ crtc->invert_dimensions ? " (inverted)" : "");
ret = -ENOSPC;
goto out;
}
@@ -2169,6 +2237,8 @@ static int format_check(const struct drm_mode_fb_cmd2 *r)
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
case DRM_FORMAT_YUV411:
@@ -2335,11 +2405,7 @@ int drm_mode_rmfb(struct drm_device *dev,
goto out;
}
- /* TODO release all crtc connected to the framebuffer */
- /* TODO unhock the destructor from the buffer object */
-
- list_del(&fb->filp_head);
- fb->funcs->destroy(fb);
+ drm_framebuffer_remove(fb);
out:
mutex_unlock(&dev->mode_config.mutex);
@@ -2489,8 +2555,7 @@ void drm_fb_release(struct drm_file *priv)
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
- list_del(&fb->filp_head);
- fb->funcs->destroy(fb);
+ drm_framebuffer_remove(fb);
}
mutex_unlock(&dev->mode_config.mutex);
}
@@ -3489,6 +3554,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
struct drm_framebuffer *fb;
struct drm_pending_vblank_event *e = NULL;
unsigned long flags;
+ int hdisplay, vdisplay;
int ret = -EINVAL;
if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -3518,14 +3584,19 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
goto out;
fb = obj_to_fb(obj);
- if (crtc->mode.hdisplay > fb->width ||
- crtc->mode.vdisplay > fb->height ||
- crtc->x > fb->width - crtc->mode.hdisplay ||
- crtc->y > fb->height - crtc->mode.vdisplay) {
- DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d.\n",
- fb->width, fb->height,
- crtc->mode.hdisplay, crtc->mode.vdisplay,
- crtc->x, crtc->y);
+ hdisplay = crtc->mode.hdisplay;
+ vdisplay = crtc->mode.vdisplay;
+
+ if (crtc->invert_dimensions)
+ swap(hdisplay, vdisplay);
+
+ if (hdisplay > fb->width ||
+ vdisplay > fb->height ||
+ crtc->x > fb->width - hdisplay ||
+ crtc->y > fb->height - vdisplay) {
+ DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+ fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
+ crtc->invert_dimensions ? " (inverted)" : "");
ret = -ENOSPC;
goto out;
}
@@ -3718,6 +3789,8 @@ int drm_format_num_planes(uint32_t format)
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
return 2;
default:
return 1;
@@ -3751,6 +3824,8 @@ int drm_format_plane_cpp(uint32_t format, int plane)
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
return plane ? 2 : 1;
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 9238de4009fa..a2c6eb6e0dd8 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -140,10 +140,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
@@ -152,19 +152,19 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index b7ee230572b7..289c9b18c0d3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -161,7 +161,7 @@ MODULE_PARM_DESC(edid_fixup,
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
*/
-bool drm_edid_block_valid(u8 *raw_edid, int block)
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
{
int i;
u8 csum = 0;
@@ -184,7 +184,9 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
for (i = 0; i < EDID_LENGTH; i++)
csum += raw_edid[i];
if (csum) {
- DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+ if (print_bad_edid) {
+ DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+ }
/* allow CEA to slide through, switches mangle this */
if (raw_edid[0] != 0x02)
@@ -210,7 +212,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
return 1;
bad:
- if (raw_edid) {
+ if (raw_edid && print_bad_edid) {
printk(KERN_ERR "Raw EDID:\n");
print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
raw_edid, EDID_LENGTH, false);
@@ -234,7 +236,7 @@ bool drm_edid_is_valid(struct edid *edid)
return false;
for (i = 0; i <= edid->extensions; i++)
- if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i))
+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
return false;
return true;
@@ -257,6 +259,8 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
int block, int len)
{
unsigned char start = block * EDID_LENGTH;
+ unsigned char segment = block >> 1;
+ unsigned char xfers = segment ? 3 : 2;
int ret, retries = 5;
/* The core i2c driver will automatically retry the transfer if the
@@ -268,6 +272,11 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
do {
struct i2c_msg msgs[] = {
{
+ .addr = DDC_SEGMENT_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &segment,
+ }, {
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
@@ -279,15 +288,21 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
.buf = buf,
}
};
- ret = i2c_transfer(adapter, msgs, 2);
+
+ /*
+ * Avoid sending the segment addr to not upset non-compliant ddc
+ * monitors.
+ */
+ ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
+
if (ret == -ENXIO) {
DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
adapter->name);
break;
}
- } while (ret != 2 && --retries);
+ } while (ret != xfers && --retries);
- return ret == 2 ? 0 : -1;
+ return ret == xfers ? 0 : -1;
}
static bool drm_edid_is_zero(u8 *in_edid, int length)
@@ -306,6 +321,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
int i, j = 0, valid_extensions = 0;
u8 *block, *new;
+ bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
@@ -314,7 +330,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block, 0))
+ if (drm_edid_block_valid(block, 0, print_bad_edid))
break;
if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
connector->null_edid_counter++;
@@ -339,7 +355,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j)) {
+ if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
valid_extensions++;
break;
}
@@ -362,8 +378,11 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
return block;
carp:
- dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
- drm_get_connector_name(connector), j);
+ if (print_bad_edid) {
+ dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
+ drm_get_connector_name(connector), j);
+ }
+ connector->bad_edid_counter++;
out:
kfree(block);
@@ -402,10 +421,7 @@ struct edid *drm_get_edid(struct drm_connector *connector,
if (drm_probe_ddc(adapter))
edid = (struct edid *)drm_do_get_edid(connector, adapter);
- connector->display_info.raw_edid = (char *)edid;
-
return edid;
-
}
EXPORT_SYMBOL(drm_get_edid);
@@ -1523,16 +1539,57 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
}
static int
+cea_db_payload_len(const u8 *db)
+{
+ return db[0] & 0x1f;
+}
+
+static int
+cea_db_tag(const u8 *db)
+{
+ return db[0] >> 5;
+}
+
+static int
+cea_revision(const u8 *cea)
+{
+ return cea[1];
+}
+
+static int
+cea_db_offsets(const u8 *cea, int *start, int *end)
+{
+ /* Data block offset in CEA extension block */
+ *start = 4;
+ *end = cea[2];
+ if (*end == 0)
+ *end = 127;
+ if (*end < 4 || *end > 127)
+ return -ERANGE;
+ return 0;
+}
+
+#define for_each_cea_db(cea, i, start, end) \
+ for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
+
+static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
u8 * cea = drm_find_cea_extension(edid);
u8 * db, dbl;
int modes = 0;
- if (cea && cea[1] >= 3) {
- for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
- dbl = db[0] & 0x1f;
- if (((db[0] & 0xe0) >> 5) == VIDEO_BLOCK)
+ if (cea && cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (cea_db_offsets(cea, &start, &end))
+ return 0;
+
+ for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+ dbl = cea_db_payload_len(db);
+
+ if (cea_db_tag(db) == VIDEO_BLOCK)
modes += do_cea_modes (connector, db+1, dbl);
}
}
@@ -1541,19 +1598,28 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
}
static void
-parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
+parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
{
- connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
-
- connector->dvi_dual = db[6] & 1;
- connector->max_tmds_clock = db[7] * 5;
+ u8 len = cea_db_payload_len(db);
- connector->latency_present[0] = db[8] >> 7;
- connector->latency_present[1] = (db[8] >> 6) & 1;
- connector->video_latency[0] = db[9];
- connector->audio_latency[0] = db[10];
- connector->video_latency[1] = db[11];
- connector->audio_latency[1] = db[12];
+ if (len >= 6) {
+ connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
+ connector->dvi_dual = db[6] & 1;
+ }
+ if (len >= 7)
+ connector->max_tmds_clock = db[7] * 5;
+ if (len >= 8) {
+ connector->latency_present[0] = db[8] >> 7;
+ connector->latency_present[1] = (db[8] >> 6) & 1;
+ }
+ if (len >= 9)
+ connector->video_latency[0] = db[9];
+ if (len >= 10)
+ connector->audio_latency[0] = db[10];
+ if (len >= 11)
+ connector->video_latency[1] = db[11];
+ if (len >= 12)
+ connector->audio_latency[1] = db[12];
DRM_LOG_KMS("HDMI: DVI dual %d, "
"max TMDS clock %d, "
@@ -1577,6 +1643,21 @@ monitor_name(struct detailed_timing *t, void *data)
*(u8 **)data = t->data.other_data.data.str.str;
}
+static bool cea_db_is_hdmi_vsdb(const u8 *db)
+{
+ int hdmi_id;
+
+ if (cea_db_tag(db) != VENDOR_BLOCK)
+ return false;
+
+ if (cea_db_payload_len(db) < 5)
+ return false;
+
+ hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+ return hdmi_id == HDMI_IDENTIFIER;
+}
+
/**
* drm_edid_to_eld - build ELD from EDID
* @connector: connector corresponding to the HDMI/DP sink
@@ -1623,29 +1704,40 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
eld[18] = edid->prod_code[0];
eld[19] = edid->prod_code[1];
- if (cea[1] >= 3)
- for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
- dbl = db[0] & 0x1f;
-
- switch ((db[0] & 0xe0) >> 5) {
+ if (cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (cea_db_offsets(cea, &start, &end)) {
+ start = 0;
+ end = 0;
+ }
+
+ for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+ dbl = cea_db_payload_len(db);
+
+ switch (cea_db_tag(db)) {
case AUDIO_BLOCK:
/* Audio Data Block, contains SADs */
sad_count = dbl / 3;
- memcpy(eld + 20 + mnl, &db[1], dbl);
+ if (dbl >= 1)
+ memcpy(eld + 20 + mnl, &db[1], dbl);
break;
case SPEAKER_BLOCK:
- /* Speaker Allocation Data Block */
- eld[7] = db[1];
+ /* Speaker Allocation Data Block */
+ if (dbl >= 1)
+ eld[7] = db[1];
break;
case VENDOR_BLOCK:
/* HDMI Vendor-Specific Data Block */
- if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
+ if (cea_db_is_hdmi_vsdb(db))
parse_hdmi_vsdb(connector, db);
break;
default:
break;
}
}
+ }
eld[5] |= sad_count << 4;
eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
@@ -1723,38 +1815,26 @@ EXPORT_SYMBOL(drm_select_eld);
bool drm_detect_hdmi_monitor(struct edid *edid)
{
u8 *edid_ext;
- int i, hdmi_id;
+ int i;
int start_offset, end_offset;
- bool is_hdmi = false;
edid_ext = drm_find_cea_extension(edid);
if (!edid_ext)
- goto end;
+ return false;
- /* Data block offset in CEA extension block */
- start_offset = 4;
- end_offset = edid_ext[2];
+ if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+ return false;
/*
* Because HDMI identifier is in Vendor Specific Block,
* search it from all data blocks of CEA extension.
*/
- for (i = start_offset; i < end_offset;
- /* Increased by data block len */
- i += ((edid_ext[i] & 0x1f) + 1)) {
- /* Find vendor specific block */
- if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
- hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
- edid_ext[i + 3] << 16;
- /* Find HDMI identifier */
- if (hdmi_id == HDMI_IDENTIFIER)
- is_hdmi = true;
- break;
- }
+ for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+ if (cea_db_is_hdmi_vsdb(&edid_ext[i]))
+ return true;
}
-end:
- return is_hdmi;
+ return false;
}
EXPORT_SYMBOL(drm_detect_hdmi_monitor);
@@ -1786,15 +1866,13 @@ bool drm_detect_monitor_audio(struct edid *edid)
goto end;
}
- /* Data block offset in CEA extension block */
- start_offset = 4;
- end_offset = edid_ext[2];
+ if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+ goto end;
- for (i = start_offset; i < end_offset;
- i += ((edid_ext[i] & 0x1f) + 1)) {
- if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
+ for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+ if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) {
has_audio = true;
- for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
+ for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3)
DRM_DEBUG_KMS("CEA audio format %d\n",
(edid_ext[i + j] >> 3) & 0xf);
goto end;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 0303935d10e2..ea9cdab3d431 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -114,8 +114,8 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
},
};
-static int edid_load(struct drm_connector *connector, char *name,
- char *connector_name)
+static u8 *edid_load(struct drm_connector *connector, char *name,
+ char *connector_name)
{
const struct firmware *fw;
struct platform_device *pdev;
@@ -123,6 +123,7 @@ static int edid_load(struct drm_connector *connector, char *name,
int fwsize, expected;
int builtin = 0, err = 0;
int i, valid_extensions = 0;
+ bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
if (IS_ERR(pdev)) {
@@ -173,7 +174,8 @@ static int edid_load(struct drm_connector *connector, char *name,
}
memcpy(edid, fwdata, fwsize);
- if (!drm_edid_block_valid(edid, 0)) {
+ if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
+ connector->bad_edid_counter++;
DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
name);
kfree(edid);
@@ -185,7 +187,7 @@ static int edid_load(struct drm_connector *connector, char *name,
if (i != valid_extensions + 1)
memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
edid + i * EDID_LENGTH, EDID_LENGTH);
- if (drm_edid_block_valid(edid + i * EDID_LENGTH, i))
+ if (drm_edid_block_valid(edid + i * EDID_LENGTH, i, print_bad_edid))
valid_extensions++;
}
@@ -205,7 +207,6 @@ static int edid_load(struct drm_connector *connector, char *name,
edid = new_edid;
}
- connector->display_info.raw_edid = edid;
DRM_INFO("Got %s EDID base block and %d extension%s from "
"\"%s\" for connector \"%s\"\n", builtin ? "built-in" :
"external", valid_extensions, valid_extensions == 1 ? "" : "s",
@@ -215,7 +216,10 @@ relfw_out:
release_firmware(fw);
out:
- return err;
+ if (err)
+ return ERR_PTR(err);
+
+ return edid;
}
int drm_load_edid_firmware(struct drm_connector *connector)
@@ -223,6 +227,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
char *connector_name = drm_get_connector_name(connector);
char *edidname = edid_firmware, *last, *colon;
int ret;
+ struct edid *edid;
if (*edidname == '\0')
return 0;
@@ -240,13 +245,13 @@ int drm_load_edid_firmware(struct drm_connector *connector)
if (*last == '\n')
*last = '\0';
- ret = edid_load(connector, edidname, connector_name);
- if (ret)
+ edid = (struct edid *) edid_load(connector, edidname, connector_name);
+ if (IS_ERR_OR_NULL(edid))
return 0;
- drm_mode_connector_update_edid_property(connector,
- (struct edid *) connector->display_info.raw_edid);
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
- return drm_add_edid_modes(connector, (struct edid *)
- connector->display_info.raw_edid);
+ return ret;
}
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index ff98a7eb38dd..57459b316adc 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -89,7 +89,7 @@ static const struct drm_display_mode drm_dmt_modes[] = {
976, 1088, 0, 480, 486, 494, 517, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@43Hz, interlace */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
1208, 1264, 0, 768, 768, 772, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
@@ -395,7 +395,7 @@ static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
@@ -506,17 +506,17 @@ static const struct drm_display_mode edid_cea_modes[] = {
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 5 - 1920x1080i@60Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 6 - 1440x480i@60Hz */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 7 - 1440x480i@60Hz */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -531,12 +531,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 10 - 2880x480i@60Hz */
- { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 11 - 2880x480i@60Hz */
- { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
@@ -573,17 +573,17 @@ static const struct drm_display_mode edid_cea_modes[] = {
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 20 - 1920x1080i@50Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 21 - 1440x576i@50Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 22 - 1440x576i@50Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -598,12 +598,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 25 - 2880x576i@50Hz */
- { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 26 - 2880x576i@50Hz */
- { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
@@ -656,12 +656,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 39 - 1920x1080i@50Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 40 - 1920x1080i@100Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
@@ -688,7 +688,7 @@ static const struct drm_display_mode edid_cea_modes[] = {
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 46 - 1920x1080i@120Hz */
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
@@ -705,12 +705,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 50 - 1440x480i@120Hz */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 51 - 1440x480i@120Hz */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -723,12 +723,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 54 - 1440x576i@200Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 55 - 1440x576i@200Hz */
- { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -741,12 +741,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 58 - 1440x480i@240 */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 59 - 1440x480i@240 */
- { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
new file mode 100644
index 000000000000..09e11a5d921a
--- /dev/null
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -0,0 +1,406 @@
+/*
+ * drm kms/fb cma (contiguous memory allocator) helper functions
+ *
+ * Copyright (C) 2012 Analog Device Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Based on udl_fbdev.c
+ * Copyright (C) 2012 Red Hat
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <linux/module.h>
+
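+/*
+ * drm_fb_cma wraps a drm_framebuffer together with its backing CMA GEM
+ * objects, one per plane; the array is sized for the four planes a
+ * struct drm_mode_fb_cmd2 can describe.
+ */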
+struct drm_fb_cma {
+ struct drm_framebuffer fb;
+ struct drm_gem_cma_object *obj[4];
+};
+
+struct drm_fbdev_cma {
+ struct drm_fb_helper fb_helper;
+ struct drm_fb_cma *fb;
+};
+
+static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
+{
+ return container_of(helper, struct drm_fbdev_cma, fb_helper);
+}
+
+static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
+{
+ return container_of(fb, struct drm_fb_cma, fb);
+}
+
+static void drm_fb_cma_destroy(struct drm_framebuffer *fb)
+{
+ struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (fb_cma->obj[i])
+ drm_gem_object_unreference_unlocked(&fb_cma->obj[i]->base);
+ }
+
+ drm_framebuffer_cleanup(fb);
+ kfree(fb_cma);
+}
+
+static int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int *handle)
+{
+ struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+
+ return drm_gem_handle_create(file_priv,
+ &fb_cma->obj[0]->base, handle);
+}
+
+static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
+ .destroy = drm_fb_cma_destroy,
+ .create_handle = drm_fb_cma_create_handle,
+};
+
+static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj,
+ unsigned int num_planes)
+{
+ struct drm_fb_cma *fb_cma;
+ int ret;
+ int i;
+
+ fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL);
+ if (!fb_cma)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs);
+ if (ret) {
+		dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
+ kfree(fb_cma);
+ return ERR_PTR(ret);
+ }
+
+ drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
+
+ for (i = 0; i < num_planes; i++)
+ fb_cma->obj[i] = obj[i];
+
+ return fb_cma;
+}
+
+/**
+ * drm_fb_cma_create() - (struct drm_mode_config_funcs *)->fb_create callback function
+ *
+ * If your hardware has special alignment or pitch requirements these should be
+ * checked before calling this function.
+ */
+struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
+ struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_fb_cma *fb_cma;
+ struct drm_gem_cma_object *objs[4];
+ struct drm_gem_object *obj;
+ unsigned int hsub;
+ unsigned int vsub;
+ int ret;
+ int i;
+
+ hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+
+ for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+ unsigned int width = mode_cmd->width / (i ? hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int min_size;
+
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[i]);
+ if (!obj) {
+ dev_err(dev->dev, "Failed to lookup GEM object\n");
+ ret = -ENXIO;
+ goto err_gem_object_unreference;
+ }
+
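+		/*
+		 * Smallest buffer that can back this plane: a full pitch
+		 * for every line but the last, one line's worth of pixels,
+		 * plus the plane's offset into the buffer.
+		 */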
+ min_size = (height - 1) * mode_cmd->pitches[i]
+ + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+ + mode_cmd->offsets[i];
+
+ if (obj->size < min_size) {
+ drm_gem_object_unreference_unlocked(obj);
+ ret = -EINVAL;
+ goto err_gem_object_unreference;
+ }
+ objs[i] = to_drm_gem_cma_obj(obj);
+ }
+
+ fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i);
+ if (IS_ERR(fb_cma)) {
+ ret = PTR_ERR(fb_cma);
+ goto err_gem_object_unreference;
+ }
+
+ return &fb_cma->fb;
+
+err_gem_object_unreference:
+ for (i--; i >= 0; i--)
+ drm_gem_object_unreference_unlocked(&objs[i]->base);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_create);
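+
+/*
+ * A driver typically wires drm_fb_cma_create() up as its fb_create
+ * hook (a sketch; the surrounding structure is illustrative):
+ *
+ *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
+ *		.fb_create = drm_fb_cma_create,
+ *	};
+ */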
+
+/**
+ * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
+ * @fb: The framebuffer
+ * @plane: Which plane
+ *
+ * Return the CMA GEM object for given framebuffer.
+ *
+ * This function will usually be called from the CRTC callback functions.
+ */
+struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
+ unsigned int plane)
+{
+ struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+
+ if (plane >= 4)
+ return NULL;
+
+ return fb_cma->obj[plane];
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
+
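+/*
+ * CMA buffers stay mapped in the kernel (obj->vaddr), so the generic
+ * sys_* drawing helpers can write to them directly.
+ */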
+static struct fb_ops drm_fbdev_cma_ops = {
+ .owner = THIS_MODULE,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_device *dev = helper->dev;
+ struct drm_gem_cma_object *obj;
+ struct drm_framebuffer *fb;
+ unsigned int bytes_per_pixel;
+ unsigned long offset;
+ struct fb_info *fbi;
+ size_t size;
+ int ret;
+
+	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ obj = drm_gem_cma_create(dev, size);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+ fbi = framebuffer_alloc(0, dev->dev);
+ if (!fbi) {
+ dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
+ ret = -ENOMEM;
+ goto err_drm_gem_cma_free_object;
+ }
+
+ fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1);
+ if (IS_ERR(fbdev_cma->fb)) {
+ dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
+ ret = PTR_ERR(fbdev_cma->fb);
+ goto err_framebuffer_release;
+ }
+
+ fb = &fbdev_cma->fb->fb;
+ helper->fb = fb;
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->fbops = &drm_fbdev_cma_ops;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ dev_err(dev->dev, "Failed to allocate color map.\n");
+ goto err_drm_fb_cma_destroy;
+ }
+
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+
+ offset = fbi->var.xoffset * bytes_per_pixel;
+ offset += fbi->var.yoffset * fb->pitches[0];
+
+ dev->mode_config.fb_base = (resource_size_t)obj->paddr;
+ fbi->screen_base = obj->vaddr + offset;
+ fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+ fbi->screen_size = size;
+ fbi->fix.smem_len = size;
+
+ return 0;
+
+err_drm_fb_cma_destroy:
+ drm_fb_cma_destroy(fb);
+err_framebuffer_release:
+ framebuffer_release(fbi);
+err_drm_gem_cma_free_object:
+ drm_gem_cma_free_object(&obj->base);
+ return ret;
+}
+
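+/*
+ * ->fb_probe callback: by convention it returns 1 if a new framebuffer
+ * was created, 0 if a suitable one already exists, or a negative errno.
+ */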
+static int drm_fbdev_cma_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ int ret = 0;
+
+ if (!helper->fb) {
+ ret = drm_fbdev_cma_create(helper, sizes);
+ if (ret < 0)
+ return ret;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
+ .fb_probe = drm_fbdev_cma_probe,
+};
+
+/**
+ * drm_fbdev_cma_init() - Allocates and initializes a drm_fbdev_cma struct
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device
+ * @num_crtc: Number of CRTCs
+ * @max_conn_count: Maximum number of connectors
+ *
+ * Returns a newly allocated drm_fbdev_cma struct or an ERR_PTR.
+ */
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+ unsigned int preferred_bpp, unsigned int num_crtc,
+ unsigned int max_conn_count)
+{
+ struct drm_fbdev_cma *fbdev_cma;
+ struct drm_fb_helper *helper;
+ int ret;
+
+ fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
+ if (!fbdev_cma) {
+ dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fbdev_cma->fb_helper.funcs = &drm_fb_cma_helper_funcs;
+ helper = &fbdev_cma->fb_helper;
+
+ ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
+ goto err_free;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(helper);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to add connectors.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ ret = drm_fb_helper_initial_config(helper, preferred_bpp);
+ if (ret < 0) {
+		dev_err(dev->dev, "Failed to set initial hw configuration.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ return fbdev_cma;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(helper);
+err_free:
+ kfree(fbdev_cma);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
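+
+/*
+ * Typical pairing in a driver (illustrative): call
+ *
+ *	fbdev_cma = drm_fbdev_cma_init(drm, 16, 1, 1);
+ *
+ * at load time and drm_fbdev_cma_fini(fbdev_cma) at unload time,
+ * forwarding lastclose and output_poll_changed to the restore_mode
+ * and hotplug_event helpers below.
+ */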
+
+/**
+ * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
+ * @fbdev_cma: The drm_fbdev_cma struct
+ */
+void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
+{
+ if (fbdev_cma->fb_helper.fbdev) {
+ struct fb_info *info;
+ int ret;
+
+ info = fbdev_cma->fb_helper.fbdev;
+ ret = unregister_framebuffer(info);
+ if (ret < 0)
+ DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
+
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+
+ framebuffer_release(info);
+ }
+
+ if (fbdev_cma->fb)
+ drm_fb_cma_destroy(&fbdev_cma->fb->fb);
+
+ drm_fb_helper_fini(&fbdev_cma->fb_helper);
+ kfree(fbdev_cma);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
+
+/**
+ * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ *
+ * This function is usually called from the DRM driver's lastclose callback.
+ */
+void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
+{
+ if (fbdev_cma)
+ drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
+
+/**
+ * drm_fbdev_cma_hotplug_event() - Poll for hotplug events
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ *
+ * This function is usually called from the DRM driver's output_poll_changed
+ * callback.
+ */
+void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
+{
+ if (fbdev_cma)
+ drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f546d1e8af82..b5d05f58c15a 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -236,7 +236,7 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
-bool drm_fb_helper_force_kernel_mode(void)
+static bool drm_fb_helper_force_kernel_mode(void)
{
bool ret, error = false;
struct drm_fb_helper *helper;
@@ -330,7 +330,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
/* Walk the connectors & encoders on this fb turning them on/off */
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
- drm_helper_connector_dpms(connector, dpms_mode);
+ connector->funcs->dpms(connector, dpms_mode);
drm_connector_property_set_value(connector,
dev->mode_config.dpms_property, dpms_mode);
}
@@ -1230,7 +1230,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_crtc **crtcs;
struct drm_display_mode **modes;
- struct drm_encoder *encoder;
struct drm_mode_set *modeset;
bool *enabled;
int width, height;
@@ -1241,11 +1240,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
width = dev->mode_config.max_width;
height = dev->mode_config.max_height;
- /* clean out all the encoder/crtc combos */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- encoder->crtc = NULL;
- }
-
crtcs = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
modes = kcalloc(dev->mode_config.num_connector,
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
new file mode 100644
index 000000000000..1aa8fee1e865
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -0,0 +1,251 @@
+/*
+ * drm gem CMA (contiguous memory allocator) helper functions
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ *
+ * Based on Samsung Exynos code
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_gem_cma_helper.h>
+
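+/* The mmap offset lives in the map list hash key, in units of pages. */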
+static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
+{
+ return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
+}
+
+static void drm_gem_cma_buf_destroy(struct drm_device *drm,
+ struct drm_gem_cma_object *cma_obj)
+{
+ dma_free_writecombine(drm->dev, cma_obj->base.size, cma_obj->vaddr,
+ cma_obj->paddr);
+}
+
+/*
+ * drm_gem_cma_create - allocate an object with the given size
+ *
+ * returns a struct drm_gem_cma_object* on success or ERR_PTR values
+ * on failure.
+ */
+struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+ unsigned int size)
+{
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *gem_obj;
+ int ret;
+
+ size = round_up(size, PAGE_SIZE);
+
+ cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+ if (!cma_obj)
+ return ERR_PTR(-ENOMEM);
+
+ cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
+ &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
+ if (!cma_obj->vaddr) {
+ dev_err(drm->dev, "failed to allocate buffer with size %d\n", size);
+ ret = -ENOMEM;
+ goto err_dma_alloc;
+ }
+
+ gem_obj = &cma_obj->base;
+
+ ret = drm_gem_object_init(drm, gem_obj, size);
+ if (ret)
+ goto err_obj_init;
+
+ ret = drm_gem_create_mmap_offset(gem_obj);
+ if (ret)
+ goto err_create_mmap_offset;
+
+ return cma_obj;
+
+err_create_mmap_offset:
+ drm_gem_object_release(gem_obj);
+
+err_obj_init:
+ drm_gem_cma_buf_destroy(drm, cma_obj);
+
+err_dma_alloc:
+ kfree(cma_obj);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_create);
+
+/*
+ * drm_gem_cma_create_with_handle - allocate an object with the given
+ * size and create a gem handle on it
+ *
+ * returns a struct drm_gem_cma_object* on success or ERR_PTR values
+ * on failure.
+ */
+static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
+ struct drm_file *file_priv,
+ struct drm_device *drm, unsigned int size,
+ unsigned int *handle)
+{
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *gem_obj;
+ int ret;
+
+ cma_obj = drm_gem_cma_create(drm, size);
+ if (IS_ERR(cma_obj))
+ return cma_obj;
+
+ gem_obj = &cma_obj->base;
+
+ /*
+	 * Allocate an id from the idr table; the object is registered
+	 * there and the returned handle carries the id userspace sees.
+ */
+ ret = drm_gem_handle_create(file_priv, gem_obj, handle);
+ if (ret)
+ goto err_handle_create;
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_unreference_unlocked(gem_obj);
+
+ return cma_obj;
+
+err_handle_create:
+ drm_gem_cma_free_object(gem_obj);
+
+ return ERR_PTR(ret);
+}
+
+/*
+ * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
+ * function
+ */
+void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+{
+ struct drm_gem_cma_object *cma_obj;
+
+ if (gem_obj->map_list.map)
+ drm_gem_free_mmap_offset(gem_obj);
+
+ drm_gem_object_release(gem_obj);
+
+ cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+ drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj);
+
+ kfree(cma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
+
+/*
+ * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
+ * function
+ *
+ * This aligns the pitch and size arguments to the minimum required.
+ * Wrap this in your own function if you need bigger alignment.
+ */
+int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev, struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_cma_object *cma_obj;
+ int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+ if (args->pitch < min_pitch)
+ args->pitch = min_pitch;
+
+ if (args->size < args->pitch * args->height)
+ args->size = args->pitch * args->height;
+
+ cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
+ args->size, &args->handle);
+ if (IS_ERR(cma_obj))
+ return PTR_ERR(cma_obj);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
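+
+/*
+ * Worked example: a 100x100 request at 32 bpp yields
+ * pitch = DIV_ROUND_UP(100 * 32, 8) = 400 bytes and
+ * size = 400 * 100 = 40000 bytes, which drm_gem_cma_create() then
+ * rounds up to a whole page.
+ */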
+
+/*
+ * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback
+ * function
+ */
+int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *drm, uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *gem_obj;
+
+ mutex_lock(&drm->struct_mutex);
+
+ gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
+ if (!gem_obj) {
+ dev_err(drm->dev, "failed to lookup gem object\n");
+ mutex_unlock(&drm->struct_mutex);
+ return -EINVAL;
+ }
+
+ *offset = get_gem_mmap_offset(gem_obj);
+
+ drm_gem_object_unreference(gem_obj);
+
+ mutex_unlock(&drm->struct_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
+
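+/*
+ * No fault handler is needed: drm_gem_cma_mmap() maps the whole buffer
+ * up front, leaving only the GEM reference bookkeeping of
+ * drm_gem_vm_open()/drm_gem_vm_close() to these ops.
+ */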
+const struct vm_operations_struct drm_gem_cma_vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
+
+/*
+ * drm_gem_cma_mmap - (struct file_operation)->mmap callback function
+ */
+int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gem_obj;
+ struct drm_gem_cma_object *cma_obj;
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
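+	/*
+	 * drm_gem_mmap() has looked up the object and set up the vma;
+	 * since the buffer is physically contiguous it can be mapped in
+	 * a single remap_pfn_range() call instead of faulting pages in.
+	 */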
+ gem_obj = vma->vm_private_data;
+ cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+ ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+ if (ret)
+ drm_gem_vm_close(vma);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
+
+/*
+ * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
+ */
+int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *drm, unsigned int handle)
+{
+ return drm_gem_handle_delete(file_priv, handle);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 03f16f352fe2..076c4a86ff84 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1236,7 +1236,7 @@ done:
return ret;
}
-void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
{
struct drm_pending_vblank_event *e, *t;
struct timeval now;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 961ee08927fe..6fed21502313 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -62,7 +62,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
-#elif defined(__sparc__) || defined(__arm__)
+#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
tmp = pgprot_noncached(tmp);
#endif
return tmp;
@@ -619,20 +619,11 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
offset = drm_core_get_reg_ofs(dev);
vma->vm_flags |= VM_IO; /* not in core dump */
vma->vm_page_prot = drm_io_prot(map->type, vma);
-#if !defined(__arm__)
if (io_remap_pfn_range(vma, vma->vm_start,
(map->offset + offset) >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
-#else
- if (remap_pfn_range(vma, vma->vm_start,
- (map->offset + offset) >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-#endif
-
DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
" offset = 0x%llx\n",
map->type,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index d9568198c300..9dce3b9c3896 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -147,9 +147,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
-
- kfree(connector->display_info.raw_edid);
- connector->display_info.raw_edid = edid;
+ kfree(edid);
} else {
struct drm_display_mode *mode = drm_mode_create(connector->dev);
struct exynos_drm_panel_info *panel;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index d5586cc75163..f4ac43356583 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -266,8 +266,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
fb = fb_helper->fb;
- if (fb && fb->funcs->destroy)
- fb->funcs->destroy(fb);
+ if (fb)
+ drm_framebuffer_remove(fb);
}
/* release linux framebuffer */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 537027a74fd5..e364165f1a2a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -102,7 +102,6 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
u8 *edid, int len)
{
struct vidi_context *ctx = get_vidi_context(dev);
- struct edid *raw_edid;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -115,18 +114,6 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
return -EFAULT;
}
- raw_edid = kzalloc(len, GFP_KERNEL);
- if (!raw_edid) {
- DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
- return -ENOMEM;
- }
-
- memcpy(raw_edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
- * EDID_LENGTH, len));
-
- /* attach the edid data to connector. */
- connector->display_info.raw_edid = (char *)raw_edid;
-
memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
* EDID_LENGTH, len));
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index abfa2a93f0d0..7a2d40a5c1e1 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -3,7 +3,7 @@
#
ccflags-y += -I$(srctree)/include/drm
-gma500_gfx-y += gem_glue.o \
+gma500_gfx-y += \
accel_2d.o \
backlight.o \
framebuffer.o \
@@ -30,7 +30,8 @@ gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
cdv_intel_crt.o \
cdv_intel_display.o \
cdv_intel_hdmi.o \
- cdv_intel_lvds.o
+ cdv_intel_lvds.o \
+ cdv_intel_dp.o
gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
oaktrail_crtc.o \
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 20793951fcac..143eba3309c5 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -26,10 +26,55 @@
#include "intel_bios.h"
#include "power.h"
+static void do_gma_backlight_set(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ backlight_update_status(dev_priv->backlight_device);
+#endif
+}
+
+void gma_backlight_enable(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ dev_priv->backlight_enabled = true;
+ if (dev_priv->backlight_device) {
+ dev_priv->backlight_device->props.brightness = dev_priv->backlight_level;
+ do_gma_backlight_set(dev);
+ }
+#endif
+}
+
+void gma_backlight_disable(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ dev_priv->backlight_enabled = false;
+ if (dev_priv->backlight_device) {
+ dev_priv->backlight_device->props.brightness = 0;
+ do_gma_backlight_set(dev);
+ }
+#endif
+}
+
+void gma_backlight_set(struct drm_device *dev, int v)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ dev_priv->backlight_level = v;
+ if (dev_priv->backlight_device && dev_priv->backlight_enabled) {
+ dev_priv->backlight_device->props.brightness = v;
+ do_gma_backlight_set(dev);
+ }
+#endif
+}
+
int gma_backlight_init(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
struct drm_psb_private *dev_priv = dev->dev_private;
+ dev_priv->backlight_enabled = true;
return dev_priv->ops->backlight_init(dev);
#else
return 0;
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index b7e7b49d8f62..bfc2f397019a 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -58,10 +58,17 @@ static int cdv_output_init(struct drm_device *dev)
cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
/* These bits indicate HDMI not SDVO on CDV */
- if (REG_READ(SDVOB) & SDVO_DETECTED)
+ if (REG_READ(SDVOB) & SDVO_DETECTED) {
cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
- if (REG_READ(SDVOC) & SDVO_DETECTED)
+ if (REG_READ(DP_B) & DP_DETECTED)
+ cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_B);
+ }
+
+ if (REG_READ(SDVOC) & SDVO_DETECTED) {
cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
+ if (REG_READ(DP_C) & DP_DETECTED)
+ cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_C);
+ }
return 0;
}
@@ -163,6 +170,7 @@ static int cdv_backlight_init(struct drm_device *dev)
cdv_get_brightness(cdv_backlight_device);
backlight_update_status(cdv_backlight_device);
dev_priv->backlight_device = cdv_backlight_device;
+ dev_priv->backlight_enabled = true;
return 0;
}
@@ -449,6 +457,7 @@ static void cdv_get_core_freq(struct drm_device *dev)
case 6:
case 7:
dev_priv->core_freq = 266;
+ break;
default:
dev_priv->core_freq = 0;
}
@@ -488,6 +497,65 @@ static void cdv_hotplug_enable(struct drm_device *dev, bool on)
}
}
+static const char *force_audio_names[] = {
+ "off",
+ "auto",
+ "on",
+};
+
+void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+ int i;
+
+ prop = dev_priv->force_audio_property;
+ if (prop == NULL) {
+ prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "audio",
+ ARRAY_SIZE(force_audio_names));
+ if (prop == NULL)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
+ drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
+
+ dev_priv->force_audio_property = prop;
+ }
+ drm_connector_attach_property(connector, prop, 0);
+}
+
+static const char *broadcast_rgb_names[] = {
+ "Full",
+ "Limited 16:235",
+};
+
+void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+ int i;
+
+ prop = dev_priv->broadcast_rgb_property;
+ if (prop == NULL) {
+ prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ ARRAY_SIZE(broadcast_rgb_names));
+ if (prop == NULL)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
+ drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
+
+ dev_priv->broadcast_rgb_property = prop;
+ }
+
+ drm_connector_attach_property(connector, prop, 0);
+}
+
/* Cedarview */
static const struct psb_offset cdv_regmap[2] = {
{
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index a68509ba22a8..3cfd0931fbfb 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -57,15 +57,26 @@ struct cdv_intel_clock_t {
struct cdv_intel_limit_t {
struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
struct cdv_intel_p2_t p2;
+ bool (*find_pll)(const struct cdv_intel_limit_t *, struct drm_crtc *,
+ int, int, struct cdv_intel_clock_t *);
};
+static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
+ struct drm_crtc *crtc, int target, int refclk,
+ struct cdv_intel_clock_t *best_clock);
+static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
+ int refclk,
+ struct cdv_intel_clock_t *best_clock);
+
#define CDV_LIMIT_SINGLE_LVDS_96 0
#define CDV_LIMIT_SINGLE_LVDS_100 1
#define CDV_LIMIT_DAC_HDMI_27 2
#define CDV_LIMIT_DAC_HDMI_96 3
+#define CDV_LIMIT_DP_27 4
+#define CDV_LIMIT_DP_100 5
static const struct cdv_intel_limit_t cdv_intel_limits[] = {
- { /* CDV_SIGNLE_LVDS_96MHz */
+ { /* CDV_SINGLE_LVDS_96MHz */
.dot = {.min = 20000, .max = 115500},
.vco = {.min = 1800000, .max = 3600000},
.n = {.min = 2, .max = 6},
@@ -76,6 +87,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
.p1 = {.min = 2, .max = 10},
.p2 = {.dot_limit = 200000,
.p2_slow = 14, .p2_fast = 14},
+ .find_pll = cdv_intel_find_best_PLL,
},
{ /* CDV_SINGLE_LVDS_100MHz */
.dot = {.min = 20000, .max = 115500},
@@ -90,6 +102,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
	 * is 80-224MHz. Prefer single channel as much as possible.
*/
.p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
+ .find_pll = cdv_intel_find_best_PLL,
},
{ /* CDV_DAC_HDMI_27MHz */
.dot = {.min = 20000, .max = 400000},
@@ -101,6 +114,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
.p = {.min = 5, .max = 90},
.p1 = {.min = 1, .max = 9},
.p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+ .find_pll = cdv_intel_find_best_PLL,
},
{ /* CDV_DAC_HDMI_96MHz */
.dot = {.min = 20000, .max = 400000},
@@ -112,7 +126,32 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
.p = {.min = 5, .max = 100},
.p1 = {.min = 1, .max = 10},
.p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+ .find_pll = cdv_intel_find_best_PLL,
+ },
+ { /* CDV_DP_27MHz */
+ .dot = {.min = 160000, .max = 272000},
+ .vco = {.min = 1809000, .max = 3564000},
+ .n = {.min = 1, .max = 1},
+ .m = {.min = 67, .max = 132},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 65, .max = 130},
+ .p = {.min = 5, .max = 90},
+ .p1 = {.min = 1, .max = 9},
+ .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
+ .find_pll = cdv_intel_find_dp_pll,
},
+ { /* CDV_DP_100MHz */
+ .dot = {.min = 160000, .max = 272000},
+ .vco = {.min = 1800000, .max = 3600000},
+ .n = {.min = 2, .max = 6},
+ .m = {.min = 60, .max = 164},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 58, .max = 162},
+ .p = {.min = 5, .max = 100},
+ .p1 = {.min = 1, .max = 10},
+ .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
+ .find_pll = cdv_intel_find_dp_pll,
+ }
};
#define _wait_for(COND, MS, W) ({ \
@@ -132,7 +171,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
-static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
+int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
{
int ret;
@@ -159,7 +198,7 @@ static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
return 0;
}
-static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
+int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
{
int ret;
static bool dpio_debug = true;
@@ -201,7 +240,7 @@ static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
/* Reset the DPIO configuration register. The BIOS does this at every
* mode set.
*/
-static void cdv_sb_reset(struct drm_device *dev)
+void cdv_sb_reset(struct drm_device *dev)
{
REG_WRITE(DPIO_CFG, 0);
@@ -216,7 +255,7 @@ static void cdv_sb_reset(struct drm_device *dev)
*/
static int
cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
- struct cdv_intel_clock_t *clock, bool is_lvds)
+ struct cdv_intel_clock_t *clock, bool is_lvds, u32 ddi_select)
{
struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_crtc->pipe;
@@ -259,7 +298,7 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
ref_value &= ~(REF_CLK_MASK);
/* use DPLL_A for pipeB on CRT/HDMI */
- if (pipe == 1 && !is_lvds) {
+ if (pipe == 1 && !is_lvds && !(ddi_select & DP_MASK)) {
DRM_DEBUG_KMS("use DPLLA for pipe B\n");
ref_value |= REF_CLK_DPLLA;
} else {
@@ -336,30 +375,33 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
if (ret)
return ret;
- lane_reg = PSB_LANE0;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
- cdv_sb_write(dev, lane_reg, lane_value);
-
- lane_reg = PSB_LANE1;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
- cdv_sb_write(dev, lane_reg, lane_value);
-
- lane_reg = PSB_LANE2;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
- cdv_sb_write(dev, lane_reg, lane_value);
-
- lane_reg = PSB_LANE3;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
- cdv_sb_write(dev, lane_reg, lane_value);
-
+ if (ddi_select) {
+ if ((ddi_select & DDI_MASK) == DDI0_SELECT) {
+ lane_reg = PSB_LANE0;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE1;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+ } else {
+ lane_reg = PSB_LANE2;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE3;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+ }
+ }
return 0;
}
@@ -396,6 +438,12 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
else
limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
+ } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ if (refclk == 27000)
+ limit = &cdv_intel_limits[CDV_LIMIT_DP_27];
+ else
+ limit = &cdv_intel_limits[CDV_LIMIT_DP_100];
} else {
if (refclk == 27000)
limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
@@ -438,13 +486,12 @@ static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
return true;
}
-static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
- int refclk,
- struct cdv_intel_clock_t *best_clock)
+static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
+ struct drm_crtc *crtc, int target, int refclk,
+ struct cdv_intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct cdv_intel_clock_t clock;
- const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);
int err = target;
@@ -498,6 +545,49 @@ static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
return err != target;
}
+static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
+ int refclk,
+ struct cdv_intel_clock_t *best_clock)
+{
+ struct cdv_intel_clock_t clock;
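+	/*
+	 * DP runs at one of two fixed link rates, so rather than
+	 * searching the limit ranges the divider settings are taken
+	 * from precomputed values keyed on refclk and target clock.
+	 */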
+ if (refclk == 27000) {
+ if (target < 200000) {
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 1;
+ clock.m1 = 0;
+ clock.m2 = 118;
+ } else {
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 1;
+ clock.m1 = 0;
+ clock.m2 = 98;
+ }
+ } else if (refclk == 100000) {
+ if (target < 200000) {
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 5;
+ clock.m1 = 0;
+ clock.m2 = 160;
+ } else {
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 5;
+ clock.m1 = 0;
+ clock.m2 = 133;
+ }
+ } else
+ return false;
+ clock.m = clock.m2 + 2;
+ clock.p = clock.p1 * clock.p2;
+ clock.vco = (refclk * clock.m) / clock.n;
+ clock.dot = clock.vco / clock.p;
+ memcpy(best_clock, &clock, sizeof(struct cdv_intel_clock_t));
+ return true;
+}
+
static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
@@ -791,7 +881,7 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
if (psb_intel_crtc->active)
- return;
+ break;
psb_intel_crtc->active = true;
@@ -835,17 +925,15 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_WRITE(map->status, temp);
REG_READ(map->status);
- cdv_intel_update_watermark(dev, crtc);
cdv_intel_crtc_load_lut(crtc);
/* Give the overlay scaler a chance to enable
* if it's on this pipe */
/* psb_intel_crtc_dpms_video(crtc, true); TODO */
- psb_intel_crtc->crtc_enable = true;
break;
case DRM_MODE_DPMS_OFF:
if (!psb_intel_crtc->active)
- return;
+ break;
psb_intel_crtc->active = false;
@@ -892,10 +980,9 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Wait for the clocks to turn off. */
udelay(150);
- cdv_intel_update_watermark(dev, crtc);
- psb_intel_crtc->crtc_enable = false;
break;
}
+ cdv_intel_update_watermark(dev, crtc);
/*Set FIFO Watermarks*/
REG_WRITE(DSPARB, 0x3F3E);
}
@@ -952,9 +1039,12 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
u32 dpll = 0, dspcntr, pipeconf;
bool ok;
bool is_crt = false, is_lvds = false, is_tv = false;
- bool is_hdmi = false;
+ bool is_hdmi = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ const struct cdv_intel_limit_t *limit;
+ u32 ddi_select = 0;
+ bool is_edp = false;
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct psb_intel_encoder *psb_intel_encoder =
@@ -964,6 +1054,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
|| connector->encoder->crtc != crtc)
continue;
+ ddi_select = psb_intel_encoder->ddi_select;
switch (psb_intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -977,6 +1068,15 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_HDMI:
is_hdmi = true;
break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_edp = true;
+ break;
+ default:
+ DRM_ERROR("invalid output type.\n");
+ return 0;
}
}
@@ -986,6 +1086,20 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
else
/* high-end sku, 27/100 mhz */
refclk = 27000;
+ if (is_dp || is_edp) {
+ /*
+		 * Based on the spec the low-end SKU has only CRT/LVDS, so it
+		 * need not be considered for DP/eDP. The high-end SKU uses
+		 * the 27/100MHz reference clock for DP/eDP: 100MHz when the
+		 * SSC clock is in use, 27MHz otherwise. From the VBIOS code
+		 * it seems that pipe A chooses 27MHz for DP/eDP while pipe B
+		 * chooses 100MHz.
+ */
+ if (pipe == 0)
+ refclk = 27000;
+ else
+ refclk = 100000;
+ }
if (is_lvds && dev_priv->lvds_use_ssc) {
refclk = dev_priv->lvds_ssc_freq * 1000;
@@ -993,8 +1107,10 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
}
drm_mode_debug_printmodeline(adjusted_mode);
+
+ limit = cdv_intel_limit(crtc, refclk);
- ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
&clock);
if (!ok) {
dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
@@ -1009,6 +1125,15 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
}
/* dpll |= PLL_REF_INPUT_DREFCLK; */
+ if (is_dp || is_edp) {
+ cdv_intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ } else {
+ REG_WRITE(PIPE_GMCH_DATA_M(pipe), 0);
+ REG_WRITE(PIPE_GMCH_DATA_N(pipe), 0);
+ REG_WRITE(PIPE_DP_LINK_M(pipe), 0);
+ REG_WRITE(PIPE_DP_LINK_N(pipe), 0);
+ }
+
dpll |= DPLL_SYNCLOCK_ENABLE;
/* if (is_lvds)
dpll |= DPLLB_MODE_LVDS;
@@ -1019,6 +1144,31 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
/* setup pipeconf */
pipeconf = REG_READ(map->conf);
+ pipeconf &= ~(PIPE_BPC_MASK);
+ if (is_edp) {
+ switch (dev_priv->edp.bpp) {
+ case 24:
+ pipeconf |= PIPE_8BPC;
+ break;
+ case 18:
+ pipeconf |= PIPE_6BPC;
+ break;
+ case 30:
+ pipeconf |= PIPE_10BPC;
+ break;
+ default:
+ pipeconf |= PIPE_8BPC;
+ break;
+ }
+ } else if (is_lvds) {
+ /* the BPC will be 6 if it is 18-bit LVDS panel */
+ if ((REG_READ(LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+ pipeconf |= PIPE_8BPC;
+ else
+ pipeconf |= PIPE_6BPC;
+ } else
+ pipeconf |= PIPE_8BPC;
+
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -1033,7 +1183,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
REG_READ(map->dpll);
- cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds);
+ cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds, ddi_select);
udelay(150);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
new file mode 100644
index 000000000000..c9abc06ef680
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -0,0 +1,1951 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "drm_dp_helper.h"
+
+#define _wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+	while (!(COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W && !in_dbg_master()) msleep(W); \
+ } \
+ ret__; \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+
+#define DP_LINK_STATUS_SIZE 6
+#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
+
+#define DP_LINK_CONFIGURATION_SIZE 9
+
+#define CDV_FAST_LINK_TRAIN 1
+
+struct cdv_intel_dp {
+ uint32_t output_reg;
+ uint32_t DP;
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ bool has_audio;
+ int force_audio;
+ uint32_t color_range;
+ uint8_t link_bw;
+ uint8_t lane_count;
+ uint8_t dpcd[4];
+ struct psb_intel_encoder *encoder;
+ struct i2c_adapter adapter;
+ struct i2c_algo_dp_aux_data algo;
+ uint8_t train_set[4];
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ struct drm_display_mode *panel_fixed_mode; /* for eDP */
+ bool panel_on;
+};
+
+struct ddi_regoff {
+ uint32_t PreEmph1;
+ uint32_t PreEmph2;
+ uint32_t VSwing1;
+ uint32_t VSwing2;
+ uint32_t VSwing3;
+ uint32_t VSwing4;
+ uint32_t VSwing5;
+};
+
+static struct ddi_regoff ddi_DP_train_table[] = {
+ {.PreEmph1 = 0x812c, .PreEmph2 = 0x8124, .VSwing1 = 0x8154,
+ .VSwing2 = 0x8148, .VSwing3 = 0x814C, .VSwing4 = 0x8150,
+ .VSwing5 = 0x8158,},
+ {.PreEmph1 = 0x822c, .PreEmph2 = 0x8224, .VSwing1 = 0x8254,
+ .VSwing2 = 0x8248, .VSwing3 = 0x824C, .VSwing4 = 0x8250,
+ .VSwing5 = 0x8258,},
+};
+
+static uint32_t dp_vswing_premph_table[] = {
+ 0x55338954, 0x4000,
+ 0x554d8954, 0x2000,
+ 0x55668954, 0,
+ 0x559ac0d4, 0x6000,
+};
+/**
+ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * @encoder: the encoder to check
+ *
+ * If a CPU or PCH DP output is attached to an eDP panel, this function
+ * will return true, and false otherwise.
+ */
+static bool is_edp(struct psb_intel_encoder *encoder)
+{
+ return encoder->type == INTEL_OUTPUT_EDP;
+}
+
+
+static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder);
+static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder);
+static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder);
+
+static int
+cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int max_lane_count = 4;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+ max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
+ switch (max_lane_count) {
+ case 1: case 2: case 4:
+ break;
+ default:
+ max_lane_count = 4;
+ }
+ }
+ return max_lane_count;
+}
+
+static int
+cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+
+ switch (max_link_bw) {
+ case DP_LINK_BW_1_62:
+ case DP_LINK_BW_2_7:
+ break;
+ default:
+ max_link_bw = DP_LINK_BW_1_62;
+ break;
+ }
+ return max_link_bw;
+}
+
+static int
+cdv_intel_dp_link_clock(uint8_t link_bw)
+{
+ if (link_bw == DP_LINK_BW_2_7)
+ return 270000;
+ else
+ return 162000;
+}
+
+static int
+cdv_intel_dp_link_required(int pixel_clock, int bpp)
+{
+ return (pixel_clock * bpp + 7) / 8;
+}
+
+static int
+cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+{
+ return (max_link_clock * max_lanes * 19) / 20;
+}
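+
+/*
+ * The 19/20 factor trims 5% off the theoretical link bandwidth as
+ * headroom; link_required() and max_data_rate() are compared in the
+ * same kHz-derived byte-rate units in mode_valid() below.
+ */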
+
+static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ u32 pp;
+
+ if (intel_dp->panel_on) {
+ DRM_DEBUG_KMS("Skip VDD on because of panel on\n");
+ return;
+ }
+ DRM_DEBUG_KMS("\n");
+
+ pp = REG_READ(PP_CONTROL);
+
+ pp |= EDP_FORCE_VDD;
+ REG_WRITE(PP_CONTROL, pp);
+ REG_READ(PP_CONTROL);
+ msleep(intel_dp->panel_power_up_delay);
+}
+
+static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ u32 pp;
+
+ DRM_DEBUG_KMS("\n");
+ pp = REG_READ(PP_CONTROL);
+
+ pp &= ~EDP_FORCE_VDD;
+ REG_WRITE(PP_CONTROL, pp);
+ REG_READ(PP_CONTROL);
+}
+
+/* Returns true if the panel was already on when called */
+static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_NONE;
+
+ if (intel_dp->panel_on)
+ return true;
+
+ DRM_DEBUG_KMS("\n");
+ pp = REG_READ(PP_CONTROL);
+ pp &= ~PANEL_UNLOCK_MASK;
+
+ pp |= (PANEL_UNLOCK_REGS | POWER_TARGET_ON);
+ REG_WRITE(PP_CONTROL, pp);
+ REG_READ(PP_CONTROL);
+
+ if (wait_for(((REG_READ(PP_STATUS) & idle_on_mask) == idle_on_mask), 1000)) {
+ DRM_DEBUG_KMS("Error in Powering up eDP panel, status %x\n", REG_READ(PP_STATUS));
+ intel_dp->panel_on = false;
+ } else
+ intel_dp->panel_on = true;
+ msleep(intel_dp->panel_power_up_delay);
+
+ return false;
+}
+
+static void cdv_intel_edp_panel_off(struct psb_intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+	u32 pp, idle_off_mask = PP_ON;
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+
+ DRM_DEBUG_KMS("\n");
+
+ pp = REG_READ(PP_CONTROL);
+
+ if ((pp & POWER_TARGET_ON) == 0)
+ return;
+
+ intel_dp->panel_on = false;
+ pp &= ~PANEL_UNLOCK_MASK;
+ /* ILK workaround: disable reset around power sequence */
+
+ pp &= ~POWER_TARGET_ON;
+ pp &= ~EDP_FORCE_VDD;
+ pp &= ~EDP_BLC_ENABLE;
+ REG_WRITE(PP_CONTROL, pp);
+ REG_READ(PP_CONTROL);
+ DRM_DEBUG_KMS("PP_STATUS %x\n", REG_READ(PP_STATUS));
+
+ if (wait_for((REG_READ(PP_STATUS) & idle_off_mask) == 0, 1000)) {
+ DRM_DEBUG_KMS("Error in turning off Panel\n");
+ }
+
+ msleep(intel_dp->panel_power_cycle_delay);
+ DRM_DEBUG_KMS("Over\n");
+}
+
+static void cdv_intel_edp_backlight_on(struct psb_intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ u32 pp;
+
+ DRM_DEBUG_KMS("\n");
+ /*
+ * If we enable the backlight right away following a panel power
+ * on, we may see slight flicker as the panel syncs with the eDP
+ * link. So delay a bit to make sure the image is solid before
+ * allowing it to appear.
+ */
+ msleep(300);
+ pp = REG_READ(PP_CONTROL);
+
+ pp |= EDP_BLC_ENABLE;
+ REG_WRITE(PP_CONTROL, pp);
+ gma_backlight_enable(dev);
+}
+
+static void cdv_intel_edp_backlight_off(struct psb_intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ u32 pp;
+
+ DRM_DEBUG_KMS("\n");
+ gma_backlight_disable(dev);
+ msleep(10);
+ pp = REG_READ(PP_CONTROL);
+
+ pp &= ~EDP_BLC_ENABLE;
+ REG_WRITE(PP_CONTROL, pp);
+ msleep(intel_dp->backlight_off_delay);
+}
+
+static int
+cdv_intel_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
+ int max_lanes = cdv_intel_dp_max_lane_count(encoder);
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+
+ if (is_edp(encoder) && intel_dp->panel_fixed_mode) {
+ if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+	/* only refuse the mode on non-eDP since we have seen some weird eDP panels
+ which are outside spec tolerances but somehow work by magic */
+ if (!is_edp(encoder) &&
+ (cdv_intel_dp_link_required(mode->clock, dev_priv->edp.bpp)
+ > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes)))
+ return MODE_CLOCK_HIGH;
+
+ if (is_edp(encoder)) {
+ if (cdv_intel_dp_link_required(mode->clock, 24)
+ > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes))
+ return MODE_CLOCK_HIGH;
+ }
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ return MODE_OK;
+}
+
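+/*
+ * The AUX channel data registers each hold up to four message bytes,
+ * most significant byte first; pack_aux()/unpack_aux() convert between
+ * the byte stream and that register layout.
+ */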
+static uint32_t
+pack_aux(uint8_t *src, int src_bytes)
+{
+ int i;
+ uint32_t v = 0;
+
+ if (src_bytes > 4)
+ src_bytes = 4;
+ for (i = 0; i < src_bytes; i++)
+ v |= ((uint32_t) src[i]) << ((3-i) * 8);
+ return v;
+}
+
+static void
+unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+{
+ int i;
+ if (dst_bytes > 4)
+ dst_bytes = 4;
+ for (i = 0; i < dst_bytes; i++)
+ dst[i] = src >> ((3-i) * 8);
+}
+
+static int
+cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_size)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ uint32_t output_reg = intel_dp->output_reg;
+ struct drm_device *dev = encoder->base.dev;
+ uint32_t ch_ctl = output_reg + 0x10;
+ uint32_t ch_data = ch_ctl + 4;
+ int i;
+ int recv_bytes;
+ uint32_t status;
+ uint32_t aux_clock_divider;
+ int try, precharge;
+
+	/* The clock divider is based off the hrawclk and would like to
+	 * run at 2MHz, so take the hrawclk value and divide by 2. The
+	 * CDV platform uses a 200MHz hrawclk.
+	 */
+ aux_clock_divider = 200 / 2;
+
+ precharge = 4;
+ if (is_edp(encoder))
+ precharge = 10;
+
+ if (REG_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
+ DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
+ REG_READ(ch_ctl));
+ return -EBUSY;
+ }
+
+ /* Must try at least 3 times according to DP spec */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4)
+ REG_WRITE(ch_data + i,
+ pack_aux(send + i, send_bytes - i));
+
+ /* Send the command and wait for it to complete */
+ REG_WRITE(ch_ctl,
+ DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+ for (;;) {
+ status = REG_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ udelay(100);
+ }
+
+ /* Clear done status and any errors */
+ REG_WRITE(ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+ if (status & DP_AUX_CH_CTL_DONE)
+ break;
+ }
+
+ if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+ DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+ return -EBUSY;
+ }
+
+ /* Check for timeout or receive error.
+ * Timeouts occur when the sink is not connected
+ */
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+ return -EIO;
+ }
+
+ /* Timeouts occur when the device isn't connected, so they're
+ * "normal" -- don't fill the kernel log with these */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+ DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+ return -ETIMEDOUT;
+ }
+
+ /* Unload any bytes sent back from the other side */
+ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+ if (recv_bytes > recv_size)
+ recv_bytes = recv_size;
+
+ for (i = 0; i < recv_bytes; i += 4)
+ unpack_aux(REG_READ(ch_data + i),
+ recv + i, recv_bytes - i);
+
+ return recv_bytes;
+}
+
+/* Write data to the aux channel in native mode */
+static int
+cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder,
+ uint16_t address, uint8_t *send, int send_bytes)
+{
+ int ret;
+ uint8_t msg[20];
+ int msg_bytes;
+ uint8_t ack;
+
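+	/* 4 byte AUX header plus at most 16 bytes of payload fit in msg[] */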
+ if (send_bytes > 16)
+ return -1;
+ msg[0] = AUX_NATIVE_WRITE << 4;
+ msg[1] = address >> 8;
+ msg[2] = address & 0xff;
+ msg[3] = send_bytes - 1;
+ memcpy(&msg[4], send, send_bytes);
+ msg_bytes = send_bytes + 4;
+ for (;;) {
+ ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1);
+ if (ret < 0)
+ return ret;
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ break;
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(100);
+ else
+ return -EIO;
+ }
+ return send_bytes;
+}
+
+/* Write a single byte to the aux channel in native mode */
+static int
+cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder,
+ uint16_t address, uint8_t byte)
+{
+ return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1);
+}
+
+/* read bytes from a native aux channel */
+static int
+cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder,
+ uint16_t address, uint8_t *recv, int recv_bytes)
+{
+ uint8_t msg[4];
+ int msg_bytes;
+ uint8_t reply[20];
+ int reply_bytes;
+ uint8_t ack;
+ int ret;
+
+ msg[0] = AUX_NATIVE_READ << 4;
+ msg[1] = address >> 8;
+ msg[2] = address & 0xff;
+ msg[3] = recv_bytes - 1;
+
+ msg_bytes = 4;
+ reply_bytes = recv_bytes + 1;
+
+ for (;;) {
+ ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret == 0)
+ return -EPROTO;
+ if (ret < 0)
+ return ret;
+ ack = reply[0];
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+ memcpy(recv, reply + 1, ret - 1);
+ return ret - 1;
+		} else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(100);
+ else
+ return -EIO;
+ }
+}
+
+static int
+cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ struct cdv_intel_dp *intel_dp = container_of(adapter,
+ struct cdv_intel_dp,
+ adapter);
+ struct psb_intel_encoder *encoder = intel_dp->encoder;
+ uint16_t address = algo_data->address;
+ uint8_t msg[5];
+ uint8_t reply[2];
+ unsigned retry;
+ int msg_bytes;
+ int reply_bytes;
+ int ret;
+
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[0] = AUX_I2C_READ << 4;
+ else
+ msg[0] = AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[0] |= AUX_I2C_MOT << 4;
+
+ msg[1] = address >> 8;
+ msg[2] = address;
+
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg[3] = 0;
+ msg[4] = write_byte;
+ msg_bytes = 5;
+ reply_bytes = 1;
+ break;
+ case MODE_I2C_READ:
+ msg[3] = 0;
+ msg_bytes = 4;
+ reply_bytes = 2;
+ break;
+ default:
+ msg_bytes = 3;
+ reply_bytes = 1;
+ break;
+ }
+
+ for (retry = 0; retry < 5; retry++) {
+ ret = cdv_intel_dp_aux_ch(encoder,
+ msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+ return ret;
+ }
+
+ switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
+ case AUX_NATIVE_REPLY_ACK:
+ /* I2C-over-AUX Reply field is only valid
+ * when paired with AUX ACK.
+ */
+ break;
+ case AUX_NATIVE_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_ch native nack\n");
+ return -EREMOTEIO;
+ case AUX_NATIVE_REPLY_DEFER:
+ udelay(100);
+ continue;
+ default:
+ DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+ reply[0]);
+ return -EREMOTEIO;
+ }
+
+ switch (reply[0] & AUX_I2C_REPLY_MASK) {
+ case AUX_I2C_REPLY_ACK:
+ if (mode == MODE_I2C_READ) {
+ *read_byte = reply[1];
+ }
+ return reply_bytes - 1;
+ case AUX_I2C_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_i2c nack\n");
+ return -EREMOTEIO;
+ case AUX_I2C_REPLY_DEFER:
+ DRM_DEBUG_KMS("aux_i2c defer\n");
+ udelay(100);
+ break;
+ default:
+ DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
+ return -EREMOTEIO;
+ }
+ }
+
+ DRM_ERROR("too many retries, giving up\n");
+ return -EREMOTEIO;
+}
+
+static int
+cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int ret;
+
+ DRM_DEBUG_KMS("i2c_init %s\n", name);
+
+ intel_dp->algo.running = false;
+ intel_dp->algo.address = 0;
+ intel_dp->algo.aux_ch = cdv_intel_dp_i2c_aux_ch;
+
+	memset(&intel_dp->adapter, 0, sizeof(intel_dp->adapter));
+	intel_dp->adapter.owner = THIS_MODULE;
+	intel_dp->adapter.class = I2C_CLASS_DDC;
+	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
+ intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
+ intel_dp->adapter.algo_data = &intel_dp->algo;
+ intel_dp->adapter.dev.parent = &connector->base.kdev;
+
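+	/* On eDP the panel VDD rail must be powered for AUX transactions,
+	 * so force it on around the bus registration probe. */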
+ if (is_edp(encoder))
+ cdv_intel_edp_panel_vdd_on(encoder);
+ ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
+ if (is_edp(encoder))
+ cdv_intel_edp_panel_vdd_off(encoder);
+
+ return ret;
+}
+
+void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ adjusted_mode->hdisplay = fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = fixed_mode->hsync_end;
+ adjusted_mode->htotal = fixed_mode->htotal;
+
+ adjusted_mode->vdisplay = fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = fixed_mode->vsync_end;
+ adjusted_mode->vtotal = fixed_mode->vtotal;
+
+ adjusted_mode->clock = fixed_mode->clock;
+
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+}
+
+static bool
+cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_psb_private *dev_priv = encoder->dev->dev_private;
+ struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ int lane_count, clock;
+ int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder);
+ int max_clock = cdv_intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
+ static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+ int refclock = mode->clock;
+ int bpp = 24;
+
+ if (is_edp(intel_encoder) && intel_dp->panel_fixed_mode) {
+ cdv_intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
+ refclock = intel_dp->panel_fixed_mode->clock;
+ bpp = dev_priv->edp.bpp;
+ }
+
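+	/* Walk the lane counts (1, 2, 4) from fewest to most, trying the
+	 * higher link rate first, and pick the first combination whose
+	 * bandwidth can carry the mode's data rate. */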
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ for (clock = max_clock; clock >= 0; clock--) {
+ int link_avail = cdv_intel_dp_max_data_rate(cdv_intel_dp_link_clock(bws[clock]), lane_count);
+
+ if (cdv_intel_dp_link_required(refclock, bpp) <= link_avail) {
+ intel_dp->link_bw = bws[clock];
+ intel_dp->lane_count = lane_count;
+ adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
+ DRM_DEBUG_KMS("Display port link bw %02x lane "
+ "count %d clock %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ adjusted_mode->clock);
+ return true;
+ }
+ }
+ }
+ if (is_edp(intel_encoder)) {
+		/* Okay, we failed; just pick the highest settings */
+ intel_dp->lane_count = max_lane_count;
+ intel_dp->link_bw = bws[max_clock];
+ adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
+ DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+ "count %d clock %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ adjusted_mode->clock);
+
+ return true;
+ }
+ return false;
+}
+
+struct cdv_intel_dp_m_n {
+ uint32_t tu;
+ uint32_t gmch_m;
+ uint32_t gmch_n;
+ uint32_t link_m;
+ uint32_t link_n;
+};
+
+static void
+cdv_intel_reduce_ratio(uint32_t *num, uint32_t *den)
+{
+	/* Rescale the ratio to the fixed denominator 0x800000 so that
+	 * both values fit within the 24-bit M/N registers. */
+	uint64_t value = (uint64_t)*num * 0x800000;
+
+	do_div(value, *den);
+	*num = value;
+	*den = 0x800000;
+}
+
+static void
+cdv_intel_dp_compute_m_n(int bpp,
+ int nlanes,
+ int pixel_clock,
+ int link_clock,
+ struct cdv_intel_dp_m_n *m_n)
+{
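+	/* The GMCH M/N pair encodes the ratio of the pixel data rate
+	 * (pixel_clock * bytes per pixel) to the total link bandwidth
+	 * (link_clock * nlanes); the link M/N pair encodes the ratio of
+	 * the pixel clock to the link clock. */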
+ m_n->tu = 64;
+ m_n->gmch_m = (pixel_clock * bpp + 7) >> 3;
+ m_n->gmch_n = link_clock * nlanes;
+ cdv_intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+ m_n->link_m = pixel_clock;
+ m_n->link_n = link_clock;
+ cdv_intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+}
+
+void
+cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_encoder *encoder;
+ struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+ int lane_count = 4, bpp = 24;
+ struct cdv_intel_dp_m_n m_n;
+ int pipe = intel_crtc->pipe;
+
+ /*
+ * Find the lane count in the intel_encoder private
+ */
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct psb_intel_encoder *intel_encoder;
+ struct cdv_intel_dp *intel_dp;
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ intel_encoder = to_psb_intel_encoder(encoder);
+ intel_dp = intel_encoder->dev_priv;
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ lane_count = intel_dp->lane_count;
+ break;
+ } else if (is_edp(intel_encoder)) {
+ lane_count = intel_dp->lane_count;
+ bpp = dev_priv->edp.bpp;
+ break;
+ }
+ }
+
+	/*
+	 * Compute the GMCH and Link ratios. The default bpp of 24 matches
+	 * 8 bits per R/G/B channel; eDP panels may override it with the
+	 * value from the VBT.
+	 */
+ cdv_intel_dp_compute_m_n(bpp, lane_count,
+ mode->clock, adjusted_mode->clock, &m_n);
+
+	REG_WRITE(PIPE_GMCH_DATA_M(pipe),
+		  ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+		  m_n.gmch_m);
+	REG_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
+	REG_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
+	REG_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
+}
+
+static void
+cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ struct drm_crtc *crtc = encoder->crtc;
+ struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ struct drm_device *dev = encoder->dev;
+
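+	/* Build the DP port register value, starting from the minimum
+	 * voltage swing and pre-emphasis; link training raises the drive
+	 * levels later as the sink requests. */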
+ intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ intel_dp->DP |= intel_dp->color_range;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+ switch (intel_dp->lane_count) {
+ case 1:
+ intel_dp->DP |= DP_PORT_WIDTH_1;
+ break;
+ case 2:
+ intel_dp->DP |= DP_PORT_WIDTH_2;
+ break;
+ case 4:
+ intel_dp->DP |= DP_PORT_WIDTH_4;
+ break;
+ }
+ if (intel_dp->has_audio)
+ intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+
+ memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ intel_dp->link_configuration[0] = intel_dp->link_bw;
+ intel_dp->link_configuration[1] = intel_dp->lane_count;
+
+ /*
+ * Check for DPCD version > 1.1 and enhanced framing support
+ */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+ intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+ }
+
+	/* Route the port to pipe B when the CRTC is pipe 1 */
+ if (intel_crtc->pipe == 1)
+ intel_dp->DP |= DP_PIPEB_SELECT;
+
+ REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN));
+ DRM_DEBUG_KMS("DP expected reg is %x\n", intel_dp->DP);
+ if (is_edp(intel_encoder)) {
+ uint32_t pfit_control;
+ cdv_intel_edp_panel_on(intel_encoder);
+
+ if (mode->hdisplay != adjusted_mode->hdisplay ||
+ mode->vdisplay != adjusted_mode->vdisplay)
+ pfit_control = PFIT_ENABLE;
+ else
+ pfit_control = 0;
+
+ pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
+
+ REG_WRITE(PFIT_CONTROL, pfit_control);
+ }
+}
+
+
+/* If the sink supports it, try to set the power state appropriately */
+static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int ret, i;
+
+ /* Should have a valid DPCD by this point */
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ ret = cdv_intel_dp_aux_native_write_1(encoder, DP_SET_POWER,
+ DP_SET_POWER_D3);
+ if (ret != 1)
+ DRM_DEBUG_DRIVER("failed to write sink power state\n");
+ } else {
+		/*
+		 * When turning on, retry a few times with a 1ms delay
+		 * between attempts to give the sink time to wake up.
+		 */
+ for (i = 0; i < 3; i++) {
+ ret = cdv_intel_dp_aux_native_write_1(encoder,
+ DP_SET_POWER,
+ DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ udelay(1000);
+ }
+ }
+}
+
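+/* Encoder helper 'prepare' hook: runs before a mode set and takes the
+ * link (and, for eDP, the panel power) down so that link training can
+ * restart cleanly from the 'commit' hook. */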
+static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
+{
+ struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ int edp = is_edp(intel_encoder);
+
+ if (edp) {
+ cdv_intel_edp_backlight_off(intel_encoder);
+ cdv_intel_edp_panel_off(intel_encoder);
+ cdv_intel_edp_panel_vdd_on(intel_encoder);
+ }
+ /* Wake up the sink first */
+ cdv_intel_dp_sink_dpms(intel_encoder, DRM_MODE_DPMS_ON);
+ cdv_intel_dp_link_down(intel_encoder);
+ if (edp)
+ cdv_intel_edp_panel_vdd_off(intel_encoder);
+}
+
+static void cdv_intel_dp_commit(struct drm_encoder *encoder)
+{
+ struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ int edp = is_edp(intel_encoder);
+
+ if (edp)
+ cdv_intel_edp_panel_on(intel_encoder);
+ cdv_intel_dp_start_link_train(intel_encoder);
+ cdv_intel_dp_complete_link_train(intel_encoder);
+ if (edp)
+ cdv_intel_edp_backlight_on(intel_encoder);
+}
+
+static void
+cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ struct drm_device *dev = encoder->dev;
+ uint32_t dp_reg = REG_READ(intel_dp->output_reg);
+ int edp = is_edp(intel_encoder);
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ if (edp) {
+ cdv_intel_edp_backlight_off(intel_encoder);
+ cdv_intel_edp_panel_vdd_on(intel_encoder);
+ }
+ cdv_intel_dp_sink_dpms(intel_encoder, mode);
+ cdv_intel_dp_link_down(intel_encoder);
+ if (edp) {
+ cdv_intel_edp_panel_vdd_off(intel_encoder);
+ cdv_intel_edp_panel_off(intel_encoder);
+ }
+ } else {
+ if (edp)
+ cdv_intel_edp_panel_on(intel_encoder);
+ cdv_intel_dp_sink_dpms(intel_encoder, mode);
+ if (!(dp_reg & DP_PORT_EN)) {
+ cdv_intel_dp_start_link_train(intel_encoder);
+ cdv_intel_dp_complete_link_train(intel_encoder);
+ }
+ if (edp)
+ cdv_intel_edp_backlight_on(intel_encoder);
+ }
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address,
+ uint8_t *recv, int recv_bytes)
+{
+ int ret, i;
+
+ /*
+ * Sinks are *supposed* to come up within 1ms from an off state,
+ * but we're also supposed to retry 3 times per the spec.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = cdv_intel_dp_aux_native_read(encoder, address, recv,
+ recv_bytes);
+ if (ret == recv_bytes)
+ return true;
+ udelay(1000);
+ }
+
+ return false;
+}
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+static bool
+cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ return cdv_intel_dp_aux_native_read_retry(encoder,
+ DP_LANE0_1_STATUS,
+ intel_dp->link_status,
+ DP_LINK_STATUS_SIZE);
+}
+
+static uint8_t
+cdv_intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static uint8_t
+cdv_intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+ return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static uint8_t
+cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+ return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
+
+#if 0
+static char *voltage_names[] = {
+ "0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+ "0dB", "3.5dB", "6dB", "9.5dB"
+};
+static char *link_train_names[] = {
+ "pattern 1", "pattern 2", "idle", "off"
+};
+#endif
+
+#define CDV_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
+/*
+static uint8_t
+cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+{
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+}
+*/
+static void
+cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ uint8_t v = 0;
+ uint8_t p = 0;
+ int lane;
+
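+	/* The sink requests drive settings per lane; apply the largest
+	 * voltage swing and pre-emphasis requested on any lane to all of
+	 * them, since a single setting is programmed for the whole port. */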
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ uint8_t this_v = cdv_intel_get_adjust_request_voltage(intel_dp->link_status, lane);
+ uint8_t this_p = cdv_intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
+
+ if (this_v > v)
+ v = this_v;
+ if (this_p > p)
+ p = this_p;
+ }
+
+ if (v >= CDV_DP_VOLTAGE_MAX)
+ v = CDV_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+
+ if (p == DP_TRAIN_PRE_EMPHASIS_MASK)
+ p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (lane = 0; lane < 4; lane++)
+ intel_dp->train_set[lane] = v | p;
+}
+
+
+static uint8_t
+cdv_intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+ return (l >> s) & 0xf;
+}
+
+/* Check for clock recovery is done on all channels */
+static bool
+cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+ int lane;
+ uint8_t lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = cdv_intel_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+
+/* Check to see if channel eq is done on all channels */
+#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
+ DP_LANE_CHANNEL_EQ_DONE|\
+ DP_LANE_SYMBOL_LOCKED)
+static bool
+cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ uint8_t lane_align;
+ uint8_t lane_status;
+ int lane;
+
+ lane_align = cdv_intel_dp_link_status(intel_dp->link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ lane_status = cdv_intel_get_lane_status(intel_dp->link_status, lane);
+ if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+
+static bool
+cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder,
+ uint32_t dp_reg_value,
+ uint8_t dp_train_pat)
+{
+ struct drm_device *dev = encoder->base.dev;
+ int ret;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+
+ REG_WRITE(intel_dp->output_reg, dp_reg_value);
+ REG_READ(intel_dp->output_reg);
+
+ ret = cdv_intel_dp_aux_native_write_1(encoder,
+ DP_TRAINING_PATTERN_SET,
+ dp_train_pat);
+
+ if (ret != 1) {
+ DRM_DEBUG_KMS("Failure in setting link pattern %x\n",
+ dp_train_pat);
+ return false;
+ }
+
+ return true;
+}
+
+
+static bool
+cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder,
+ uint8_t dp_train_pat)
+{
+ int ret;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+
+ ret = cdv_intel_dp_aux_native_write(encoder,
+ DP_TRAINING_LANE0_SET,
+ intel_dp->train_set,
+ intel_dp->lane_count);
+
+ if (ret != intel_dp->lane_count) {
+ DRM_DEBUG_KMS("Failure in setting level %d, lane_cnt= %d\n",
+ intel_dp->train_set[0], intel_dp->lane_count);
+ return false;
+ }
+ return true;
+}
+
+static void
+cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ struct ddi_regoff *ddi_reg;
+ int vswing, premph, index;
+
+ if (intel_dp->output_reg == DP_B)
+ ddi_reg = &ddi_DP_train_table[0];
+ else
+ ddi_reg = &ddi_DP_train_table[1];
+
+ vswing = (signal_level & DP_TRAIN_VOLTAGE_SWING_MASK);
+ premph = ((signal_level & DP_TRAIN_PRE_EMPHASIS_MASK)) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ if (vswing + premph > 3)
+ return;
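+	/* With CDV_FAST_LINK_TRAIN defined, skip the sideband drive-level
+	 * reprogramming below and keep the current settings. */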
+#ifdef CDV_FAST_LINK_TRAIN
+ return;
+#endif
+ cdv_sb_reset(dev);
+ /* ;Swing voltage programming
+ ;gfx_dpio_set_reg(0xc058, 0x0505313A) */
+ cdv_sb_write(dev, ddi_reg->VSwing5, 0x0505313A);
+
+ /* ;gfx_dpio_set_reg(0x8154, 0x43406055) */
+ cdv_sb_write(dev, ddi_reg->VSwing1, 0x43406055);
+
+ /* ;gfx_dpio_set_reg(0x8148, 0x55338954)
+ * The VSwing_PreEmph table is also considered based on the vswing/premp
+ */
+ index = (vswing + premph) * 2;
+	index = (vswing + premph) * 2;
+	if (premph == 1 && vswing == 1)
+		cdv_sb_write(dev, ddi_reg->VSwing2, 0x55738954);
+	else
+		cdv_sb_write(dev, ddi_reg->VSwing2, dp_vswing_premph_table[index]);
+
+ /* ;gfx_dpio_set_reg(0x814c, 0x40802040) */
+ if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_1200)
+ cdv_sb_write(dev, ddi_reg->VSwing3, 0x70802040);
+ else
+ cdv_sb_write(dev, ddi_reg->VSwing3, 0x40802040);
+
+ /* ;gfx_dpio_set_reg(0x8150, 0x2b405555) */
+ /* cdv_sb_write(dev, ddi_reg->VSwing4, 0x2b405555); */
+
+ /* ;gfx_dpio_set_reg(0x8154, 0xc3406055) */
+ cdv_sb_write(dev, ddi_reg->VSwing1, 0xc3406055);
+
+ /* ;Pre emphasis programming
+ * ;gfx_dpio_set_reg(0xc02c, 0x1f030040)
+ */
+ cdv_sb_write(dev, ddi_reg->PreEmph1, 0x1f030040);
+
+ /* ;gfx_dpio_set_reg(0x8124, 0x00004000) */
+ index = 2 * premph + 1;
+ cdv_sb_write(dev, ddi_reg->PreEmph2, dp_vswing_premph_table[index]);
+}
+
+
+/* Enable corresponding port and start training pattern 1 */
+static void
+cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int i;
+ uint8_t voltage;
+ bool clock_recovery = false;
+ int tries;
+ u32 reg;
+ uint32_t DP = intel_dp->DP;
+
+ DP |= DP_PORT_EN;
+ DP &= ~DP_LINK_TRAIN_MASK;
+
+ reg = DP;
+ reg |= DP_LINK_TRAIN_PAT_1;
+ /* Enable output, wait for it to become active */
+ REG_WRITE(intel_dp->output_reg, reg);
+ REG_READ(intel_dp->output_reg);
+ psb_intel_wait_for_vblank(dev);
+
+ DRM_DEBUG_KMS("Link config\n");
+ /* Write the link configuration data */
+ cdv_intel_dp_aux_native_write(encoder, DP_LINK_BW_SET,
+ intel_dp->link_configuration,
+ 2);
+
+ memset(intel_dp->train_set, 0, 4);
+ voltage = 0;
+ tries = 0;
+ clock_recovery = false;
+
+ DRM_DEBUG_KMS("Start train\n");
+ reg = DP | DP_LINK_TRAIN_PAT_1;
+
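+	/* Training pattern 1 (clock recovery) loop: keep adjusting the
+	 * drive settings as the sink requests until clock recovery is
+	 * achieved, the sink stops asking for more swing, or the same
+	 * voltage has been tried five times. */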
+ for (;;) {
+ /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+ DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
+ intel_dp->train_set[0],
+ intel_dp->link_configuration[0],
+ intel_dp->link_configuration[1]);
+
+ if (!cdv_intel_dp_set_link_train(encoder, reg, DP_TRAINING_PATTERN_1)) {
+ DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 1\n");
+ }
+ cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
+		/* Push the adjusted drive levels to the sink's lane set */
+ cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_1);
+
+ udelay(200);
+ if (!cdv_intel_dp_get_link_status(encoder))
+ break;
+
+ DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
+ intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
+ intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
+
+ if (cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+ DRM_DEBUG_KMS("PT1 train is done\n");
+ clock_recovery = true;
+ break;
+ }
+
+ /* Check to see if we've tried the max voltage */
+ for (i = 0; i < intel_dp->lane_count; i++)
+ if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ break;
+ if (i == intel_dp->lane_count)
+ break;
+
+ /* Check to see if we've tried the same voltage 5 times */
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++tries;
+ if (tries == 5)
+ break;
+ } else
+ tries = 0;
+ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* Compute new intel_dp->train_set as requested by target */
+ cdv_intel_get_adjust_train(encoder);
+ }
+
+	if (!clock_recovery)
+		DRM_DEBUG_KMS("failure in DP pattern 1 training, train set %x\n",
+			      intel_dp->train_set[0]);
+
+ intel_dp->DP = DP;
+}
+
+static void
+cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ bool channel_eq = false;
+ int tries, cr_tries;
+ u32 reg;
+ uint32_t DP = intel_dp->DP;
+
+ /* channel equalization */
+ tries = 0;
+ cr_tries = 0;
+ channel_eq = false;
+
+ DRM_DEBUG_KMS("\n");
+ reg = DP | DP_LINK_TRAIN_PAT_2;
+
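+	/* Training pattern 2 (channel equalization) loop: if equalization
+	 * keeps failing, fall back to retraining clock recovery, giving up
+	 * after five such fallbacks. */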
+ for (;;) {
+ DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
+ intel_dp->train_set[0],
+ intel_dp->link_configuration[0],
+ intel_dp->link_configuration[1]);
+ /* channel eq pattern */
+
+ if (!cdv_intel_dp_set_link_train(encoder, reg,
+ DP_TRAINING_PATTERN_2)) {
+ DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 2\n");
+ }
+ /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+
+ if (cr_tries > 5) {
+ DRM_ERROR("failed to train DP, aborting\n");
+ cdv_intel_dp_link_down(encoder);
+ break;
+ }
+
+ cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
+
+ cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_2);
+
+ udelay(1000);
+ if (!cdv_intel_dp_get_link_status(encoder))
+ break;
+
+ DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
+ intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
+ intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
+
+ /* Make sure clock is still ok */
+ if (!cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+ cdv_intel_dp_start_link_train(encoder);
+ cr_tries++;
+ continue;
+ }
+
+ if (cdv_intel_channel_eq_ok(encoder)) {
+ DRM_DEBUG_KMS("PT2 train is done\n");
+ channel_eq = true;
+ break;
+ }
+
+ /* Try 5 times, then try clock recovery if that fails */
+ if (tries > 5) {
+ cdv_intel_dp_link_down(encoder);
+ cdv_intel_dp_start_link_train(encoder);
+ tries = 0;
+ cr_tries++;
+ continue;
+ }
+
+ /* Compute new intel_dp->train_set as requested by target */
+ cdv_intel_get_adjust_train(encoder);
+ ++tries;
+ }
+
+ reg = DP | DP_LINK_TRAIN_OFF;
+
+ REG_WRITE(intel_dp->output_reg, reg);
+ REG_READ(intel_dp->output_reg);
+ cdv_intel_dp_aux_native_write_1(encoder,
+ DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+}
+
+static void
+cdv_intel_dp_link_down(struct psb_intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ uint32_t DP = intel_dp->DP;
+
+ if ((REG_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+ return;
+
+ DRM_DEBUG_KMS("\n");
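+
+	/* Drop to the idle training pattern before turning the port off */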
+	DP &= ~DP_LINK_TRAIN_MASK;
+	REG_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+ REG_READ(intel_dp->output_reg);
+
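+	/* Give the idle pattern roughly a frame to reach the sink */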
+ msleep(17);
+
+ REG_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+ REG_READ(intel_dp->output_reg);
+}
+
+static enum drm_connector_status
+cdv_dp_detect(struct psb_intel_encoder *encoder)
+{
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ enum drm_connector_status status;
+
+ status = connector_status_disconnected;
+	if (cdv_intel_dp_aux_native_read(encoder, 0x000, intel_dp->dpcd,
+					 sizeof(intel_dp->dpcd)) == sizeof(intel_dp->dpcd)) {
+ if (intel_dp->dpcd[DP_DPCD_REV] != 0)
+ status = connector_status_connected;
+ }
+ if (status == connector_status_connected)
+ DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
+ intel_dp->dpcd[0], intel_dp->dpcd[1],
+ intel_dp->dpcd[2], intel_dp->dpcd[3]);
+ return status;
+}
+
+/**
+ * Probe the sink's DPCD over the AUX channel to detect a DP connection.
+ *
+ * Returns connector_status_connected if a sink with a valid DPCD
+ * revision responds, connector_status_disconnected otherwise.
+ */
+static enum drm_connector_status
+cdv_intel_dp_detect(struct drm_connector *connector, bool force)
+{
+ struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ enum drm_connector_status status;
+ struct edid *edid = NULL;
+ int edp = is_edp(encoder);
+
+ intel_dp->has_audio = false;
+
+ if (edp)
+ cdv_intel_edp_panel_vdd_on(encoder);
+ status = cdv_dp_detect(encoder);
+ if (status != connector_status_connected) {
+ if (edp)
+ cdv_intel_edp_panel_vdd_off(encoder);
+ return status;
+ }
+
+ if (intel_dp->force_audio) {
+ intel_dp->has_audio = intel_dp->force_audio > 0;
+ } else {
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ intel_dp->has_audio = drm_detect_monitor_audio(edid);
+ kfree(edid);
+ }
+ }
+ if (edp)
+ cdv_intel_edp_panel_vdd_off(encoder);
+
+ return connector_status_connected;
+}
+
+static int cdv_intel_dp_get_modes(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+ struct edid *edid = NULL;
+ int ret = 0;
+ int edp = is_edp(intel_encoder);
+
+
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+	if (edp) {
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ cdv_intel_edp_panel_vdd_off(intel_encoder);
+ if (ret) {
+			if (!intel_dp->panel_fixed_mode) {
+ struct drm_display_mode *newmode;
+ list_for_each_entry(newmode, &connector->probed_modes,
+ head) {
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ intel_dp->panel_fixed_mode =
+ drm_mode_duplicate(dev, newmode);
+ break;
+ }
+ }
+ }
+
+ return ret;
+ }
+ if (!intel_dp->panel_fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
+ intel_dp->panel_fixed_mode =
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (intel_dp->panel_fixed_mode) {
+ intel_dp->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ }
+ }
+ if (intel_dp->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+ }
+
+ return ret;
+}
+
+static bool
+cdv_intel_dp_detect_audio(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ struct edid *edid;
+ bool has_audio = false;
+ int edp = is_edp(encoder);
+
+ if (edp)
+ cdv_intel_edp_panel_vdd_on(encoder);
+
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ has_audio = drm_detect_monitor_audio(edid);
+ kfree(edid);
+ }
+ if (edp)
+ cdv_intel_edp_panel_vdd_off(encoder);
+
+ return has_audio;
+}
+
+static int
+cdv_intel_dp_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+ int ret;
+
+ ret = drm_connector_property_set_value(connector, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ int i = val;
+ bool has_audio;
+
+ if (i == intel_dp->force_audio)
+ return 0;
+
+ intel_dp->force_audio = i;
+
+ if (i == 0)
+ has_audio = cdv_intel_dp_detect_audio(connector);
+ else
+ has_audio = i > 0;
+
+ if (has_audio == intel_dp->has_audio)
+ return 0;
+
+ intel_dp->has_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_dp->color_range)
+ return 0;
+
+ intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
+ return -EINVAL;
+
+done:
+ if (encoder->base.crtc) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y,
+ crtc->fb);
+ }
+
+ return 0;
+}
+
+static void
+cdv_intel_dp_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv;
+
+ if (is_edp(psb_intel_encoder)) {
+ /* cdv_intel_panel_destroy_backlight(connector->dev); */
+ if (intel_dp->panel_fixed_mode) {
+ kfree(intel_dp->panel_fixed_mode);
+ intel_dp->panel_fixed_mode = NULL;
+ }
+ }
+ i2c_del_adapter(&intel_dp->adapter);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = {
+ .dpms = cdv_intel_dp_dpms,
+ .mode_fixup = cdv_intel_dp_mode_fixup,
+ .prepare = cdv_intel_dp_prepare,
+ .mode_set = cdv_intel_dp_mode_set,
+ .commit = cdv_intel_dp_commit,
+};
+
+static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = cdv_intel_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = cdv_intel_dp_set_property,
+ .destroy = cdv_intel_dp_destroy,
+};
+
+static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = {
+ .get_modes = cdv_intel_dp_get_modes,
+ .mode_valid = cdv_intel_dp_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
+ .destroy = cdv_intel_dp_encoder_destroy,
+};
+
+
+static void cdv_intel_dp_add_properties(struct drm_connector *connector)
+{
+ cdv_intel_attach_force_audio_property(connector);
+ cdv_intel_attach_broadcast_rgb_property(connector);
+}
+
+/* check the VBT to see whether the eDP is on the DP-C port */
+static bool cdv_intel_dpc_is_edp(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i;
+
+ if (!dev_priv->child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+
+ if (p_child->dvo_port == PORT_IDPC &&
+ p_child->device_type == DEVICE_TYPE_eDP)
+ return true;
+ }
+ return false;
+}
+
+/* Cedarview display clock gating
+
+   We need this disabled to get correct behaviour while enabling
+   DP/eDP. TODO - investigate whether we can turn it back to normal
+   after enabling */
+static void cdv_disable_intel_clock_gating(struct drm_device *dev)
+{
+ u32 reg_value;
+ reg_value = REG_READ(DSPCLK_GATE_D);
+
+ reg_value |= (DPUNIT_PIPEB_GATE_DISABLE |
+ DPUNIT_PIPEA_GATE_DISABLE |
+ DPCUNIT_CLOCK_GATE_DISABLE |
+ DPLSUNIT_CLOCK_GATE_DISABLE |
+ DPOUNIT_CLOCK_GATE_DISABLE |
+ DPIOUNIT_CLOCK_GATE_DISABLE);
+
+ REG_WRITE(DSPCLK_GATE_D, reg_value);
+
+ udelay(500);
+}
+
+void
+cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg)
+{
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct cdv_intel_dp *intel_dp;
+ const char *name = NULL;
+ int type = DRM_MODE_CONNECTOR_DisplayPort;
+
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+ if (!psb_intel_encoder)
+ return;
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+ if (!psb_intel_connector)
+ goto err_connector;
+ intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL);
+ if (!intel_dp)
+ goto err_priv;
+
+ if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev))
+ type = DRM_MODE_CONNECTOR_eDP;
+
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
+
+ drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
+ drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS);
+
+ psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder);
+
+ if (type == DRM_MODE_CONNECTOR_DisplayPort)
+ psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ else
+ psb_intel_encoder->type = INTEL_OUTPUT_EDP;
+
+	psb_intel_encoder->dev_priv = intel_dp;
+ intel_dp->encoder = psb_intel_encoder;
+ intel_dp->output_reg = output_reg;
+
+ drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs);
+ drm_connector_helper_add(connector, &cdv_intel_dp_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_sysfs_connector_add(connector);
+
+ /* Set up the DDC bus. */
+ switch (output_reg) {
+ case DP_B:
+ name = "DPDDC-B";
+ psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
+ break;
+ case DP_C:
+ name = "DPDDC-C";
+ psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
+ break;
+ }
+
+ cdv_disable_intel_clock_gating(dev);
+
+ cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name);
+	/* FIXME: check for failure */
+ cdv_intel_dp_add_properties(connector);
+
+ if (is_edp(psb_intel_encoder)) {
+ int ret;
+ struct edp_power_seq cur;
+ u32 pp_on, pp_off, pp_div;
+ u32 pwm_ctrl;
+
+ pp_on = REG_READ(PP_CONTROL);
+ pp_on &= ~PANEL_UNLOCK_MASK;
+ pp_on |= PANEL_UNLOCK_REGS;
+
+ REG_WRITE(PP_CONTROL, pp_on);
+
+ pwm_ctrl = REG_READ(BLC_PWM_CTL2);
+ pwm_ctrl |= PWM_PIPE_B;
+ REG_WRITE(BLC_PWM_CTL2, pwm_ctrl);
+
+ pp_on = REG_READ(PP_ON_DELAYS);
+ pp_off = REG_READ(PP_OFF_DELAYS);
+ pp_div = REG_READ(PP_DIVISOR);
+
+ /* Pull timing values out of registers */
+ cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+ PANEL_POWER_UP_DELAY_SHIFT;
+
+ cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+ PANEL_LIGHT_ON_DELAY_SHIFT;
+
+ cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+ PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+ cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+ PANEL_POWER_DOWN_DELAY_SHIFT;
+
+ cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ PANEL_POWER_CYCLE_DELAY_SHIFT);
+
+ DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+
+ intel_dp->panel_power_up_delay = cur.t1_t3 / 10;
+ intel_dp->backlight_on_delay = cur.t8 / 10;
+ intel_dp->backlight_off_delay = cur.t9 / 10;
+ intel_dp->panel_power_down_delay = cur.t10 / 10;
+ intel_dp->panel_power_cycle_delay = (cur.t11_t12 - 1) * 100;
+
+ DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
+
+ DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+
+ cdv_intel_edp_panel_vdd_on(psb_intel_encoder);
+ ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV,
+ intel_dp->dpcd,
+ sizeof(intel_dp->dpcd));
+ cdv_intel_edp_panel_vdd_off(psb_intel_encoder);
+		if (ret == 0) {
+			/* if this fails, presume the device is a ghost */
+			DRM_INFO("failed to retrieve link info, disabling eDP\n");
+			cdv_intel_dp_encoder_destroy(encoder);
+			cdv_intel_dp_destroy(connector);
+			/* the destroy hook already freed the connector
+			 * wrapper, so release the rest here rather than
+			 * jumping to the error labels and freeing it twice */
+			kfree(intel_dp);
+			kfree(psb_intel_encoder);
+			return;
+		} else {
+ DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
+ intel_dp->dpcd[0], intel_dp->dpcd[1],
+ intel_dp->dpcd[2], intel_dp->dpcd[3]);
+
+ }
+		/* The CDV reference driver moves panel backlight setup into
+		   the displays that have a backlight: this is a good idea and
+		   one we should probably adopt, but we need to migrate all
+		   the drivers before we can do that. */
+ /*cdv_intel_panel_setup_backlight(dev); */
+ }
+ return;
+
+err_priv:
+ kfree(psb_intel_connector);
+err_connector:
+ kfree(psb_intel_encoder);
+}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index a86f87b9ddde..7272a461edfe 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -139,8 +139,6 @@ static enum drm_connector_status cdv_hdmi_detect(
{
struct psb_intel_encoder *psb_intel_encoder =
psb_intel_attached_encoder(connector);
- struct psb_intel_connector *psb_intel_connector =
- to_psb_intel_connector(connector);
struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
@@ -157,8 +155,6 @@ static enum drm_connector_status cdv_hdmi_detect(
hdmi_priv->has_hdmi_audio =
drm_detect_monitor_audio(edid);
}
-
- psb_intel_connector->base.display_info.raw_edid = NULL;
kfree(edid);
}
return status;
@@ -352,9 +348,11 @@ void cdv_hdmi_init(struct drm_device *dev,
switch (reg) {
case SDVOB:
ddc_bus = GPIOE;
+ psb_intel_encoder->ddi_select = DDI0_SELECT;
break;
case SDVOC:
ddc_bus = GPIOD;
+ psb_intel_encoder->ddi_select = DDI1_SELECT;
break;
default:
DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index c7f9468b74ba..b362dd39bf5a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -506,16 +506,8 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
property,
value))
return -1;
- else {
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv =
- encoder->dev->dev_private;
- struct backlight_device *bd =
- dev_priv->backlight_device;
- bd->props.brightness = value;
- backlight_update_status(bd);
-#endif
- }
+ else
+ gma_backlight_set(encoder->dev, value);
} else if (!strcmp(property->name, "DPMS") && encoder) {
struct drm_encoder_helper_funcs *helpers =
encoder->helper_private;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 5732b5702e1c..884ba73ac6ce 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -764,6 +764,13 @@ static void psb_setup_outputs(struct drm_device *dev)
crtc_mask = dev_priv->ops->hdmi_mask;
clone_mask = (1 << INTEL_OUTPUT_HDMI);
break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ crtc_mask = (1 << 0) | (1 << 1);
+ clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
+ break;
+	case INTEL_OUTPUT_EDP:
+		crtc_mask = (1 << 1);
+		clone_mask = (1 << INTEL_OUTPUT_EDP);
+		break;
}
encoder->possible_crtcs = crtc_mask;
encoder->possible_clones =
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index fc7d144bc2d3..df20546a2a34 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -36,7 +36,12 @@ int psb_gem_init_object(struct drm_gem_object *obj)
void psb_gem_free_object(struct drm_gem_object *obj)
{
struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
- drm_gem_object_release_wrap(obj);
+
+ /* Remove the list map if one is present */
+ if (obj->map_list.map)
+ drm_gem_free_mmap_offset(obj);
+ drm_gem_object_release(obj);
+
/* This must occur last as it frees up the memory of the GEM object */
psb_gtt_free_range(obj->dev, gtt);
}
@@ -77,7 +82,7 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
/* Make it mmapable */
if (!obj->map_list.map) {
- ret = gem_create_mmap_offset(obj);
+ ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
}
diff --git a/drivers/gpu/drm/gma500/gem_glue.c b/drivers/gpu/drm/gma500/gem_glue.c
deleted file mode 100644
index 3c17634f6061..000000000000
--- a/drivers/gpu/drm/gma500/gem_glue.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/**************************************************************************
- * Copyright (c) 2011, Intel Corporation.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- **************************************************************************/
-
-#include <drm/drmP.h>
-#include <drm/drm.h>
-#include "gem_glue.h"
-
-void drm_gem_object_release_wrap(struct drm_gem_object *obj)
-{
- /* Remove the list map if one is present */
- if (obj->map_list.map) {
- struct drm_gem_mm *mm = obj->dev->mm_private;
- struct drm_map_list *list = &obj->map_list;
- drm_ht_remove_item(&mm->offset_hash, &list->hash);
- drm_mm_put_block(list->file_offset_node);
- kfree(list->map);
- list->map = NULL;
- }
- drm_gem_object_release(obj);
-}
-
-/**
- * gem_create_mmap_offset - invent an mmap offset
- * @obj: our object
- *
- * Standard implementation of offset generation for mmap as is
- * duplicated in several drivers. This belongs in GEM.
- */
-int gem_create_mmap_offset(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list;
- struct drm_local_map *map;
- int ret;
-
- list = &obj->map_list;
- list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
- if (list->map == NULL)
- return -ENOMEM;
- map = list->map;
- map->type = _DRM_GEM;
- map->size = obj->size;
- map->handle = obj;
-
- list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- obj->size / PAGE_SIZE, 0, 0);
- if (!list->file_offset_node) {
- dev_err(dev->dev, "failed to allocate offset for bo %d\n",
- obj->name);
- ret = -ENOSPC;
- goto free_it;
- }
- list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- obj->size / PAGE_SIZE, 0);
- if (!list->file_offset_node) {
- ret = -ENOMEM;
- goto free_it;
- }
- list->hash.key = list->file_offset_node->start;
- ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
- if (ret) {
- dev_err(dev->dev, "failed to add to map hash\n");
- goto free_mm;
- }
- return 0;
-
-free_mm:
- drm_mm_put_block(list->file_offset_node);
-free_it:
- kfree(list->map);
- list->map = NULL;
- return ret;
-}
diff --git a/drivers/gpu/drm/gma500/gem_glue.h b/drivers/gpu/drm/gma500/gem_glue.h
deleted file mode 100644
index ce5ce30f74db..000000000000
--- a/drivers/gpu/drm/gma500/gem_glue.h
+++ /dev/null
@@ -1,2 +0,0 @@
-extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
-extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 8d7caf0f363e..4fb79cf00ed8 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -54,6 +54,98 @@ static void *find_section(struct bdb_header *bdb, int section_id)
return NULL;
}
+static void
+parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
+{
+ struct bdb_edp *edp;
+ struct edp_power_seq *edp_pps;
+ struct edp_link_params *edp_link_params;
+ uint8_t panel_type;
+
+ edp = find_section(bdb, BDB_EDP);
+
+ dev_priv->edp.bpp = 18;
+ if (!edp) {
+ if (dev_priv->edp.support) {
+ DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported, assume %dbpp panel color depth.\n",
+ dev_priv->edp.bpp);
+ }
+ return;
+ }
+
+ panel_type = dev_priv->panel_type;
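+	/* color_depth packs two bits per panel type */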
+ switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+ case EDP_18BPP:
+ dev_priv->edp.bpp = 18;
+ break;
+ case EDP_24BPP:
+ dev_priv->edp.bpp = 24;
+ break;
+ case EDP_30BPP:
+ dev_priv->edp.bpp = 30;
+ break;
+ }
+
+ /* Get the eDP sequencing and link info */
+ edp_pps = &edp->power_seqs[panel_type];
+ edp_link_params = &edp->link_params[panel_type];
+
+ dev_priv->edp.pps = *edp_pps;
+
+ DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8,
+ dev_priv->edp.pps.t9, dev_priv->edp.pps.t10,
+ dev_priv->edp.pps.t11_t12);
+
+ dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+ DP_LINK_BW_1_62;
+ switch (edp_link_params->lanes) {
+ case 0:
+ dev_priv->edp.lanes = 1;
+ break;
+ case 1:
+ dev_priv->edp.lanes = 2;
+ break;
+ case 3:
+ default:
+ dev_priv->edp.lanes = 4;
+ break;
+ }
+ DRM_DEBUG_KMS("VBT reports EDP: Lane_count %d, Lane_rate %d, Bpp %d\n",
+ dev_priv->edp.lanes, dev_priv->edp.rate, dev_priv->edp.bpp);
+
+ switch (edp_link_params->preemphasis) {
+ case 0:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+ break;
+ case 1:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+ break;
+ case 2:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+ break;
+ case 3:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+ break;
+ }
+ switch (edp_link_params->vswing) {
+ case 0:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+ break;
+ case 1:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+ break;
+ case 2:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+ break;
+ case 3:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+ break;
+ }
+ DRM_DEBUG_KMS("VBT reports EDP: VSwing %d, Preemph %d\n",
+ dev_priv->edp.vswing, dev_priv->edp.preemphasis);
+}
+
static u16
get_blocksize(void *p)
{
@@ -154,6 +246,8 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
return;
dev_priv->lvds_dither = lvds_options->pixel_dither;
+ dev_priv->panel_type = lvds_options->panel_type;
+
if (lvds_options->panel_type == 0xff)
return;
@@ -340,6 +434,9 @@ parse_driver_features(struct drm_psb_private *dev_priv,
if (!driver)
return;
+ if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ dev_priv->edp.support = 1;
+
/* This bit means to use 96Mhz for DPLL_A or not */
if (driver->primary_lfp_id)
dev_priv->dplla_96mhz = true;
@@ -437,6 +534,9 @@ int psb_intel_init_bios(struct drm_device *dev)
size_t size;
int i;
+
+ dev_priv->panel_type = 0xff;
+
/* XXX Should this validation be moved to intel_opregion.c? */
if (dev_priv->opregion.vbt) {
struct vbt_header *vbt = dev_priv->opregion.vbt;
@@ -477,6 +577,7 @@ int psb_intel_init_bios(struct drm_device *dev)
parse_sdvo_device_mapping(dev_priv, bdb);
parse_device_mapping(dev_priv, bdb);
parse_backlight_data(dev_priv, bdb);
+ parse_edp(dev_priv, bdb);
if (bios)
pci_unmap_rom(pdev, bios);
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 2e95523b84b1..c6267c98c9e7 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -23,6 +23,7 @@
#define _I830_BIOS_H_
#include <drm/drmP.h>
+#include <drm/drm_dp_helper.h>
struct vbt_header {
u8 signature[20]; /**< Always starts with 'VBT$' */
@@ -93,6 +94,7 @@ struct vbios_data {
#define BDB_SDVO_LVDS_PNP_IDS 24
#define BDB_SDVO_LVDS_POWER_SEQ 25
#define BDB_TV_OPTIONS 26
+#define BDB_EDP 27
#define BDB_LVDS_OPTIONS 40
#define BDB_LVDS_LFP_DATA_PTRS 41
#define BDB_LVDS_LFP_DATA 42
@@ -391,6 +393,11 @@ struct bdb_sdvo_lvds_options {
u8 panel_misc_bits_4;
} __attribute__((packed));
+#define BDB_DRIVER_FEATURE_NO_LVDS 0
+#define BDB_DRIVER_FEATURE_INT_LVDS 1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
+#define BDB_DRIVER_FEATURE_EDP 3
+
struct bdb_driver_features {
u8 boot_dev_algorithm:1;
u8 block_display_switch:1;
@@ -431,6 +438,45 @@ struct bdb_driver_features {
u8 custom_vbt_version;
} __attribute__((packed));
+#define EDP_18BPP 0
+#define EDP_24BPP 1
+#define EDP_30BPP 2
+#define EDP_RATE_1_62 0
+#define EDP_RATE_2_7 1
+#define EDP_LANE_1 0
+#define EDP_LANE_2 1
+#define EDP_LANE_4 3
+#define EDP_PREEMPHASIS_NONE 0
+#define EDP_PREEMPHASIS_3_5dB 1
+#define EDP_PREEMPHASIS_6dB 2
+#define EDP_PREEMPHASIS_9_5dB 3
+#define EDP_VSWING_0_4V 0
+#define EDP_VSWING_0_6V 1
+#define EDP_VSWING_0_8V 2
+#define EDP_VSWING_1_2V 3
+
+struct edp_power_seq {
+ u16 t1_t3;
+ u16 t8;
+ u16 t9;
+ u16 t10;
+ u16 t11_t12;
+} __attribute__ ((packed));
+
+struct edp_link_params {
+ u8 rate:4;
+ u8 lanes:4;
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __attribute__ ((packed));
+
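+/* The VBT carries one entry per panel type in each of the tables below */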
+struct bdb_edp {
+ struct edp_power_seq power_seqs[16];
+ u32 color_depth;
+ u32 sdrrs_msa_timing_delay;
+ struct edp_link_params link_params[16];
+} __attribute__ ((packed));
+
extern int psb_intel_init_bios(struct drm_device *dev);
extern void psb_intel_destroy_bios(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 5675d93b4205..32dba2ab53e1 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -299,17 +299,8 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
if (drm_connector_property_set_value(connector, property,
value))
goto set_prop_error;
- else {
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct backlight_device *psb_bd;
-
- psb_bd = mdfld_get_backlight_device();
- if (psb_bd) {
- psb_bd->props.brightness = value;
- mdfld_set_brightness(psb_bd);
- }
-#endif
- }
+ else
+ gma_backlight_set(encoder->dev, value);
}
set_prop_done:
return 0;
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index b2a790bd9899..850cd3fbb969 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -118,20 +118,20 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
dev_priv->platform_rev_id);
}
-struct vbt_header {
+struct mid_vbt_header {
u32 signature;
u8 revision;
} __packed;
/* The same for r0 and r1 */
struct vbt_r0 {
- struct vbt_header vbt_header;
+ struct mid_vbt_header vbt_header;
u8 size;
u8 checksum;
} __packed;
struct vbt_r10 {
- struct vbt_header vbt_header;
+ struct mid_vbt_header vbt_header;
u8 checksum;
u16 size;
u8 panel_count;
@@ -281,7 +281,7 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
struct drm_device *dev = dev_priv->dev;
u32 addr;
u8 __iomem *vbt_virtual;
- struct vbt_header vbt_header;
+ struct mid_vbt_header vbt_header;
struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
int ret = -1;
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 2eb3dc4e9c9b..69e51e903f35 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -252,7 +252,6 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
- connector->display_info.raw_edid = NULL;
}
/*
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index c430bd424681..ad0d6de938f3 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -166,8 +166,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
int max = bd->props.max_brightness;
- bd->props.brightness = bclp * max / 255;
- backlight_update_status(bd);
+ gma_backlight_set(dev, bclp * max / 255);
}
asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 5971bc82b765..f1432f096e58 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -290,6 +290,7 @@ static void psb_get_core_freq(struct drm_device *dev)
case 6:
case 7:
dev_priv->core_freq = 266;
+ break;
default:
dev_priv->core_freq = 0;
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 1bd115ecefe1..223ff5b1b5c4 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -24,10 +24,10 @@
#include <drm/drmP.h>
#include "drm_global.h"
-#include "gem_glue.h"
#include "gma_drm.h"
#include "psb_reg.h"
#include "psb_intel_drv.h"
+#include "intel_bios.h"
#include "gtt.h"
#include "power.h"
#include "opregion.h"
@@ -613,6 +613,8 @@ struct drm_psb_private {
*/
struct backlight_device *backlight_device;
struct drm_property *backlight_property;
+ bool backlight_enabled;
+ int backlight_level;
uint32_t blc_adj1;
uint32_t blc_adj2;
@@ -640,6 +642,19 @@ struct drm_psb_private {
int mdfld_panel_id;
bool dplla_96mhz; /* DPLL data from the VBT */
+
+ struct {
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+
+ bool initialized;
+ bool support;
+ int bpp;
+ struct edp_power_seq pps;
+ } edp;
+ uint8_t panel_type;
};
@@ -796,6 +811,9 @@ extern int psb_fbdev_init(struct drm_device *dev);
/* backlight.c */
int gma_backlight_init(struct drm_device *dev);
void gma_backlight_exit(struct drm_device *dev);
+void gma_backlight_disable(struct drm_device *dev);
+void gma_backlight_enable(struct drm_device *dev);
+void gma_backlight_set(struct drm_device *dev, int v);
/* oaktrail_crtc.c */
extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index ebe1a28f60e1..90f2d11e686b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -29,10 +29,6 @@
* Display related stuff
*/
-/* store information about an Ixxx DVO */
-/* The i830->i865 use multiple DVOs with multiple i2cs */
-/* the i915, i945 have a single sDVO i2c bus - which is different */
-#define MAX_OUTPUTS 6
/* maximum connectors per crtcs in the mode set */
#define INTELFB_CONN_LIMIT 4
@@ -69,6 +65,8 @@
#define INTEL_OUTPUT_HDMI 6
#define INTEL_OUTPUT_MIPI 7
#define INTEL_OUTPUT_MIPI2 8
+#define INTEL_OUTPUT_DISPLAYPORT 9
+#define INTEL_OUTPUT_EDP 10
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -133,6 +131,11 @@ struct psb_intel_encoder {
void (*hot_plug)(struct psb_intel_encoder *);
int crtc_mask;
int clone_mask;
+ u32 ddi_select; /* Channel info */
+#define DDI0_SELECT 0x01
+#define DDI1_SELECT 0x02
+#define DP_MASK 0x8000
+#define DDI_MASK 0x03
void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */
/* FIXME: Either make SDVO and LVDS store it's i2c here or give CDV it's
@@ -190,7 +193,6 @@ struct psb_intel_crtc {
u32 mode_flags;
bool active;
- bool crtc_enable;
/* Saved Crtc HW states */
struct psb_intel_crtc_state *crtc_state;
@@ -285,4 +287,20 @@ extern void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
extern void gma_intel_teardown_gmbus(struct drm_device *dev);
+/* DP support */
+extern void cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg);
+extern void cdv_intel_dp_set_m_n(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+extern void psb_intel_attach_force_audio_property(struct drm_connector *connector);
+extern void psb_intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+extern int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val);
+extern int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val);
+extern void cdv_sb_reset(struct drm_device *dev);
+
+extern void cdv_intel_attach_force_audio_property(struct drm_connector *connector);
+extern void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 37adc9edf974..2a4c3a9e33e3 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -630,17 +630,8 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
property,
value))
goto set_prop_error;
- else {
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *devp =
- encoder->dev->dev_private;
- struct backlight_device *bd = devp->backlight_device;
- if (bd) {
- bd->props.brightness = value;
- backlight_update_status(bd);
- }
-#endif
- }
+ else
+ gma_backlight_set(encoder->dev, value);
} else if (!strcmp(property->name, "DPMS")) {
struct drm_encoder_helper_funcs *hfuncs
= encoder->helper_private;
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index 8e8c8efb0a89..d914719c4b60 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -173,15 +173,46 @@
#define PP_SEQUENCE_ON (1 << 28)
#define PP_SEQUENCE_OFF (2 << 28)
#define PP_SEQUENCE_MASK 0x30000000
+#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
+#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
+#define PP_SEQUENCE_STATE_MASK 0x0000000f
+
#define PP_CONTROL 0x61204
#define POWER_TARGET_ON (1 << 0)
-
+#define PANEL_UNLOCK_REGS (0xabcd << 16)
+#define PANEL_UNLOCK_MASK (0xffff << 16)
+#define EDP_FORCE_VDD (1 << 3)
+#define EDP_BLC_ENABLE (1 << 2)
+#define PANEL_POWER_RESET (1 << 1)
+#define PANEL_POWER_OFF (0 << 0)
+#define PANEL_POWER_ON (1 << 0)
+
+/* Poulsbo/Oaktrail */
#define LVDSPP_ON 0x61208
#define LVDSPP_OFF 0x6120c
#define PP_CYCLE 0x61210
+/* Cedartrail */
#define PP_ON_DELAYS 0x61208 /* Cedartrail */
+#define PANEL_PORT_SELECT_MASK (3 << 30)
+#define PANEL_PORT_SELECT_LVDS (0 << 30)
+#define PANEL_PORT_SELECT_EDP (1 << 30)
+#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
+#define PANEL_POWER_UP_DELAY_SHIFT 16
+#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff)
+#define PANEL_LIGHT_ON_DELAY_SHIFT 0
+
#define PP_OFF_DELAYS 0x6120c /* Cedartrail */
+#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
+#define PANEL_POWER_DOWN_DELAY_SHIFT 16
+#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
+#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
+
+#define PP_DIVISOR 0x61210 /* Cedartrail */
+#define PP_REFERENCE_DIVIDER_MASK (0xffffff00)
+#define PP_REFERENCE_DIVIDER_SHIFT 8
+#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
+#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
#define PFIT_CONTROL 0x61230
#define PFIT_ENABLE (1 << 31)
@@ -1282,6 +1313,10 @@ No status bits are changed.
# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* Fixed value on CDV */
# define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11)
# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6)
+# define DPUNIT_PIPEB_GATE_DISABLE (1 << 30)
+# define DPUNIT_PIPEA_GATE_DISABLE (1 << 25)
+# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24)
+# define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13)
#define RAMCLK_GATE_D 0x6210
@@ -1347,5 +1382,165 @@ No status bits are changed.
#define LANE_PLL_ENABLE (0x3 << 20)
#define LANE_PLL_PIPE(p) (((p) == 0) ? (1 << 21) : (0 << 21))
+#define DP_B 0x64100
+#define DP_C 0x64200
+
+#define DP_PORT_EN (1 << 31)
+#define DP_PIPEB_SELECT (1 << 30)
+#define DP_PIPE_MASK (1 << 30)
+
+/* Link training mode - select a suitable mode for each stage */
+#define DP_LINK_TRAIN_PAT_1 (0 << 28)
+#define DP_LINK_TRAIN_PAT_2 (1 << 28)
+#define DP_LINK_TRAIN_PAT_IDLE (2 << 28)
+#define DP_LINK_TRAIN_OFF (3 << 28)
+#define DP_LINK_TRAIN_MASK (3 << 28)
+#define DP_LINK_TRAIN_SHIFT 28
+
+/* Signal voltages. These are mostly controlled by the other end */
+#define DP_VOLTAGE_0_4 (0 << 25)
+#define DP_VOLTAGE_0_6 (1 << 25)
+#define DP_VOLTAGE_0_8 (2 << 25)
+#define DP_VOLTAGE_1_2 (3 << 25)
+#define DP_VOLTAGE_MASK (7 << 25)
+#define DP_VOLTAGE_SHIFT 25
+
+/* Signal pre-emphasis levels; as with the voltages, the other end tells
+ * us what it wants
+ */
+#define DP_PRE_EMPHASIS_0 (0 << 22)
+#define DP_PRE_EMPHASIS_3_5 (1 << 22)
+#define DP_PRE_EMPHASIS_6 (2 << 22)
+#define DP_PRE_EMPHASIS_9_5 (3 << 22)
+#define DP_PRE_EMPHASIS_MASK (7 << 22)
+#define DP_PRE_EMPHASIS_SHIFT 22
+
+/* How many wires to use. I guess 3 was too hard */
+#define DP_PORT_WIDTH_1 (0 << 19)
+#define DP_PORT_WIDTH_2 (1 << 19)
+#define DP_PORT_WIDTH_4 (3 << 19)
+#define DP_PORT_WIDTH_MASK (7 << 19)
+
+/* Mystic DPCD version 1.1 special mode */
+#define DP_ENHANCED_FRAMING (1 << 18)
+
+/** locked once port is enabled */
+#define DP_PORT_REVERSAL (1 << 15)
+
+/** sends the clock on lane 15 of the PEG for debug */
+#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
+
+#define DP_SCRAMBLING_DISABLE (1 << 12)
+#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
+
+/** limit RGB values to avoid confusing TVs */
+#define DP_COLOR_RANGE_16_235 (1 << 8)
+
+/** Turn on the audio link */
+#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
+
+/** vs and hs sync polarity */
+#define DP_SYNC_VS_HIGH (1 << 4)
+#define DP_SYNC_HS_HIGH (1 << 3)
+
+/** A fantasy */
+#define DP_DETECTED (1 << 2)
+
+/** The aux channel provides a way to talk to the
+ * signal sink for DDC etc. Max packet size supported
+ * is 20 bytes in each direction, hence the 5 fixed
+ * data registers
+ */
+#define DPB_AUX_CH_CTL 0x64110
+#define DPB_AUX_CH_DATA1 0x64114
+#define DPB_AUX_CH_DATA2 0x64118
+#define DPB_AUX_CH_DATA3 0x6411c
+#define DPB_AUX_CH_DATA4 0x64120
+#define DPB_AUX_CH_DATA5 0x64124
+
+#define DPC_AUX_CH_CTL 0x64210
+#define DPC_AUX_CH_DATA1 0x64214
+#define DPC_AUX_CH_DATA2 0x64218
+#define DPC_AUX_CH_DATA3 0x6421c
+#define DPC_AUX_CH_DATA4 0x64220
+#define DPC_AUX_CH_DATA5 0x64224
+
+#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
+#define DP_AUX_CH_CTL_DONE (1 << 30)
+#define DP_AUX_CH_CTL_INTERRUPT (1 << 29)
+#define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28)
+#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26)
+#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
+#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
+#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20
+#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16)
+#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16
+#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15)
+#define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14)
+#define DP_AUX_CH_CTL_SYNC_TEST (1 << 13)
+#define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12)
+#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
+#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
+#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
+
+/*
+ * Computing GMCH M and N values for the Display Port link
+ *
+ * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes
+ *
+ * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
+ *
+ * The GMCH value is used internally
+ *
+ * bytes_per_pixel is the number of bytes coming out of the plane,
+ * which is after the LUTs, so we want the bytes for our color format.
+ * For our current usage, this is always 3, one byte for R, G and B.
+ */
+
+#define _PIPEA_GMCH_DATA_M 0x70050
+#define _PIPEB_GMCH_DATA_M 0x71050
+
+/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
+#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25)
+#define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25
+
+#define PIPE_GMCH_DATA_M_MASK (0xffffff)
+
+#define _PIPEA_GMCH_DATA_N 0x70054
+#define _PIPEB_GMCH_DATA_N 0x71054
+#define PIPE_GMCH_DATA_N_MASK (0xffffff)
+
+/*
+ * Computing Link M and N values for the Display Port link
+ *
+ * Link M / N = pixel_clock / ls_clk
+ *
+ * (the DP spec calls pixel_clock the 'strm_clk')
+ *
+ * The Link value is transmitted in the Main Stream
+ * Attributes and VB-ID.
+ */
+
+#define _PIPEA_DP_LINK_M 0x70060
+#define _PIPEB_DP_LINK_M 0x71060
+#define PIPEA_DP_LINK_M_MASK (0xffffff)
+
+#define _PIPEA_DP_LINK_N 0x70064
+#define _PIPEB_DP_LINK_N 0x71064
+#define PIPEA_DP_LINK_N_MASK (0xffffff)
+
+#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
+#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
+#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
+#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
+
+#define PIPE_BPC_MASK (7 << 5)
+#define PIPE_8BPC (0 << 5)
+#define PIPE_10BPC (1 << 5)
+#define PIPE_6BPC (2 << 5)
#endif
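
The two comment blocks above reduce to GMCH M/N = (dot_clock * bytes_per_pixel) / (ls_clk * lane_count) and Link M/N = pixel_clock / ls_clk. A worked sketch of the arithmetic; reduce_ratio() is a hypothetical helper standing in for whatever the driver uses to scale each pair into its 24-bit M/N register field. For 1024x768@60 (dot clock roughly 65000 kHz) at 3 bytes per pixel over 2 lanes at 162000 kHz, this gives GMCH M/N = 195000/324000 and Link M/N = 65000/162000 before reduction:

/* Sketch: compute the DP data (GMCH) and link M/N pairs.
 * reduce_ratio() is hypothetical: it divides both terms down until
 * they fit the 24-bit M/N register fields defined above.
 */
static void compute_dp_m_n(u32 dot_clock_khz, u32 bytes_per_pixel,
			   u32 link_clock_khz, u32 lane_count,
			   u32 *gmch_m, u32 *gmch_n,
			   u32 *link_m, u32 *link_n)
{
	/* Data M/N: payload bandwidth over total link bandwidth */
	*gmch_m = dot_clock_khz * bytes_per_pixel;
	*gmch_n = link_clock_khz * lane_count;
	reduce_ratio(gmch_m, gmch_n);

	/* Link M/N: stream clock over link symbol clock */
	*link_m = dot_clock_khz;
	*link_n = link_clock_khz;
	reduce_ratio(link_m, link_n);
}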
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 0466c7b985f8..d35f93ba3a85 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1292,7 +1292,6 @@ psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
return drm_get_edid(connector,
&dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
- return NULL;
}
static enum drm_connector_status
@@ -1343,7 +1342,6 @@ psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
}
} else
status = connector_status_disconnected;
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -1404,7 +1402,6 @@ psb_intel_sdvo_detect(struct drm_connector *connector, bool force)
ret = connector_status_disconnected;
else
ret = connector_status_connected;
- connector->display_info.raw_edid = NULL;
kfree(edid);
} else
ret = connector_status_connected;
@@ -1453,7 +1450,6 @@ static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
drm_add_edid_modes(connector, edid);
}
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
}
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 36d952280c50..599099fe76e3 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -427,15 +427,10 @@ static int ch7006_remove(struct i2c_client *client)
return 0;
}
-static int ch7006_suspend(struct i2c_client *client, pm_message_t mesg)
+static int ch7006_resume(struct device *dev)
{
- ch7006_dbg(client, "\n");
-
- return 0;
-}
+ struct i2c_client *client = to_i2c_client(dev);
-static int ch7006_resume(struct i2c_client *client)
-{
ch7006_dbg(client, "\n");
ch7006_write(client, 0x3d, 0x0);
@@ -499,15 +494,18 @@ static struct i2c_device_id ch7006_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, ch7006_ids);
+static const struct dev_pm_ops ch7006_pm_ops = {
+ .resume = ch7006_resume,
+};
+
static struct drm_i2c_encoder_driver ch7006_driver = {
.i2c_driver = {
.probe = ch7006_probe,
.remove = ch7006_remove,
- .suspend = ch7006_suspend,
- .resume = ch7006_resume,
.driver = {
.name = "ch7006",
+ .pm = &ch7006_pm_ops,
},
.id_table = ch7006_ids,
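
The conversion above moves ch7006 off the legacy i2c-level suspend/resume callbacks onto dev_pm_ops; the old suspend hook only logged, so just resume survives. Had a suspend step still been needed, the stock pattern would be SIMPLE_DEV_PM_OPS, sketched here; the ch7006_suspend below is hypothetical and not part of the patch:

/* Sketch: equivalent declaration if a suspend hook were still wanted. */
static int ch7006_suspend(struct device *dev)
{
	ch7006_dbg(to_i2c_client(dev), "\n");
	return 0;
}

static SIMPLE_DEV_PM_OPS(ch7006_pm_ops, ch7006_suspend, ch7006_resume);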
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b0bacdba6d7e..0f2c5493242b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,6 +40,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
dvo_ivch.o \
dvo_tfp410.o \
dvo_sil164.o \
+ dvo_ns2501.o \
i915_gem_dmabuf.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 58914691a77b..74b5efccfdb1 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -58,13 +58,12 @@ struct intel_dvo_dev_ops {
void (*create_resources)(struct intel_dvo_device *dvo);
/*
- * Turn on/off output or set intermediate power levels if available.
+ * Turn on/off output.
*
- * Unsupported intermediate modes drop to the lower power setting.
- * If the mode is DPMSModeOff, the output must be disabled,
- * as the DPLL may be disabled afterwards.
+ * Because none of our dvo drivers support intermediate power levels,
+ * we don't expose this in the interface.
*/
- void (*dpms)(struct intel_dvo_device *dvo, int mode);
+ void (*dpms)(struct intel_dvo_device *dvo, bool enable);
/*
* Callback for testing a video mode for a given output.
@@ -115,6 +114,12 @@ struct intel_dvo_dev_ops {
*/
enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
+ /*
+ * Probe the current hw status, returning true if the connected output
+ * is active.
+ */
+ bool (*get_hw_state)(struct intel_dvo_device *dev);
+
/**
* Query the device for the modes it provides.
*
@@ -140,5 +145,6 @@ extern struct intel_dvo_dev_ops ch7xxx_ops;
extern struct intel_dvo_dev_ops ivch_ops;
extern struct intel_dvo_dev_ops tfp410_ops;
extern struct intel_dvo_dev_ops ch7017_ops;
+extern struct intel_dvo_dev_ops ns2501_ops;
#endif /* _INTEL_DVO_H */
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1ca799a1e1fc..86b27d1d90c2 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -163,7 +163,7 @@ struct ch7017_priv {
};
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
-static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable);
static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
{
@@ -309,7 +309,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
(mode->hdisplay & 0x0700) >> 8;
- ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
+ ch7017_dpms(dvo, false);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
horizontal_active_pixel_input);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
@@ -331,7 +331,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
}
/* set the CH7017 power state */
-static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
{
uint8_t val;
@@ -345,7 +345,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
CH7017_DAC3_POWER_DOWN |
CH7017_TV_POWER_DOWN_EN);
- if (mode == DRM_MODE_DPMS_ON) {
+ if (enable) {
/* Turn on the LVDS */
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
val & ~CH7017_LVDS_POWER_DOWN_EN);
@@ -359,6 +359,18 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
msleep(20);
}
+static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+ if (val & CH7017_LVDS_POWER_DOWN_EN)
+ return false;
+ else
+ return true;
+}
+
static void ch7017_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
@@ -396,6 +408,7 @@ struct intel_dvo_dev_ops ch7017_ops = {
.mode_valid = ch7017_mode_valid,
.mode_set = ch7017_mode_set,
.dpms = ch7017_dpms,
+ .get_hw_state = ch7017_get_hw_state,
.dump_regs = ch7017_dump_regs,
.destroy = ch7017_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 4a036600e806..38f3a6cb8c7d 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -289,14 +289,26 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
}
/* set the CH7xxx power state */
-static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
+static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
{
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
else
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
}
+static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
+{
+ u8 val;
+
+ ch7xxx_readb(dvo, CH7xxx_PM, &val);
+
+ if (val & CH7xxx_PM_FPD)
+ return false;
+ else
+ return true;
+}
+
static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
{
int i;
@@ -326,6 +338,7 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
.mode_valid = ch7xxx_mode_valid,
.mode_set = ch7xxx_mode_set,
.dpms = ch7xxx_dpms,
+ .get_hw_state = ch7xxx_get_hw_state,
.dump_regs = ch7xxx_dump_regs,
.destroy = ch7xxx_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 04f2893d5e3c..baaf65bf0bdd 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -288,7 +288,7 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
}
/** Sets the power state of the panel connected to the ivch */
-static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
+static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
int i;
uint16_t vr01, vr30, backlight;
@@ -297,13 +297,13 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
if (!ivch_read(dvo, VR01, &vr01))
return;
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
backlight = 1;
else
backlight = 0;
ivch_write(dvo, VR80, backlight);
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
else
vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
@@ -315,7 +315,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
if (!ivch_read(dvo, VR30, &vr30))
break;
- if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON))
+ if (((vr30 & VR30_PANEL_ON) != 0) == enable)
break;
udelay(1000);
}
@@ -323,6 +323,20 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
udelay(16 * 1000);
}
+static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint16_t vr01;
+
+ /* Query the current state of the panel. */
+ if (!ivch_read(dvo, VR01, &vr01))
+ return false;
+
+ if (vr01 & VR01_LCD_ENABLE)
+ return true;
+ else
+ return false;
+}
+
static void ivch_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -413,6 +427,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
struct intel_dvo_dev_ops ivch_ops = {
.init = ivch_init,
.dpms = ivch_dpms,
+ .get_hw_state = ivch_get_hw_state,
.mode_valid = ivch_mode_valid,
.mode_set = ivch_mode_set,
.detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
new file mode 100644
index 000000000000..c4a255be6979
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -0,0 +1,588 @@
+/*
+ *
+ * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "dvo.h"
+#include "i915_reg.h"
+#include "i915_drv.h"
+
+#define NS2501_VID 0x1305
+#define NS2501_DID 0x6726
+
+#define NS2501_VID_LO 0x00
+#define NS2501_VID_HI 0x01
+#define NS2501_DID_LO 0x02
+#define NS2501_DID_HI 0x03
+#define NS2501_REV 0x04
+#define NS2501_RSVD 0x05
+#define NS2501_FREQ_LO 0x06
+#define NS2501_FREQ_HI 0x07
+
+#define NS2501_REG8 0x08
+#define NS2501_8_VEN (1<<5)
+#define NS2501_8_HEN (1<<4)
+#define NS2501_8_DSEL (1<<3)
+#define NS2501_8_BPAS (1<<2)
+#define NS2501_8_RSVD (1<<1)
+#define NS2501_8_PD (1<<0)
+
+#define NS2501_REG9 0x09
+#define NS2501_9_VLOW (1<<7)
+#define NS2501_9_MSEL_MASK (0x7<<4)
+#define NS2501_9_TSEL (1<<3)
+#define NS2501_9_RSEN (1<<2)
+#define NS2501_9_RSVD (1<<1)
+#define NS2501_9_MDI (1<<0)
+
+#define NS2501_REGC 0x0c
+
+struct ns2501_priv {
+ /* I2CDevRec d; */
+ bool quiet;
+ int reg_8_shadow;
+ int reg_8_set;
+ /* Shadow registers for i915 */
+ int dvoc;
+ int pll_a;
+ int srcdim;
+ int fw_blc;
+};
+
+#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
+
+/*
+ * For reasons unclear to me, the ns2501 at least on the Fujitsu/Siemens
+ * laptops does not respond on the i2c bus unless
+ * both the PLL is running and the display is configured in its native
+ * resolution.
+ * This function forces the DVO on, and stores the registers it touches;
+ * restore_dvo() below puts them back to their previous values.
+ *
+ * This is pretty much a hack, though it works.
+ * Without that, ns2501_readb and ns2501_writeb fail
+ * when switching the resolution.
+ */
+
+static void enable_dvo(struct intel_dvo_device *dvo)
+{
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
+
+ ns->dvoc = I915_READ(DVO_C);
+ ns->pll_a = I915_READ(_DPLL_A);
+ ns->srcdim = I915_READ(DVOC_SRCDIM);
+ ns->fw_blc = I915_READ(FW_BLC);
+
+ I915_WRITE(DVOC, 0x10004084);
+ I915_WRITE(_DPLL_A, 0xd0820000);
+ I915_WRITE(DVOC_SRCDIM, 0x400300); /* 1024x768 */
+ I915_WRITE(FW_BLC, 0x1080304);
+
+ I915_WRITE(DVOC, 0x90004084);
+}
+
+/*
+ * Restore the I915 registers modified by the above
+ * trigger function.
+ */
+static void restore_dvo(struct intel_dvo_device *dvo)
+{
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+ I915_WRITE(DVOC, ns->dvoc);
+ I915_WRITE(_DPLL_A, ns->pll_a);
+ I915_WRITE(DVOC_SRCDIM, ns->srcdim);
+ I915_WRITE(FW_BLC, ns->fw_blc);
+}
+
+/*
+** Read a register from the ns2501.
+** Returns true if successful, false otherwise.
+** If it returns false, it might be wise to enable the
+** DVO with the above function.
+*/
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!ns->quiet) {
+ DRM_DEBUG_KMS
+ ("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
+ adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/*
+** Write a register to the ns2501.
+** Returns true if successful, false otherwise.
+** If it returns false, it might be wise to enable the
+** DVO with the above function.
+*/
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ uint8_t out_buf[2];
+
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(adapter, &msg, 1) == 1) {
+ return true;
+ }
+
+ if (!ns->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/* National Semiconductor 2501 driver for a chip on the i2c bus:
+ * scan for the chip on the bus.
+ * We hope the VBIOS initialized the PLL correctly so we can
+ * talk to the chip; if not, it will not be seen and not detected.
+ * Bummer!
+ */
+static bool ns2501_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+{
+ /* this will detect the NS2501 chip on the specified i2c bus */
+ struct ns2501_priv *ns;
+ unsigned char ch;
+
+ ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
+ if (ns == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = ns;
+ ns->quiet = true;
+
+ if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
+ goto out;
+
+ if (ch != (NS2501_VID & 0xff)) {
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+ ch, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+
+ if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
+ goto out;
+
+ if (ch != (NS2501_DID & 0xff)) {
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+ ch, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+ ns->quiet = false;
+ ns->reg_8_set = 0;
+ ns->reg_8_shadow =
+ NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
+
+ DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
+ return true;
+
+out:
+ kfree(ns);
+ return false;
+}
+
+static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
+{
+ /*
+ * This is a laptop display; it doesn't support hotplugging.
+ * Even if it did, the detection bit of the 2501 is unreliable, as
+ * it only works for some display types.
+ * It is even less reliable in that the PLL must be active for
+ * the chip to be readable at all.
+ */
+ return connector_status_connected;
+}
+
+static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS
+ ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
+ __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
+ mode->vtotal);
+
+ /*
+ * Currently, these are all the modes I have data for.
+ * More might exist. It is unclear how to find the native resolution
+ * of the panel in here, or we could always accept it
+ * by disabling the scaler.
+ */
+ if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
+ (mode->hdisplay == 640 && mode->vdisplay == 480) ||
+ (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
+ return MODE_OK;
+ } else {
+ return MODE_ONE_SIZE; /* Is this a reasonable error? */
+ }
+}
+
+static void ns2501_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ bool ok;
+ bool restore = false;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+ DRM_DEBUG_KMS
+ ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
+ __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
+ mode->vtotal);
+
+ /*
+ * Where do I find the native resolution for which scaling is not required?
+ *
+ * First, trigger the DVO on, as otherwise the chip does not appear
+ * on the i2c bus.
+ */
+ do {
+ ok = true;
+
+ if (mode->hdisplay == 800 && mode->vdisplay == 600) {
+ /* mode 277 */
+ ns->reg_8_shadow &= ~NS2501_8_BPAS;
+ DRM_DEBUG_KMS("%s: switching to 800x600\n",
+ __FUNCTION__);
+
+ /*
+ * No, I do not know where this data comes from.
+ * It is just what the video bios left in the DVO, so
+ * I'm just copying it over here.
+ * This also means that I cannot support any other modes
+ * except the ones supported by the bios.
+ */
+ ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works.
+ ok &= ns2501_writeb(dvo, 0x1b, 0x19);
+ ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer
+ ok &= ns2501_writeb(dvo, 0x1d, 0x02);
+
+ ok &= ns2501_writeb(dvo, 0x34, 0x03);
+ ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+ ok &= ns2501_writeb(dvo, 0x80, 0x27);
+ ok &= ns2501_writeb(dvo, 0x81, 0x03);
+ ok &= ns2501_writeb(dvo, 0x82, 0x41);
+ ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+ ok &= ns2501_writeb(dvo, 0x8e, 0x04);
+ ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */
+ ok &= ns2501_writeb(dvo, 0x91, 0x07);
+ ok &= ns2501_writeb(dvo, 0x94, 0x00);
+ ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x96, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x99, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+ ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */
+ ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+ ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0xa4, 0x80);
+
+ ok &= ns2501_writeb(dvo, 0xb6, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
+
+ ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
+
+ ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
+
+ ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+ ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
+
+ ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc7, 0x73);
+ ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+ } else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
+ /* mode 274 */
+ DRM_DEBUG_KMS("%s: switching to 640x480\n",
+ __FUNCTION__);
+ /*
+ * No, I do not know where this data comes from.
+ * It is just what the video bios left in the DVO, so
+ * I'm just copying it over here.
+ * This also means that I cannot support any other modes
+ * except the ones supported by the bios.
+ */
+ ns->reg_8_shadow &= ~NS2501_8_BPAS;
+
+ ok &= ns2501_writeb(dvo, 0x11, 0xa0);
+ ok &= ns2501_writeb(dvo, 0x1b, 0x11);
+ ok &= ns2501_writeb(dvo, 0x1c, 0x54);
+ ok &= ns2501_writeb(dvo, 0x1d, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0x34, 0x03);
+ ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+ ok &= ns2501_writeb(dvo, 0x80, 0xff);
+ ok &= ns2501_writeb(dvo, 0x81, 0x07);
+ ok &= ns2501_writeb(dvo, 0x82, 0x3d);
+ ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+ ok &= ns2501_writeb(dvo, 0x8e, 0x10);
+ ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */
+ ok &= ns2501_writeb(dvo, 0x91, 0x07);
+ ok &= ns2501_writeb(dvo, 0x94, 0x00);
+ ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x96, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x99, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+ ok &= ns2501_writeb(dvo, 0x9c, 0x24);
+ ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+ ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0xa4, 0x84);
+
+ ok &= ns2501_writeb(dvo, 0xb6, 0x09);
+
+ ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
+
+ ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xc1, 0x90);
+
+ ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
+
+ ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+ ok &= ns2501_writeb(dvo, 0xc5, 0x16);
+
+ ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc7, 0x02);
+ ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+ } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
+ /* mode 280 */
+ DRM_DEBUG_KMS("%s: switching to 1024x768\n",
+ __FUNCTION__);
+ /*
+ * This might or might not work, actually. I'm silently
+ * assuming here that the native panel resolution is
+ * 1024x768. If not, then this leaves the scaler disabled,
+ * generating a picture that is likely not the expected one.
+ *
+ * The problem is that I do not know where to get the panel
+ * dimensions from.
+ *
+ * Enable the bypass; scaling is not required.
+ *
+ * The scaler registers are irrelevant here.
+ *
+ */
+ ns->reg_8_shadow |= NS2501_8_BPAS;
+ ok &= ns2501_writeb(dvo, 0x37, 0x44);
+ } else {
+ /*
+ * Data not known. Bummer!
+ * Hopefully, the code should not go here
+ * as mode_OK delivered no other modes.
+ */
+ ns->reg_8_shadow |= NS2501_8_BPAS;
+ }
+ ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
+
+ if (!ok) {
+ if (restore)
+ restore_dvo(dvo);
+ enable_dvo(dvo);
+ restore = true;
+ }
+ } while (!ok);
+ /*
+ * Restore the old i915 registers before
+ * forcing the ns2501 on.
+ */
+ if (restore)
+ restore_dvo(dvo);
+}
+
+/* read back the current power state of the NS2501 */
+static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
+{
+ unsigned char ch;
+
+ if (!ns2501_readb(dvo, NS2501_REG8, &ch))
+ return false;
+
+ if (ch & NS2501_8_PD)
+ return true;
+ else
+ return false;
+}
+
+/* set the NS2501 power state */
+static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ bool ok;
+ bool restore = false;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+ unsigned char ch;
+
+ DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n",
+ __FUNCTION__, enable);
+
+ ch = ns->reg_8_shadow;
+
+ if (enable)
+ ch |= NS2501_8_PD;
+ else
+ ch &= ~NS2501_8_PD;
+
+ if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
+ ns->reg_8_set = 1;
+ ns->reg_8_shadow = ch;
+
+ do {
+ ok = true;
+ ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
+ ok &=
+ ns2501_writeb(dvo, 0x34,
+ enable ? 0x03 : 0x00);
+ ok &=
+ ns2501_writeb(dvo, 0x35,
+ enable ? 0xff : 0x00);
+ if (!ok) {
+ if (restore)
+ restore_dvo(dvo);
+ enable_dvo(dvo);
+ restore = true;
+ }
+ } while (!ok);
+
+ if (restore)
+ restore_dvo(dvo);
+ }
+}
+
+static void ns2501_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ ns2501_readb(dvo, NS2501_FREQ_LO, &val);
+ DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_FREQ_HI, &val);
+ DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REG8, &val);
+ DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REG9, &val);
+ DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
+ ns2501_readb(dvo, NS2501_REGC, &val);
+ DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
+}
+
+static void ns2501_destroy(struct intel_dvo_device *dvo)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+
+ if (ns) {
+ kfree(ns);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ns2501_ops = {
+ .init = ns2501_init,
+ .detect = ns2501_detect,
+ .mode_valid = ns2501_mode_valid,
+ .mode_set = ns2501_mode_set,
+ .dpms = ns2501_dpms,
+ .get_hw_state = ns2501_get_hw_state,
+ .dump_regs = ns2501_dump_regs,
+ .destroy = ns2501_destroy,
+};
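
With every DVO chip driver now exporting get_hw_state(), the common encoder code can read back whether the output is really live instead of trusting cached software state. Roughly how a consumer might combine the port bit with the chip state; this is a sketch against the dvo.h interface above, not a hunk from this series:

/* Sketch: hardware state readout for a DVO encoder. The output is
 * considered active only if both the DVO port register and the
 * external chip report enabled; dvo_reg and dev_ops are the fields
 * intel_dvo_device already carries.
 */
static bool example_dvo_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_dvo_device *dvo)
{
	u32 tmp = I915_READ(dvo->dvo_reg);

	if (!(tmp & DVO_ENABLE))
		return false;

	return dvo->dev_ops->get_hw_state(dvo);
}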
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index a0b13a6f619d..4debd32e3e4c 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -208,7 +208,7 @@ static void sil164_mode_set(struct intel_dvo_device *dvo,
}
/* set the SIL164 power state */
-static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
+static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
{
int ret;
unsigned char ch;
@@ -217,7 +217,7 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
if (ret == false)
return;
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
ch |= SIL164_8_PD;
else
ch &= ~SIL164_8_PD;
@@ -226,6 +226,21 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
return;
}
+static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
+{
+ int ret;
+ unsigned char ch;
+
+ ret = sil164_readb(dvo, SIL164_REG8, &ch);
+ if (ret == false)
+ return false;
+
+ if (ch & SIL164_8_PD)
+ return true;
+ else
+ return false;
+}
+
static void sil164_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
@@ -258,6 +273,7 @@ struct intel_dvo_dev_ops sil164_ops = {
.mode_valid = sil164_mode_valid,
.mode_set = sil164_mode_set,
.dpms = sil164_dpms,
+ .get_hw_state = sil164_get_hw_state,
.dump_regs = sil164_dump_regs,
.destroy = sil164_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index aa2cd3ec54aa..e17f1b07e915 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -234,14 +234,14 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
}
/* set the tfp410 power state */
-static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
+static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
{
uint8_t ctl1;
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return;
- if (mode == DRM_MODE_DPMS_ON)
+ if (enable)
ctl1 |= TFP410_CTL_1_PD;
else
ctl1 &= ~TFP410_CTL_1_PD;
@@ -249,6 +249,19 @@ static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
}
+static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint8_t ctl1;
+
+ if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+ return false;
+
+ if (ctl1 & TFP410_CTL_1_PD)
+ return true;
+ else
+ return false;
+}
+
static void tfp410_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val, val2;
@@ -299,6 +312,7 @@ struct intel_dvo_dev_ops tfp410_ops = {
.mode_valid = tfp410_mode_valid,
.mode_set = tfp410_mode_set,
.dpms = tfp410_dpms,
+ .get_hw_state = tfp410_get_hw_state,
.dump_regs = tfp410_dump_regs,
.destroy = tfp410_destroy,
};
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 359f6e8b9b00..7a4b3f3aad3f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -44,7 +44,6 @@
enum {
ACTIVE_LIST,
- FLUSHING_LIST,
INACTIVE_LIST,
PINNED_LIST,
};
@@ -62,28 +61,11 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "gen: %d\n", info->gen);
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
-#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
- B(is_mobile);
- B(is_i85x);
- B(is_i915g);
- B(is_i945gm);
- B(is_g33);
- B(need_gfx_hws);
- B(is_g4x);
- B(is_pineview);
- B(is_broadwater);
- B(is_crestline);
- B(has_fbc);
- B(has_pipe_cxsr);
- B(has_hotplug);
- B(cursor_needs_physical);
- B(has_overlay);
- B(overlay_needs_physical);
- B(supports_tv);
- B(has_bsd_ring);
- B(has_blt_ring);
- B(has_llc);
-#undef B
+#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+#define DEV_INFO_SEP ;
+ DEV_INFO_FLAGS;
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
return 0;
}
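
The open-coded B(...) list is replaced by the DEV_INFO_FLAGS X-macro: the flag list lives once (in i915_drv.h) and each expansion site supplies its own DEV_INFO_FLAG and DEV_INFO_SEP, so this debugfs dump and i915_dump_device_info() in i915_dma.c below can never drift apart. A stripped-down sketch of the pattern, with an abbreviated, illustrative flag list:

/* Sketch of the X-macro pattern; the real list is much longer. */
#define DEV_INFO_FLAGS \
	DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_llc)

/* debugfs expansion: one seq_printf() per flag, ';' as separator. */
#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define DEV_INFO_SEP ;
	DEV_INFO_FLAGS;
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP

/* The dmesg dump instead expands each flag to a string argument with
 * ',' as the separator, feeding one DRM_DEBUG_DRIVER() call.
 */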
@@ -121,20 +103,23 @@ static const char *cache_level_str(int type)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
+ seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain,
- obj->last_rendering_seqno,
+ obj->last_read_seqno,
+ obj->last_write_seqno,
obj->last_fenced_seqno,
cache_level_str(obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
+ if (obj->pin_count)
+ seq_printf(m, " (pinned x %d)", obj->pin_count);
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
if (obj->gtt_space != NULL)
@@ -177,10 +162,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
seq_printf(m, "Inactive:\n");
head = &dev_priv->mm.inactive_list;
break;
- case FLUSHING_LIST:
- seq_printf(m, "Flushing:\n");
- head = &dev_priv->mm.flushing_list;
- break;
default:
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
@@ -218,8 +199,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 count, mappable_count;
- size_t size, mappable_size;
+ u32 count, mappable_count, purgeable_count;
+ size_t size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj;
int ret;
@@ -232,13 +213,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
dev_priv->mm.object_memory);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.gtt_list, gtt_list);
+ count_objects(&dev_priv->mm.bound_list, gtt_list);
seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.active_list, mm_list);
- count_objects(&dev_priv->mm.flushing_list, mm_list);
seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
@@ -247,8 +227,16 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
+ size = count = purgeable_size = purgeable_count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) {
+ size += obj->base.size, ++count;
+ if (obj->madv == I915_MADV_DONTNEED)
+ purgeable_size += obj->base.size, ++purgeable_count;
+ }
+ seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
+
size = count = mappable_size = mappable_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
if (obj->fault_mappable) {
size += obj->gtt_space->size;
++count;
@@ -257,7 +245,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
mappable_size += obj->gtt_space->size;
++mappable_count;
}
+ if (obj->madv == I915_MADV_DONTNEED) {
+ purgeable_size += obj->base.size;
+ ++purgeable_count;
+ }
}
+ seq_printf(m, "%u purgeable objects, %zu bytes\n",
+ purgeable_count, purgeable_size);
seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
mappable_count, mappable_size);
seq_printf(m, "%u fault mappable objects, %zu bytes\n",
@@ -286,7 +280,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
return ret;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
if (list == PINNED_LIST && obj->pin_count == 0)
continue;
@@ -359,40 +353,22 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
struct drm_i915_gem_request *gem_request;
- int ret, count;
+ int ret, count, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
count = 0;
- if (!list_empty(&dev_priv->ring[RCS].request_list)) {
- seq_printf(m, "Render requests:\n");
- list_for_each_entry(gem_request,
- &dev_priv->ring[RCS].request_list,
- list) {
- seq_printf(m, " %d @ %d\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies));
- }
- count++;
- }
- if (!list_empty(&dev_priv->ring[VCS].request_list)) {
- seq_printf(m, "BSD requests:\n");
- list_for_each_entry(gem_request,
- &dev_priv->ring[VCS].request_list,
- list) {
- seq_printf(m, " %d @ %d\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies));
- }
- count++;
- }
- if (!list_empty(&dev_priv->ring[BCS].request_list)) {
- seq_printf(m, "BLT requests:\n");
+ for_each_ring(ring, dev_priv, i) {
+ if (list_empty(&ring->request_list))
+ continue;
+
+ seq_printf(m, "%s requests:\n", ring->name);
list_for_each_entry(gem_request,
- &dev_priv->ring[BCS].request_list,
+ &ring->request_list,
list) {
seq_printf(m, " %d @ %d\n",
gem_request->seqno,
@@ -413,7 +389,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
{
if (ring->get_seqno) {
seq_printf(m, "Current sequence (%s): %d\n",
- ring->name, ring->get_seqno(ring));
+ ring->name, ring->get_seqno(ring, false));
}
}
@@ -422,14 +398,15 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- for (i = 0; i < I915_NUM_RINGS; i++)
- i915_ring_seqno_info(m, &dev_priv->ring[i]);
+ for_each_ring(ring, dev_priv, i)
+ i915_ring_seqno_info(m, ring);
mutex_unlock(&dev->struct_mutex);
@@ -442,6 +419,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int ret, i, pipe;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -519,13 +497,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for_each_ring(ring, dev_priv, i) {
if (IS_GEN6(dev) || IS_GEN7(dev)) {
- seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
- dev_priv->ring[i].name,
- I915_READ_IMR(&dev_priv->ring[i]));
+ seq_printf(m,
+ "Graphics Interrupt mask (%s): %08x\n",
+ ring->name, I915_READ_IMR(ring));
}
- i915_ring_seqno_info(m, &dev_priv->ring[i]);
+ i915_ring_seqno_info(m, ring);
}
mutex_unlock(&dev->struct_mutex);
@@ -548,7 +526,8 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
- seq_printf(m, "Fenced object[%2d] = ", i);
+ seq_printf(m, "Fence %d, pin count = %d, object = ",
+ i, dev_priv->fence_regs[i].pin_count);
if (obj == NULL)
seq_printf(m, "unused");
else
@@ -630,12 +609,12 @@ static void print_error_buffers(struct seq_file *m,
seq_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
+ seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
err->gtt_offset,
err->size,
err->read_domains,
err->write_domain,
- err->seqno,
+ err->rseqno, err->wseqno,
pin_flag(err->pinned),
tiling_flag(err->tiling),
dirty_flag(err->dirty),
@@ -667,10 +646,9 @@ static void i915_ring_error_state(struct seq_file *m,
seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
- if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
- seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+ if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
- }
+
if (INTEL_INFO(dev)->gen >= 4)
seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
@@ -719,11 +697,17 @@ static int i915_error_state(struct seq_file *m, void *unused)
for (i = 0; i < dev_priv->num_fence_regs; i++)
seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+ for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
+ seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]);
+
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m, "ERROR: 0x%08x\n", error->error);
seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
+ if (INTEL_INFO(dev)->gen == 7)
+ seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+
for_each_ring(ring, dev_priv, i)
i915_ring_error_state(m, dev, error, i);
@@ -799,10 +783,14 @@ i915_error_state_write(struct file *filp,
struct seq_file *m = filp->private_data;
struct i915_error_state_file_priv *error_priv = m->private;
struct drm_device *dev = error_priv->dev;
+ int ret;
DRM_DEBUG_DRIVER("Resetting error state\n");
- mutex_lock(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
i915_destroy_error_state(dev);
mutex_unlock(&dev->struct_mutex);
@@ -926,7 +914,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
- GEN6_CAGF_SHIFT) * 50);
+ GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -942,15 +930,15 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
max_freq = (rp_state_cap & 0xff0000) >> 16;
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- max_freq * 50);
+ max_freq * GT_FREQUENCY_MULTIPLIER);
max_freq = (rp_state_cap & 0xff00) >> 8;
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- max_freq * 50);
+ max_freq * GT_FREQUENCY_MULTIPLIER);
max_freq = rp_state_cap & 0xff;
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- max_freq * 50);
+ max_freq * GT_FREQUENCY_MULTIPLIER);
} else {
seq_printf(m, "no P-state info available\n");
}
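
Every bare * 50 in this file was the same hidden unit: RPS delay steps of 50 MHz on GEN6/GEN7, which the series names GT_FREQUENCY_MULTIPLIER. A trivial pair of helpers, shown only to make the conversion explicit; the driver itself just multiplies and divides inline:

/* Sketch: RPS delay steps <-> MHz. GT_FREQUENCY_MULTIPLIER is 50 on
 * these parts, so e.g. rps.max_delay == 22 reads back as 1100 MHz.
 */
static inline int rps_delay_to_mhz(u8 delay)
{
	return delay * GT_FREQUENCY_MULTIPLIER;
}

static inline u8 mhz_to_rps_delay(int mhz)
{
	return mhz / GT_FREQUENCY_MULTIPLIER;
}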
@@ -1292,7 +1280,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
- for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+ for (gpu_freq = dev_priv->rps.min_delay;
+ gpu_freq <= dev_priv->rps.max_delay;
gpu_freq++) {
I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
@@ -1303,7 +1292,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
continue;
}
ia_freq = I915_READ(GEN6_PCODE_DATA);
- seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
+ seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
}
mutex_unlock(&dev->struct_mutex);
@@ -1472,8 +1461,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
- mutex_lock(&dev->struct_mutex);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_x));
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
@@ -1520,9 +1513,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
if (INTEL_INFO(dev)->gen == 6)
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
- for (i = 0; i < I915_NUM_RINGS; i++) {
- ring = &dev_priv->ring[i];
-
+ for_each_ring(ring, dev_priv, i) {
seq_printf(m, "%s\n", ring->name);
if (INTEL_INFO(dev)->gen == 7)
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
@@ -1674,7 +1665,7 @@ i915_ring_stop_write(struct file *filp,
struct drm_device *dev = filp->private_data;
struct drm_i915_private *dev_priv = dev->dev_private;
char buf[20];
- int val = 0;
+ int val = 0, ret;
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
@@ -1689,7 +1680,10 @@ i915_ring_stop_write(struct file *filp,
DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
- mutex_lock(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
dev_priv->stop_rings = val;
mutex_unlock(&dev->struct_mutex);
@@ -1713,10 +1707,18 @@ i915_max_freq_read(struct file *filp,
struct drm_device *dev = filp->private_data;
drm_i915_private_t *dev_priv = dev->dev_private;
char buf[80];
- int len;
+ int len, ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
len = snprintf(buf, sizeof(buf),
- "max freq: %d\n", dev_priv->max_delay * 50);
+ "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
+ mutex_unlock(&dev->struct_mutex);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1733,7 +1735,10 @@ i915_max_freq_write(struct file *filp,
struct drm_device *dev = filp->private_data;
struct drm_i915_private *dev_priv = dev->dev_private;
char buf[20];
- int val = 1;
+ int val = 1, ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
@@ -1748,12 +1753,17 @@ i915_max_freq_write(struct file *filp,
DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
/*
* Turbo will still be enabled, but won't go above the set value.
*/
- dev_priv->max_delay = val / 50;
+ dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
- gen6_set_rps(dev, val / 50);
+ gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
+ mutex_unlock(&dev->struct_mutex);
return cnt;
}
@@ -1773,10 +1783,18 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
struct drm_device *dev = filp->private_data;
drm_i915_private_t *dev_priv = dev->dev_private;
char buf[80];
- int len;
+ int len, ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
len = snprintf(buf, sizeof(buf),
- "min freq: %d\n", dev_priv->min_delay * 50);
+ "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
+ mutex_unlock(&dev->struct_mutex);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1791,7 +1809,10 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
struct drm_device *dev = filp->private_data;
struct drm_i915_private *dev_priv = dev->dev_private;
char buf[20];
- int val = 1;
+ int val = 1, ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
@@ -1806,12 +1827,17 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
/*
* Turbo will still be enabled, but won't go below the set value.
*/
- dev_priv->min_delay = val / 50;
+ dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
- gen6_set_rps(dev, val / 50);
+ gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
+ mutex_unlock(&dev->struct_mutex);
return cnt;
}
@@ -1834,9 +1860,15 @@ i915_cache_sharing_read(struct file *filp,
drm_i915_private_t *dev_priv = dev->dev_private;
char buf[80];
u32 snpcr;
- int len;
+ int len, ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
- mutex_lock(&dev_priv->dev->struct_mutex);
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -1862,6 +1894,9 @@ i915_cache_sharing_write(struct file *filp,
u32 snpcr;
int val = 1;
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
return -EINVAL;
@@ -1925,16 +1960,11 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
if (INTEL_INFO(dev)->gen < 6)
return 0;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
gen6_gt_force_wake_get(dev_priv);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -1947,16 +1977,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
if (INTEL_INFO(dev)->gen < 6)
return 0;
- /*
- * It's bad that we can potentially hang userspace if struct_mutex gets
- * forever stuck. However, if we cannot acquire this lock it means that
- * almost certainly the driver has hung, is not unload-able. Therefore
- * hanging here is probably a minor inconvenience not to be seen my
- * almost every user.
- */
- mutex_lock(&dev->struct_mutex);
gen6_gt_force_wake_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -2006,7 +2027,6 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
- {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
@@ -2067,6 +2087,7 @@ int i915_debugfs_init(struct drm_minor *minor)
&i915_cache_sharing_fops);
if (ret)
return ret;
+
ret = i915_debugfs_create(minor->debugfs_root, minor,
"i915_ring_stop",
&i915_ring_stop_fops);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 914c0dfabe60..ffbc9156c792 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -235,10 +235,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
}
- dev_priv->cpp = init->cpp;
- dev_priv->back_offset = init->back_offset;
- dev_priv->front_offset = init->front_offset;
- dev_priv->current_page = 0;
+ dev_priv->dri1.cpp = init->cpp;
+ dev_priv->dri1.back_offset = init->back_offset;
+ dev_priv->dri1.front_offset = init->front_offset;
+ dev_priv->dri1.current_page = 0;
if (master_priv->sarea_priv)
master_priv->sarea_priv->pf_current_page = 0;
@@ -575,7 +575,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
__func__,
- dev_priv->current_page,
+ dev_priv->dri1.current_page,
master_priv->sarea_priv->pf_current_page);
i915_kernel_lost_context(dev);
@@ -589,12 +589,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
OUT_RING(0);
- if (dev_priv->current_page == 0) {
- OUT_RING(dev_priv->back_offset);
- dev_priv->current_page = 1;
+ if (dev_priv->dri1.current_page == 0) {
+ OUT_RING(dev_priv->dri1.back_offset);
+ dev_priv->dri1.current_page = 1;
} else {
- OUT_RING(dev_priv->front_offset);
- dev_priv->current_page = 0;
+ OUT_RING(dev_priv->dri1.front_offset);
+ dev_priv->dri1.current_page = 0;
}
OUT_RING(0);
@@ -613,7 +613,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
ADVANCE_LP_RING();
}
- master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+ master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
return 0;
}
@@ -1009,6 +1009,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_WAIT_TIMEOUT:
value = 1;
break;
+ case I915_PARAM_HAS_SEMAPHORES:
+ value = i915_semaphore_is_enabled(dev);
+ break;
+ case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1425,6 +1431,21 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
kfree(ap);
}
+static void i915_dump_device_info(struct drm_i915_private *dev_priv)
+{
+ const struct intel_device_info *info = dev_priv->info;
+
+#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
+#define DEV_INFO_SEP ,
+ DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ info->gen,
+ dev_priv->dev->pdev->device,
+ DEV_INFO_FLAGS);
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
+}
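A quick note on the X-macro pattern used here: DEV_INFO_FLAG and DEV_INFO_SEP are defined immediately before DEV_INFO_FLAGS is expanded, then undefined again. A sketch of what the preprocessor produces (abbreviated, two flags shown):

    /* DEV_INFO_FLAGS expands under the definitions above to roughly:
     *   info->is_mobile ? "is_mobile," : "" ,
     *   info->is_i85x   ? "is_i85x,"   : "" ,
     *   ...
     * i.e. one string argument per feature flag, matching the run of
     * "%s" conversions in the DRM_DEBUG_DRIVER format string. */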
+
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
@@ -1440,7 +1461,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
struct intel_device_info *info;
- int ret = 0, mmio_bar;
+ int ret = 0, mmio_bar, mmio_size;
uint32_t aperture_size;
info = (struct intel_device_info *) flags;
@@ -1449,7 +1470,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
-
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@@ -1465,6 +1485,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->dev = dev;
dev_priv->info = info;
+ i915_dump_device_info(dev_priv);
+
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
@@ -1504,7 +1526,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
mmio_bar = IS_GEN2(dev) ? 1 : 0;
- dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
+ /* Before gen4, the registers and the GTT are behind different BARs.
+ * However, from gen4 onwards, the registers and the GTT are shared
+ * in the same BAR, so we want to restrict this ioremap from
+ * clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately,
+ * the register BAR remains the same size for all the earlier
+ * generations up to Ironlake.
+ */
+ if (info->gen < 5)
+ mmio_size = 512*1024;
+ else
+ mmio_size = 2*1024*1024;
+
+ dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
@@ -1536,11 +1570,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
*
* All tasks on the workqueue are expected to acquire the dev mutex
* so there is no point in running more than one instance of the
- * workqueue at any time: max_active = 1 and NON_REENTRANT.
+ * workqueue at any time. Use an ordered one.
*/
- dev_priv->wq = alloc_workqueue("i915",
- WQ_UNBOUND | WQ_NON_REENTRANT,
- 1);
+ dev_priv->wq = alloc_ordered_workqueue("i915", 0);
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
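For context on the replacement API (hedged; the exact expansion varies by kernel version), alloc_ordered_workqueue() is defined in workqueue.h as roughly:

    #define alloc_ordered_workqueue(fmt, flags, args...) \
            alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)

so the new call keeps the old unbound, max_active == 1 behaviour while guaranteeing that work items execute one at a time in queueing order.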
@@ -1586,7 +1618,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
- spin_lock_init(&dev_priv->rps_lock);
+ spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->dpio_lock);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
@@ -1836,6 +1868,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -1858,6 +1892,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
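To show how the new ioctl table entries above are reached from userspace, here is a sketch of an I915_REG_READ call through libdrm. The request layout (64-bit offset in, 64-bit value out) follows the uapi addition in this series; the helper name and register choice are illustrative only:

    #include <errno.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    /* Sketch: read the render ring timestamp via the register whitelist. */
    static int read_render_timestamp(int fd, uint64_t *value)
    {
            struct drm_i915_reg_read req = {
                    .offset = 0x2358, /* RING_TIMESTAMP(RENDER_RING_BASE) */
            };

            if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &req))
                    return -errno; /* EINVAL if the register is not whitelisted */

            *value = req.val;
            return 0;
    }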
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a24ffbe97c01..a7837e556945 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -470,6 +470,9 @@ static int i915_drm_freeze(struct drm_device *dev)
"GEM idle failed, resume might fail\n");
return error;
}
+
+ intel_modeset_disable(dev);
+
drm_irq_uninstall(dev);
}
@@ -543,13 +546,9 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
intel_modeset_init_hw(dev);
+ intel_modeset_setup_hw_state(dev);
drm_mode_config_reset(dev);
drm_irq_install(dev);
-
- /* Resume the modeset for every activated CRTC */
- mutex_lock(&dev->mode_config.mutex);
- drm_helper_resume_force_mode(dev);
- mutex_unlock(&dev->mode_config.mutex);
}
intel_opregion_init(dev);
@@ -1060,7 +1059,7 @@ static bool IS_DISPLAYREG(u32 reg)
* This should make it easier to transition modules over to the
* new register block scheme, since we can do it incrementally.
*/
- if (reg >= 0x180000)
+ if (reg >= VLV_DISPLAY_BASE)
return false;
if (reg >= RENDER_RING_BASE &&
@@ -1174,9 +1173,59 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
+ if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
+ DRM_ERROR("Unclaimed write to %x\n", reg); \
+ writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT); \
+ } \
}
__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write
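For orientation, the __i915_write(x, y) macro family instantiated above stamps out one MMIO write helper per access width. A rough sketch of what __i915_write(32, l) produces:

    /* Approximately: */
    void i915_write32(struct drm_i915_private *dev_priv, u32 reg, u32 val)
    {
            /* gen6 forcewake/FIFO bookkeeping elided */
            writel(val, dev_priv->regs + reg);
            /* on Haswell, the new check above reads GEN7_ERR_INT and clears
             * ERR_INT_MMIO_UNCLAIMED if the write went to an unclaimed range */
    }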
+
+static const struct register_whitelist {
+ uint64_t offset;
+ uint32_t size;
+ uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+} whitelist[] = {
+ { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+};
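The gen_bitmask encoding sets bit N for generation N, so the 0xF0 above covers gens 4 through 7. A hypothetical helper (not part of this patch) makes the encoding explicit:

    /* Hypothetical: bitmask covering generations 'first'..'last' inclusive. */
    #define GEN_BITMASK(first, last) \
            (((1u << ((last) + 1)) - 1) & ~((1u << (first)) - 1))

    /* GEN_BITMASK(4, 7) == 0xF0, matching the RING_TIMESTAMP entry above. */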
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_reg_read *reg = data;
+ struct register_whitelist const *entry = whitelist;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+ if (entry->offset == reg->offset &&
+ (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(whitelist))
+ return -EINVAL;
+
+ switch (entry->size) {
+ case 8:
+ reg->val = I915_READ64(reg->offset);
+ break;
+ case 4:
+ reg->val = I915_READ(reg->offset);
+ break;
+ case 2:
+ reg->val = I915_READ16(reg->offset);
+ break;
+ case 1:
+ reg->val = I915_READ8(reg->offset);
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 627fe35781b4..4f2831aa5fed 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -109,6 +109,7 @@ struct intel_pch_pll {
#define WATCH_COHERENCY 0
#define WATCH_LISTS 0
+#define WATCH_GTT 0
#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
@@ -195,9 +196,10 @@ struct drm_i915_error_state {
u32 cpu_ring_head[I915_NUM_RINGS];
u32 cpu_ring_tail[I915_NUM_RINGS];
u32 error; /* gen6+ */
+ u32 err_int; /* gen7 */
u32 instpm[I915_NUM_RINGS];
u32 instps[I915_NUM_RINGS];
- u32 instdone1;
+ u32 extra_instdone[I915_NUM_INSTDONE_REG];
u32 seqno[I915_NUM_RINGS];
u64 bbaddr;
u32 fault_reg[I915_NUM_RINGS];
@@ -221,7 +223,7 @@ struct drm_i915_error_state {
struct drm_i915_error_buffer {
u32 size;
u32 name;
- u32 seqno;
+ u32 rseqno, wseqno;
u32 gtt_offset;
u32 read_domains;
u32 write_domain;
@@ -239,7 +241,6 @@ struct drm_i915_error_state {
};
struct drm_i915_display_funcs {
- void (*dpms)(struct drm_crtc *crtc, int mode);
bool (*fbc_enabled)(struct drm_device *dev);
void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
void (*disable_fbc)(struct drm_device *dev);
@@ -248,7 +249,6 @@ struct drm_i915_display_funcs {
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size);
- void (*sanitize_pm)(struct drm_device *dev);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
int (*crtc_mode_set)(struct drm_crtc *crtc,
@@ -256,6 +256,8 @@ struct drm_i915_display_funcs {
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
+ void (*crtc_enable)(struct drm_crtc *crtc);
+ void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
struct drm_crtc *crtc);
@@ -279,6 +281,32 @@ struct drm_i915_gt_funcs {
void (*force_wake_put)(struct drm_i915_private *dev_priv);
};
+#define DEV_INFO_FLAGS \
+ DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
+ DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
+ DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
+ DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
+ DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
+ DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
+ DEV_INFO_FLAG(has_llc)
+
struct intel_device_info {
u8 gen;
u8 is_mobile:1;
@@ -402,12 +430,6 @@ typedef struct drm_i915_private {
struct resource mch_res;
- unsigned int cpp;
- int back_offset;
- int front_offset;
- int current_page;
- int page_flipping;
-
atomic_t irq_received;
/* protects the irq masks */
@@ -425,7 +447,6 @@ typedef struct drm_i915_private {
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
- unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int num_pipe;
int num_pch_pll;
@@ -434,8 +455,7 @@ typedef struct drm_i915_private {
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
- uint32_t last_instdone;
- uint32_t last_instdone1;
+ uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
unsigned int stop_rings;
@@ -666,7 +686,13 @@ typedef struct drm_i915_private {
struct drm_mm gtt_space;
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
- struct list_head gtt_list;
+ struct list_head bound_list;
+ /**
+ * List of objects which are not bound to the GTT (thus
+ * are idle and not used by the GPU) but still have
+ * (presumably uncached) pages attached.
+ */
+ struct list_head unbound_list;
/** Usable portion of the GTT for GEM */
unsigned long gtt_start;
@@ -696,17 +722,6 @@ typedef struct drm_i915_private {
struct list_head active_list;
/**
- * List of objects which are not in the ringbuffer but which
- * still have a write_domain which needs to be flushed before
- * unbinding.
- *
- * last_rendering_seqno is 0 while an object is in this list.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head flushing_list;
-
- /**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
@@ -775,6 +790,12 @@ typedef struct drm_i915_private {
struct {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
+
+ unsigned int cpp;
+ int back_offset;
+ int front_offset;
+ int current_page;
+ int page_flipping;
} dri1;
/* Kernel Modesetting */
@@ -796,9 +817,6 @@ typedef struct drm_i915_private {
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
- struct work_struct idle_work;
- struct timer_list idle_timer;
- bool busy;
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
@@ -807,26 +825,41 @@ typedef struct drm_i915_private {
bool mchbar_need_disable;
- struct work_struct rps_work;
- spinlock_t rps_lock;
- u32 pm_iir;
-
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- u8 fmax;
- u8 fstart;
-
- u64 last_count1;
- unsigned long last_time1;
- unsigned long chipset_power;
- u64 last_count2;
- struct timespec last_time2;
- unsigned long gfx_power;
- int c_m;
- int r_t;
- u8 corr;
- spinlock_t *mchdev_lock;
+ /* gen6+ rps state */
+ struct {
+ struct work_struct work;
+ u32 pm_iir;
+ /* lock - irqsave spinlock that protects the work_struct and
+ * pm_iir. */
+ spinlock_t lock;
+
+ /* The below variables and all the rps hw state are protected by
+ * dev->struct_mutex. */
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ } rps;
+
+ /* ilk-only ips/rps state. Everything in here is protected by the global
+ * mchdev_lock in intel_pm.c */
+ struct {
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c_m;
+ int r_t;
+ } ips;
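A sketch of the locking split this reorganisation encodes, inferred from the comments above: interrupt context touches only rps.pm_iir and rps.work under rps.lock, while the delay fields are adjusted later under struct_mutex:

    /* In the interrupt handler (sketch): */
    spin_lock_irqsave(&dev_priv->rps.lock, flags);
    dev_priv->rps.pm_iir |= pm_iir;
    queue_work(dev_priv->wq, &dev_priv->rps.work);
    spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

    /* In rps.work (sketch): re-read pm_iir under rps.lock, then move
     * cur_delay within [min_delay, max_delay] under dev->struct_mutex. */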
enum no_fbc_reason no_fbc_reason;
@@ -861,30 +894,48 @@ enum hdmi_force_audio {
};
enum i915_cache_level {
- I915_CACHE_NONE,
+ I915_CACHE_NONE = 0,
I915_CACHE_LLC,
- I915_CACHE_LLC_MLC, /* gen6+ */
+ I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
+struct drm_i915_gem_object_ops {
+ /* Interface between the GEM object and its backing storage.
+ * get_pages() is called once prior to the use of the associated set
+ * of pages before to binding them into the GTT, and put_pages() is
+ * called after we no longer need them. As we expect there to be
+ * associated cost with migrating pages between the backing storage
+ * and making them available for the GPU (e.g. clflush), we may hold
+ * onto the pages after they are no longer referenced by the GPU
+ * in case they may be used again shortly (for example migrating the
+ * pages to a different memory domain within the GTT). put_pages()
+ * will therefore most likely be called when the object itself is
+ * being released or under memory pressure (where we attempt to
+ * reap pages for the shrinker).
+ */
+ int (*get_pages)(struct drm_i915_gem_object *);
+ void (*put_pages)(struct drm_i915_gem_object *);
};
struct drm_i915_gem_object {
struct drm_gem_object base;
+ const struct drm_i915_gem_object_ops *ops;
+
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
struct list_head gtt_list;
- /** This object's place on the active/flushing/inactive lists */
+ /** This object's place on the active/inactive lists */
struct list_head ring_list;
struct list_head mm_list;
- /** This object's place on GPU write list */
- struct list_head gpu_write_list;
/** This object's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
/**
- * This is set if the object is on the active or flushing lists
- * (has pending rendering), and is not set if it's on inactive (ready
- * to be unbound).
+ * This is set if the object is on the active lists (has pending
+ * rendering and so a non-zero seqno), and is not set if it is on
+ * the inactive (ready to be unbound) list.
*/
unsigned int active:1;
@@ -895,12 +946,6 @@ struct drm_i915_gem_object {
unsigned int dirty:1;
/**
- * This is set if the object has been written to since the last
- * GPU flush.
- */
- unsigned int pending_gpu_write:1;
-
- /**
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
* Protected by dev->struct_mutex.
@@ -961,17 +1006,12 @@ struct drm_i915_gem_object {
unsigned int has_aliasing_ppgtt_mapping:1;
unsigned int has_global_gtt_mapping:1;
+ unsigned int has_dma_mapping:1;
- struct page **pages;
-
- /**
- * DMAR support
- */
- struct scatterlist *sg_list;
- int num_sg;
+ struct sg_table *pages;
+ int pages_pin_count;
/* prime dma-buf support */
- struct sg_table *sg_table;
void *dma_buf_vmapping;
int vmapping_count;
@@ -992,7 +1032,8 @@ struct drm_i915_gem_object {
struct intel_ring_buffer *ring;
/** Breadcrumb of last rendering to the buffer. */
- uint32_t last_rendering_seqno;
+ uint32_t last_read_seqno;
+ uint32_t last_write_seqno;
/** Breadcrumb of last fenced GPU access to the buffer. */
uint32_t last_fenced_seqno;
@@ -1135,6 +1176,10 @@ struct drm_i915_file_private {
#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
+#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+
+#define GT_FREQUENCY_MULTIPLIER 50
+
#include "i915_trace.h"
/**
@@ -1256,6 +1301,10 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
@@ -1274,24 +1323,42 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
- uint32_t invalidate_domains,
- uint32_t flush_domains);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
- bool map_and_fenceable);
+ bool map_and_fenceable,
+ bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
-int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
- gfp_t gfpmask);
+int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+{
+ struct scatterlist *sg = obj->pages->sgl;
+ while (n >= SG_MAX_SINGLE_ALLOC) {
+ sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
+ n -= SG_MAX_SINGLE_ALLOC - 1;
+ }
+ return sg_page(sg+n);
+}
+static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+ BUG_ON(obj->pages == NULL);
+ obj->pages_pin_count++;
+}
+static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+ BUG_ON(obj->pages_pin_count == 0);
+ obj->pages_pin_count--;
+}
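The intended calling pattern for these helpers, mirroring the pread/pwrite paths later in this diff (fragment, error handling abbreviated):

    /* Sketch: keep the backing pages resident while touching them. */
    ret = i915_gem_object_get_pages(obj);
    if (ret)
            return ret;
    i915_gem_object_pin_pages(obj);
    /* ... e.g. struct page *page = i915_gem_object_get_page(obj, n); ... */
    i915_gem_object_unpin_pages(obj);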
+
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
@@ -1358,9 +1425,9 @@ void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
-int __must_check i915_add_request(struct intel_ring_buffer *ring,
- struct drm_file *file,
- struct drm_i915_gem_request *request);
+int i915_add_request(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ struct drm_i915_gem_request *request);
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -1429,8 +1496,11 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
- unsigned alignment, bool mappable);
-int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
+ unsigned alignment,
+ unsigned cache_level,
+ bool mappable,
+ bool nonblock);
+int i915_gem_evict_everything(struct drm_device *dev);
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
@@ -1519,6 +1589,7 @@ extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -1529,6 +1600,8 @@ extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+int i915_reg_read_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
/* overlay */
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 274d25de521e..365a7dc8a4a8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -37,12 +37,12 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
-static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
- bool map_and_fenceable);
+ bool map_and_fenceable,
+ bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -56,6 +56,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
static int i915_gem_inactive_shrink(struct shrinker *shrinker,
struct shrink_control *sc);
+static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
@@ -141,7 +143,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return !obj->active;
+ return obj->gtt_space && !obj->active;
}
int
@@ -180,7 +182,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned = 0;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
if (obj->pin_count)
pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
@@ -341,7 +343,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
page_length);
kunmap_atomic(vaddr);
- return ret;
+ return ret ? -EFAULT : 0;
}
static void
@@ -392,7 +394,7 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
page_length);
kunmap(page);
- return ret;
+ return ret ? -EFAULT : 0;
}
static int
@@ -401,7 +403,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
struct drm_i915_gem_pread *args,
struct drm_file *file)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
char __user *user_data;
ssize_t remain;
loff_t offset;
@@ -410,7 +411,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
int hit_slowpath = 0;
int prefaulted = 0;
int needs_clflush = 0;
- int release_page;
+ struct scatterlist *sg;
+ int i;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -424,16 +426,30 @@ i915_gem_shmem_pread(struct drm_device *dev,
* anyway again before the next pread happens. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush = 1;
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- if (ret)
- return ret;
+ if (obj->gtt_space) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret)
+ return ret;
+ }
}
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
+
offset = args->offset;
- while (remain > 0) {
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
struct page *page;
+ if (i < offset >> PAGE_SHIFT)
+ continue;
+
+ if (remain <= 0)
+ break;
+
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
@@ -444,18 +460,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- if (obj->pages) {
- page = obj->pages[offset >> PAGE_SHIFT];
- release_page = 0;
- } else {
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
- }
- release_page = 1;
- }
-
+ page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
@@ -466,7 +471,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
goto next_page;
hit_slowpath = 1;
- page_cache_get(page);
mutex_unlock(&dev->struct_mutex);
if (!prefaulted) {
@@ -484,16 +488,12 @@ i915_gem_shmem_pread(struct drm_device *dev,
needs_clflush);
mutex_lock(&dev->struct_mutex);
- page_cache_release(page);
+
next_page:
mark_page_accessed(page);
- if (release_page)
- page_cache_release(page);
- if (ret) {
- ret = -EFAULT;
+ if (ret)
goto out;
- }
remain -= page_length;
user_data += page_length;
@@ -501,6 +501,8 @@ next_page:
}
out:
+ i915_gem_object_unpin_pages(obj);
+
if (hit_slowpath) {
/* Fixup: Kill any reinstated backing storage pages */
if (obj->madv == __I915_MADV_PURGED)
@@ -606,7 +608,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
char __user *user_data;
int page_offset, page_length, ret;
- ret = i915_gem_object_pin(obj, 0, true);
+ ret = i915_gem_object_pin(obj, 0, true, true);
if (ret)
goto out;
@@ -686,7 +688,7 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
page_length);
kunmap_atomic(vaddr);
- return ret;
+ return ret ? -EFAULT : 0;
}
/* Only difference to the fast-path function is that this can handle bit17
@@ -720,7 +722,7 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
page_do_bit17_swizzling);
kunmap(page);
- return ret;
+ return ret ? -EFAULT : 0;
}
static int
@@ -729,7 +731,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
loff_t offset;
char __user *user_data;
@@ -738,7 +739,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
int hit_slowpath = 0;
int needs_clflush_after = 0;
int needs_clflush_before = 0;
- int release_page;
+ int i;
+ struct scatterlist *sg;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -752,9 +754,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
* right away and we therefore have to clflush anyway. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush_after = 1;
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- return ret;
+ if (obj->gtt_space) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+ }
}
/* Same trick applies for invalidate partially written cachelines before
* writing. */
@@ -762,13 +766,25 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
&& obj->cache_level == I915_CACHE_NONE)
needs_clflush_before = 1;
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
+
offset = args->offset;
obj->dirty = 1;
- while (remain > 0) {
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
struct page *page;
int partial_cacheline_write;
+ if (i < offset >> PAGE_SHIFT)
+ continue;
+
+ if (remain <= 0)
+ break;
+
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
@@ -787,18 +803,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
((shmem_page_offset | page_length)
& (boot_cpu_data.x86_clflush_size - 1));
- if (obj->pages) {
- page = obj->pages[offset >> PAGE_SHIFT];
- release_page = 0;
- } else {
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
- }
- release_page = 1;
- }
-
+ page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
@@ -810,26 +815,20 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
goto next_page;
hit_slowpath = 1;
- page_cache_get(page);
mutex_unlock(&dev->struct_mutex);
-
ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
user_data, page_do_bit17_swizzling,
partial_cacheline_write,
needs_clflush_after);
mutex_lock(&dev->struct_mutex);
- page_cache_release(page);
+
next_page:
set_page_dirty(page);
mark_page_accessed(page);
- if (release_page)
- page_cache_release(page);
- if (ret) {
- ret = -EFAULT;
+ if (ret)
goto out;
- }
remain -= page_length;
user_data += page_length;
@@ -837,6 +836,8 @@ next_page:
}
out:
+ i915_gem_object_unpin_pages(obj);
+
if (hit_slowpath) {
/* Fixup: Kill any reinstated backing storage pages */
if (obj->madv == __I915_MADV_PURGED)
@@ -920,10 +921,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
- if (obj->gtt_space &&
- obj->cache_level == I915_CACHE_NONE &&
+ if (obj->cache_level == I915_CACHE_NONE &&
obj->tiling_mode == I915_TILING_NONE &&
- obj->map_and_fenceable &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
/* Note that the gtt paths might fail with non-page-backed user
@@ -931,7 +930,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* textures). Fallback to the shmem path in that case. */
}
- if (ret == -EFAULT)
+ if (ret == -EFAULT || ret == -ENOSPC)
ret = i915_gem_shmem_pwrite(dev, obj, args, file);
out:
@@ -941,6 +940,240 @@ unlock:
return ret;
}
+int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+ bool interruptible)
+{
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ struct completion *x = &dev_priv->error_completion;
+ bool recovery_complete;
+ unsigned long flags;
+
+ /* Give the error handler a chance to run. */
+ spin_lock_irqsave(&x->wait.lock, flags);
+ recovery_complete = x->done > 0;
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+
+ /* Non-interruptible callers can't handle -EAGAIN, hence return
+ * -EIO unconditionally for these. */
+ if (!interruptible)
+ return -EIO;
+
+ /* Recovery complete, but still wedged means reset failure. */
+ if (recovery_complete)
+ return -EIO;
+
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/*
+ * Compare seqno against outstanding lazy request. Emit a request if they are
+ * equal.
+ */
+static int
+i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+{
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+ ret = 0;
+ if (seqno == ring->outstanding_lazy_request)
+ ret = i915_add_request(ring, NULL, NULL);
+
+ return ret;
+}
+
+/**
+ * __wait_seqno - wait until execution of seqno has finished
+ * @ring: the ring expected to report seqno
+ * @seqno: the sequence number we are waiting for
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ *
+ * Returns 0 if the seqno was found within the allotted time. Else returns the
+ * errno, with the remaining time filled in via the timeout argument.
+ */
+static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+ bool interruptible, struct timespec *timeout)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct timespec before, now, wait_time={1,0};
+ unsigned long timeout_jiffies;
+ long end;
+ bool wait_forever = true;
+ int ret;
+
+ if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+ return 0;
+
+ trace_i915_gem_request_wait_begin(ring, seqno);
+
+ if (timeout != NULL) {
+ wait_time = *timeout;
+ wait_forever = false;
+ }
+
+ timeout_jiffies = timespec_to_jiffies(&wait_time);
+
+ if (WARN_ON(!ring->irq_get(ring)))
+ return -ENODEV;
+
+ /* Record current time in case interrupted by signal, or wedged */
+ getrawmonotonic(&before);
+
+#define EXIT_COND \
+ (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
+ atomic_read(&dev_priv->mm.wedged))
+ do {
+ if (interruptible)
+ end = wait_event_interruptible_timeout(ring->irq_queue,
+ EXIT_COND,
+ timeout_jiffies);
+ else
+ end = wait_event_timeout(ring->irq_queue, EXIT_COND,
+ timeout_jiffies);
+
+ ret = i915_gem_check_wedge(dev_priv, interruptible);
+ if (ret)
+ end = ret;
+ } while (end == 0 && wait_forever);
+
+ getrawmonotonic(&now);
+
+ ring->irq_put(ring);
+ trace_i915_gem_request_wait_end(ring, seqno);
+#undef EXIT_COND
+
+ if (timeout) {
+ struct timespec sleep_time = timespec_sub(now, before);
+ *timeout = timespec_sub(*timeout, sleep_time);
+ }
+
+ switch (end) {
+ case -EIO:
+ case -EAGAIN: /* Wedged */
+ case -ERESTARTSYS: /* Signal */
+ return (int)end;
+ case 0: /* Timeout */
+ if (timeout)
+ set_normalized_timespec(timeout, 0, 0);
+ return -ETIME;
+ default: /* Completed */
+ WARN_ON(end < 0); /* We're not aware of other errors */
+ return 0;
+ }
+}
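A usage sketch for the timeout plumbing (assumed caller, modelled on i915_gem_wait_ioctl): the timespec is an input budget on entry and holds the unused remainder on return:

    struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

    ret = __wait_seqno(ring, seqno, true, &ts);
    if (ret == -ETIME)
            ; /* timed out: ts was normalized to {0, 0} */
    else if (ret == 0)
            ; /* completed: ts holds the time remaining */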
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool interruptible = dev_priv->mm.interruptible;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(seqno == 0);
+
+ ret = i915_gem_check_wedge(dev_priv, interruptible);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(ring, seqno);
+ if (ret)
+ return ret;
+
+ return __wait_seqno(ring, seqno, interruptible, NULL);
+}
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool readonly)
+{
+ struct intel_ring_buffer *ring = obj->ring;
+ u32 seqno;
+ int ret;
+
+ seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+ if (seqno == 0)
+ return 0;
+
+ ret = i915_wait_seqno(ring, seqno);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests_ring(ring);
+
+ /* Manually manage the write flush as we may have not yet
+ * retired the buffer.
+ */
+ if (obj->last_write_seqno &&
+ i915_seqno_passed(seqno, obj->last_write_seqno)) {
+ obj->last_write_seqno = 0;
+ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+ }
+
+ return 0;
+}
+
+/* A nonblocking variant of the above wait. This is a highly dangerous routine
+ * as the object state may change during this call.
+ */
+static __must_check int
+i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+ bool readonly)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = obj->ring;
+ u32 seqno;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(!dev_priv->mm.interruptible);
+
+ seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+ if (seqno == 0)
+ return 0;
+
+ ret = i915_gem_check_wedge(dev_priv, true);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(ring, seqno);
+ if (ret)
+ return ret;
+
+ mutex_unlock(&dev->struct_mutex);
+ ret = __wait_seqno(ring, seqno, true, NULL);
+ mutex_lock(&dev->struct_mutex);
+
+ i915_gem_retire_requests_ring(ring);
+
+ /* Manually manage the write flush as we may have not yet
+ * retired the buffer.
+ */
+ if (obj->last_write_seqno &&
+ i915_seqno_passed(seqno, obj->last_write_seqno)) {
+ obj->last_write_seqno = 0;
+ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+ }
+
+ return ret;
+}
+
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
@@ -978,6 +1211,14 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
goto unlock;
}
+ /* Try to flush the object off the GPU without holding the lock.
+ * We will repeat the flush holding the lock in the normal manner
+ * to catch cases where we are gazumped.
+ */
+ ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+ if (ret)
+ goto unref;
+
if (read_domains & I915_GEM_DOMAIN_GTT) {
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
@@ -991,6 +1232,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
+unref:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
@@ -1110,7 +1352,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock;
}
if (!obj->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, 0, true);
+ ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
if (ret)
goto unlock;
@@ -1271,6 +1513,42 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
+static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int ret;
+
+ if (obj->base.map_list.map)
+ return 0;
+
+ ret = drm_gem_create_mmap_offset(&obj->base);
+ if (ret != -ENOSPC)
+ return ret;
+
+ /* Badly fragmented mmap space? The only way we can recover
+ * space is by destroying unwanted objects. We can't randomly release
+ * mmap_offsets as userspace expects them to be persistent for the
+ * lifetime of the objects. The closest we can do is to release the
+ * offsets on purgeable objects by truncating them and marking them purged,
+ * which prevents userspace from ever using that object again.
+ */
+ i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+ ret = drm_gem_create_mmap_offset(&obj->base);
+ if (ret != -ENOSPC)
+ return ret;
+
+ i915_gem_shrink_all(dev_priv);
+ return drm_gem_create_mmap_offset(&obj->base);
+}
+
+static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ if (!obj->base.map_list.map)
+ return;
+
+ drm_gem_free_mmap_offset(&obj->base);
+}
+
int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
@@ -1302,11 +1580,9 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto out;
}
- if (!obj->base.map_list.map) {
- ret = drm_gem_create_mmap_offset(&obj->base);
- if (ret)
- goto out;
- }
+ ret = i915_gem_object_create_mmap_offset(obj);
+ if (ret)
+ goto out;
*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
@@ -1341,83 +1617,245 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
-int
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
- gfp_t gfpmask)
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
- int page_count, i;
- struct address_space *mapping;
struct inode *inode;
- struct page *page;
- if (obj->pages || obj->sg_table)
- return 0;
+ i915_gem_object_free_mmap_offset(obj);
- /* Get the list of pages out of our struct file. They'll be pinned
- * at this point until we release them.
- */
- page_count = obj->base.size / PAGE_SIZE;
- BUG_ON(obj->pages != NULL);
- obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
- if (obj->pages == NULL)
- return -ENOMEM;
+ if (obj->base.filp == NULL)
+ return;
+ /* Our goal here is to return as much of the memory as
+ * possible back to the system, as we are called from OOM.
+ * To do this we must instruct the shmfs to drop all of its
+ * backing pages, *now*.
+ */
inode = obj->base.filp->f_path.dentry->d_inode;
- mapping = inode->i_mapping;
- gfpmask |= mapping_gfp_mask(mapping);
-
- for (i = 0; i < page_count; i++) {
- page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
- if (IS_ERR(page))
- goto err_pages;
-
- obj->pages[i] = page;
- }
-
- if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_do_bit_17_swizzle(obj);
-
- return 0;
+ shmem_truncate_range(inode, 0, (loff_t)-1);
-err_pages:
- while (i--)
- page_cache_release(obj->pages[i]);
+ obj->madv = __I915_MADV_PURGED;
+}
- drm_free_large(obj->pages);
- obj->pages = NULL;
- return PTR_ERR(page);
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+{
+ return obj->madv == I915_MADV_DONTNEED;
}
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
int page_count = obj->base.size / PAGE_SIZE;
- int i;
-
- if (!obj->pages)
- return;
+ struct scatterlist *sg;
+ int ret, i;
BUG_ON(obj->madv == __I915_MADV_PURGED);
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret) {
+ /* In the event of a disaster, abandon all caches and
+ * hope for the best.
+ */
+ WARN_ON(ret != -EIO);
+ i915_gem_clflush_object(obj);
+ obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj);
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
- for (i = 0; i < page_count; i++) {
+ for_each_sg(obj->pages->sgl, sg, page_count, i) {
+ struct page *page = sg_page(sg);
+
if (obj->dirty)
- set_page_dirty(obj->pages[i]);
+ set_page_dirty(page);
if (obj->madv == I915_MADV_WILLNEED)
- mark_page_accessed(obj->pages[i]);
+ mark_page_accessed(page);
- page_cache_release(obj->pages[i]);
+ page_cache_release(page);
}
obj->dirty = 0;
- drm_free_large(obj->pages);
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+}
+
+static int
+i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+ const struct drm_i915_gem_object_ops *ops = obj->ops;
+
+ if (obj->pages == NULL)
+ return 0;
+
+ BUG_ON(obj->gtt_space);
+
+ if (obj->pages_pin_count)
+ return -EBUSY;
+
+ ops->put_pages(obj);
obj->pages = NULL;
+
+ list_del(&obj->gtt_list);
+ if (i915_gem_object_is_purgeable(obj))
+ i915_gem_object_truncate(obj);
+
+ return 0;
+}
+
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+ struct drm_i915_gem_object *obj, *next;
+ long count = 0;
+
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.unbound_list,
+ gtt_list) {
+ if (i915_gem_object_is_purgeable(obj) &&
+ i915_gem_object_put_pages(obj) == 0) {
+ count += obj->base.size >> PAGE_SHIFT;
+ if (count >= target)
+ return count;
+ }
+ }
+
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list,
+ mm_list) {
+ if (i915_gem_object_is_purgeable(obj) &&
+ i915_gem_object_unbind(obj) == 0 &&
+ i915_gem_object_put_pages(obj) == 0) {
+ count += obj->base.size >> PAGE_SHIFT;
+ if (count >= target)
+ return count;
+ }
+ }
+
+ return count;
+}
+
+static void
+i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj, *next;
+
+ i915_gem_evict_everything(dev_priv->dev);
+
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+ i915_gem_object_put_pages(obj);
+}
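Read together, the callers in this file use these routines as an escalating reclaim ladder (an observation about the code above, not a documented contract):

    /* 1. i915_gem_purge(dev_priv, nr)  - drop pages of purgeable objects,
     *    unbound first, then inactive, until 'nr' pages are reclaimed.
     * 2. i915_gem_shrink_all(dev_priv) - evict everything from the GTT and
     *    drop the backing pages of all remaining unbound objects. */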
+
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int page_count, i;
+ struct address_space *mapping;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ struct page *page;
+ gfp_t gfp;
+
+ /* Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+ * a GPU cache.
+ */
+ BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return -ENOMEM;
+
+ page_count = obj->base.size / PAGE_SIZE;
+ if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+ sg_free_table(st);
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ /* Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ *
+ * Fail silently without starting the shrinker
+ */
+ mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ gfp = mapping_gfp_mask(mapping);
+ gfp |= __GFP_NORETRY | __GFP_NOWARN;
+ gfp &= ~(__GFP_IO | __GFP_WAIT);
+ for_each_sg(st->sgl, sg, page_count, i) {
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ if (IS_ERR(page)) {
+ i915_gem_purge(dev_priv, page_count);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ }
+ if (IS_ERR(page)) {
+ /* We've tried hard to allocate the memory by reaping
+ * our own buffer, now let the real VM do its job and
+ * go down in flames if truly OOM.
+ */
+ gfp &= ~(__GFP_NORETRY | __GFP_NOWARN);
+ gfp |= __GFP_IO | __GFP_WAIT;
+
+ i915_gem_shrink_all(dev_priv);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ if (IS_ERR(page))
+ goto err_pages;
+
+ gfp |= __GFP_NORETRY | __GFP_NOWARN;
+ gfp &= ~(__GFP_IO | __GFP_WAIT);
+ }
+
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ }
+
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ i915_gem_object_do_bit_17_swizzle(obj);
+
+ obj->pages = st;
+ return 0;
+
+err_pages:
+ for_each_sg(st->sgl, sg, i, page_count)
+ page_cache_release(sg_page(sg));
+ sg_free_table(st);
+ kfree(st);
+ return PTR_ERR(page);
+}
+
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_get_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_put_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int
+i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ const struct drm_i915_gem_object_ops *ops = obj->ops;
+ int ret;
+
+ if (obj->pages)
+ return 0;
+
+ BUG_ON(obj->pages_pin_count);
+
+ ret = ops->get_pages(obj);
+ if (ret)
+ return ret;
+
+ list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+ return 0;
}
void
@@ -1441,7 +1879,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
- obj->last_rendering_seqno = seqno;
+ obj->last_read_seqno = seqno;
if (obj->fenced_gpu_access) {
obj->last_fenced_seqno = seqno;
@@ -1458,97 +1896,35 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}
static void
-i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
-{
- list_del_init(&obj->ring_list);
- obj->last_rendering_seqno = 0;
- obj->last_fenced_seqno = 0;
-}
-
-static void
-i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
-
- i915_gem_object_move_off_active(obj);
-}
-static void
-i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ if (obj->pin_count) /* are we a framebuffer? */
+ intel_mark_fb_idle(obj);
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- BUG_ON(!list_empty(&obj->gpu_write_list));
- BUG_ON(!obj->active);
+ list_del_init(&obj->ring_list);
obj->ring = NULL;
- i915_gem_object_move_off_active(obj);
+ obj->last_read_seqno = 0;
+ obj->last_write_seqno = 0;
+ obj->base.write_domain = 0;
+
+ obj->last_fenced_seqno = 0;
obj->fenced_gpu_access = false;
obj->active = 0;
- obj->pending_gpu_write = false;
drm_gem_object_unreference(&obj->base);
WARN_ON(i915_verify_lists(dev));
}
-/* Immediately discard the backing storage */
-static void
-i915_gem_object_truncate(struct drm_i915_gem_object *obj)
-{
- struct inode *inode;
-
- /* Our goal here is to return as much of the memory as
- * is possible back to the system as we are called from OOM.
- * To do this we must instruct the shmfs to drop all of its
- * backing pages, *now*.
- */
- inode = obj->base.filp->f_path.dentry->d_inode;
- shmem_truncate_range(inode, 0, (loff_t)-1);
-
- if (obj->base.map_list.map)
- drm_gem_free_mmap_offset(&obj->base);
-
- obj->madv = __I915_MADV_PURGED;
-}
-
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
- return obj->madv == I915_MADV_DONTNEED;
-}
-
-static void
-i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
- uint32_t flush_domains)
-{
- struct drm_i915_gem_object *obj, *next;
-
- list_for_each_entry_safe(obj, next,
- &ring->gpu_write_list,
- gpu_write_list) {
- if (obj->base.write_domain & flush_domains) {
- uint32_t old_write_domain = obj->base.write_domain;
-
- obj->base.write_domain = 0;
- list_del_init(&obj->gpu_write_list);
- i915_gem_object_move_to_active(obj, ring,
- i915_gem_next_request_seqno(ring));
-
- trace_i915_gem_object_change_domain(obj,
- obj->base.read_domains,
- old_write_domain);
- }
- }
-}
-
static u32
i915_gem_get_seqno(struct drm_device *dev)
{
@@ -1589,15 +1965,16 @@ i915_add_request(struct intel_ring_buffer *ring,
* is that the flush _must_ happen before the next request, no matter
* what.
*/
- if (ring->gpu_caches_dirty) {
- ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
- if (ret)
- return ret;
+ ret = intel_ring_flush_all_caches(ring);
+ if (ret)
+ return ret;
- ring->gpu_caches_dirty = false;
+ if (request == NULL) {
+ request = kmalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
}
- BUG_ON(request == NULL);
seqno = i915_gem_next_request_seqno(ring);
/* Record the position of the start of the request so that
@@ -1608,8 +1985,10 @@ i915_add_request(struct intel_ring_buffer *ring,
request_ring_position = intel_ring_get_tail(ring);
ret = ring->add_request(ring, &seqno);
- if (ret)
- return ret;
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
trace_i915_gem_request_add(ring, seqno);
@@ -1619,6 +1998,7 @@ i915_add_request(struct intel_ring_buffer *ring,
request->emitted_jiffies = jiffies;
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
+ request->file_priv = NULL;
if (file) {
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -1638,13 +2018,13 @@ i915_add_request(struct intel_ring_buffer *ring,
jiffies +
msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
- if (was_empty)
+ if (was_empty) {
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
+ intel_mark_busy(dev_priv->dev);
+ }
}
- WARN_ON(!list_empty(&ring->gpu_write_list));
-
return 0;
}
@@ -1686,8 +2066,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object,
ring_list);
- obj->base.write_domain = 0;
- list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_inactive(obj);
}
}
@@ -1723,20 +2101,6 @@ void i915_gem_reset(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_lists(dev_priv, ring);
- /* Remove anything from the flushing lists. The GPU cache is likely
- * to be lost on reset along with the data, so simply move the
- * lost bo to the inactive list.
- */
- while (!list_empty(&dev_priv->mm.flushing_list)) {
- obj = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- mm_list);
-
- obj->base.write_domain = 0;
- list_del_init(&obj->gpu_write_list);
- i915_gem_object_move_to_inactive(obj);
- }
-
/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
@@ -1765,7 +2129,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
WARN_ON(i915_verify_lists(ring->dev));
- seqno = ring->get_seqno(ring);
+ seqno = ring->get_seqno(ring, true);
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
if (seqno >= ring->sync_seqno[i])
@@ -1804,13 +2168,10 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
struct drm_i915_gem_object,
ring_list);
- if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
+ if (!i915_seqno_passed(seqno, obj->last_read_seqno))
break;
- if (obj->base.write_domain != 0)
- i915_gem_object_move_to_flushing(obj);
- else
- i915_gem_object_move_to_inactive(obj);
+ i915_gem_object_move_to_inactive(obj);
}
if (unlikely(ring->trace_irq_seqno &&
@@ -1859,216 +2220,20 @@ i915_gem_retire_work_handler(struct work_struct *work)
*/
idle = true;
for_each_ring(ring, dev_priv, i) {
- if (ring->gpu_caches_dirty) {
- struct drm_i915_gem_request *request;
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL ||
- i915_add_request(ring, NULL, request))
- kfree(request);
- }
+ if (ring->gpu_caches_dirty)
+ i915_add_request(ring, NULL, NULL);
idle &= list_empty(&ring->request_list);
}
if (!dev_priv->mm.suspended && !idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ if (idle)
+ intel_mark_idle(dev);
mutex_unlock(&dev->struct_mutex);
}
-int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv,
- bool interruptible)
-{
- if (atomic_read(&dev_priv->mm.wedged)) {
- struct completion *x = &dev_priv->error_completion;
- bool recovery_complete;
- unsigned long flags;
-
- /* Give the error handler a chance to run. */
- spin_lock_irqsave(&x->wait.lock, flags);
- recovery_complete = x->done > 0;
- spin_unlock_irqrestore(&x->wait.lock, flags);
-
- /* Non-interruptible callers can't handle -EAGAIN, hence return
- * -EIO unconditionally for these. */
- if (!interruptible)
- return -EIO;
-
- /* Recovery complete, but still wedged means reset failure. */
- if (recovery_complete)
- return -EIO;
-
- return -EAGAIN;
- }
-
- return 0;
-}
-
-/*
- * Compare seqno against outstanding lazy request. Emit a request if they are
- * equal.
- */
-static int
-i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
-{
- int ret = 0;
-
- BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-
- if (seqno == ring->outstanding_lazy_request) {
- struct drm_i915_gem_request *request;
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
-
- ret = i915_add_request(ring, NULL, request);
- if (ret) {
- kfree(request);
- return ret;
- }
-
- BUG_ON(seqno != request->seqno);
- }
-
- return ret;
-}
-
-/**
- * __wait_seqno - wait until execution of seqno has finished
- * @ring: the ring expected to report seqno
- * @seqno: duh!
- * @interruptible: do an interruptible wait (normally yes)
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- *
- * Returns 0 if the seqno was found within the alloted time. Else returns the
- * errno with remaining time filled in timeout argument.
- */
-static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
- bool interruptible, struct timespec *timeout)
-{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
- struct timespec before, now, wait_time={1,0};
- unsigned long timeout_jiffies;
- long end;
- bool wait_forever = true;
- int ret;
-
- if (i915_seqno_passed(ring->get_seqno(ring), seqno))
- return 0;
-
- trace_i915_gem_request_wait_begin(ring, seqno);
-
- if (timeout != NULL) {
- wait_time = *timeout;
- wait_forever = false;
- }
-
- timeout_jiffies = timespec_to_jiffies(&wait_time);
-
- if (WARN_ON(!ring->irq_get(ring)))
- return -ENODEV;
-
- /* Record current time in case interrupted by signal, or wedged * */
- getrawmonotonic(&before);
-
-#define EXIT_COND \
- (i915_seqno_passed(ring->get_seqno(ring), seqno) || \
- atomic_read(&dev_priv->mm.wedged))
- do {
- if (interruptible)
- end = wait_event_interruptible_timeout(ring->irq_queue,
- EXIT_COND,
- timeout_jiffies);
- else
- end = wait_event_timeout(ring->irq_queue, EXIT_COND,
- timeout_jiffies);
-
- ret = i915_gem_check_wedge(dev_priv, interruptible);
- if (ret)
- end = ret;
- } while (end == 0 && wait_forever);
-
- getrawmonotonic(&now);
-
- ring->irq_put(ring);
- trace_i915_gem_request_wait_end(ring, seqno);
-#undef EXIT_COND
-
- if (timeout) {
- struct timespec sleep_time = timespec_sub(now, before);
- *timeout = timespec_sub(*timeout, sleep_time);
- }
-
- switch (end) {
- case -EIO:
- case -EAGAIN: /* Wedged */
- case -ERESTARTSYS: /* Signal */
- return (int)end;
- case 0: /* Timeout */
- if (timeout)
- set_normalized_timespec(timeout, 0, 0);
- return -ETIME;
- default: /* Completed */
- WARN_ON(end < 0); /* We're not aware of other errors */
- return 0;
- }
-}
-
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int
-i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
-{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
- int ret = 0;
-
- BUG_ON(seqno == 0);
-
- ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
- if (ret)
- return ret;
-
- ret = i915_gem_check_olr(ring, seqno);
- if (ret)
- return ret;
-
- ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
-
- return ret;
-}
-
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
-{
- int ret;
-
- /* This function only exists to support waiting for existing rendering,
- * not for emitting required flushes.
- */
- BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
-
- /* If there is rendering queued on the buffer being evicted, wait for
- * it.
- */
- if (obj->active) {
- ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
- if (ret)
- return ret;
- i915_gem_retire_requests_ring(obj->ring);
- }
-
- return 0;
-}
-
/**
* Ensures that an object will eventually get non-busy by flushing any required
* write domains, emitting any outstanding lazy request and retiring and
@@ -2080,14 +2245,10 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
int ret;
if (obj->active) {
- ret = i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
if (ret)
return ret;
- ret = i915_gem_check_olr(obj->ring,
- obj->last_rendering_seqno);
- if (ret)
- return ret;
i915_gem_retire_requests_ring(obj->ring);
}
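Aside: the hunk above leans on this series' read/write seqno split -- with separate last_read_seqno and last_write_seqno, a read-only wait no longer needs a GPU write-domain flush first. A minimal sketch of the resulting wait, using only names that appear in this diff:

        /* sketch only -- not part of the patch */
        static int wait_rendering_sketch(struct drm_i915_gem_object *obj,
                                         bool readonly)
        {
                u32 seqno = readonly ? obj->last_write_seqno
                                     : obj->last_read_seqno;

                if (seqno == 0)
                        return 0;

                return i915_wait_seqno(obj->ring, seqno);
        }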
@@ -2147,7 +2308,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto out;
if (obj->active) {
- seqno = obj->last_rendering_seqno;
+ seqno = obj->last_read_seqno;
ring = obj->ring;
}
@@ -2202,11 +2363,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
return 0;
if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
- return i915_gem_object_wait_rendering(obj);
+ return i915_gem_object_wait_rendering(obj, false);
idx = intel_ring_sync_index(from, to);
- seqno = obj->last_rendering_seqno;
+ seqno = obj->last_read_seqno;
if (seqno <= from->sync_seqno[idx])
return 0;
@@ -2260,6 +2421,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (obj->pin_count)
return -EBUSY;
+ BUG_ON(obj->pages == NULL);
+
ret = i915_gem_object_finish_gpu(obj);
if (ret)
return ret;
@@ -2270,22 +2433,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
i915_gem_object_finish_gtt(obj);
- /* Move the object to the CPU domain to ensure that
- * any possible CPU writes while it's not in the GTT
- * are flushed when we go to remap it.
- */
- if (ret == 0)
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret == -ERESTARTSYS)
- return ret;
- if (ret) {
- /* In the event of a disaster, abandon all caches and
- * hope for the best.
- */
- i915_gem_clflush_object(obj);
- obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- }
-
/* release the fence reg _after_ flushing */
ret = i915_gem_object_put_fence(obj);
if (ret)
@@ -2301,10 +2448,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
}
i915_gem_gtt_finish_object(obj);
- i915_gem_object_put_pages_gtt(obj);
-
- list_del_init(&obj->gtt_list);
- list_del_init(&obj->mm_list);
+ list_del(&obj->mm_list);
+ list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
@@ -2312,48 +2457,14 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
obj->gtt_space = NULL;
obj->gtt_offset = 0;
- if (i915_gem_object_is_purgeable(obj))
- i915_gem_object_truncate(obj);
-
- return ret;
-}
-
-int
-i915_gem_flush_ring(struct intel_ring_buffer *ring,
- uint32_t invalidate_domains,
- uint32_t flush_domains)
-{
- int ret;
-
- if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
- return 0;
-
- trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
-
- ret = ring->flush(ring, invalidate_domains, flush_domains);
- if (ret)
- return ret;
-
- if (flush_domains & I915_GEM_GPU_DOMAINS)
- i915_gem_process_flushing_list(ring, flush_domains);
-
return 0;
}
static int i915_ring_idle(struct intel_ring_buffer *ring)
{
- int ret;
-
- if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
+ if (list_empty(&ring->active_list))
return 0;
- if (!list_empty(&ring->gpu_write_list)) {
- ret = i915_gem_flush_ring(ring,
- I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- if (ret)
- return ret;
- }
-
return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}
@@ -2372,10 +2483,6 @@ int i915_gpu_idle(struct drm_device *dev)
ret = i915_ring_idle(ring);
if (ret)
return ret;
-
- /* Is the device fubar? */
- if (WARN_ON(!list_empty(&ring->gpu_write_list)))
- return -EBUSY;
}
return 0;
@@ -2548,21 +2655,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
{
- int ret;
-
- if (obj->fenced_gpu_access) {
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->ring,
- 0, obj->base.write_domain);
- if (ret)
- return ret;
- }
-
- obj->fenced_gpu_access = false;
- }
-
if (obj->last_fenced_seqno) {
- ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+ int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
if (ret)
return ret;
@@ -2575,6 +2669,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
mb();
+ obj->fenced_gpu_access = false;
return 0;
}
@@ -2694,18 +2789,88 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
return 0;
}
+static bool i915_gem_valid_gtt_space(struct drm_device *dev,
+ struct drm_mm_node *gtt_space,
+ unsigned long cache_level)
+{
+ struct drm_mm_node *other;
+
+ /* On non-LLC machines we have to be careful when putting differing
+ * types of snoopable memory together to avoid the prefetcher
+ * crossing memory domains and dying.
+ */
+ if (HAS_LLC(dev))
+ return true;
+
+ if (gtt_space == NULL)
+ return true;
+
+ if (list_empty(&gtt_space->node_list))
+ return true;
+
+ other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+ if (other->allocated && !other->hole_follows && other->color != cache_level)
+ return false;
+
+ other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+ if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+ return false;
+
+ return true;
+}
+
+static void i915_gem_verify_gtt(struct drm_device *dev)
+{
+#if WATCH_GTT
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ if (obj->gtt_space == NULL) {
+ printk(KERN_ERR "object found on GTT list with no space reserved\n");
+ err++;
+ continue;
+ }
+
+ if (obj->cache_level != obj->gtt_space->color) {
+ printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
+ obj->gtt_space->start,
+ obj->gtt_space->start + obj->gtt_space->size,
+ obj->cache_level,
+ obj->gtt_space->color);
+ err++;
+ continue;
+ }
+
+ if (!i915_gem_valid_gtt_space(dev,
+ obj->gtt_space,
+ obj->cache_level)) {
+ printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
+ obj->gtt_space->start,
+ obj->gtt_space->start + obj->gtt_space->size,
+ obj->cache_level);
+ err++;
+ continue;
+ }
+ }
+
+ WARN_ON(err);
+#endif
+}
+
/**
* Finds free space in the GTT aperture and binds the object there.
*/
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
- bool map_and_fenceable)
+ bool map_and_fenceable,
+ bool nonblocking)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_mm_node *free_space;
- gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
int ret;
@@ -2745,89 +2910,67 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
return -E2BIG;
}
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
search_free:
if (map_and_fenceable)
free_space =
- drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
- size, alignment,
- 0, dev_priv->mm.gtt_mappable_end,
- 0);
+ drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
+ size, alignment, obj->cache_level,
+ 0, dev_priv->mm.gtt_mappable_end,
+ false);
else
- free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
- size, alignment, 0);
+ free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
+ size, alignment, obj->cache_level,
+ false);
if (free_space != NULL) {
if (map_and_fenceable)
obj->gtt_space =
drm_mm_get_block_range_generic(free_space,
- size, alignment, 0,
+ size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end,
- 0);
+ false);
else
obj->gtt_space =
- drm_mm_get_block(free_space, size, alignment);
+ drm_mm_get_block_generic(free_space,
+ size, alignment, obj->cache_level,
+ false);
}
if (obj->gtt_space == NULL) {
- /* If the gtt is empty and we're still having trouble
- * fitting our object in, we're out of memory.
- */
ret = i915_gem_evict_something(dev, size, alignment,
- map_and_fenceable);
+ obj->cache_level,
+ map_and_fenceable,
+ nonblocking);
if (ret)
return ret;
goto search_free;
}
-
- ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
- if (ret) {
+ if (WARN_ON(!i915_gem_valid_gtt_space(dev,
+ obj->gtt_space,
+ obj->cache_level))) {
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
-
- if (ret == -ENOMEM) {
- /* first try to reclaim some memory by clearing the GTT */
- ret = i915_gem_evict_everything(dev, false);
- if (ret) {
- /* now try to shrink everyone else */
- if (gfpmask) {
- gfpmask = 0;
- goto search_free;
- }
-
- return -ENOMEM;
- }
-
- goto search_free;
- }
-
- return ret;
+ return -EINVAL;
}
+
ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
- i915_gem_object_put_pages_gtt(obj);
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
-
- if (i915_gem_evict_everything(dev, false))
- return ret;
-
- goto search_free;
+ return ret;
}
if (!dev_priv->mm.aliasing_ppgtt)
i915_gem_gtt_bind_object(obj, obj->cache_level);
- list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
+ list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- /* Assert that the object is not currently in any GPU domain. As it
- * wasn't in the GTT, there shouldn't be any way it could have been in
- * a GPU cache
- */
- BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
- BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
-
obj->gtt_offset = obj->gtt_space->start;
fenceable =
@@ -2840,6 +2983,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
obj->map_and_fenceable = mappable && fenceable;
trace_i915_gem_object_bind(obj, map_and_fenceable);
+ i915_gem_verify_gtt(dev);
return 0;
}
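Aside: the object's cache_level now doubles as the drm_mm node colour, and i915_gem_valid_gtt_space() enforces the non-LLC adjacency rule at allocation time. Sketched with the calls from this hunk:

        /* colour-aware search: on !HAS_LLC, nodes of differing cache
         * levels must be separated by a guard hole:
         *   [ uncached ][ snooped ]          invalid
         *   [ uncached ]  hole  [ snooped ]  valid
         */
        free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
                                              size, alignment,
                                              obj->cache_level, false);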
@@ -2866,18 +3010,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
trace_i915_gem_object_clflush(obj);
- drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
-}
-
-/** Flushes any GPU write domain for the object if it's dirty. */
-static int
-i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
-{
- if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
- return 0;
-
- /* Queue the GPU write cache flushing we need. */
- return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
+ drm_clflush_sg(obj->pages);
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2946,16 +3079,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
return 0;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_wait_rendering(obj, !write);
if (ret)
return ret;
- if (obj->pending_gpu_write || write) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
- }
-
i915_gem_object_flush_cpu_write_domain(obj);
old_write_domain = obj->base.write_domain;
@@ -2998,6 +3125,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
+ if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
+ }
+
if (obj->gtt_space) {
ret = i915_gem_object_finish_gpu(obj);
if (ret)
@@ -3009,7 +3142,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* registers with snooped memory, so relinquish any fences
* currently pointing to our region in the aperture.
*/
- if (INTEL_INFO(obj->base.dev)->gen < 6) {
+ if (INTEL_INFO(dev)->gen < 6) {
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
@@ -3020,6 +3153,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
if (obj->has_aliasing_ppgtt_mapping)
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
+
+ obj->gtt_space->color = cache_level;
}
if (cache_level == I915_CACHE_NONE) {
@@ -3046,9 +3181,72 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
obj->cache_level = cache_level;
+ i915_gem_verify_gtt(dev);
return 0;
}
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_caching *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ args->caching = obj->cache_level != I915_CACHE_NONE;
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_caching *args = data;
+ struct drm_i915_gem_object *obj;
+ enum i915_cache_level level;
+ int ret;
+
+ switch (args->caching) {
+ case I915_CACHING_NONE:
+ level = I915_CACHE_NONE;
+ break;
+ case I915_CACHING_CACHED:
+ level = I915_CACHE_LLC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ ret = i915_gem_object_set_cache_level(obj, level);
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
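Aside: a hedged userspace view of the pair of handlers added above. The ioctl request numbers are assumptions (they are defined in i915_drm.h alongside this series, not in this hunk); the struct fields and I915_CACHING_* values are the ones used above, and drmIoctl() is libdrm's wrapper:

        struct drm_i915_gem_caching arg = {
                .handle = bo_handle,            /* hypothetical GEM handle */
                .caching = I915_CACHING_CACHED, /* maps to I915_CACHE_LLC */
        };

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
                perror("I915_GEM_SET_CACHING");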
/*
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
@@ -3062,10 +3260,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 old_read_domains, old_write_domain;
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
-
if (pipelined != obj->ring) {
ret = i915_gem_object_sync(obj, pipelined);
if (ret)
@@ -3089,7 +3283,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
- ret = i915_gem_object_pin(obj, alignment, true);
+ ret = i915_gem_object_pin(obj, alignment, true, false);
if (ret)
return ret;
@@ -3101,7 +3295,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->base.write_domain = 0;
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
@@ -3119,13 +3313,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
return 0;
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
- if (ret)
- return ret;
- }
-
- ret = i915_gem_object_wait_rendering(obj);
+ ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
@@ -3149,16 +3337,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
return 0;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_wait_rendering(obj, !write);
if (ret)
return ret;
- if (write || obj->pending_gpu_write) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
- }
-
i915_gem_object_flush_gtt_write_domain(obj);
old_write_domain = obj->base.write_domain;
@@ -3238,7 +3420,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
- bool map_and_fenceable)
+ bool map_and_fenceable,
+ bool nonblocking)
{
int ret;
@@ -3263,7 +3446,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (obj->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment,
- map_and_fenceable);
+ map_and_fenceable,
+ nonblocking);
if (ret)
return ret;
}
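Aside: the extra bool threads a nonblocking mode from pin through bind_to_gtt into the eviction scan, letting callers that must not wait fail fast instead of stalling on active objects. The common call shape stays blocking:

        ret = i915_gem_object_pin(obj, alignment,
                                  true,   /* map_and_fenceable */
                                  false); /* nonblocking=false: may evict+wait */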
@@ -3321,7 +3505,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
obj->user_pin_count++;
obj->pin_filp = file;
if (obj->user_pin_count == 1) {
- ret = i915_gem_object_pin(obj, args->alignment, true);
+ ret = i915_gem_object_pin(obj, args->alignment, true, false);
if (ret)
goto out;
}
@@ -3401,6 +3585,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_flush_active(obj);
args->busy = obj->active;
+ if (obj->ring) {
+ BUILD_BUG_ON(I915_NUM_RINGS > 16);
+ args->busy |= intel_ring_flag(obj->ring) << 16;
+ }
drm_gem_object_unreference(&obj->base);
unlock:
@@ -3449,9 +3637,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (obj->madv != __I915_MADV_PURGED)
obj->madv = args->madv;
- /* if the object is no longer bound, discard its backing storage */
- if (i915_gem_object_is_purgeable(obj) &&
- obj->gtt_space == NULL)
+ /* if the object is no longer attached, discard its backing storage */
+ if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
i915_gem_object_truncate(obj);
args->retained = obj->madv != __I915_MADV_PURGED;
@@ -3463,10 +3650,32 @@ unlock:
return ret;
}
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops)
+{
+ INIT_LIST_HEAD(&obj->mm_list);
+ INIT_LIST_HEAD(&obj->gtt_list);
+ INIT_LIST_HEAD(&obj->ring_list);
+ INIT_LIST_HEAD(&obj->exec_list);
+
+ obj->ops = ops;
+
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ obj->madv = I915_MADV_WILLNEED;
+ /* Avoid an unnecessary call to unbind on the first bind. */
+ obj->map_and_fenceable = true;
+
+ i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+ .get_pages = i915_gem_object_get_pages_gtt,
+ .put_pages = i915_gem_object_put_pages_gtt,
+};
+
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct address_space *mapping;
u32 mask;
@@ -3490,7 +3699,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
mapping_set_gfp_mask(mapping, mask);
- i915_gem_info_add_obj(dev_priv, size);
+ i915_gem_object_init(obj, &i915_gem_object_ops);
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -3512,17 +3721,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
} else
obj->cache_level = I915_CACHE_NONE;
- obj->base.driver_private = NULL;
- obj->fence_reg = I915_FENCE_REG_NONE;
- INIT_LIST_HEAD(&obj->mm_list);
- INIT_LIST_HEAD(&obj->gtt_list);
- INIT_LIST_HEAD(&obj->ring_list);
- INIT_LIST_HEAD(&obj->exec_list);
- INIT_LIST_HEAD(&obj->gpu_write_list);
- obj->madv = I915_MADV_WILLNEED;
- /* Avoid an unnecessary call to unbind on the first bind. */
- obj->map_and_fenceable = true;
-
return obj;
}
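Aside: backing storage is now reached through a small per-object vtable, which is what lets shmem-backed and (below) dma-buf-backed objects share one page-tracking path. A provider plugs in as sketched here, reusing the shmem ops defined above:

        static const struct drm_i915_gem_object_ops my_ops = {
                .get_pages = i915_gem_object_get_pages_gtt,
                .put_pages = i915_gem_object_put_pages_gtt,
        };

        i915_gem_object_init(obj, &my_ops); /* lists, madv, accounting */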
@@ -3541,9 +3739,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
trace_i915_gem_object_destroy(obj);
- if (gem_obj->import_attach)
- drm_prime_gem_destroy(gem_obj, obj->sg_table);
-
if (obj->phys_obj)
i915_gem_detach_phys_object(dev, obj);
@@ -3559,8 +3754,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
dev_priv->mm.interruptible = was_interruptible;
}
- if (obj->base.map_list.map)
- drm_gem_free_mmap_offset(&obj->base);
+ obj->pages_pin_count = 0;
+ i915_gem_object_put_pages(obj);
+ i915_gem_object_free_mmap_offset(obj);
+
+ BUG_ON(obj->pages);
+
+ if (obj->base.import_attach)
+ drm_prime_gem_destroy(&obj->base, NULL);
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
@@ -3591,7 +3792,7 @@ i915_gem_idle(struct drm_device *dev)
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_gem_evict_everything(dev, false);
+ i915_gem_evict_everything(dev);
i915_gem_reset_fences(dev);
@@ -3892,7 +4093,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}
BUG_ON(!list_empty(&dev_priv->mm.active_list));
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
mutex_unlock(&dev->struct_mutex);
@@ -3940,7 +4140,6 @@ init_ring_lists(struct intel_ring_buffer *ring)
{
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->gpu_write_list);
}
void
@@ -3950,10 +4149,10 @@ i915_gem_load(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
INIT_LIST_HEAD(&dev_priv->mm.active_list);
- INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
+ INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
for (i = 0; i < I915_NUM_RINGS; i++)
init_ring_lists(&dev_priv->ring[i]);
for (i = 0; i < I915_MAX_NUM_FENCES; i++)
@@ -4198,18 +4397,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
}
static int
-i915_gpu_is_active(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int lists_empty;
-
- lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list);
-
- return !lists_empty;
-}
-
-static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
@@ -4217,60 +4404,27 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_i915_private,
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
- struct drm_i915_gem_object *obj, *next;
+ struct drm_i915_gem_object *obj;
int nr_to_scan = sc->nr_to_scan;
int cnt;
if (!mutex_trylock(&dev->struct_mutex))
return 0;
- /* "fast-path" to count number of available objects */
- if (nr_to_scan == 0) {
- cnt = 0;
- list_for_each_entry(obj,
- &dev_priv->mm.inactive_list,
- mm_list)
- cnt++;
- mutex_unlock(&dev->struct_mutex);
- return cnt / 100 * sysctl_vfs_cache_pressure;
- }
-
-rescan:
- /* first scan for clean buffers */
- i915_gem_retire_requests(dev);
-
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list,
- mm_list) {
- if (i915_gem_object_is_purgeable(obj)) {
- if (i915_gem_object_unbind(obj) == 0 &&
- --nr_to_scan == 0)
- break;
- }
+ if (nr_to_scan) {
+ nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
+ if (nr_to_scan > 0)
+ i915_gem_shrink_all(dev_priv);
}
- /* second pass, evict/count anything still on the inactive list */
cnt = 0;
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list,
- mm_list) {
- if (nr_to_scan &&
- i915_gem_object_unbind(obj) == 0)
- nr_to_scan--;
- else
- cnt++;
- }
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
+ if (obj->pages_pin_count == 0)
+ cnt += obj->base.size >> PAGE_SHIFT;
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+ if (obj->pin_count == 0 && obj->pages_pin_count == 0)
+ cnt += obj->base.size >> PAGE_SHIFT;
- if (nr_to_scan && i915_gpu_is_active(dev)) {
- /*
- * We are desperate for pages, so as a last resort, wait
- * for the GPU to finish and discard whatever we can.
- * This has a dramatic impact to reduce the number of
- * OOM-killer events whilst running the GPU aggressively.
- */
- if (i915_gpu_idle(dev) == 0)
- goto rescan;
- }
mutex_unlock(&dev->struct_mutex);
- return cnt / 100 * sysctl_vfs_cache_pressure;
+ return cnt;
}
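Aside: the shrinker now purges what it can, falls back to dropping all unpinned pages, and reports reclaimable pages directly rather than an object count scaled by sysctl_vfs_cache_pressure. Registration is not part of this hunk; a sketch of how the driver hooks it up elsewhere (i915_gem_load()), assuming the 3.x shrinker interface:

        dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
        dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&dev_priv->mm.inactive_shrinker);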
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a9d58d72bb4d..4aa7ecf77ede 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -97,8 +97,7 @@
static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
-static int do_switch(struct drm_i915_gem_object *from_obj,
- struct i915_hw_context *to, u32 seqno);
+static int do_switch(struct i915_hw_context *to);
static int get_context_size(struct drm_device *dev)
{
@@ -113,7 +112,10 @@ static int get_context_size(struct drm_device *dev)
break;
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
- ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
+ if (IS_HASWELL(dev))
+ ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
+ else
+ ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
default:
BUG();
@@ -219,20 +221,21 @@ static int create_default_context(struct drm_i915_private *dev_priv)
* default context.
*/
dev_priv->ring[RCS].default_context = ctx;
- ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
- if (ret) {
- do_destroy(ctx);
- return ret;
- }
+ ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
+ if (ret)
+ goto err_destroy;
- ret = do_switch(NULL, ctx, 0);
- if (ret) {
- i915_gem_object_unpin(ctx->obj);
- do_destroy(ctx);
- } else {
- DRM_DEBUG_DRIVER("Default HW context loaded\n");
- }
+ ret = do_switch(ctx);
+ if (ret)
+ goto err_unpin;
+ DRM_DEBUG_DRIVER("Default HW context loaded\n");
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(ctx->obj);
+err_destroy:
+ do_destroy(ctx);
return ret;
}
@@ -359,18 +362,19 @@ mi_set_context(struct intel_ring_buffer *ring,
return ret;
}
-static int do_switch(struct drm_i915_gem_object *from_obj,
- struct i915_hw_context *to,
- u32 seqno)
+static int do_switch(struct i915_hw_context *to)
{
- struct intel_ring_buffer *ring = NULL;
+ struct intel_ring_buffer *ring = to->ring;
+ struct drm_i915_gem_object *from_obj = ring->last_context_obj;
u32 hw_flags = 0;
int ret;
- BUG_ON(to == NULL);
BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
- ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
+ if (from_obj == to->obj)
+ return 0;
+
+ ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
if (ret)
return ret;
@@ -393,7 +397,6 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
hw_flags |= MI_FORCE_RESTORE;
- ring = to->ring;
ret = mi_set_context(ring, to, hw_flags);
if (ret) {
i915_gem_object_unpin(to->obj);
@@ -407,6 +410,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from_obj != NULL) {
+ u32 seqno = i915_gem_next_request_seqno(ring);
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_gem_object_move_to_active(from_obj, ring, seqno);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
@@ -417,7 +421,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
* swapped, but there is no way to do that yet.
*/
from_obj->dirty = 1;
- BUG_ON(from_obj->ring != to->ring);
+ BUG_ON(from_obj->ring != ring);
i915_gem_object_unpin(from_obj);
drm_gem_object_unreference(&from_obj->base);
@@ -448,9 +452,7 @@ int i915_switch_context(struct intel_ring_buffer *ring,
int to_id)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct drm_i915_file_private *file_priv = NULL;
struct i915_hw_context *to;
- struct drm_i915_gem_object *from_obj = ring->last_context_obj;
if (dev_priv->hw_contexts_disabled)
return 0;
@@ -458,21 +460,18 @@ int i915_switch_context(struct intel_ring_buffer *ring,
if (ring != &dev_priv->ring[RCS])
return 0;
- if (file)
- file_priv = file->driver_priv;
-
if (to_id == DEFAULT_CONTEXT_ID) {
to = ring->default_context;
} else {
- to = i915_gem_context_get(file_priv, to_id);
+ if (file == NULL)
+ return -EINVAL;
+
+ to = i915_gem_context_get(file->driver_priv, to_id);
if (to == NULL)
return -ENOENT;
}
- if (from_obj == to->obj)
- return 0;
-
- return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
+ return do_switch(to);
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
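Aside: do_switch() now derives the ring and the previous context object itself and owns the no-op check, so i915_switch_context() only resolves the target context. The control flow, sketched with names from the hunks above:

        static int do_switch_sketch(struct i915_hw_context *to)
        {
                struct intel_ring_buffer *ring = to->ring;

                if (ring->last_context_obj == to->obj)
                        return 0; /* already current -- emit nothing */

                /* pin to->obj, mi_set_context(), retire the old object */
                return 0;
        }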
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index aa308e1337db..ca3497e1108c 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -28,35 +28,62 @@
#include <linux/dma-buf.h>
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
- struct drm_device *dev = obj->base.dev;
- int npages = obj->base.size / PAGE_SIZE;
- struct sg_table *sg = NULL;
- int ret;
- int nents;
+ struct sg_table *st;
+ struct scatterlist *src, *dst;
+ int ret, i;
- ret = i915_mutex_lock_interruptible(dev);
+ ret = i915_mutex_lock_interruptible(obj->base.dev);
if (ret)
return ERR_PTR(ret);
- if (!obj->pages) {
- ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
- if (ret)
- goto out;
+ ret = i915_gem_object_get_pages(obj);
+ if (ret) {
+ st = ERR_PTR(ret);
+ goto out;
+ }
+
+ /* Copy sg so that we make an independent mapping */
+ st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (st == NULL) {
+ st = ERR_PTR(-ENOMEM);
+ goto out;
}
- /* link the pages into an SG then map the sg */
- sg = drm_prime_pages_to_sg(obj->pages, npages);
- nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+ if (ret) {
+ kfree(st);
+ st = ERR_PTR(ret);
+ goto out;
+ }
+
+ src = obj->pages->sgl;
+ dst = st->sgl;
+ for (i = 0; i < obj->pages->nents; i++) {
+ sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
+ dst = sg_next(dst);
+ src = sg_next(src);
+ }
+
+ if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+ sg_free_table(st);
+ kfree(st);
+ st = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ i915_gem_object_pin_pages(obj);
+
out:
- mutex_unlock(&dev->struct_mutex);
- return sg;
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ return st;
}
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *sg, enum dma_data_direction dir)
+ struct sg_table *sg,
+ enum dma_data_direction dir)
{
dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
sg_free_table(sg);
@@ -78,7 +105,9 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->base.dev;
- int ret;
+ struct scatterlist *sg;
+ struct page **pages;
+ int ret, i;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@@ -89,24 +118,34 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
goto out_unlock;
}
- if (!obj->pages) {
- ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ERR_PTR(ret);
- }
- }
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ goto error;
- obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
- if (!obj->dma_buf_vmapping) {
- DRM_ERROR("failed to vmap object\n");
- goto out_unlock;
- }
+ ret = -ENOMEM;
+
+ pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
+ if (pages == NULL)
+ goto error;
+
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
+ pages[i] = sg_page(sg);
+
+ obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
+ drm_free_large(pages);
+
+ if (!obj->dma_buf_vmapping)
+ goto error;
obj->vmapping_count = 1;
+ i915_gem_object_pin_pages(obj);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return obj->dma_buf_vmapping;
+
+error:
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -119,10 +158,11 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
if (ret)
return;
- --obj->vmapping_count;
- if (obj->vmapping_count == 0) {
+ if (--obj->vmapping_count == 0) {
vunmap(obj->dma_buf_vmapping);
obj->dma_buf_vmapping = NULL;
+
+ i915_gem_object_unpin_pages(obj);
}
mutex_unlock(&dev->struct_mutex);
}
@@ -151,6 +191,22 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
return -EINVAL;
}
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+{
+ struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->base.dev;
+ int ret;
+ bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, write);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
static const struct dma_buf_ops i915_dmabuf_ops = {
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -162,25 +218,47 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,
+ .begin_cpu_access = i915_gem_begin_cpu_access,
};
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gem_obj, int flags)
+ struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- return dma_buf_export(obj, &i915_dmabuf_ops,
- obj->base.size, 0600);
+ return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
+}
+
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+ struct sg_table *sg;
+
+ sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg))
+ return PTR_ERR(sg);
+
+ obj->pages = sg;
+ obj->has_dma_mapping = true;
+ return 0;
}
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+ dma_buf_unmap_attachment(obj->base.import_attach,
+ obj->pages, DMA_BIDIRECTIONAL);
+ obj->has_dma_mapping = false;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
+ .get_pages = i915_gem_object_get_pages_dmabuf,
+ .put_pages = i915_gem_object_put_pages_dmabuf,
+};
+
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
+ struct dma_buf *dma_buf)
{
struct dma_buf_attachment *attach;
- struct sg_table *sg;
struct drm_i915_gem_object *obj;
- int npages;
- int size;
int ret;
/* is this one of own objects? */
@@ -198,34 +276,24 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
if (IS_ERR(attach))
return ERR_CAST(attach);
- sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto fail_detach;
- }
-
- size = dma_buf->size;
- npages = size / PAGE_SIZE;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL) {
ret = -ENOMEM;
- goto fail_unmap;
+ goto fail_detach;
}
- ret = drm_gem_private_object_init(dev, &obj->base, size);
+ ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
if (ret) {
kfree(obj);
- goto fail_unmap;
+ goto fail_detach;
}
- obj->sg_table = sg;
+ i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
return &obj->base;
-fail_unmap:
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
return ERR_PTR(ret);
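Aside: with the dmabuf-specific ops, import no longer maps the attachment eagerly; the sg_table is fetched on first get_pages and released again on put_pages. The lifetime, sketched with helpers named in this diff:

        obj = i915_gem_prime_import(dev, dma_buf); /* attach only */
        ret = i915_gem_object_get_pages(to_intel_bo(obj));
                        /* -> dma_buf_map_attachment() */
        i915_gem_object_put_pages(to_intel_bo(obj));
                        /* -> dma_buf_unmap_attachment() */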
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index eba0308f10e3..a2d8acde8550 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -44,7 +44,8 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
- unsigned alignment, bool mappable)
+ unsigned alignment, unsigned cache_level,
+ bool mappable, bool nonblocking)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
@@ -79,11 +80,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
INIT_LIST_HEAD(&unwind_list);
if (mappable)
drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
- min_size, alignment, 0,
+ min_size, alignment, cache_level,
0, dev_priv->mm.gtt_mappable_end);
else
drm_mm_init_scan(&dev_priv->mm.gtt_space,
- min_size, alignment, 0);
+ min_size, alignment, cache_level);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
@@ -91,29 +92,16 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
goto found;
}
- /* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- /* Does the object require an outstanding flush? */
- if (obj->base.write_domain)
- continue;
-
- if (mark_free(obj, &unwind_list))
- goto found;
- }
+ if (nonblocking)
+ goto none;
- /* Finally add anything with a pending flush (in order of retirement) */
- list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
- if (mark_free(obj, &unwind_list))
- goto found;
- }
+ /* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (!obj->base.write_domain)
- continue;
-
if (mark_free(obj, &unwind_list))
goto found;
}
+none:
/* Nothing found, clean up and bail out! */
while (!list_empty(&unwind_list)) {
obj = list_first_entry(&unwind_list,
@@ -164,7 +152,7 @@ found:
}
int
-i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
+i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
@@ -172,12 +160,11 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
int ret;
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list));
if (lists_empty)
return -ENOSPC;
- trace_i915_gem_evict_everything(dev, purgeable_only);
+ trace_i915_gem_evict_everything(dev);
/* The gpu_idle will flush everything in the write domain to the
* active list. Then we must move everything off the active list
@@ -189,16 +176,11 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
i915_gem_retire_requests(dev);
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list, mm_list) {
- if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
- if (obj->pin_count == 0)
- WARN_ON(i915_gem_object_unbind(obj));
- }
- }
+ &dev_priv->mm.inactive_list, mm_list)
+ if (obj->pin_count == 0)
+ WARN_ON(i915_gem_object_unbind(obj));
return 0;
}
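Aside: with the flushing list gone, evicting everything reduces to idle, retire, unbind; idling first is what moves objects off the active list before the unbind walk. Sketched:

        ret = i915_gpu_idle(dev); /* wait for all rings */
        if (ret == 0) {
                i915_gem_retire_requests(dev); /* active -> inactive */
                /* then unbind every unpinned object on mm.inactive_list */
        }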
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ff2819ea0813..6a2f3e50c714 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -34,180 +34,6 @@
#include "intel_drv.h"
#include <linux/dma_remapping.h>
-struct change_domains {
- uint32_t invalidate_domains;
- uint32_t flush_domains;
- uint32_t flush_rings;
- uint32_t flips;
-};
-
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invaliding though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Mapped to GTT
- * 4. Read by GPU
- * 5. Unmapped from GTT
- * 6. Freed
- *
- * Let's take these a step at a time
- *
- * 1. Allocated
- * Pages allocated from the kernel may still have
- * cache contents, so we set them to (CPU, CPU) always.
- * 2. Written by CPU (using pwrite)
- * The pwrite function calls set_domain (CPU, CPU) and
- * this function does nothing (as nothing changes)
- * 3. Mapped by GTT
- * This function asserts that the object is not
- * currently in any GPU-based read or write domains
- * 4. Read by GPU
- * i915_gem_execbuffer calls set_domain (COMMAND, 0).
- * As write_domain is zero, this function adds in the
- * current read domains (CPU+COMMAND, 0).
- * flush_domains is set to CPU.
- * invalidate_domains is set to COMMAND
- * clflush is run to get data out of the CPU caches
- * then i915_dev_set_domain calls i915_gem_flush to
- * emit an MI_FLUSH and drm_agp_chipset_flush
- * 5. Unmapped from GTT
- * i915_gem_object_unbind calls set_domain (CPU, CPU)
- * flush_domains and invalidate_domains end up both zero
- * so no flushing/invalidating happens
- * 6. Freed
- * yay, done
- *
- * Case 2: The shared render buffer
- *
- * 1. Allocated
- * 2. Mapped to GTT
- * 3. Read/written by GPU
- * 4. set_domain to (CPU,CPU)
- * 5. Read/written by CPU
- * 6. Read/written by GPU
- *
- * 1. Allocated
- * Same as last example, (CPU, CPU)
- * 2. Mapped to GTT
- * Nothing changes (assertions find that it is not in the GPU)
- * 3. Read/written by GPU
- * execbuffer calls set_domain (RENDER, RENDER)
- * flush_domains gets CPU
- * invalidate_domains gets GPU
- * clflush (obj)
- * MI_FLUSH and drm_agp_chipset_flush
- * 4. set_domain (CPU, CPU)
- * flush_domains gets GPU
- * invalidate_domains gets CPU
- * wait_rendering (obj) to make sure all drawing is complete.
- * This will include an MI_FLUSH to get the data from GPU
- * to memory
- * clflush (obj) to invalidate the CPU cache
- * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- * 5. Read/written by CPU
- * cache lines are loaded and dirtied
- * 6. Read written by GPU
- * Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Read by GPU
- * 4. Updated (written) by CPU again
- * 5. Read by GPU
- *
- * 1. Allocated
- * (CPU, CPU)
- * 2. Written by CPU
- * (CPU, CPU)
- * 3. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- * 4. Updated (written) by CPU again
- * (CPU, CPU)
- * flush_domains = 0 (no previous write domain)
- * invalidate_domains = 0 (no new read domains)
- * 5. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- struct change_domains *cd)
-{
- uint32_t invalidate_domains = 0, flush_domains = 0;
-
- /*
- * If the object isn't moving to a new write domain,
- * let the object stay in multiple read domains
- */
- if (obj->base.pending_write_domain == 0)
- obj->base.pending_read_domains |= obj->base.read_domains;
-
- /*
- * Flush the current write domain if
- * the new read domains don't match. Invalidate
- * any read domains which differ from the old
- * write domain
- */
- if (obj->base.write_domain &&
- (((obj->base.write_domain != obj->base.pending_read_domains ||
- obj->ring != ring)) ||
- (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
- flush_domains |= obj->base.write_domain;
- invalidate_domains |=
- obj->base.pending_read_domains & ~obj->base.write_domain;
- }
- /*
- * Invalidate any read caches which may have
- * stale data. That is, any new read domains.
- */
- invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
- i915_gem_clflush_object(obj);
-
- if (obj->base.pending_write_domain)
- cd->flips |= atomic_read(&obj->pending_flip);
-
- /* The actual obj->write_domain will be updated with
- * pending_write_domain after we emit the accumulated flush for all
- * of our domain changes in execbuffers (which clears objects'
- * write_domains). So if we have a current write domain that we
- * aren't changing, set pending_write_domain to that.
- */
- if (flush_domains == 0 && obj->base.pending_write_domain == 0)
- obj->base.pending_write_domain = obj->base.write_domain;
-
- cd->invalidate_domains |= invalidate_domains;
- cd->flush_domains |= flush_domains;
- if (flush_domains & I915_GEM_GPU_DOMAINS)
- cd->flush_rings |= intel_ring_flag(obj->ring);
- if (invalidate_domains & I915_GEM_GPU_DOMAINS)
- cd->flush_rings |= intel_ring_flag(ring);
-}
-
struct eb_objects {
int and;
struct hlist_head buckets[0];
@@ -218,6 +44,7 @@ eb_create(int size)
{
struct eb_objects *eb;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
while (count > size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
@@ -269,6 +96,7 @@ eb_destroy(struct eb_objects *eb)
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+ !obj->map_and_fenceable ||
obj->cache_level != I915_CACHE_NONE);
}
@@ -383,7 +211,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ reloc->offset >> PAGE_SHIFT));
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
kunmap_atomic(vaddr);
} else {
@@ -504,7 +333,8 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
return ret;
}
-#define __EXEC_OBJECT_HAS_FENCE (1<<31)
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
@@ -514,9 +344,10 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
}
static int
-pin_and_fence_object(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring)
{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence, need_mappable;
@@ -528,15 +359,17 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
- ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
+ ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
if (ret)
return ret;
+ entry->flags |= __EXEC_OBJECT_HAS_PIN;
+
if (has_fenced_gpu_access) {
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
ret = i915_gem_object_get_fence(obj);
if (ret)
- goto err_unpin;
+ return ret;
if (i915_gem_object_pin_fence(obj))
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
@@ -545,12 +378,35 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
}
}
+ /* Ensure ppgtt mapping exists if needed */
+ if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
+ i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+ obj, obj->cache_level);
+
+ obj->has_aliasing_ppgtt_mapping = 1;
+ }
+
entry->offset = obj->gtt_offset;
return 0;
+}
-err_unpin:
- i915_gem_object_unpin(obj);
- return ret;
+static void
+i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_gem_exec_object2 *entry;
+
+ if (!obj->gtt_space)
+ return;
+
+ entry = obj->exec_entry;
+
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+ i915_gem_object_unpin_fence(obj);
+
+ if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+ i915_gem_object_unpin(obj);
+
+ entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
static int
@@ -558,11 +414,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
struct list_head *objects)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
- int ret, retry;
- bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
struct list_head ordered_objects;
+ bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+ int retry;
INIT_LIST_HEAD(&ordered_objects);
while (!list_empty(objects)) {
@@ -587,6 +442,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
obj->base.pending_read_domains = 0;
obj->base.pending_write_domain = 0;
+ obj->pending_fenced_gpu_access = false;
}
list_splice(&ordered_objects, objects);
@@ -599,12 +455,12 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
* 2. Bind new objects.
* 3. Decrement pin count.
*
- * This avoid unnecessary unbinding of later objects in order to makr
+ * This avoids unnecessary unbinding of later objects in order to make
* room for the earlier objects *unless* we need to defragment.
*/
retry = 0;
do {
- ret = 0;
+ int ret = 0;
/* Unbind any ill-fitting objects or pin. */
list_for_each_entry(obj, objects, exec_list) {
@@ -624,7 +480,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
- ret = pin_and_fence_object(obj, ring);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring);
if (ret)
goto err;
}
@@ -634,77 +490,22 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
if (obj->gtt_space)
continue;
- ret = pin_and_fence_object(obj, ring);
- if (ret) {
- int ret_ignore;
-
- /* This can potentially raise a harmless
- * -EINVAL if we failed to bind in the above
- * call. It cannot raise -EINTR since we know
- * that the bo is freshly bound and so will
- * not need to be flushed or waited upon.
- */
- ret_ignore = i915_gem_object_unbind(obj);
- (void)ret_ignore;
- WARN_ON(obj->gtt_space);
- break;
- }
+ ret = i915_gem_execbuffer_reserve_object(obj, ring);
+ if (ret)
+ goto err;
}
- /* Decrement pin count for bound objects */
- list_for_each_entry(obj, objects, exec_list) {
- struct drm_i915_gem_exec_object2 *entry;
-
- if (!obj->gtt_space)
- continue;
-
- entry = obj->exec_entry;
- if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- i915_gem_object_unpin_fence(obj);
- entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
- }
-
- i915_gem_object_unpin(obj);
-
- /* ... and ensure ppgtt mapping exist if needed. */
- if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
- i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
- obj, obj->cache_level);
+err: /* Decrement pin count for bound objects */
+ list_for_each_entry(obj, objects, exec_list)
+ i915_gem_execbuffer_unreserve_object(obj);
- obj->has_aliasing_ppgtt_mapping = 1;
- }
- }
-
- if (ret != -ENOSPC || retry > 1)
+ if (ret != -ENOSPC || retry++)
return ret;
- /* First attempt, just clear anything that is purgeable.
- * Second attempt, clear the entire GTT.
- */
- ret = i915_gem_evict_everything(ring->dev, retry == 0);
+ ret = i915_gem_evict_everything(ring->dev);
if (ret)
return ret;
-
- retry++;
} while (1);
-
-err:
- list_for_each_entry_continue_reverse(obj, objects, exec_list) {
- struct drm_i915_gem_exec_object2 *entry;
-
- if (!obj->gtt_space)
- continue;
-
- entry = obj->exec_entry;
- if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- i915_gem_object_unpin_fence(obj);
- entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
- }
-
- i915_gem_object_unpin(obj);
- }
-
- return ret;
}
static int
@@ -810,18 +611,6 @@ err:
return ret;
}
-static void
-i915_gem_execbuffer_flush(struct drm_device *dev,
- uint32_t invalidate_domains,
- uint32_t flush_domains)
-{
- if (flush_domains & I915_GEM_DOMAIN_CPU)
- intel_gtt_chipset_flush();
-
- if (flush_domains & I915_GEM_DOMAIN_GTT)
- wmb();
-}
-
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
@@ -854,48 +643,45 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
return 0;
}
-
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
- struct change_domains cd;
+ uint32_t flush_domains = 0;
+ uint32_t flips = 0;
int ret;
- memset(&cd, 0, sizeof(cd));
- list_for_each_entry(obj, objects, exec_list)
- i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
-
- if (cd.invalidate_domains | cd.flush_domains) {
- i915_gem_execbuffer_flush(ring->dev,
- cd.invalidate_domains,
- cd.flush_domains);
- }
-
- if (cd.flips) {
- ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
+ list_for_each_entry(obj, objects, exec_list) {
+ ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
+
+ if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+ i915_gem_clflush_object(obj);
+
+ if (obj->base.pending_write_domain)
+ flips |= atomic_read(&obj->pending_flip);
+
+ flush_domains |= obj->base.write_domain;
}
- list_for_each_entry(obj, objects, exec_list) {
- ret = i915_gem_object_sync(obj, ring);
+ if (flips) {
+ ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
if (ret)
return ret;
}
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ intel_gtt_chipset_flush();
+
+ if (flush_domains & I915_GEM_DOMAIN_GTT)
+ wmb();
+
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- ret = i915_gem_flush_ring(ring,
- I915_GEM_GPU_DOMAINS,
- ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
- if (ret)
- return ret;
-
- ring->gpu_caches_dirty = false;
- return 0;
+ return intel_ring_invalidate_all_caches(ring);
}
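Aside: the per-object domain accounting formerly done here collapses into one ring-level helper. intel_ring_invalidate_all_caches() lives in intel_ringbuffer.c, outside this diff, so the body below is a sketch reconstructed from the i915_gem_flush_ring() call it replaces and the gpu_caches_dirty usage shown:

        int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
        {
                uint32_t flush = ring->gpu_caches_dirty ?
                                 I915_GEM_GPU_DOMAINS : 0;
                int ret;

                ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush);
                if (ret)
                        return ret;

                ring->gpu_caches_dirty = false;
                return 0;
        }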
static bool
@@ -943,9 +729,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, objects, exec_list) {
- u32 old_read = obj->base.read_domains;
- u32 old_write = obj->base.write_domain;
-
+ u32 old_read = obj->base.read_domains;
+ u32 old_write = obj->base.write_domain;
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
@@ -954,17 +739,13 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
i915_gem_object_move_to_active(obj, ring, seqno);
if (obj->base.write_domain) {
obj->dirty = 1;
- obj->pending_gpu_write = true;
- list_move_tail(&obj->gpu_write_list,
- &ring->gpu_write_list);
+ obj->last_write_seqno = seqno;
if (obj->pin_count) /* check for potential scanout */
- intel_mark_busy(ring->dev, obj);
+ intel_mark_fb_busy(obj);
}
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
-
- intel_mark_busy(ring->dev, NULL);
}
static void
@@ -972,16 +753,11 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring)
{
- struct drm_i915_gem_request *request;
-
/* Unconditionally force add_request to emit a full flush. */
ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL || i915_add_request(ring, file, request)) {
- kfree(request);
- }
+ (void)i915_add_request(ring, file, NULL);
}
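Aside: i915_add_request() now allocates the request itself when passed NULL, which is why the kzalloc boilerplate disappears both here and in the retire handler at the top of this file's diff. A breadcrumb-only caller becomes simply:

        ring->gpu_caches_dirty = true; /* force a full flush */
        (void)i915_add_request(ring, file, NULL);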
static int
@@ -1327,8 +1103,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -ENOMEM;
}
ret = copy_from_user(exec_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
+ (void __user *)(uintptr_t)args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1367,8 +1142,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
for (i = 0; i < args->buffer_count; i++)
exec_list[i].offset = exec2_list[i].offset;
/* ... and back out to userspace */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
+ ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
exec_list,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
@@ -1422,8 +1196,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
+ ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
exec2_list,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
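
The move_to_gpu rewrite above folds the old per-object flush calls into a single pass: each object's write domain is OR'd into one mask, and the CPU clflush, chipset flush and GTT write barrier are then each issued at most once. A minimal user-space model of that accumulation, with simplified stand-in domain bits rather than the real driver structures:

#include <stdio.h>

#define DOMAIN_CPU (1u << 0)
#define DOMAIN_GTT (1u << 1)

struct fake_obj {
	unsigned int write_domain;
};

/* one pass over the execbuffer objects, collecting dirty domains */
static unsigned int accumulate_flush(struct fake_obj *objs, int n)
{
	unsigned int flush_domains = 0;
	int i;

	for (i = 0; i < n; i++)
		flush_domains |= objs[i].write_domain;

	return flush_domains;
}

int main(void)
{
	struct fake_obj objs[] = {
		{ .write_domain = DOMAIN_CPU },
		{ .write_domain = 0 },
		{ .write_domain = DOMAIN_GTT },
	};
	unsigned int domains = accumulate_flush(objs, 3);

	if (domains & DOMAIN_CPU)
		printf("chipset flush once\n");	/* intel_gtt_chipset_flush() */
	if (domains & DOMAIN_GTT)
		printf("write barrier once\n");	/* wmb() */
	return 0;
}
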
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 60815b861ec2..47e427e4bc67 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -167,8 +167,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
}
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
- struct scatterlist *sg_list,
- unsigned sg_len,
+ const struct sg_table *pages,
unsigned first_entry,
uint32_t pte_flags)
{
@@ -180,12 +179,12 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
struct scatterlist *sg;
/* init sg walking */
- sg = sg_list;
+ sg = pages->sgl;
i = 0;
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
- while (i < sg_len) {
+ while (i < pages->nents) {
pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
@@ -194,13 +193,11 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
pt_vaddr[j] = pte | pte_flags;
/* grab the next page */
- m++;
- if (m == segment_len) {
- sg = sg_next(sg);
- i++;
- if (i == sg_len)
+ if (++m == segment_len) {
+ if (++i == pages->nents)
break;
+ sg = sg_next(sg);
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
}
@@ -213,44 +210,10 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
}
}
-static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
- unsigned first_entry, unsigned num_entries,
- struct page **pages, uint32_t pte_flags)
-{
- uint32_t *pt_vaddr, pte;
- unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
- unsigned last_pte, i;
- dma_addr_t page_addr;
-
- while (num_entries) {
- last_pte = first_pte + num_entries;
- last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
-
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
- for (i = first_pte; i < last_pte; i++) {
- page_addr = page_to_phys(*pages);
- pte = GEN6_PTE_ADDR_ENCODE(page_addr);
- pt_vaddr[i] = pte | pte_flags;
-
- pages++;
- }
-
- kunmap_atomic(pt_vaddr);
-
- num_entries -= last_pte - first_pte;
- first_pte = 0;
- act_pd++;
- }
-}
-
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t pte_flags = GEN6_PTE_VALID;
switch (cache_level) {
@@ -261,7 +224,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
pte_flags |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(obj->base.dev))
pte_flags |= HSW_PTE_UNCACHED;
else
pte_flags |= GEN6_PTE_UNCACHED;
@@ -270,26 +233,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
BUG();
}
- if (obj->sg_table) {
- i915_ppgtt_insert_sg_entries(ppgtt,
- obj->sg_table->sgl,
- obj->sg_table->nents,
- obj->gtt_space->start >> PAGE_SHIFT,
- pte_flags);
- } else if (dev_priv->mm.gtt->needs_dmar) {
- BUG_ON(!obj->sg_list);
-
- i915_ppgtt_insert_sg_entries(ppgtt,
- obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- pte_flags);
- } else
- i915_ppgtt_insert_pages(ppgtt,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- pte_flags);
+ i915_ppgtt_insert_sg_entries(ppgtt,
+ obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ pte_flags);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -351,7 +298,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
i915_gem_clflush_object(obj);
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
@@ -361,44 +308,26 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* don't map imported dma buf objects */
- if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
- return intel_gtt_map_memory(obj->pages,
- obj->base.size >> PAGE_SHIFT,
- &obj->sg_list,
- &obj->num_sg);
- else
+ if (obj->has_dma_mapping)
return 0;
+
+ if (!dma_map_sg(&obj->base.dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL))
+ return -ENOSPC;
+
+ return 0;
}
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
- if (obj->sg_table) {
- intel_gtt_insert_sg_entries(obj->sg_table->sgl,
- obj->sg_table->nents,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else if (dev_priv->mm.gtt->needs_dmar) {
- BUG_ON(!obj->sg_list);
-
- intel_gtt_insert_sg_entries(obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else
- intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- agp_type);
-
+ intel_gtt_insert_sg_entries(obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ agp_type);
obj->has_global_gtt_mapping = 1;
}
@@ -418,14 +347,31 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
interruptible = do_idling(dev_priv);
- if (obj->sg_list) {
- intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
- obj->sg_list = NULL;
- }
+ if (!obj->has_dma_mapping)
+ dma_unmap_sg(&dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL);
undo_idling(dev_priv, interruptible);
}
+static void i915_gtt_color_adjust(struct drm_mm_node *node,
+ unsigned long color,
+ unsigned long *start,
+ unsigned long *end)
+{
+ if (node->color != color)
+ *start += 4096;
+
+ if (!list_empty(&node->node_list)) {
+ node = list_entry(node->node_list.next,
+ struct drm_mm_node,
+ node_list);
+ if (node->allocated && node->color != color)
+ *end -= 4096;
+ }
+}
+
void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
@@ -435,6 +381,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
/* Subtract the guard page ... */
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+ if (!HAS_LLC(dev))
+ dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
dev_priv->mm.gtt_start = start;
dev_priv->mm.gtt_mappable_end = mappable_end;
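
The new i915_gtt_color_adjust hook keeps a one-page gap between GTT neighbours of different cache "colors", so snooped and unsnooped objects never abut on non-LLC parts. A simplified, runnable model of the range adjustment, assuming both neighbours' colors are known up front (the real hook walks the drm_mm node list):

#include <stdio.h>

#define GUARD 4096UL

/* shave one page off each end of a hole whose neighbour differs in color */
static void color_adjust(unsigned long prev_color, unsigned long next_color,
			 unsigned long color,
			 unsigned long *start, unsigned long *end)
{
	if (prev_color != color)
		*start += GUARD;
	if (next_color != color)
		*end -= GUARD;
}

int main(void)
{
	unsigned long start = 0, end = 65536;

	color_adjust(1, 0, 0, &start, &end); /* uncached hole after a snooped node */
	printf("usable hole: [%lu, %lu)\n", start, end);
	return 0;
}
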
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b964df51cec7..8093ecd2ea31 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -470,18 +470,20 @@ i915_gem_swizzle_page(struct page *page)
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
+ struct scatterlist *sg;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (obj->bit_17 == NULL)
return;
- for (i = 0; i < page_count; i++) {
- char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
+ for_each_sg(obj->pages->sgl, sg, page_count, i) {
+ struct page *page = sg_page(sg);
+ char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
- i915_gem_swizzle_page(obj->pages[i]);
- set_page_dirty(obj->pages[i]);
+ i915_gem_swizzle_page(page);
+ set_page_dirty(page);
}
}
}
@@ -489,6 +491,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
+ struct scatterlist *sg;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
@@ -502,8 +505,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
}
}
- for (i = 0; i < page_count; i++) {
- if (page_to_phys(obj->pages[i]) & (1 << 17))
+ for_each_sg(obj->pages->sgl, sg, page_count, i) {
+ struct page *page = sg_page(sg);
+ if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
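
The tiling changes swap direct obj->pages[i] indexing for a for_each_sg walk, but the bit-17 bookkeeping is unchanged: record whether physical address bit 17 was set per page, and re-swizzle any page whose bit flipped after swap-in. A stand-alone model of that test, with plain arrays standing in for the scatterlist walk and made-up physical addresses:

#include <stdio.h>

#define NPAGES 4

int main(void)
{
	/* made-up physical addresses before and after swap-in */
	unsigned long before[NPAGES] = { 0x00000, 0x20000, 0x40000, 0x60000 };
	unsigned long after[NPAGES]  = { 0x20000, 0x20000, 0x40000, 0x40000 };
	unsigned char bit17[NPAGES];
	int i;

	/* save: remember bit 17 of each page's address */
	for (i = 0; i < NPAGES; i++)
		bit17[i] = (before[i] >> 17) & 1;

	/* restore: re-swizzle pages whose bit 17 changed */
	for (i = 0; i < NPAGES; i++)
		if (((after[i] >> 17) & 1) != bit17[i])
			printf("page %d: swizzle and mark dirty\n", i);
	return 0;
}
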
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5249640cce13..d7f0066538a9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -296,11 +296,21 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_helper_hpd_irq_event(dev);
}
-static void i915_handle_rps_change(struct drm_device *dev)
+/* defined in intel_pm.c */
+extern spinlock_t mchdev_lock;
+
+static void ironlake_handle_rps_change(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
- u8 new_delay = dev_priv->cur_delay;
+ u8 new_delay;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mchdev_lock, flags);
+
+ I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+
+ new_delay = dev_priv->ips.cur_delay;
I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
busy_up = I915_READ(RCPREVBSYTUPAVG);
@@ -310,19 +320,21 @@ static void i915_handle_rps_change(struct drm_device *dev)
/* Handle RCS change request from hw */
if (busy_up > max_avg) {
- if (dev_priv->cur_delay != dev_priv->max_delay)
- new_delay = dev_priv->cur_delay - 1;
- if (new_delay < dev_priv->max_delay)
- new_delay = dev_priv->max_delay;
+ if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
+ new_delay = dev_priv->ips.cur_delay - 1;
+ if (new_delay < dev_priv->ips.max_delay)
+ new_delay = dev_priv->ips.max_delay;
} else if (busy_down < min_avg) {
- if (dev_priv->cur_delay != dev_priv->min_delay)
- new_delay = dev_priv->cur_delay + 1;
- if (new_delay > dev_priv->min_delay)
- new_delay = dev_priv->min_delay;
+ if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
+ new_delay = dev_priv->ips.cur_delay + 1;
+ if (new_delay > dev_priv->ips.min_delay)
+ new_delay = dev_priv->ips.min_delay;
}
if (ironlake_set_drps(dev, new_delay))
- dev_priv->cur_delay = new_delay;
+ dev_priv->ips.cur_delay = new_delay;
+
+ spin_unlock_irqrestore(&mchdev_lock, flags);
return;
}
@@ -335,7 +347,7 @@ static void notify_ring(struct drm_device *dev,
if (ring->obj == NULL)
return;
- trace_i915_gem_request_complete(ring, ring->get_seqno(ring));
+ trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) {
@@ -349,16 +361,16 @@ static void notify_ring(struct drm_device *dev,
static void gen6_pm_rps_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- rps_work);
+ rps.work);
u32 pm_iir, pm_imr;
u8 new_delay;
- spin_lock_irq(&dev_priv->rps_lock);
- pm_iir = dev_priv->pm_iir;
- dev_priv->pm_iir = 0;
+ spin_lock_irq(&dev_priv->rps.lock);
+ pm_iir = dev_priv->rps.pm_iir;
+ dev_priv->rps.pm_iir = 0;
pm_imr = I915_READ(GEN6_PMIMR);
I915_WRITE(GEN6_PMIMR, 0);
- spin_unlock_irq(&dev_priv->rps_lock);
+ spin_unlock_irq(&dev_priv->rps.lock);
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
@@ -366,11 +378,17 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_lock(&dev_priv->dev->struct_mutex);
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
- new_delay = dev_priv->cur_delay + 1;
+ new_delay = dev_priv->rps.cur_delay + 1;
else
- new_delay = dev_priv->cur_delay - 1;
+ new_delay = dev_priv->rps.cur_delay - 1;
- gen6_set_rps(dev_priv->dev, new_delay);
+ /* sysfs frequency limits may have changed while we were servicing
+ * the interrupt, so re-check the bounds before applying
+ */
+ if (!(new_delay > dev_priv->rps.max_delay ||
+ new_delay < dev_priv->rps.min_delay)) {
+ gen6_set_rps(dev_priv->dev, new_delay);
+ }
mutex_unlock(&dev_priv->dev->struct_mutex);
}
@@ -444,7 +462,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long flags;
- if (!IS_IVYBRIDGE(dev))
+ if (!HAS_L3_GPU_CACHE(dev))
return;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -488,19 +506,19 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
* IIR bits should never already be set because IMR should
* prevent an interrupt from being shown in IIR. The warning
* displays a case where we've unsafely cleared
- * dev_priv->pm_iir. Although missing an interrupt of the same
+ * dev_priv->rps.pm_iir. Although missing an interrupt of the same
* type is not a problem, it displays a problem in the logic.
*
- * The mask bit in IMR is cleared by rps_work.
+ * The mask bit in IMR is cleared by dev_priv->rps.work.
*/
- spin_lock_irqsave(&dev_priv->rps_lock, flags);
- dev_priv->pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+ spin_lock_irqsave(&dev_priv->rps.lock, flags);
+ dev_priv->rps.pm_iir |= pm_iir;
+ I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
POSTING_READ(GEN6_PMIMR);
- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+ spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
- queue_work(dev_priv->wq, &dev_priv->rps_work);
+ queue_work(dev_priv->wq, &dev_priv->rps.work);
}
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
@@ -793,10 +811,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
ibx_irq_handler(dev, pch_iir);
}
- if (de_iir & DE_PCU_EVENT) {
- I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
- i915_handle_rps_change(dev);
- }
+ if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
+ ironlake_handle_rps_change(dev);
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir);
@@ -843,26 +859,55 @@ static void i915_error_work_func(struct work_struct *work)
}
}
+/* NB: instdone is zeroed here; callers rely on unused slots reading 0 */
+static void i915_get_extra_instdone(struct drm_device *dev,
+ uint32_t *instdone)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ case 3:
+ instdone[0] = I915_READ(INSTDONE);
+ break;
+ case 4:
+ case 5:
+ case 6:
+ instdone[0] = I915_READ(INSTDONE_I965);
+ instdone[1] = I915_READ(INSTDONE1);
+ break;
+ default:
+ WARN_ONCE(1, "Unsupported platform\n");
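+ /* fall through: unknown newer gens share the gen7 register layout */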
+ case 7:
+ instdone[0] = I915_READ(GEN7_INSTDONE_1);
+ instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+ instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+ instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+ break;
+ }
+}
+
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src)
{
struct drm_i915_error_object *dst;
- int page, page_count;
+ int i, count;
u32 reloc_offset;
if (src == NULL || src->pages == NULL)
return NULL;
- page_count = src->base.size / PAGE_SIZE;
+ count = src->base.size / PAGE_SIZE;
- dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
+ dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;
reloc_offset = src->gtt_offset;
- for (page = 0; page < page_count; page++) {
+ for (i = 0; i < count; i++) {
unsigned long flags;
void *d;
@@ -885,30 +930,33 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
} else {
+ struct page *page;
void *s;
- drm_clflush_pages(&src->pages[page], 1);
+ page = i915_gem_object_get_page(src, i);
+
+ drm_clflush_pages(&page, 1);
- s = kmap_atomic(src->pages[page]);
+ s = kmap_atomic(page);
memcpy(d, s, PAGE_SIZE);
kunmap_atomic(s);
- drm_clflush_pages(&src->pages[page], 1);
+ drm_clflush_pages(&page, 1);
}
local_irq_restore(flags);
- dst->pages[page] = d;
+ dst->pages[i] = d;
reloc_offset += PAGE_SIZE;
}
- dst->page_count = page_count;
+ dst->page_count = count;
dst->gtt_offset = src->gtt_offset;
return dst;
unwind:
- while (page--)
- kfree(dst->pages[page]);
+ while (i--)
+ kfree(dst->pages[i]);
kfree(dst);
return NULL;
}
@@ -949,7 +997,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
{
err->size = obj->base.size;
err->name = obj->base.name;
- err->seqno = obj->last_rendering_seqno;
+ err->rseqno = obj->last_read_seqno;
+ err->wseqno = obj->last_write_seqno;
err->gtt_offset = obj->gtt_offset;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
@@ -1039,12 +1088,12 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (!ring->get_seqno)
return NULL;
- seqno = ring->get_seqno(ring);
+ seqno = ring->get_seqno(ring, false);
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (obj->ring != ring)
continue;
- if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
+ if (i915_seqno_passed(seqno, obj->last_read_seqno))
continue;
if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
@@ -1080,10 +1129,8 @@ static void i915_record_ring_state(struct drm_device *dev,
error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
- if (ring->id == RCS) {
- error->instdone1 = I915_READ(INSTDONE1);
+ if (ring->id == RCS)
error->bbaddr = I915_READ64(BB_ADDR);
- }
} else {
error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
error->ipeir[ring->id] = I915_READ(IPEIR);
@@ -1093,7 +1140,7 @@ static void i915_record_ring_state(struct drm_device *dev,
error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
- error->seqno[ring->id] = ring->get_seqno(ring);
+ error->seqno[ring->id] = ring->get_seqno(ring, false);
error->acthd[ring->id] = intel_ring_get_active_head(ring);
error->head[ring->id] = I915_READ_HEAD(ring);
error->tail[ring->id] = I915_READ_TAIL(ring);
@@ -1199,6 +1246,11 @@ static void i915_capture_error_state(struct drm_device *dev)
error->done_reg = I915_READ(DONE_REG);
}
+ if (INTEL_INFO(dev)->gen == 7)
+ error->err_int = I915_READ(GEN7_ERR_INT);
+
+ i915_get_extra_instdone(dev, error->extra_instdone);
+
i915_gem_record_fences(dev, error);
i915_gem_record_rings(dev, error);
@@ -1210,7 +1262,7 @@ static void i915_capture_error_state(struct drm_device *dev)
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
i++;
error->active_bo_count = i;
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
if (obj->pin_count)
i++;
error->pinned_bo_count = i - error->active_bo_count;
@@ -1235,7 +1287,7 @@ static void i915_capture_error_state(struct drm_device *dev)
error->pinned_bo_count =
capture_pinned_bo(error->pinned_bo,
error->pinned_bo_count,
- &dev_priv->mm.gtt_list);
+ &dev_priv->mm.bound_list);
do_gettimeofday(&error->time);
@@ -1274,24 +1326,26 @@ void i915_destroy_error_state(struct drm_device *dev)
static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t instdone[I915_NUM_INSTDONE_REG];
u32 eir = I915_READ(EIR);
- int pipe;
+ int pipe, i;
if (!eir)
return;
pr_err("render error detected, EIR: 0x%08x\n", eir);
+ i915_get_extra_instdone(dev, instdone);
+
if (IS_G4X(dev)) {
if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
u32 ipeir = I915_READ(IPEIR_I965);
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- pr_err(" INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE_I965));
+ for (i = 0; i < ARRAY_SIZE(instdone); i++)
+ pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
@@ -1325,12 +1379,13 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
if (eir & I915_ERROR_INSTRUCTION) {
pr_err("instruction error\n");
pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
+ for (i = 0; i < ARRAY_SIZE(instdone); i++)
+ pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
if (INTEL_INFO(dev)->gen < 4) {
u32 ipeir = I915_READ(IPEIR);
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
- pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
I915_WRITE(IPEIR, ipeir);
POSTING_READ(IPEIR);
@@ -1339,10 +1394,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- pr_err(" INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE_I965));
pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
@@ -1590,7 +1642,8 @@ ring_last_seqno(struct intel_ring_buffer *ring)
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
if (list_empty(&ring->request_list) ||
- i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
+ i915_seqno_passed(ring->get_seqno(ring, false),
+ ring_last_seqno(ring))) {
/* Issue a wake-up to catch stuck h/w. */
if (waitqueue_active(&ring->irq_queue)) {
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
@@ -1656,7 +1709,7 @@ void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
+ uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
struct intel_ring_buffer *ring;
bool err = false, idle;
int i;
@@ -1684,25 +1737,16 @@ void i915_hangcheck_elapsed(unsigned long data)
return;
}
- if (INTEL_INFO(dev)->gen < 4) {
- instdone = I915_READ(INSTDONE);
- instdone1 = 0;
- } else {
- instdone = I915_READ(INSTDONE_I965);
- instdone1 = I915_READ(INSTDONE1);
- }
-
+ i915_get_extra_instdone(dev, instdone);
if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
- dev_priv->last_instdone == instdone &&
- dev_priv->last_instdone1 == instdone1) {
+ memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
if (i915_hangcheck_hung(dev))
return;
} else {
dev_priv->hangcheck_count = 0;
memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
- dev_priv->last_instdone = instdone;
- dev_priv->last_instdone1 = instdone1;
+ memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
}
repeat:
@@ -2647,7 +2691,7 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
- INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+ INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
dev->driver->get_vblank_counter = i915_get_vblank_counter;
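
i915_get_extra_instdone above centralizes the per-generation INSTDONE reads into one zeroed array, which lets both the error-state dump and the hangcheck timer treat all generations uniformly: a single memcmp decides whether the GPU made progress. A runnable model of that pattern, with constants standing in for the register reads:

#include <stdio.h>
#include <string.h>

#define NUM_INSTDONE 4

static void get_instdone(int gen, unsigned int *instdone)
{
	/* zero first so unused slots compare equal across snapshots */
	memset(instdone, 0, sizeof(*instdone) * NUM_INSTDONE);

	switch (gen) {
	case 2:
	case 3:
		instdone[0] = 0x1111;	/* INSTDONE */
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = 0x2222;	/* INSTDONE_I965 */
		instdone[1] = 0x3333;	/* INSTDONE1 */
		break;
	default:			/* gen 7+ */
		instdone[0] = 0x4444;	/* GEN7_INSTDONE_1 */
		instdone[1] = 0x5555;	/* GEN7_SC_INSTDONE */
		instdone[2] = 0x6666;	/* GEN7_SAMPLER_INSTDONE */
		instdone[3] = 0x7777;	/* GEN7_ROW_INSTDONE */
		break;
	}
}

int main(void)
{
	unsigned int prev[NUM_INSTDONE], cur[NUM_INSTDONE];

	get_instdone(7, prev);
	get_instdone(7, cur);

	if (memcmp(prev, cur, sizeof(cur)) == 0)
		printf("no progress: bump hangcheck count\n");
	else
		printf("progress: reset hangcheck count\n");
	return 0;
}
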
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 28725ce5b82c..7637824c6a7d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -450,6 +450,7 @@
#define RING_ACTHD(base) ((base)+0x74)
#define RING_NOPID(base) ((base)+0x94)
#define RING_IMR(base) ((base)+0xa8)
+#define RING_TIMESTAMP(base) ((base)+0x358)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
@@ -478,6 +479,11 @@
#define IPEIR_I965 0x02064
#define IPEHR_I965 0x02068
#define INSTDONE_I965 0x0206c
+#define GEN7_INSTDONE_1 0x0206c
+#define GEN7_SC_INSTDONE 0x07100
+#define GEN7_SAMPLER_INSTDONE 0x0e160
+#define GEN7_ROW_INSTDONE 0x0e164
+#define I915_NUM_INSTDONE_REG 4
#define RING_IPEIR(base) ((base)+0x64)
#define RING_IPEHR(base) ((base)+0x68)
#define RING_INSTDONE(base) ((base)+0x6c)
@@ -500,6 +506,8 @@
#define DMA_FADD_I8XX 0x020d0
#define ERROR_GEN6 0x040a0
+#define GEN7_ERR_INT 0x44040
+#define ERR_INT_MMIO_UNCLAIMED (1<<13)
/* GM45+ chicken bits -- debug workaround bits that may be required
* for various sorts of correct behavior. The top 16 bits of each are
@@ -529,6 +537,8 @@
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
+#define VLV_DISPLAY_BASE 0x180000
+
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
@@ -1496,6 +1506,14 @@
GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
GEN7_CXT_GT1_SIZE(ctx_reg) + \
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f)
+#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7)
+#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff)
+#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \
+ HSW_CXT_RING_SIZE(ctx_reg) + \
+ HSW_CXT_RENDER_SIZE(ctx_reg) + \
+ GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+
/*
* Overlay regs
@@ -1549,12 +1567,35 @@
/* VGA port control */
#define ADPA 0x61100
+#define PCH_ADPA 0xe1100
+#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA)
+
#define ADPA_DAC_ENABLE (1<<31)
#define ADPA_DAC_DISABLE 0
#define ADPA_PIPE_SELECT_MASK (1<<30)
#define ADPA_PIPE_A_SELECT 0
#define ADPA_PIPE_B_SELECT (1<<30)
#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
+/* CPT uses bits 30:29 for the pch transcoder select */
+#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
+#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
+#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
+#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
+#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
+#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
+#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
+#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
+#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
+#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
+#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
+#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0
#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -1753,6 +1794,10 @@
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
+ * of the infoframe structure specified by CEA-861. */
+#define VIDEO_DIP_DATA_SIZE 32
#define VIDEO_DIP_CTL 0x61170
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
@@ -3889,31 +3934,6 @@
#define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004
-/* CRT */
-#define PCH_ADPA 0xe1100
-#define ADPA_TRANS_SELECT_MASK (1<<30)
-#define ADPA_TRANS_A_SELECT 0
-#define ADPA_TRANS_B_SELECT (1<<30)
-#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
-#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
-#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
-#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
-#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
-#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
-#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
-#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
-#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
-#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
-#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
-#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
@@ -4021,6 +4041,8 @@
#define PORT_TRANS_C_SEL_CPT (2<<29)
#define PORT_TRANS_SEL_MASK (3<<29)
#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
+#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
+#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
@@ -4239,7 +4261,15 @@
#define G4X_HDMIW_HDMIEDID 0x6210C
#define IBX_HDMIW_HDMIEDID_A 0xE2050
+#define IBX_HDMIW_HDMIEDID_B 0xE2150
+#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+ IBX_HDMIW_HDMIEDID_A, \
+ IBX_HDMIW_HDMIEDID_B)
#define IBX_AUD_CNTL_ST_A 0xE20B4
+#define IBX_AUD_CNTL_ST_B 0xE21B4
+#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+ IBX_AUD_CNTL_ST_A, \
+ IBX_AUD_CNTL_ST_B)
#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
#define IBX_ELD_ADDRESS (0x1f << 5)
#define IBX_ELD_ACK (1 << 4)
@@ -4248,7 +4278,15 @@
#define IBX_CP_READYB (1 << 1)
#define CPT_HDMIW_HDMIEDID_A 0xE5050
+#define CPT_HDMIW_HDMIEDID_B 0xE5150
+#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+ CPT_HDMIW_HDMIEDID_A, \
+ CPT_HDMIW_HDMIEDID_B)
#define CPT_AUD_CNTL_ST_A 0xE50B4
+#define CPT_AUD_CNTL_ST_B 0xE51B4
+#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+ CPT_AUD_CNTL_ST_A, \
+ CPT_AUD_CNTL_ST_B)
#define CPT_AUD_CNTRL_ST2 0xE50C0
/* These are the 4 32-bit write offset registers for each stream
@@ -4258,7 +4296,15 @@
#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
#define IBX_AUD_CONFIG_A 0xe2000
+#define IBX_AUD_CONFIG_B 0xe2100
+#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
+ IBX_AUD_CONFIG_A, \
+ IBX_AUD_CONFIG_B)
#define CPT_AUD_CONFIG_A 0xe5000
+#define CPT_AUD_CONFIG_B 0xe5100
+#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
+ CPT_AUD_CONFIG_A, \
+ CPT_AUD_CONFIG_B)
#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
#define AUD_CONFIG_UPPER_N_SHIFT 20
@@ -4269,195 +4315,233 @@
#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+/* HSW Audio */
+#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */
+#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */
+#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
+ HSW_AUD_CONFIG_A, \
+ HSW_AUD_CONFIG_B)
+
+#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */
+#define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */
+#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
+ HSW_AUD_MISC_CTRL_A, \
+ HSW_AUD_MISC_CTRL_B)
+
+#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */
+#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */
+#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
+ HSW_AUD_DIP_ELD_CTRL_ST_A, \
+ HSW_AUD_DIP_ELD_CTRL_ST_B)
+
+/* Audio Digital Converter */
+#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */
+#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 2 */
+#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
+ HSW_AUD_DIG_CNVT_1, \
+ HSW_AUD_DIG_CNVT_2)
+#define DIP_PORT_SEL_MASK 0x3
+
+#define HSW_AUD_EDID_DATA_A 0x65050
+#define HSW_AUD_EDID_DATA_B 0x65150
+#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
+ HSW_AUD_EDID_DATA_A, \
+ HSW_AUD_EDID_DATA_B)
+
+#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */
+#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */
+#define AUDIO_INACTIVE_C (1<<11)
+#define AUDIO_INACTIVE_B (1<<7)
+#define AUDIO_INACTIVE_A (1<<3)
+#define AUDIO_OUTPUT_ENABLE_A (1<<2)
+#define AUDIO_OUTPUT_ENABLE_B (1<<6)
+#define AUDIO_OUTPUT_ENABLE_C (1<<10)
+#define AUDIO_ELD_VALID_A (1<<0)
+#define AUDIO_ELD_VALID_B (1<<4)
+#define AUDIO_ELD_VALID_C (1<<8)
+#define AUDIO_CP_READY_A (1<<1)
+#define AUDIO_CP_READY_B (1<<5)
+#define AUDIO_CP_READY_C (1<<9)
+
/* HSW Power Wells */
-#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
-#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
-#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
-#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
-#define HSW_PWR_WELL_ENABLE (1<<31)
-#define HSW_PWR_WELL_STATE (1<<30)
-#define HSW_PWR_WELL_CTL5 0x45410
+#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
+#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
+#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
+#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
+#define HSW_PWR_WELL_ENABLE (1<<31)
+#define HSW_PWR_WELL_STATE (1<<30)
+#define HSW_PWR_WELL_CTL5 0x45410
#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
-#define HSW_PWR_WELL_FORCE_ON (1<<19)
-#define HSW_PWR_WELL_CTL6 0x45414
+#define HSW_PWR_WELL_FORCE_ON (1<<19)
+#define HSW_PWR_WELL_CTL6 0x45414
/* Per-pipe DDI Function Control */
-#define PIPE_DDI_FUNC_CTL_A 0x60400
-#define PIPE_DDI_FUNC_CTL_B 0x61400
-#define PIPE_DDI_FUNC_CTL_C 0x62400
+#define PIPE_DDI_FUNC_CTL_A 0x60400
+#define PIPE_DDI_FUNC_CTL_B 0x61400
+#define PIPE_DDI_FUNC_CTL_C 0x62400
#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
-#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \
- PIPE_DDI_FUNC_CTL_A, \
- PIPE_DDI_FUNC_CTL_B)
+#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
+ PIPE_DDI_FUNC_CTL_B)
#define PIPE_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define PIPE_DDI_PORT_MASK (7<<28)
-#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
-#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
-#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
+#define PIPE_DDI_PORT_MASK (7<<28)
+#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
+#define PIPE_DDI_MODE_SELECT_MASK (7<<24)
+#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
+#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
-#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
-#define PIPE_DDI_BPC_8 (0<<20)
-#define PIPE_DDI_BPC_10 (1<<20)
-#define PIPE_DDI_BPC_6 (2<<20)
-#define PIPE_DDI_BPC_12 (3<<20)
-#define PIPE_DDI_BFI_ENABLE (1<<4)
-#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
-#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
-#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
+#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
+#define PIPE_DDI_BPC_MASK (7<<20)
+#define PIPE_DDI_BPC_8 (0<<20)
+#define PIPE_DDI_BPC_10 (1<<20)
+#define PIPE_DDI_BPC_6 (2<<20)
+#define PIPE_DDI_BPC_12 (3<<20)
+#define PIPE_DDI_PVSYNC (1<<17)
+#define PIPE_DDI_PHSYNC (1<<16)
+#define PIPE_DDI_BFI_ENABLE (1<<4)
+#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
+#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
+#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
/* DisplayPort Transport Control */
#define DP_TP_CTL_A 0x64040
#define DP_TP_CTL_B 0x64140
-#define DP_TP_CTL(port) _PORT(port, \
- DP_TP_CTL_A, \
- DP_TP_CTL_B)
-#define DP_TP_CTL_ENABLE (1<<31)
-#define DP_TP_CTL_MODE_SST (0<<27)
-#define DP_TP_CTL_MODE_MST (1<<27)
+#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
+#define DP_TP_CTL_ENABLE (1<<31)
+#define DP_TP_CTL_MODE_SST (0<<27)
+#define DP_TP_CTL_MODE_MST (1<<27)
#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
-#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
+#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
-#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
/* DisplayPort Transport Status */
#define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144
-#define DP_TP_STATUS(port) _PORT(port, \
- DP_TP_STATUS_A, \
- DP_TP_STATUS_B)
+#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
/* DDI Buffer Control */
#define DDI_BUF_CTL_A 0x64000
#define DDI_BUF_CTL_B 0x64100
-#define DDI_BUF_CTL(port) _PORT(port, \
- DDI_BUF_CTL_A, \
- DDI_BUF_CTL_B)
-#define DDI_BUF_CTL_ENABLE (1<<31)
+#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
+#define DDI_BUF_CTL_ENABLE (1<<31)
#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
-#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
+#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
-#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
+#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
-#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
+#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
-#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
-#define DDI_BUF_EMP_MASK (0xf<<24)
-#define DDI_BUF_IS_IDLE (1<<7)
-#define DDI_PORT_WIDTH_X1 (0<<1)
-#define DDI_PORT_WIDTH_X2 (1<<1)
-#define DDI_PORT_WIDTH_X4 (3<<1)
+#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
+#define DDI_BUF_EMP_MASK (0xf<<24)
+#define DDI_BUF_IS_IDLE (1<<7)
+#define DDI_PORT_WIDTH_X1 (0<<1)
+#define DDI_PORT_WIDTH_X2 (1<<1)
+#define DDI_PORT_WIDTH_X4 (3<<1)
#define DDI_INIT_DISPLAY_DETECTED (1<<0)
/* DDI Buffer Translations */
#define DDI_BUF_TRANS_A 0x64E00
#define DDI_BUF_TRANS_B 0x64E60
-#define DDI_BUF_TRANS(port) _PORT(port, \
- DDI_BUF_TRANS_A, \
- DDI_BUF_TRANS_B)
+#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
/* Sideband Interface (SBI) is programmed indirectly, via
* SBI_ADDR, which contains the register offset; and SBI_DATA,
* which contains the payload */
-#define SBI_ADDR 0xC6000
-#define SBI_DATA 0xC6004
+#define SBI_ADDR 0xC6000
+#define SBI_DATA 0xC6004
#define SBI_CTL_STAT 0xC6008
#define SBI_CTL_OP_CRRD (0x6<<8)
#define SBI_CTL_OP_CRWR (0x7<<8)
#define SBI_RESPONSE_FAIL (0x1<<1)
-#define SBI_RESPONSE_SUCCESS (0x0<<1)
-#define SBI_BUSY (0x1<<0)
-#define SBI_READY (0x0<<0)
+#define SBI_RESPONSE_SUCCESS (0x0<<1)
+#define SBI_BUSY (0x1<<0)
+#define SBI_READY (0x0<<0)
/* SBI offsets */
-#define SBI_SSCDIVINTPHASE6 0x0600
+#define SBI_SSCDIVINTPHASE6 0x0600
#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
-#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
+#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
-#define SBI_SSCCTL 0x020c
+#define SBI_SSCCTL 0x020c
#define SBI_SSCCTL6 0x060C
-#define SBI_SSCCTL_DISABLE (1<<0)
+#define SBI_SSCCTL_DISABLE (1<<0)
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
-#define SBI_DBUFF0 0x2a00
+#define SBI_DBUFF0 0x2a00
/* LPT PIXCLK_GATE */
-#define PIXCLK_GATE 0xC6020
-#define PIXCLK_GATE_UNGATE 1<<0
-#define PIXCLK_GATE_GATE 0<<0
+#define PIXCLK_GATE 0xC6020
+#define PIXCLK_GATE_UNGATE (1<<0)
+#define PIXCLK_GATE_GATE (0<<0)
/* SPLL */
-#define SPLL_CTL 0x46020
+#define SPLL_CTL 0x46020
#define SPLL_PLL_ENABLE (1<<31)
#define SPLL_PLL_SCC (1<<28)
#define SPLL_PLL_NON_SCC (2<<28)
-#define SPLL_PLL_FREQ_810MHz (0<<26)
-#define SPLL_PLL_FREQ_1350MHz (1<<26)
+#define SPLL_PLL_FREQ_810MHz (0<<26)
+#define SPLL_PLL_FREQ_1350MHz (1<<26)
/* WRPLL */
-#define WRPLL_CTL1 0x46040
-#define WRPLL_CTL2 0x46060
-#define WRPLL_PLL_ENABLE (1<<31)
-#define WRPLL_PLL_SELECT_SSC (0x01<<28)
-#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
+#define WRPLL_CTL1 0x46040
+#define WRPLL_CTL2 0x46060
+#define WRPLL_PLL_ENABLE (1<<31)
+#define WRPLL_PLL_SELECT_SSC (0x01<<28)
+#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
/* WRPLL divider programming */
-#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
-#define WRPLL_DIVIDER_POST(x) ((x)<<8)
-#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
+#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
+#define WRPLL_DIVIDER_POST(x) ((x)<<8)
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
/* Port clock selection */
#define PORT_CLK_SEL_A 0x46100
#define PORT_CLK_SEL_B 0x46104
-#define PORT_CLK_SEL(port) _PORT(port, \
- PORT_CLK_SEL_A, \
- PORT_CLK_SEL_B)
+#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
#define PORT_CLK_SEL_LCPLL_810 (2<<29)
-#define PORT_CLK_SEL_SPLL (3<<29)
+#define PORT_CLK_SEL_SPLL (3<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
/* Pipe clock selection */
#define PIPE_CLK_SEL_A 0x46140
#define PIPE_CLK_SEL_B 0x46144
-#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \
- PIPE_CLK_SEL_A, \
- PIPE_CLK_SEL_B)
+#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
/* For each pipe, we need to select the corresponding port clock */
-#define PIPE_CLK_SEL_DISABLED (0x0<<29)
-#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
+#define PIPE_CLK_SEL_DISABLED (0x0<<29)
+#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
/* LCPLL Control */
-#define LCPLL_CTL 0x130040
+#define LCPLL_CTL 0x130040
#define LCPLL_PLL_DISABLE (1<<31)
#define LCPLL_PLL_LOCK (1<<30)
-#define LCPLL_CD_CLOCK_DISABLE (1<<25)
+#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270
#define PIPE_WM_LINETIME_B 0x45274
-#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
- PIPE_WM_LINETIME_A, \
- PIPE_WM_LINETIME_B)
-#define PIPE_WM_LINETIME_MASK (0x1ff)
-#define PIPE_WM_LINETIME_TIME(x) ((x))
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
+ PIPE_WM_LINETIME_B)
+#define PIPE_WM_LINETIME_MASK (0x1ff)
+#define PIPE_WM_LINETIME_TIME(x) ((x))
#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
-#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
+#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
/* SFUSE_STRAP */
-#define SFUSE_STRAP 0xc2014
+#define SFUSE_STRAP 0xc2014
#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
#define SFUSE_STRAP_DDID_DETECTED (1<<0)
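
Most of the new audio and DDI definitions above come in A/B pairs wrapped by _PIPE(pipe, a, b), which in this driver expands to (a) + (pipe) * ((b) - (a)): instance 0's and instance 1's offsets are enough to derive any pipe at a fixed stride. A small model using the IBX audio config pair, where MODEL_PIPE is a local stand-in for the driver macro:

#include <stdio.h>

/* same arithmetic as the driver's _PIPE(pipe, a, b) helper */
#define MODEL_PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))

#define IBX_AUD_CONFIG_A 0xe2000
#define IBX_AUD_CONFIG_B 0xe2100
#define IBX_AUD_CFG(pipe) MODEL_PIPE(pipe, IBX_AUD_CONFIG_A, IBX_AUD_CONFIG_B)

int main(void)
{
	int pipe;

	for (pipe = 0; pipe < 2; pipe++)
		printf("pipe %c: 0x%05x\n", 'A' + pipe, IBX_AUD_CFG(pipe));
	return 0;
}
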
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 7631807a2788..903eebd2117a 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -46,32 +46,32 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
}
static ssize_t
-show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
}
static ssize_t
-show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
}
static ssize_t
-show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
}
static ssize_t
-show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
}
@@ -93,6 +93,7 @@ static struct attribute_group rc6_attr_group = {
.name = power_group_name,
.attrs = rc6_attrs
};
+#endif
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
@@ -202,37 +203,214 @@ static struct bin_attribute dpf_attrs = {
.mmap = NULL
};
+static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev->struct_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev->struct_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_max_freq_mhz_store(struct device *kdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, rp_state_cap, hw_max, hw_min;
+ ssize_t ret;
+
+ ret = kstrtou32(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ val /= GT_FREQUENCY_MULTIPLIER;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ hw_max = (rp_state_cap & 0xff);
+ hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+ if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ if (dev_priv->rps.cur_delay > val)
+ gen6_set_rps(dev_priv->dev, val);
+
+ dev_priv->rps.max_delay = val;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev->struct_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_min_freq_mhz_store(struct device *kdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, rp_state_cap, hw_max, hw_min;
+ ssize_t ret;
+
+ ret = kstrtou32(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ val /= GT_FREQUENCY_MULTIPLIER;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ hw_max = (rp_state_cap & 0xff);
+ hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+ if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ if (dev_priv->rps.cur_delay < val)
+ gen6_set_rps(dev_priv->dev, val);
+
+ dev_priv->rps.min_delay = val;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
+static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
+static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
+static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+
+/* For now we have a static number of RP states */
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, rp_state_cap;
+ ssize_t ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (attr == &dev_attr_gt_RP0_freq_mhz) {
+ val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+ } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
+ val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+ } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
+ val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+ } else {
+ BUG();
+ }
+ return snprintf(buf, PAGE_SIZE, "%d", val);
+}
+
+static const struct attribute *gen6_attrs[] = {
+ &dev_attr_gt_cur_freq_mhz.attr,
+ &dev_attr_gt_max_freq_mhz.attr,
+ &dev_attr_gt_min_freq_mhz.attr,
+ &dev_attr_gt_RP0_freq_mhz.attr,
+ &dev_attr_gt_RP1_freq_mhz.attr,
+ &dev_attr_gt_RPn_freq_mhz.attr,
+ NULL,
+};
+
void i915_setup_sysfs(struct drm_device *dev)
{
int ret;
+#ifdef CONFIG_PM
if (INTEL_INFO(dev)->gen >= 6) {
ret = sysfs_merge_group(&dev->primary->kdev.kobj,
&rc6_attr_group);
if (ret)
DRM_ERROR("RC6 residency sysfs setup failed\n");
}
-
- if (IS_IVYBRIDGE(dev)) {
+#endif
+ if (HAS_L3_GPU_CACHE(dev)) {
ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
if (ret)
DRM_ERROR("l3 parity sysfs setup failed\n");
}
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
+ if (ret)
+ DRM_ERROR("gen6 sysfs setup failed\n");
+ }
}
void i915_teardown_sysfs(struct drm_device *dev)
{
+ sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
+#ifdef CONFIG_PM
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+#endif
}
-#else
-void i915_setup_sysfs(struct drm_device *dev)
-{
- return;
-}
-
-void i915_teardown_sysfs(struct drm_device *dev)
-{
- return;
-}
-#endif /* CONFIG_PM */
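
Each new gt_*_freq_mhz store handler follows the same shape: parse with kstrtou32, scale MHz down to hardware delay units, reject values outside both the fused RP_STATE_CAP range and the opposite software limit, then apply. A user-space model of the max-limit path, assuming the 50 MHz-per-unit scaling this generation of the driver uses (GT_FREQUENCY_MULTIPLIER):

#include <stdio.h>

#define MULT 50u	/* stand-in for GT_FREQUENCY_MULTIPLIER */

static int set_max(unsigned int mhz, unsigned int hw_min, unsigned int hw_max,
		   unsigned int sw_min, unsigned int *sw_max)
{
	unsigned int val = mhz / MULT;	/* MHz -> hardware delay units */

	/* reject values outside the fused range or below the min limit */
	if (val < hw_min || val > hw_max || val < sw_min)
		return -1;		/* -EINVAL in the driver */

	*sw_max = val;
	return 0;
}

int main(void)
{
	unsigned int sw_max = 22;

	if (set_max(1100, 7, 22, 7, &sw_max) == 0)
		printf("max delay now %u (%u MHz)\n", sw_max, sw_max * MULT);
	return 0;
}
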
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fe90b3a84a6d..8134421b89a6 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -214,22 +214,18 @@ TRACE_EVENT(i915_gem_evict,
);
TRACE_EVENT(i915_gem_evict_everything,
- TP_PROTO(struct drm_device *dev, bool purgeable),
- TP_ARGS(dev, purgeable),
+ TP_PROTO(struct drm_device *dev),
+ TP_ARGS(dev),
TP_STRUCT__entry(
__field(u32, dev)
- __field(bool, purgeable)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
- __entry->purgeable = purgeable;
),
- TP_printk("dev=%d%s",
- __entry->dev,
- __entry->purgeable ? ", purgeable only" : "")
+ TP_printk("dev=%d", __entry->dev)
);
TRACE_EVENT(i915_gem_ring_dispatch,
@@ -434,6 +430,21 @@ TRACE_EVENT(i915_reg_rw,
(u32)(__entry->val >> 32))
);
+TRACE_EVENT(intel_gpu_freq_change,
+ TP_PROTO(u32 freq),
+ TP_ARGS(freq),
+
+ TP_STRUCT__entry(
+ __field(u32, freq)
+ ),
+
+ TP_fast_assign(
+ __entry->freq = freq;
+ ),
+
+ TP_printk("new_freq=%u", __entry->freq)
+);
+
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 23bdc8cd1458..c42b9809f86d 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -47,6 +47,7 @@
struct intel_crt {
struct intel_encoder base;
bool force_hotplug_required;
+ u32 adpa_reg;
};
static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
@@ -55,42 +56,68 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
struct intel_crt, base);
}
-static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
+static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ return container_of(encoder, struct intel_crt, base);
+}
+
+static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 tmp;
+
+ tmp = I915_READ(crt->adpa_reg);
+
+ if (!(tmp & ADPA_DAC_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_disable_crt(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 temp;
- temp = I915_READ(PCH_ADPA);
+ temp = I915_READ(crt->adpa_reg);
+ temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
+ I915_WRITE(crt->adpa_reg, temp);
+}
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- temp |= ADPA_DAC_ENABLE;
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- /* Just leave port enable cleared */
- break;
- }
+static void intel_enable_crt(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 temp;
- I915_WRITE(PCH_ADPA, temp);
+ temp = I915_READ(crt->adpa_reg);
+ temp |= ADPA_DAC_ENABLE;
+ I915_WRITE(crt->adpa_reg, temp);
}
-static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
+/* Note: The caller is required to filter out dpms modes not supported by the
+ * platform. */
+static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 temp;
- temp = I915_READ(ADPA);
+ temp = I915_READ(crt->adpa_reg);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
- if (IS_VALLEYVIEW(dev) && mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
switch (mode) {
case DRM_MODE_DPMS_ON:
temp |= ADPA_DAC_ENABLE;
@@ -106,7 +133,51 @@ static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
break;
}
- I915_WRITE(ADPA, temp);
+ I915_WRITE(crt->adpa_reg, temp);
+}
+
+static void intel_crt_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct drm_crtc *crtc;
+ int old_dpms;
+
+ /* PCH platforms and VLV only support on/off. */
+ if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ old_dpms = connector->dpms;
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = encoder->base.crtc;
+ if (!crtc) {
+ encoder->connectors_active = false;
+ return;
+ }
+
+ /* We need the pipe to run for anything but OFF. */
+ if (mode == DRM_MODE_DPMS_OFF)
+ encoder->connectors_active = false;
+ else
+ encoder->connectors_active = true;
+
+ if (mode < old_dpms) {
+ /* From off to on, enable the pipe first. */
+ intel_crtc_update_dpms(crtc);
+
+ intel_crt_set_dpms(encoder, mode);
+ } else {
+ intel_crt_set_dpms(encoder, mode);
+
+ intel_crtc_update_dpms(crtc);
+ }
+
+ intel_modeset_check_state(connector->dev);
}
static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -145,19 +216,15 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crt *crt =
+ intel_encoder_to_crt(to_intel_encoder(encoder));
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_md_reg;
u32 adpa, dpll_md;
- u32 adpa_reg;
dpll_md_reg = DPLL_MD(intel_crtc->pipe);
- if (HAS_PCH_SPLIT(dev))
- adpa_reg = PCH_ADPA;
- else
- adpa_reg = ADPA;
-
/*
* Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning
@@ -185,7 +252,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
- I915_WRITE(adpa_reg, adpa);
+ I915_WRITE(crt->adpa_reg, adpa);
}
static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
@@ -545,14 +612,12 @@ intel_crt_detect(struct drm_connector *connector, bool force)
return connector->status;
/* for pre-945g platforms use load detect */
- if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
- &tmp)) {
+ if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else
status = intel_crt_load_detect(crt);
- intel_release_load_detect_pipe(&crt->base, connector,
- &tmp);
+ intel_release_load_detect_pipe(connector, &tmp);
} else
status = connector_status_unknown;
@@ -603,25 +668,15 @@ static void intel_crt_reset(struct drm_connector *connector)
* Routines for controlling stuff on the analog port
*/
-static const struct drm_encoder_helper_funcs pch_encoder_funcs = {
+static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
.mode_fixup = intel_crt_mode_fixup,
- .prepare = intel_encoder_prepare,
- .commit = intel_encoder_commit,
.mode_set = intel_crt_mode_set,
- .dpms = pch_crt_dpms,
-};
-
-static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
- .mode_fixup = intel_crt_mode_fixup,
- .prepare = intel_encoder_prepare,
- .commit = intel_encoder_commit,
- .mode_set = intel_crt_mode_set,
- .dpms = gmch_crt_dpms,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.reset = intel_crt_reset,
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_crt_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy,
@@ -662,7 +717,6 @@ void intel_crt_init(struct drm_device *dev)
struct intel_crt *crt;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- const struct drm_encoder_helper_funcs *encoder_helper_funcs;
/* Skip machines without VGA that falsely report hotplug events */
if (dmi_check_system(intel_no_crt))
@@ -688,13 +742,11 @@ void intel_crt_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, &crt->base);
crt->base.type = INTEL_OUTPUT_ANALOG;
- crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
- 1 << INTEL_ANALOG_CLONE_BIT |
- 1 << INTEL_SDVO_LVDS_CLONE_BIT);
+ crt->base.cloneable = true;
if (IS_HASWELL(dev))
crt->base.crtc_mask = (1 << 0);
else
- crt->base.crtc_mask = (1 << 0) | (1 << 1);
+ crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
if (IS_GEN2(dev))
connector->interlace_allowed = 0;
@@ -703,11 +755,18 @@ void intel_crt_init(struct drm_device *dev)
connector->doublescan_allowed = 0;
if (HAS_PCH_SPLIT(dev))
- encoder_helper_funcs = &pch_encoder_funcs;
+ crt->adpa_reg = PCH_ADPA;
+ else if (IS_VALLEYVIEW(dev))
+ crt->adpa_reg = VLV_ADPA;
else
- encoder_helper_funcs = &gmch_encoder_funcs;
+ crt->adpa_reg = ADPA;
+
+ crt->base.disable = intel_disable_crt;
+ crt->base.enable = intel_enable_crt;
+ crt->base.get_hw_state = intel_crt_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
- drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
+ drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 933c74859172..bfe375466a0e 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -250,7 +250,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
case PORT_B:
case PORT_C:
case PORT_D:
- intel_hdmi_init(dev, DDI_BUF_CTL(port));
+ intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
break;
default:
DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
@@ -267,7 +267,8 @@ struct wrpll_tmds_clock {
u16 r2; /* Reference divider */
};
-/* Table of matching values for WRPLL clocks programming for each frequency */
+/* Table of matching values for WRPLL clock programming for each frequency.
+ * The code assumes this table is sorted by ascending clock. */
static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{19750, 38, 25, 18},
{20000, 48, 32, 18},
@@ -277,7 +278,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{23000, 36, 23, 15},
{23500, 40, 40, 23},
{23750, 26, 16, 14},
- {23750, 26, 16, 14},
{24000, 36, 24, 15},
{25000, 36, 25, 15},
{25175, 26, 40, 33},
@@ -437,7 +437,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{108000, 8, 24, 15},
{108108, 8, 173, 108},
{109000, 6, 23, 19},
- {109000, 6, 23, 19},
{110000, 6, 22, 18},
{110013, 6, 22, 18},
{110250, 8, 49, 30},
@@ -614,7 +613,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{218250, 4, 42, 26},
{218750, 4, 34, 21},
{219000, 4, 47, 29},
- {219000, 4, 47, 29},
{220000, 4, 44, 27},
{220640, 4, 49, 30},
{220750, 4, 36, 22},
@@ -658,7 +656,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int port = intel_hdmi->ddi_port;
int pipe = intel_crtc->pipe;
- int p, n2, r2, valid=0;
+ int p, n2, r2;
u32 temp, i;
/* On Haswell, we need to enable the clocks and prepare DDI function to
@@ -666,26 +664,23 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
*/
DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
- for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
- if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
- p = wrpll_tmds_clock_table[i].p;
- n2 = wrpll_tmds_clock_table[i].n2;
- r2 = wrpll_tmds_clock_table[i].r2;
+ for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
+ if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
+ break;
- DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
- crtc->mode.clock,
- p, n2, r2);
+ if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
+ i--;
- valid = 1;
- break;
- }
- }
+ p = wrpll_tmds_clock_table[i].p;
+ n2 = wrpll_tmds_clock_table[i].n2;
+ r2 = wrpll_tmds_clock_table[i].r2;
- if (!valid) {
- DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
- crtc->mode.clock);
- return;
- }
+ if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
+ DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
+ wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
+
+ DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+ crtc->mode.clock, p, n2, r2);
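	/* Worked example: a 23600 KHz mode is not in the table, so the loop
	 * above stops at the first entry with clock >= 23600, i.e. the
	 * 23750 KHz entry, and the DRM_INFO above records that we run
	 * slightly off the requested clock.
	 */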
/* Enable LCPLL if disabled */
temp = I915_READ(LCPLL_CTL);
@@ -718,46 +713,107 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
/* Proper support for digital audio needs a new logic and a new set
* of registers, so we leave it for future patch bombing.
*/
- DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n",
+ DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
pipe_name(intel_crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
+ intel_write_eld(encoder, adjusted_mode);
}
/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
- temp = I915_READ(DDI_FUNC_CTL(pipe));
- temp &= ~PIPE_DDI_PORT_MASK;
- temp &= ~PIPE_DDI_BPC_12;
- temp |= PIPE_DDI_SELECT_PORT(port) |
- PIPE_DDI_MODE_SELECT_HDMI |
- ((intel_crtc->bpp > 24) ?
- PIPE_DDI_BPC_12 :
- PIPE_DDI_BPC_8) |
- PIPE_DDI_FUNC_ENABLE;
+ temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
+
+ switch (intel_crtc->bpp) {
+ case 18:
+ temp |= PIPE_DDI_BPC_6;
+ break;
+ case 24:
+ temp |= PIPE_DDI_BPC_8;
+ break;
+ case 30:
+ temp |= PIPE_DDI_BPC_10;
+ break;
+ case 36:
+ temp |= PIPE_DDI_BPC_12;
+ break;
+ default:
+ WARN(1, "%d bpp unsupported by pipe DDI function\n",
+ intel_crtc->bpp);
+ }
+
+ if (intel_hdmi->has_hdmi_sink)
+ temp |= PIPE_DDI_MODE_SELECT_HDMI;
+ else
+ temp |= PIPE_DDI_MODE_SELECT_DVI;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= PIPE_DDI_PVSYNC;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= PIPE_DDI_PHSYNC;
I915_WRITE(DDI_FUNC_CTL(pipe), temp);
intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
-void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 tmp;
+ int i;
+
+ tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port));
+
+ if (!(tmp & DDI_BUF_CTL_ENABLE))
+ return false;
+
+ for_each_pipe(i) {
+ tmp = I915_READ(DDI_FUNC_CTL(i));
+
+ if ((tmp & PIPE_DDI_PORT_MASK)
+ == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) {
+ *pipe = i;
+ return true;
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port);
+
+ return true;
+}
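/* A usage sketch (an assumed consumer, not in this patch): the new
 * ->get_hw_state hook lets generic modeset code ask "is this encoder on,
 * and on which pipe?" without any per-encoder knowledge:
 *
 *	enum pipe pipe;
 *
 *	if (encoder->get_hw_state(encoder, &pipe))
 *		DRM_DEBUG_KMS("active on pipe %c\n", pipe_name(pipe));
 */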
+
+void intel_enable_ddi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
int port = intel_hdmi->ddi_port;
u32 temp;
temp = I915_READ(DDI_BUF_CTL(port));
-
- if (mode != DRM_MODE_DPMS_ON) {
- temp &= ~DDI_BUF_CTL_ENABLE;
- } else {
- temp |= DDI_BUF_CTL_ENABLE;
- }
+ temp |= DDI_BUF_CTL_ENABLE;
/* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
* and swing/emphasis values are ignored so nothing special needs
* to be done besides enabling the port.
*/
- I915_WRITE(DDI_BUF_CTL(port),
- temp);
+ I915_WRITE(DDI_BUF_CTL(port), temp);
+}
+
+void intel_disable_ddi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ int port = intel_hdmi->ddi_port;
+ u32 temp;
+
+ temp = I915_READ(DDI_BUF_CTL(port));
+ temp &= ~DDI_BUF_CTL_ENABLE;
+
+ I915_WRITE(DDI_BUF_CTL(port), temp);
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c040aee1341c..6f5aafa1b633 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1006,7 +1006,7 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
/* Wait for the Pipe State to go off */
if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
100))
- DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ WARN(1, "pipe_off wait timed out\n");
} else {
u32 last_line, line_mask;
int reg = PIPEDSL(pipe);
@@ -1024,7 +1024,7 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
} while (((I915_READ(reg) & line_mask) != last_line) &&
time_after(timeout, jiffies));
if (time_after(jiffies, timeout))
- DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ WARN(1, "pipe_off wait timed out\n");
}
}
@@ -1431,6 +1431,8 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
* protect mechanism may be enabled.
*
* Note! This is for pre-ILK only.
+ *
+ * Unfortunately needed by dvo_ns2501, since that DVO depends on this PLL running.
*/
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
@@ -1860,59 +1862,6 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
intel_wait_for_vblank(dev_priv->dev, pipe);
}
-static void disable_pch_dp(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg, u32 port_sel)
-{
- u32 val = I915_READ(reg);
- if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
- DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
- I915_WRITE(reg, val & ~DP_PORT_EN);
- }
-}
-
-static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg)
-{
- u32 val = I915_READ(reg);
- if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
- DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
- reg, pipe);
- I915_WRITE(reg, val & ~PORT_ENABLE);
- }
-}
-
-/* Disable any ports connected to this transcoder */
-static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- u32 reg, val;
-
- val = I915_READ(PCH_PP_CONTROL);
- I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
-
- disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
- disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
- disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
-
- reg = PCH_ADPA;
- val = I915_READ(reg);
- if (adpa_pipe_enabled(dev_priv, pipe, val))
- I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
-
- reg = PCH_LVDS;
- val = I915_READ(reg);
- if (lvds_pipe_enabled(dev_priv, pipe, val)) {
- DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
- I915_WRITE(reg, val & ~LVDS_PORT_EN);
- POSTING_READ(reg);
- udelay(100);
- }
-
- disable_pch_hdmi(dev_priv, pipe, HDMIB);
- disable_pch_hdmi(dev_priv, pipe, HDMIC);
- disable_pch_hdmi(dev_priv, pipe, HDMID);
-}
-
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -2201,16 +2150,17 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_framebuffer *old_fb;
int ret;
/* no fb bound */
- if (!crtc->fb) {
+ if (!fb) {
DRM_ERROR("No FB bound\n");
return 0;
}
@@ -2224,7 +2174,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
- to_intel_framebuffer(crtc->fb)->obj,
+ to_intel_framebuffer(fb)->obj,
NULL);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
@@ -2232,17 +2182,22 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- if (old_fb)
- intel_finish_fb(old_fb);
+ if (crtc->fb)
+ intel_finish_fb(crtc->fb);
- ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
+ ret = dev_priv->display.update_plane(crtc, fb, x, y);
if (ret) {
- intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
+ intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
mutex_unlock(&dev->struct_mutex);
DRM_ERROR("failed to update base address\n");
return ret;
}
+ old_fb = crtc->fb;
+ crtc->fb = fb;
+ crtc->x = x;
+ crtc->y = y;
+
if (old_fb) {
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
@@ -2709,11 +2664,10 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
DRM_DEBUG_KMS("FDI train done.\n");
}
-static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
+static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp;
@@ -2754,6 +2708,35 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
}
}
+static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* Switch from PCDclk to Rawclk */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_PCDCLK);
+
+ /* Disable CPU FDI TX PLL */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+
+ /* Wait for the clocks to turn off. */
+ POSTING_READ(reg);
+ udelay(100);
+}
+
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2838,13 +2821,13 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct intel_encoder *encoder;
+ struct intel_encoder *intel_encoder;
/*
* If there's a non-PCH eDP on this crtc, it must be DP_A, and that
* must be driven by its own crtc; no sharing is possible.
*/
- for_each_encoder_on_crtc(dev, crtc, encoder) {
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
* CPU handles all others */
@@ -2852,19 +2835,19 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
/* It is still unclear how this will work on PPT, so throw up a warning */
WARN_ON(!HAS_PCH_LPT(dev));
- if (encoder->type == DRM_MODE_ENCODER_DAC) {
+ if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
return true;
} else {
DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
- encoder->type);
+ intel_encoder->type);
return false;
}
}
- switch (encoder->type) {
+ switch (intel_encoder->type) {
case INTEL_OUTPUT_EDP:
- if (!intel_encoder_is_pch_edp(&encoder->base))
+ if (!intel_encoder_is_pch_edp(&intel_encoder->base))
return false;
continue;
}
@@ -3181,11 +3164,14 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 temp;
bool is_pch_port;
+ WARN_ON(!crtc->enabled);
+
if (intel_crtc->active)
return;
@@ -3200,10 +3186,16 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
is_pch_port = intel_crtc_driving_pch(crtc);
- if (is_pch_port)
- ironlake_fdi_pll_enable(crtc);
- else
- ironlake_fdi_disable(crtc);
+ if (is_pch_port) {
+ ironlake_fdi_pll_enable(intel_crtc);
+ } else {
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+ }
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
@@ -3234,6 +3226,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
intel_crtc_update_cursor(crtc, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
+ if (HAS_PCH_CPT(dev))
+ intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
static void ironlake_crtc_disable(struct drm_crtc *crtc)
@@ -3241,13 +3239,18 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp;
+
if (!intel_crtc->active)
return;
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
intel_crtc_wait_for_pending_flips(crtc);
drm_vblank_off(dev, pipe);
intel_crtc_update_cursor(crtc, false);
@@ -3263,14 +3266,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
- ironlake_fdi_disable(crtc);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
- /* This is a horrible layering violation; we should be doing this in
- * the connector/encoder ->prepare instead, but we don't always have
- * enough information there about the config to know whether it will
- * actually be necessary or just cause undesired flicker.
- */
- intel_disable_pch_ports(dev_priv, pipe);
+ ironlake_fdi_disable(crtc);
intel_disable_transcoder(dev_priv, pipe);
@@ -3304,26 +3304,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
/* disable PCH DPLL */
intel_disable_pch_pll(intel_crtc);
- /* Switch from PCDclk to Rawclk */
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_PCDCLK);
-
- /* Disable CPU FDI TX PLL */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
-
- POSTING_READ(reg);
- udelay(100);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
-
- /* Wait for the clocks to turn off. */
- POSTING_READ(reg);
- udelay(100);
+ ironlake_fdi_pll_disable(intel_crtc);
intel_crtc->active = false;
intel_update_watermarks(dev);
@@ -3333,30 +3314,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
}
-static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
-
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
- ironlake_crtc_enable(crtc);
- break;
-
- case DRM_MODE_DPMS_OFF:
- DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
- ironlake_crtc_disable(crtc);
- break;
- }
-}
-
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -3386,9 +3343,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ WARN_ON(!crtc->enabled);
+
if (intel_crtc->active)
return;
@@ -3405,6 +3365,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
/* Give the overlay scaler a chance to enable if it's on this pipe */
intel_crtc_dpms_overlay(intel_crtc, true);
intel_crtc_update_cursor(crtc, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
}
static void i9xx_crtc_disable(struct drm_crtc *crtc)
@@ -3412,12 +3375,17 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+
if (!intel_crtc->active)
return;
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
/* Give the overlay scaler a chance to disable if it's on this pipe */
intel_crtc_wait_for_pending_flips(crtc);
drm_vblank_off(dev, pipe);
@@ -3436,45 +3404,17 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_update_watermarks(dev);
}
-static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- i9xx_crtc_enable(crtc);
- break;
- case DRM_MODE_DPMS_OFF:
- i9xx_crtc_disable(crtc);
- break;
- }
-}
-
static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
-/**
- * Sets the power management mode of the pipe and plane.
- */
-static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void intel_crtc_update_sarea(struct drm_crtc *crtc,
+ bool enabled)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- bool enabled;
-
- if (intel_crtc->dpms_mode == mode)
- return;
-
- intel_crtc->dpms_mode = mode;
-
- dev_priv->display.dpms(crtc, mode);
if (!dev->primary->master)
return;
@@ -3483,8 +3423,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
if (!master_priv->sarea_priv)
return;
- enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
-
switch (pipe) {
case 0:
master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
@@ -3500,13 +3438,42 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
+/**
+ * Sets the power management mode of the pipe and plane.
+ */
+void intel_crtc_update_dpms(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ bool enable = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+ enable |= intel_encoder->connectors_active;
+
+ if (enable)
+ dev_priv->display.crtc_enable(crtc);
+ else
+ dev_priv->display.crtc_disable(crtc);
+
+ intel_crtc_update_sarea(crtc, enable);
+}
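/* Informally: the crtc stays on while any encoder on it still has active
 * connectors,
 *
 *	enable = enc0->connectors_active || enc1->connectors_active || ...
 *
 * so with cloned outputs the pipe only shuts down once the last connector
 * on it has been dpms'ed off.
 */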
+
+static void intel_crtc_noop(struct drm_crtc *crtc)
+{
+}
+
static void intel_crtc_disable(struct drm_crtc *crtc)
{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ /* crtc should still be enabled when we disable it. */
+ WARN_ON(!crtc->enabled);
+
+ dev_priv->display.crtc_disable(crtc);
+ intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
@@ -3516,63 +3483,128 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
+ crtc->fb = NULL;
+ }
+
+ /* Update computed state. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder || !connector->encoder->crtc)
+ continue;
+
+ if (connector->encoder->crtc != crtc)
+ continue;
+
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ to_intel_encoder(connector->encoder)->connectors_active = false;
}
}
-/* Prepare for a mode set.
- *
- * Note we could be a lot smarter here. We need to figure out which outputs
- * will be enabled, which disabled (in short, how the config will changes)
- * and perform the minimum necessary steps to accomplish that, e.g. updating
- * watermarks, FBC configuration, making sure PLLs are programmed correctly,
- * panel fitting is in the proper state, etc.
- */
-static void i9xx_crtc_prepare(struct drm_crtc *crtc)
+void intel_modeset_disable(struct drm_device *dev)
{
- i9xx_crtc_disable(crtc);
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->enabled)
+ intel_crtc_disable(crtc);
+ }
}
-static void i9xx_crtc_commit(struct drm_crtc *crtc)
+void intel_encoder_noop(struct drm_encoder *encoder)
{
- i9xx_crtc_enable(crtc);
}
-static void ironlake_crtc_prepare(struct drm_crtc *crtc)
+void intel_encoder_destroy(struct drm_encoder *encoder)
{
- ironlake_crtc_disable(crtc);
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
-static void ironlake_crtc_commit(struct drm_crtc *crtc)
+/* Simple dpms helper for encoders with just one connector, no cloning and only
+ * one kind of off state. It clamps all !ON modes to fully OFF and changes the
+ * state of the entire output pipe. */
+void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
- ironlake_crtc_enable(crtc);
+ if (mode == DRM_MODE_DPMS_ON) {
+ encoder->connectors_active = true;
+
+ intel_crtc_update_dpms(encoder->base.crtc);
+ } else {
+ encoder->connectors_active = false;
+
+ intel_crtc_update_dpms(encoder->base.crtc);
+ }
}
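/* Both branches boil down to the same call; a more compact equivalent
 * (a sketch, the patch keeps the explicit branches) would be:
 *
 *	encoder->connectors_active = (mode == DRM_MODE_DPMS_ON);
 *	intel_crtc_update_dpms(encoder->base.crtc);
 */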
-void intel_encoder_prepare(struct drm_encoder *encoder)
+/* Cross check the actual hw state with our own modeset state tracking (and its
+ * internal consistency). */
+static void intel_connector_check_state(struct intel_connector *connector)
{
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- /* lvds has its own version of prepare see intel_lvds_prepare */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+ if (connector->get_hw_state(connector)) {
+ struct intel_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc;
+ bool encoder_enabled;
+ enum pipe pipe;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base));
+
+ WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
+ "wrong connector dpms state\n");
+ WARN(connector->base.encoder != &encoder->base,
+ "active connector not linked to encoder\n");
+ WARN(!encoder->connectors_active,
+ "encoder->connectors_active not set\n");
+
+ encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+ WARN(!encoder_enabled, "encoder not enabled\n");
+ if (WARN_ON(!encoder->base.crtc))
+ return;
+
+ crtc = encoder->base.crtc;
+
+ WARN(!crtc->enabled, "crtc not enabled\n");
+ WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+ WARN(pipe != to_intel_crtc(crtc)->pipe,
+ "encoder active on the wrong pipe\n");
+ }
}
-void intel_encoder_commit(struct drm_encoder *encoder)
+/* Even simpler default implementation, if there's really no special case to
+ * consider. */
+void intel_connector_dpms(struct drm_connector *connector, int mode)
{
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- struct drm_device *dev = encoder->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
- /* lvds has its own version of commit see intel_lvds_commit */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+ /* All the simple cases only support two dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
- if (HAS_PCH_CPT(dev))
- intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ if (encoder->base.crtc)
+ intel_encoder_dpms(encoder, mode);
+ else
+ WARN_ON(encoder->connectors_active != false);
+
+ intel_modeset_check_state(connector->dev);
}
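/* A usage sketch (hypothetical funcs table, not in this patch): outputs
 * with a single connector and no cloning can wire this helper straight
 * into their connector funcs instead of drm_helper_connector_dpms:
 *
 *	static const struct drm_connector_funcs example_connector_funcs = {
 *		.dpms = intel_connector_dpms,
 *		// remaining hooks elided in this sketch
 *	};
 */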
-void intel_encoder_destroy(struct drm_encoder *encoder)
+/* Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning, so the encoder state alone determines the state
+ * of the connector. */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ enum pipe pipe = 0;
+ struct intel_encoder *encoder = connector->encoder;
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
+ return encoder->get_hw_state(encoder, &pipe);
}
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -3593,6 +3625,13 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
drm_mode_set_crtcinfo(adjusted_mode, 0);
+ /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
+ * with a hsync front porch of 0.
+ */
+ if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+ adjusted_mode->hsync_start == adjusted_mode->hdisplay)
+ return false;
+
return true;
}
@@ -3728,6 +3767,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
* true if they don't match).
*/
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
unsigned int *pipe_bpp,
struct drm_display_mode *mode)
{
@@ -3797,7 +3837,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
* also stays within the max display bpc discovered above.
*/
- switch (crtc->fb->depth) {
+ switch (fb->depth) {
case 8:
bpc = 8; /* since we go through a colormap */
break;
@@ -4216,7 +4256,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
- struct drm_framebuffer *old_fb)
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4406,7 +4446,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
- ret = intel_pipe_set_base(crtc, x, y, old_fb);
+ ret = intel_pipe_set_base(crtc, x, y, fb);
intel_update_watermarks(dev);
@@ -4560,24 +4600,130 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
return 120000;
}
+static void ironlake_set_pipeconf(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ bool dither)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ uint32_t val;
+
+ val = I915_READ(PIPECONF(pipe));
+
+ val &= ~PIPE_BPC_MASK;
+ switch (intel_crtc->bpp) {
+ case 18:
+ val |= PIPE_6BPC;
+ break;
+ case 24:
+ val |= PIPE_8BPC;
+ break;
+ case 30:
+ val |= PIPE_10BPC;
+ break;
+ case 36:
+ val |= PIPE_12BPC;
+ break;
+ default:
+ val |= PIPE_8BPC;
+ break;
+ }
+
+ val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+ if (dither)
+ val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+ val &= ~PIPECONF_INTERLACE_MASK;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACED_ILK;
+ else
+ val |= PIPECONF_PROGRESSIVE;
+
+ I915_WRITE(PIPECONF(pipe), val);
+ POSTING_READ(PIPECONF(pipe));
+}
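/* Quick reference: intel_crtc->bpp is total bits per pixel, so the
 * 18/24/30/36 cases above map to 6/8/10/12 bits per channel in the
 * PIPECONF BPC field.
 */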
+
+static bool ironlake_compute_clocks(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock,
+ bool *has_reduced_clock,
+ intel_clock_t *reduced_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ int refclk;
+ const intel_limit_t *limit;
+ bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+ if (intel_encoder->needs_tv_clock)
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ }
+ }
+
+ refclk = ironlake_get_refclk(crtc);
+
+ /*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+ limit = intel_limit(crtc, refclk);
+ ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+ clock);
+ if (!ret)
+ return false;
+
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target clock.
+ * If the clocks don't match, we can't switch the display clock
+ * by using the FP0/FP1. In such case we will disable the LVDS
+ * downclock feature.
+ */
+ *has_reduced_clock = limit->find_pll(limit, crtc,
+ dev_priv->lvds_downclock,
+ refclk,
+ clock,
+ reduced_clock);
+ }
+
+ if (is_sdvo && is_tv)
+ i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
+
+ return true;
+}
+
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
- struct drm_framebuffer *old_fb)
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- int refclk, num_connectors = 0;
+ int num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
+ u32 dpll, fp = 0, fp2 = 0;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct intel_encoder *encoder, *edp_encoder = NULL;
- const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
u32 temp;
@@ -4619,16 +4765,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
- refclk = ironlake_get_refclk(crtc);
-
- /*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
- limit = intel_limit(crtc, refclk);
- ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
- &clock);
+ ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+ &has_reduced_clock, &reduced_clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
@@ -4637,24 +4775,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* Ensure that the cursor is valid for the new mode before changing... */
intel_crtc_update_cursor(crtc, true);
- if (is_lvds && dev_priv->lvds_downclock_avail) {
- /*
- * Ensure we match the reduced clock's P to the target clock.
- * If the clocks don't match, we can't switch the display clock
- * by using the FP0/FP1. In such case we will disable the LVDS
- * downclock feature.
- */
- has_reduced_clock = limit->find_pll(limit, crtc,
- dev_priv->lvds_downclock,
- refclk,
- &clock,
- &reduced_clock);
- }
-
- if (is_sdvo && is_tv)
- i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
-
-
/* FDI link */
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
lane = 0;
@@ -4682,32 +4802,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
target_clock = adjusted_mode->clock;
/* determine panel color depth */
- temp = I915_READ(PIPECONF(pipe));
- temp &= ~PIPE_BPC_MASK;
- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
- switch (pipe_bpp) {
- case 18:
- temp |= PIPE_6BPC;
- break;
- case 24:
- temp |= PIPE_8BPC;
- break;
- case 30:
- temp |= PIPE_10BPC;
- break;
- case 36:
- temp |= PIPE_12BPC;
- break;
- default:
+ dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, mode);
+ if (is_lvds && dev_priv->lvds_dither)
+ dither = true;
+
+ if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
+ pipe_bpp != 36) {
WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
- pipe_bpp);
- temp |= PIPE_8BPC;
+ pipe_bpp);
pipe_bpp = 24;
- break;
}
-
intel_crtc->bpp = pipe_bpp;
- I915_WRITE(PIPECONF(pipe), temp);
if (!lane) {
/*
@@ -4791,12 +4896,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
dpll |= PLL_REF_INPUT_DREFCLK;
- /* setup pipeconf */
- pipeconf = I915_READ(PIPECONF(pipe));
-
- /* Set up the display plane register */
- dspcntr = DISPPLANE_GAMMA_ENABLE;
-
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
@@ -4856,12 +4955,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PCH_LVDS, temp);
}
- pipeconf &= ~PIPECONF_DITHER_EN;
- pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
- if ((is_lvds && dev_priv->lvds_dither) || dither) {
- pipeconf |= PIPECONF_DITHER_EN;
- pipeconf |= PIPECONF_DITHER_TYPE_SP;
- }
if (is_dp && !is_cpu_edp) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
@@ -4897,9 +4990,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
}
- pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- pipeconf |= PIPECONF_INTERLACED_ILK;
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vtotal -= 1;
adjusted_mode->crtc_vblank_end -= 1;
@@ -4907,7 +4998,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
adjusted_mode->crtc_hsync_start
- adjusted_mode->crtc_htotal/2);
} else {
- pipeconf |= PIPECONF_PROGRESSIVE;
I915_WRITE(VSYNCSHIFT(pipe), 0);
}
@@ -4945,15 +5035,15 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
- I915_WRITE(PIPECONF(pipe), pipeconf);
- POSTING_READ(PIPECONF(pipe));
+ ironlake_set_pipeconf(crtc, adjusted_mode, dither);
intel_wait_for_vblank(dev, pipe);
- I915_WRITE(DSPCNTR(plane), dspcntr);
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
POSTING_READ(DSPCNTR(plane));
- ret = intel_pipe_set_base(crtc, x, y, old_fb);
+ ret = intel_pipe_set_base(crtc, x, y, fb);
intel_update_watermarks(dev);
@@ -4966,7 +5056,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
- struct drm_framebuffer *old_fb)
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4977,14 +5067,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
drm_vblank_pre_modeset(dev, pipe);
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
- x, y, old_fb);
+ x, y, fb);
drm_vblank_post_modeset(dev, pipe);
- if (ret)
- intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
- else
- intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
-
return ret;
}
@@ -5057,6 +5142,91 @@ static void g4x_write_eld(struct drm_connector *connector,
I915_WRITE(G4X_AUD_CNTL_ST, i);
}
+static void haswell_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ struct drm_device *dev = crtc->dev;
+ uint32_t eldv;
+ uint32_t i;
+ int len;
+ int pipe = to_intel_crtc(crtc)->pipe;
+ int tmp;
+
+ int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
+ int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
+ int aud_config = HSW_AUD_CFG(pipe);
+ int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
+
+ DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
+
+ /* Audio output enable */
+ DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
+ tmp = I915_READ(aud_cntrl_st2);
+ tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
+ I915_WRITE(aud_cntrl_st2, tmp);
+
+ /* Wait for 1 vertical blank */
+ intel_wait_for_vblank(dev, pipe);
+
+ /* Set ELD valid state */
+ tmp = I915_READ(aud_cntrl_st2);
+ DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
+ tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
+ I915_WRITE(aud_cntrl_st2, tmp);
+ tmp = I915_READ(aud_cntrl_st2);
+ DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
+
+ /* Enable HDMI mode */
+ tmp = I915_READ(aud_config);
+ DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
+ /* clear N_programing_enable and N_value_index */
+ tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
+ I915_WRITE(aud_config, tmp);
+
+ DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
+
+ eldv = AUDIO_ELD_VALID_A << (pipe * 4);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+ eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
+ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+ } else
+ I915_WRITE(aud_config, 0);
+
+ if (intel_eld_uptodate(connector,
+ aud_cntrl_st2, eldv,
+ aud_cntl_st, IBX_ELD_ADDRESS,
+ hdmiw_hdmiedid))
+ return;
+
+ i = I915_READ(aud_cntrl_st2);
+ i &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+
+ if (!eld[0])
+ return;
+
+ i = I915_READ(aud_cntl_st);
+ i &= ~IBX_ELD_ADDRESS;
+ I915_WRITE(aud_cntl_st, i);
+ i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
+ DRM_DEBUG_DRIVER("port num:%d\n", i);
+
+ len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+ i = I915_READ(aud_cntrl_st2);
+ i |= eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+}
+
static void ironlake_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc)
{
@@ -5069,28 +5239,24 @@ static void ironlake_write_eld(struct drm_connector *connector,
int aud_config;
int aud_cntl_st;
int aud_cntrl_st2;
+ int pipe = to_intel_crtc(crtc)->pipe;
if (HAS_PCH_IBX(connector->dev)) {
- hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
- aud_config = IBX_AUD_CONFIG_A;
- aud_cntl_st = IBX_AUD_CNTL_ST_A;
+ hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+ aud_config = IBX_AUD_CFG(pipe);
+ aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else {
- hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
- aud_config = CPT_AUD_CONFIG_A;
- aud_cntl_st = CPT_AUD_CNTL_ST_A;
+ hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+ aud_config = CPT_AUD_CFG(pipe);
+ aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
- i = to_intel_crtc(crtc)->pipe;
- hdmiw_hdmiedid += i * 0x100;
- aud_cntl_st += i * 0x100;
- aud_config += i * 0x100;
-
- DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
+ DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
i = I915_READ(aud_cntl_st);
- i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */
+ i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
if (!i) {
DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
/* operate blindly on all ports */
@@ -5337,8 +5503,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t addr;
int ret;
- DRM_DEBUG_KMS("\n");
-
/* if we want to turn off the cursor ignore width and height */
if (!handle) {
DRM_DEBUG_KMS("cursor off\n");
@@ -5584,17 +5748,18 @@ mode_fits_in_fbdev(struct drm_device *dev,
return fb;
}
-bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
- struct drm_connector *connector,
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old)
{
struct intel_crtc *intel_crtc;
+ struct intel_encoder *intel_encoder =
+ intel_attached_encoder(connector);
struct drm_crtc *possible_crtc;
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
- struct drm_framebuffer *old_fb;
+ struct drm_framebuffer *fb;
int i = -1;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -5615,21 +5780,12 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
if (encoder->crtc) {
crtc = encoder->crtc;
- intel_crtc = to_intel_crtc(crtc);
- old->dpms_mode = intel_crtc->dpms_mode;
+ old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
/* Make sure the crtc and connector are running */
- if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
- struct drm_encoder_helper_funcs *encoder_funcs;
- struct drm_crtc_helper_funcs *crtc_funcs;
-
- crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-
- encoder_funcs = encoder->helper_private;
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
- }
+ if (connector->dpms != DRM_MODE_DPMS_ON)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
return true;
}
@@ -5653,19 +5809,17 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
return false;
}
- encoder->crtc = crtc;
- connector->encoder = encoder;
+ intel_encoder->new_crtc = to_intel_crtc(crtc);
+ to_intel_connector(connector)->new_encoder = intel_encoder;
intel_crtc = to_intel_crtc(crtc);
- old->dpms_mode = intel_crtc->dpms_mode;
+ old->dpms_mode = connector->dpms;
old->load_detect_temp = true;
old->release_fb = NULL;
if (!mode)
mode = &load_detect_mode;
- old_fb = crtc->fb;
-
/* We need a framebuffer large enough to accommodate all accesses
* that the plane may generate whilst we perform load detection.
* We can not rely on the fbcon either being present (we get called
@@ -5673,50 +5827,52 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
* not even exist) or that it is large enough to satisfy the
* requested mode.
*/
- crtc->fb = mode_fits_in_fbdev(dev, mode);
- if (crtc->fb == NULL) {
+ fb = mode_fits_in_fbdev(dev, mode);
+ if (fb == NULL) {
DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
- crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
- old->release_fb = crtc->fb;
+ fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
+ old->release_fb = fb;
} else
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
- if (IS_ERR(crtc->fb)) {
+ if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
- crtc->fb = old_fb;
- return false;
+ goto fail;
}
- if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
+ if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
- crtc->fb = old_fb;
- return false;
+ goto fail;
}
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
return true;
+fail:
+ connector->encoder = NULL;
+ encoder->crtc = NULL;
+ return false;
}
-void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
- struct drm_connector *connector,
+void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old)
{
+ struct intel_encoder *intel_encoder =
+ intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_crtc *crtc = encoder->crtc;
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector),
encoder->base.id, drm_get_encoder_name(encoder));
if (old->load_detect_temp) {
- connector->encoder = NULL;
- drm_helper_disable_unused_functions(dev);
+ struct drm_crtc *crtc = encoder->crtc;
+
+ to_intel_connector(connector)->new_encoder = NULL;
+ intel_encoder->new_crtc = NULL;
+ intel_set_mode(crtc, NULL, 0, 0, NULL);
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
@@ -5725,10 +5881,8 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
}
/* Switch crtc and encoder back off if necessary */
- if (old->dpms_mode != DRM_MODE_DPMS_ON) {
- encoder_funcs->dpms(encoder, old->dpms_mode);
- crtc_funcs->dpms(crtc, old->dpms_mode);
- }
+ if (old->dpms_mode != DRM_MODE_DPMS_ON)
+ connector->funcs->dpms(connector, old->dpms_mode);
}
/* Returns the clock of the currently programmed mode of the given pipe. */
@@ -5850,46 +6004,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-#define GPU_IDLE_TIMEOUT 500 /* ms */
-
-/* When this timer fires, we've been idle for awhile */
-static void intel_gpu_idle_timer(unsigned long arg)
-{
- struct drm_device *dev = (struct drm_device *)arg;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (!list_empty(&dev_priv->mm.active_list)) {
- /* Still processing requests, so just re-arm the timer. */
- mod_timer(&dev_priv->idle_timer, jiffies +
- msecs_to_jiffies(GPU_IDLE_TIMEOUT));
- return;
- }
-
- dev_priv->busy = false;
- queue_work(dev_priv->wq, &dev_priv->idle_work);
-}
-
-#define CRTC_IDLE_TIMEOUT 1000 /* ms */
-
-static void intel_crtc_idle_timer(unsigned long arg)
-{
- struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
- struct drm_crtc *crtc = &intel_crtc->base;
- drm_i915_private_t *dev_priv = crtc->dev->dev_private;
- struct intel_framebuffer *intel_fb;
-
- intel_fb = to_intel_framebuffer(crtc->fb);
- if (intel_fb && intel_fb->obj->active) {
- /* The framebuffer is still being accessed by the GPU. */
- mod_timer(&intel_crtc->idle_timer, jiffies +
- msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
- return;
- }
-
- intel_crtc->busy = false;
- queue_work(dev_priv->wq, &dev_priv->idle_work);
-}
-
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -5919,10 +6033,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
}
-
- /* Schedule downclock */
- mod_timer(&intel_crtc->idle_timer, jiffies +
- msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
static void intel_decrease_pllclock(struct drm_crtc *crtc)
@@ -5961,89 +6071,46 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
}
-/**
- * intel_idle_update - adjust clocks for idleness
- * @work: work struct
- *
- * Either the GPU or display (or both) went idle. Check the busy status
- * here and adjust the CRTC and GPU clocks as necessary.
- */
-static void intel_idle_update(struct work_struct *work)
+void intel_mark_busy(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- idle_work);
- struct drm_device *dev = dev_priv->dev;
+ i915_update_gfx_val(dev->dev_private);
+}
+
+void intel_mark_idle(struct drm_device *dev)
+{
+}
+
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
if (!i915_powersave)
return;
- mutex_lock(&dev->struct_mutex);
-
- i915_update_gfx_val(dev_priv);
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- /* Skip inactive CRTCs */
if (!crtc->fb)
continue;
- intel_crtc = to_intel_crtc(crtc);
- if (!intel_crtc->busy)
- intel_decrease_pllclock(crtc);
+ if (to_intel_framebuffer(crtc->fb)->obj == obj)
+ intel_increase_pllclock(crtc);
}
-
-
- mutex_unlock(&dev->struct_mutex);
}
-/**
- * intel_mark_busy - mark the GPU and possibly the display busy
- * @dev: drm device
- * @obj: object we're operating on
- *
- * Callers can use this function to indicate that the GPU is busy processing
- * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
- * buffer), we'll also mark the display as busy, so we know to increase its
- * clock frequency.
- */
-void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
+void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = NULL;
- struct intel_framebuffer *intel_fb;
- struct intel_crtc *intel_crtc;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- if (!dev_priv->busy) {
- intel_sanitize_pm(dev);
- dev_priv->busy = true;
- } else
- mod_timer(&dev_priv->idle_timer, jiffies +
- msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+ struct drm_device *dev = obj->base.dev;
+ struct drm_crtc *crtc;
- if (obj == NULL)
+ if (!i915_powersave)
return;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
continue;
- intel_crtc = to_intel_crtc(crtc);
- intel_fb = to_intel_framebuffer(crtc->fb);
- if (intel_fb->obj == obj) {
- if (!intel_crtc->busy) {
- /* Non-busy -> busy, upclock */
- intel_increase_pllclock(crtc);
- intel_crtc->busy = true;
- } else {
- /* Busy -> busy, put off timer */
- mod_timer(&intel_crtc->idle_timer, jiffies +
- msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
- }
- }
+ if (to_intel_framebuffer(crtc->fb)->obj == obj)
+ intel_decrease_pllclock(crtc);
}
}
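/* Sketch of the new busy/idle flow that replaces the removed timers: GEM
 * marks a scanout object busy or idle directly,
 *
 *	intel_mark_fb_busy(obj);	upclocks the crtc scanning out obj
 *	...
 *	intel_mark_fb_idle(obj);	downclocks it again
 *
 * and only the crtc whose framebuffer wraps that object is touched.
 */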
@@ -6394,7 +6461,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
default:
WARN_ONCE(1, "unknown plane in flip command\n");
ret = -ENODEV;
- goto err;
+ goto err_unpin;
}
ret = intel_ring_begin(ring, 4);
@@ -6502,7 +6569,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup_pending;
intel_disable_fbc(dev);
- intel_mark_busy(dev, obj);
+ intel_mark_fb_busy(obj);
mutex_unlock(&dev->struct_mutex);
trace_i915_flip_request(intel_crtc->plane, obj);
@@ -6527,81 +6594,807 @@ free_work:
return ret;
}
-static void intel_sanitize_modesetting(struct drm_device *dev,
- int pipe, int plane)
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
+ .load_lut = intel_crtc_load_lut,
+ .disable = intel_crtc_noop,
+};
+
+bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg, val;
- int i;
+ struct intel_encoder *other_encoder;
+ struct drm_crtc *crtc = &encoder->new_crtc->base;
- /* Clear any frame start delays used for debugging left by the BIOS */
- for_each_pipe(i) {
- reg = PIPECONF(i);
- I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+ if (WARN_ON(!crtc))
+ return false;
+
+ list_for_each_entry(other_encoder,
+ &crtc->dev->mode_config.encoder_list,
+ base.head) {
+
+ if (&other_encoder->new_crtc->base != crtc ||
+ encoder == other_encoder)
+ continue;
+ else
+ return true;
}
- if (HAS_PCH_SPLIT(dev))
- return;
+ return false;
+}
- /* Who knows what state these registers were left in by the BIOS or
- * grub?
- *
- * If we leave the registers in a conflicting state (e.g. with the
- * display plane reading from the other pipe than the one we intend
- * to use) then when we attempt to teardown the active mode, we will
- * not disable the pipes and planes in the correct order -- leaving
- * a plane reading from a disabled pipe and possibly leading to
- * undefined behaviour.
+static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
+ struct drm_crtc *crtc)
+{
+ struct drm_device *dev;
+ struct drm_crtc *tmp;
+ int crtc_mask = 1;
+
+ WARN(!crtc, "checking null crtc?\n");
+
+ dev = crtc->dev;
+
+ list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+ if (tmp == crtc)
+ break;
+ crtc_mask <<= 1;
+ }
+
+ if (encoder->possible_crtcs & crtc_mask)
+ return true;
+ return false;
+}
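/* Worked example: for the third crtc in the mode_config list the loop
 * leaves crtc_mask == (1 << 2), so an encoder whose possible_crtcs is 0x3
 * (pipes A and B only) is correctly rejected.
 */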
+
+/**
+ * intel_modeset_update_staged_output_state
+ *
+ * Updates the staged output configuration state, e.g. after we've read out the
+ * current hw state.
+ */
+static void intel_modeset_update_staged_output_state(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ connector->new_encoder =
+ to_intel_encoder(connector->base.encoder);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ encoder->new_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ }
+}
+
+/**
+ * intel_modeset_commit_output_state
+ *
+ * This function copies the staged display pipe configuration to the real one.
+ */
+static void intel_modeset_commit_output_state(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ connector->base.encoder = &connector->new_encoder->base;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ encoder->base.crtc = &encoder->new_crtc->base;
+ }
+}
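
The two helpers above implement a stage-then-commit pattern: the new_encoder
and new_crtc pointers hold the proposed configuration, committing copies them
into the live drm pointers, and readout goes the other way. A minimal
standalone sketch of the idea (hypothetical types, not part of this patch):

    struct target;

    struct link {
            struct target *committed;   /* what the hardware currently uses */
            struct target *staged;      /* what the next modeset will use */
    };

    static void stage(struct link *l, struct target *t) { l->staged = t; }
    static void commit(struct link *l) { l->committed = l->staged; }
    static void update_staged(struct link *l) { l->staged = l->committed; }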
+
+static struct drm_display_mode *
+intel_modeset_adjusted_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_display_mode *adjusted_mode;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct intel_encoder *encoder;
+
+ adjusted_mode = drm_mode_duplicate(dev, mode);
+ if (!adjusted_mode)
+ return ERR_PTR(-ENOMEM);
+
+ /* Pass our mode to the connectors and the CRTC to give them a chance to
+ * adjust it according to limitations or connector properties, and also
+ * a chance to reject the mode entirely.
*/
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
- reg = DSPCNTR(plane);
- val = I915_READ(reg);
+ if (&encoder->new_crtc->base != crtc)
+ continue;
+ encoder_funcs = encoder->base.helper_private;
+ if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
+ adjusted_mode))) {
+ DRM_DEBUG_KMS("Encoder fixup failed\n");
+ goto fail;
+ }
+ }
- if ((val & DISPLAY_PLANE_ENABLE) == 0)
- return;
- if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
- return;
+ if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
+ DRM_DEBUG_KMS("CRTC fixup failed\n");
+ goto fail;
+ }
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
- /* This display plane is active and attached to the other CPU pipe. */
- pipe = !pipe;
+ return adjusted_mode;
+fail:
+ drm_mode_destroy(dev, adjusted_mode);
+ return ERR_PTR(-EINVAL);
+}
- /* Disable the plane and wait for it to stop reading from the pipe. */
- intel_disable_plane(dev_priv, plane, pipe);
- intel_disable_pipe(dev_priv, pipe);
+/* Computes which crtcs are affected and sets the relevant bits in the masks.
+ * For simplicity we use the crtc's pipe number (because it's easier to obtain). */
+static void
+intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
+ unsigned *prepare_pipes, unsigned *disable_pipes)
+{
+ struct intel_crtc *intel_crtc;
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_crtc *tmp_crtc;
+
+ *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
+
+	/* Check which crtcs have changed outputs connected to them; these need
+	 * to be part of the prepare_pipes mask. We don't (yet) support global
+	 * modesets across multiple crtcs, so modeset_pipes will have at most
+	 * one bit set. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->base.encoder == &connector->new_encoder->base)
+ continue;
+
+ if (connector->base.encoder) {
+ tmp_crtc = connector->base.encoder->crtc;
+
+ *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+ }
+
+ if (connector->new_encoder)
+ *prepare_pipes |=
+ 1 << connector->new_encoder->new_crtc->pipe;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->base.crtc == &encoder->new_crtc->base)
+ continue;
+
+ if (encoder->base.crtc) {
+ tmp_crtc = encoder->base.crtc;
+
+ *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+ }
+
+ if (encoder->new_crtc)
+ *prepare_pipes |= 1 << encoder->new_crtc->pipe;
+ }
+
+ /* Check for any pipes that will be fully disabled ... */
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ bool used = false;
+
+ /* Don't try to disable disabled crtcs. */
+ if (!intel_crtc->base.enabled)
+ continue;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->new_crtc == intel_crtc)
+ used = true;
+ }
+
+ if (!used)
+ *disable_pipes |= 1 << intel_crtc->pipe;
+ }
+
+	/* set_mode is also used to update properties on live display pipes. */
+ intel_crtc = to_intel_crtc(crtc);
+ if (crtc->enabled)
+ *prepare_pipes |= 1 << intel_crtc->pipe;
+
+	/* We only support modeset on a single crtc, hence we need to do that
+	 * only for the passed-in crtc iff we change anything besides just
+	 * disabling crtcs.
+	 *
+	 * This is actually not true: to be fully compatible with the old crtc
+	 * helper we automatically disable _any_ output (i.e. it doesn't need to
+	 * be connected to the crtc we're modesetting on) if it's disconnected.
+	 * Which is a rather nutty api (since changing the output configuration
+	 * without userspace's explicit request can lead to confusion), but
+	 * alas. Hence we currently need to modeset on all pipes we prepare. */
+ if (*prepare_pipes)
+ *modeset_pipes = *prepare_pipes;
+
+ /* ... and mask these out. */
+ *modeset_pipes &= ~(*disable_pipes);
+ *prepare_pipes &= ~(*disable_pipes);
}
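
The three outputs are plain per-pipe bitmasks, so the final masking step is
ordinary bit arithmetic. A small self-contained illustration with made-up
pipe assignments (not taken from the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned int modeset = 0, prepare = 0, disable = 0;

            prepare |= 1u << 0;     /* pipe A gains or changes an output */
            disable |= 1u << 1;     /* pipe B loses all of its outputs */

            if (prepare)            /* currently: modeset everything prepared */
                    modeset = prepare;

            modeset &= ~disable;    /* fully disabled pipes need no modeset */
            prepare &= ~disable;

            printf("modeset=%x prepare=%x disable=%x\n",
                   modeset, prepare, disable);
            return 0;
    }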
-static void intel_crtc_reset(struct drm_crtc *crtc)
+static bool intel_crtc_in_use(struct drm_crtc *crtc)
{
+ struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- /* Reset flags back to the 'unknown' status so that they
- * will be correctly set on the initial modeset.
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc)
+ return true;
+
+ return false;
+}
+
+static void
+intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
+{
+ struct intel_encoder *intel_encoder;
+ struct intel_crtc *intel_crtc;
+ struct drm_connector *connector;
+
+ list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (!intel_encoder->base.crtc)
+ continue;
+
+ intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+
+ if (prepare_pipes & (1 << intel_crtc->pipe))
+ intel_encoder->connectors_active = false;
+ }
+
+ intel_modeset_commit_output_state(dev);
+
+ /* Update computed state. */
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder || !connector->encoder->crtc)
+ continue;
+
+ intel_crtc = to_intel_crtc(connector->encoder->crtc);
+
+ if (prepare_pipes & (1 << intel_crtc->pipe)) {
+ struct drm_property *dpms_property =
+ dev->mode_config.dpms_property;
+
+ connector->dpms = DRM_MODE_DPMS_ON;
+ drm_connector_property_set_value(connector,
+ dpms_property,
+ DRM_MODE_DPMS_ON);
+
+ intel_encoder = to_intel_encoder(connector->encoder);
+ intel_encoder->connectors_active = true;
+ }
+ }
+
+}
+
+#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
+ list_for_each_entry((intel_crtc), \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+		if ((mask) & (1 << (intel_crtc)->pipe))
+
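A hedged usage sketch of the macro, mirroring its call sites further down in
this patch: the trailing if-clause filters the list walk by pipe bit, so the
caller supplies the loop body as with any other list iterator.

    struct intel_crtc *intel_crtc;

    /* Enable exactly the crtcs whose pipe bit is set in prepare_pipes. */
    for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
            dev_priv->display.crtc_enable(&intel_crtc->base);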
+void
+intel_modeset_check_state(struct drm_device *dev)
+{
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ /* This also checks the encoder/connector hw state with the
+ * ->get_hw_state callbacks. */
+ intel_connector_check_state(connector);
+
+ WARN(&connector->new_encoder->base != connector->base.encoder,
+ "connector's staged encoder doesn't match current encoder\n");
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ bool enabled = false;
+ bool active = false;
+ enum pipe pipe, tracked_pipe;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+
+ WARN(&encoder->new_crtc->base != encoder->base.crtc,
+		     "encoder's staged crtc doesn't match current crtc\n");
+ WARN(encoder->connectors_active && !encoder->base.crtc,
+		     "encoder's connectors_active set, but no crtc\n");
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->base.encoder != &encoder->base)
+ continue;
+ enabled = true;
+ if (connector->base.dpms != DRM_MODE_DPMS_OFF)
+ active = true;
+ }
+ WARN(!!encoder->base.crtc != enabled,
+ "encoder's enabled state mismatch "
+ "(expected %i, found %i)\n",
+ !!encoder->base.crtc, enabled);
+ WARN(active && !encoder->base.crtc,
+ "active encoder with no crtc\n");
+
+ WARN(encoder->connectors_active != active,
+ "encoder's computed active state doesn't match tracked active state "
+ "(expected %i, found %i)\n", active, encoder->connectors_active);
+
+ active = encoder->get_hw_state(encoder, &pipe);
+ WARN(active != encoder->connectors_active,
+ "encoder's hw state doesn't match sw tracking "
+ "(expected %i, found %i)\n",
+ encoder->connectors_active, active);
+
+ if (!encoder->base.crtc)
+ continue;
+
+ tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+ WARN(active && pipe != tracked_pipe,
+		     "active encoder's pipe doesn't match "
+ "(expected %i, found %i)\n",
+ tracked_pipe, pipe);
+
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ bool enabled = false;
+ bool active = false;
+
+ DRM_DEBUG_KMS("[CRTC:%d]\n",
+ crtc->base.base.id);
+
+ WARN(crtc->active && !crtc->base.enabled,
+ "active crtc, but not enabled in sw tracking\n");
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->base.crtc != &crtc->base)
+ continue;
+ enabled = true;
+ if (encoder->connectors_active)
+ active = true;
+ }
+ WARN(active != crtc->active,
+ "crtc's computed active state doesn't match tracked active state "
+ "(expected %i, found %i)\n", active, crtc->active);
+ WARN(enabled != crtc->base.enabled,
+ "crtc's computed enabled state doesn't match tracked enabled state "
+ "(expected %i, found %i)\n", enabled, crtc->base.enabled);
+
+ assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
+ }
+}
+
+bool intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_encoder *encoder;
+ struct intel_crtc *intel_crtc;
+ unsigned disable_pipes, prepare_pipes, modeset_pipes;
+ bool ret = true;
+
+ intel_modeset_affected_pipes(crtc, &modeset_pipes,
+ &prepare_pipes, &disable_pipes);
+
+ DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
+ modeset_pipes, prepare_pipes, disable_pipes);
+
+ for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
+ intel_crtc_disable(&intel_crtc->base);
+
+ saved_hwmode = crtc->hwmode;
+ saved_mode = crtc->mode;
+
+ /* Hack: Because we don't (yet) support global modeset on multiple
+ * crtcs, we don't keep track of the new mode for more than one crtc.
+ * Hence simply check whether any bit is set in modeset_pipes in all the
+	 * pieces of code that are not yet converted to deal with multiple crtcs
+ * changing their mode at the same time. */
+ adjusted_mode = NULL;
+ if (modeset_pipes) {
+ adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
+ if (IS_ERR(adjusted_mode)) {
+ return false;
+ }
+ }
+
+ for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
+ if (intel_crtc->base.enabled)
+ dev_priv->display.crtc_disable(&intel_crtc->base);
+ }
+
+ /* crtc->mode is already used by the ->mode_set callbacks, hence we need
+	 * to set it here already, even though we also pass it down the callchain.
*/
- intel_crtc->dpms_mode = -1;
+ if (modeset_pipes)
+ crtc->mode = *mode;
- /* We need to fix up any BIOS configuration that conflicts with
- * our expectations.
+ /* Only after disabling all output pipelines that will be changed can we
+	 * update the output configuration. */
+ intel_modeset_update_state(dev, prepare_pipes);
+
+ /* Set up the DPLL and any encoders state that needs to adjust or depend
+ * on the DPLL.
*/
- intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
+ for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+ ret = !intel_crtc_mode_set(&intel_crtc->base,
+ mode, adjusted_mode,
+ x, y, fb);
+ if (!ret)
+ goto done;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != &intel_crtc->base)
+ continue;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.id, drm_get_encoder_name(encoder),
+ mode->base.id, mode->name);
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+ }
+ }
+
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
+ dev_priv->display.crtc_enable(&intel_crtc->base);
+
+ if (modeset_pipes) {
+ /* Store real post-adjustment hardware mode. */
+ crtc->hwmode = *adjusted_mode;
+
+ /* Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc);
+ }
+
+ /* FIXME: add subpixel order */
+done:
+ drm_mode_destroy(dev, adjusted_mode);
+ if (!ret && crtc->enabled) {
+ crtc->hwmode = saved_hwmode;
+ crtc->mode = saved_mode;
+ } else {
+ intel_modeset_check_state(dev);
+ }
+
+ return ret;
}
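
The strict ordering inside intel_set_mode() is the heart of the new modeset
sequence. In outline (a summary of the code above, not new behaviour):

    /*
     * intel_set_mode() in outline:
     *  1. compute the disable/prepare/modeset pipe masks
     *  2. crtc_disable() pipes that lose all of their outputs
     *  3. crtc_disable() pipes that will be re-programmed
     *  4. commit the staged output state (software bookkeeping only)
     *  5. crtc_mode_set() plus encoder ->mode_set on modeset pipes
     *  6. crtc_enable() on every prepared pipe
     *  7. cross-check sw against hw with intel_modeset_check_state()
     */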
-static struct drm_crtc_helper_funcs intel_helper_funcs = {
- .dpms = intel_crtc_dpms,
- .mode_fixup = intel_crtc_mode_fixup,
- .mode_set = intel_crtc_mode_set,
- .mode_set_base = intel_pipe_set_base,
- .mode_set_base_atomic = intel_pipe_set_base_atomic,
- .load_lut = intel_crtc_load_lut,
- .disable = intel_crtc_disable,
-};
+#undef for_each_intel_crtc_masked
+
+static void intel_set_config_free(struct intel_set_config *config)
+{
+ if (!config)
+ return;
+
+ kfree(config->save_connector_encoders);
+ kfree(config->save_encoder_crtcs);
+ kfree(config);
+}
+
+static int intel_set_config_save_state(struct drm_device *dev,
+ struct intel_set_config *config)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int count;
+
+ config->save_encoder_crtcs =
+ kcalloc(dev->mode_config.num_encoder,
+ sizeof(struct drm_crtc *), GFP_KERNEL);
+ if (!config->save_encoder_crtcs)
+ return -ENOMEM;
+
+ config->save_connector_encoders =
+ kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_encoder *), GFP_KERNEL);
+ if (!config->save_connector_encoders)
+ return -ENOMEM;
+
+ /* Copy data. Note that driver private data is not affected.
+	 * Should anything bad happen, only the expected state is
+	 * restored, not the driver's private bookkeeping.
+ */
+ count = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ config->save_encoder_crtcs[count++] = encoder->crtc;
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ config->save_connector_encoders[count++] = connector->encoder;
+ }
+
+ return 0;
+}
+
+static void intel_set_config_restore_state(struct drm_device *dev,
+ struct intel_set_config *config)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ int count;
+
+ count = 0;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ encoder->new_crtc =
+ to_intel_crtc(config->save_encoder_crtcs[count++]);
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+ connector->new_encoder =
+ to_intel_encoder(config->save_connector_encoders[count++]);
+ }
+}
+
+static void
+intel_set_config_compute_mode_changes(struct drm_mode_set *set,
+ struct intel_set_config *config)
+{
+
+ /* We should be able to check here if the fb has the same properties
+ * and then just flip_or_move it */
+ if (set->crtc->fb != set->fb) {
+ /* If we have no fb then treat it as a full mode set */
+ if (set->crtc->fb == NULL) {
+ DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+ config->mode_changed = true;
+ } else if (set->fb == NULL) {
+ config->mode_changed = true;
+ } else if (set->fb->depth != set->crtc->fb->depth) {
+ config->mode_changed = true;
+ } else if (set->fb->bits_per_pixel !=
+ set->crtc->fb->bits_per_pixel) {
+ config->mode_changed = true;
+ } else
+ config->fb_changed = true;
+ }
+
+ if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
+ config->fb_changed = true;
+
+ if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+ DRM_DEBUG_KMS("modes are different, full mode set\n");
+ drm_mode_debug_printmodeline(&set->crtc->mode);
+ drm_mode_debug_printmodeline(set->mode);
+ config->mode_changed = true;
+ }
+}
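
The helper boils down to: a missing fb on either side or a pixel-format
change forces a full modeset, while a same-format fb swap or pan only needs
a base update. A compilable sketch of that decision with simplified,
hypothetical types:

    #include <stdbool.h>
    #include <stddef.h>

    struct fb { int depth, bits_per_pixel; };

    static bool needs_full_modeset(const struct fb *cur, const struct fb *new)
    {
            if (cur == new)
                    return false;   /* same fb: at most a pan, not a modeset */
            if (!cur || !new)
                    return true;    /* enabling or disabling the crtc */
            return cur->depth != new->depth ||
                   cur->bits_per_pixel != new->bits_per_pixel;
    }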
+
+static int
+intel_modeset_stage_output_state(struct drm_device *dev,
+ struct drm_mode_set *set,
+ struct intel_set_config *config)
+{
+ struct drm_crtc *new_crtc;
+ struct intel_connector *connector;
+ struct intel_encoder *encoder;
+ int count, ro;
+
+	/* The upper layers ensure that we either disable a crtc or have a list
+ * of connectors. For paranoia, double-check this. */
+ WARN_ON(!set->fb && (set->num_connectors != 0));
+ WARN_ON(set->fb && (set->num_connectors == 0));
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+		/* Traverse the passed-in connector list and get encoders
+		 * for them. */
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == &connector->base) {
+ connector->new_encoder = connector->encoder;
+ break;
+ }
+ }
+
+ /* If we disable the crtc, disable all its connectors. Also, if
+ * the connector is on the changing crtc but not on the new
+ * connector list, disable it. */
+ if ((!set->fb || ro == set->num_connectors) &&
+ connector->base.encoder &&
+ connector->base.encoder->crtc == set->crtc) {
+ connector->new_encoder = NULL;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base));
+ }
+
+ if (&connector->new_encoder->base != connector->base.encoder) {
+ DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ config->mode_changed = true;
+ }
+
+ /* Disable all disconnected encoders. */
+ if (connector->base.status == connector_status_disconnected)
+ connector->new_encoder = NULL;
+ }
+ /* connector->new_encoder is now updated for all connectors. */
+
+ /* Update crtc of enabled connectors. */
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (!connector->new_encoder)
+ continue;
+
+ new_crtc = connector->new_encoder->base.crtc;
+
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == &connector->base)
+ new_crtc = set->crtc;
+ }
+
+ /* Make sure the new CRTC will work with the encoder */
+ if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
+ new_crtc)) {
+ return -EINVAL;
+ }
+ connector->encoder->new_crtc = to_intel_crtc(new_crtc);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base),
+ new_crtc->base.id);
+ }
+
+	/* Check for any encoders that need to be disabled. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->new_encoder == encoder) {
+ WARN_ON(!connector->new_encoder->new_crtc);
+
+ goto next_encoder;
+ }
+ }
+ encoder->new_crtc = NULL;
+next_encoder:
+ /* Only now check for crtc changes so we don't miss encoders
+ * that will be disabled. */
+ if (&encoder->new_crtc->base != encoder->base.crtc) {
+ DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ config->mode_changed = true;
+ }
+ }
+ /* Now we've also updated encoder->new_crtc for all encoders. */
+
+ return 0;
+}
+
+static int intel_crtc_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev;
+ struct drm_mode_set save_set;
+ struct intel_set_config *config;
+ int ret;
+
+ BUG_ON(!set);
+ BUG_ON(!set->crtc);
+ BUG_ON(!set->crtc->helper_private);
+
+ if (!set->mode)
+ set->fb = NULL;
+
+ /* The fb helper likes to play gross jokes with ->mode_set_config.
+ * Unfortunately the crtc helper doesn't do much at all for this case,
+ * so we have to cope with this madness until the fb helper is fixed up. */
+ if (set->fb && set->num_connectors == 0)
+ return 0;
+
+ if (set->fb) {
+ DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
+ } else {
+ DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+ }
+
+ dev = set->crtc->dev;
+
+ ret = -ENOMEM;
+ config = kzalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ goto out_config;
+
+ ret = intel_set_config_save_state(dev, config);
+ if (ret)
+ goto out_config;
+
+ save_set.crtc = set->crtc;
+ save_set.mode = &set->crtc->mode;
+ save_set.x = set->crtc->x;
+ save_set.y = set->crtc->y;
+ save_set.fb = set->crtc->fb;
+
+ /* Compute whether we need a full modeset, only an fb base update or no
+ * change at all. In the future we might also check whether only the
+ * mode changed, e.g. for LVDS where we only change the panel fitter in
+ * such cases. */
+ intel_set_config_compute_mode_changes(set, config);
+
+ ret = intel_modeset_stage_output_state(dev, set, config);
+ if (ret)
+ goto fail;
+
+ if (config->mode_changed) {
+ if (set->mode) {
+ DRM_DEBUG_KMS("attempting to set mode from"
+ " userspace\n");
+ drm_mode_debug_printmodeline(set->mode);
+ }
+
+ if (!intel_set_mode(set->crtc, set->mode,
+ set->x, set->y, set->fb)) {
+ DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+ set->crtc->base.id);
+ ret = -EINVAL;
+ goto fail;
+ }
+ } else if (config->fb_changed) {
+ ret = intel_pipe_set_base(set->crtc,
+ set->x, set->y, set->fb);
+ }
+
+ intel_set_config_free(config);
+
+ return 0;
+
+fail:
+ intel_set_config_restore_state(dev, config);
+
+ /* Try to restore the config */
+ if (config->mode_changed &&
+ !intel_set_mode(save_set.crtc, save_set.mode,
+ save_set.x, save_set.y, save_set.fb))
+ DRM_ERROR("failed to restore config after modeset failure\n");
+
+out_config:
+ intel_set_config_free(config);
+ return ret;
+}
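
The error handling follows a save/try/restore pattern: the current
encoder->crtc and connector->encoder pointers are saved up front, the new
state is staged and attempted, and on failure the saved pointers are staged
again and the previous mode set back. A hypothetical miniature of that
bookkeeping (not the driver's types):

    #include <stdlib.h>
    #include <string.h>

    struct snapshot { void **ptrs; size_t n; };

    static int snapshot_save(struct snapshot *s, void *const *live, size_t n)
    {
            s->ptrs = calloc(n, sizeof(*s->ptrs));
            if (!s->ptrs)
                    return -1;
            memcpy(s->ptrs, live, n * sizeof(*live));
            s->n = n;
            return 0;
    }

    static void snapshot_restore(const struct snapshot *s, void **live)
    {
            memcpy(live, s->ptrs, s->n * sizeof(*live));
            free(s->ptrs);
    }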
static const struct drm_crtc_funcs intel_crtc_funcs = {
- .reset = intel_crtc_reset,
.cursor_set = intel_crtc_cursor_set,
.cursor_move = intel_crtc_cursor_move,
.gamma_set = intel_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
+ .set_config = intel_crtc_set_config,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
};
@@ -6655,24 +7448,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
- intel_crtc_reset(&intel_crtc->base);
- intel_crtc->active = true; /* force the pipe off on setup_init_config */
intel_crtc->bpp = 24; /* default for pre-Ironlake */
- if (HAS_PCH_SPLIT(dev)) {
- intel_helper_funcs.prepare = ironlake_crtc_prepare;
- intel_helper_funcs.commit = ironlake_crtc_commit;
- } else {
- intel_helper_funcs.prepare = i9xx_crtc_prepare;
- intel_helper_funcs.commit = i9xx_crtc_commit;
- }
-
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
-
- intel_crtc->busy = false;
-
- setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
- (unsigned long)intel_crtc);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -6699,15 +7477,23 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
return 0;
}
-static int intel_encoder_clones(struct drm_device *dev, int type_mask)
+static int intel_encoder_clones(struct intel_encoder *encoder)
{
- struct intel_encoder *encoder;
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_encoder *source_encoder;
int index_mask = 0;
int entry = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
- if (type_mask & encoder->clone_mask)
+ list_for_each_entry(source_encoder,
+ &dev->mode_config.encoder_list, base.head) {
+
+ if (encoder == source_encoder)
index_mask |= (1 << entry);
+
+		/* Intel hw has only one MUX where encoders could be cloned. */
+ if (encoder->cloneable && source_encoder->cloneable)
+ index_mask |= (1 << entry);
+
entry++;
}
@@ -6748,10 +7534,10 @@ static void intel_setup_outputs(struct drm_device *dev)
dpd_is_edp = intel_dpd_is_edp(dev);
if (has_edp_a(dev))
- intel_dp_init(dev, DP_A);
+ intel_dp_init(dev, DP_A, PORT_A);
if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_D);
+ intel_dp_init(dev, PCH_DP_D, PORT_D);
}
intel_crt_init(dev);
@@ -6782,22 +7568,22 @@ static void intel_setup_outputs(struct drm_device *dev)
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, PCH_SDVOB, true);
if (!found)
- intel_hdmi_init(dev, HDMIB);
+ intel_hdmi_init(dev, HDMIB, PORT_B);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_B);
+ intel_dp_init(dev, PCH_DP_B, PORT_B);
}
if (I915_READ(HDMIC) & PORT_DETECTED)
- intel_hdmi_init(dev, HDMIC);
+ intel_hdmi_init(dev, HDMIC, PORT_C);
if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
- intel_hdmi_init(dev, HDMID);
+ intel_hdmi_init(dev, HDMID, PORT_D);
if (I915_READ(PCH_DP_C) & DP_DETECTED)
- intel_dp_init(dev, PCH_DP_C);
+ intel_dp_init(dev, PCH_DP_C, PORT_C);
if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_D);
+ intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
int found;
@@ -6805,17 +7591,17 @@ static void intel_setup_outputs(struct drm_device *dev)
/* SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, SDVOB, true);
if (!found)
- intel_hdmi_init(dev, SDVOB);
+ intel_hdmi_init(dev, SDVOB, PORT_B);
if (!found && (I915_READ(DP_B) & DP_DETECTED))
- intel_dp_init(dev, DP_B);
+ intel_dp_init(dev, DP_B, PORT_B);
}
if (I915_READ(SDVOC) & PORT_DETECTED)
- intel_hdmi_init(dev, SDVOC);
+ intel_hdmi_init(dev, SDVOC, PORT_C);
/* Shares lanes with HDMI on SDVOC */
if (I915_READ(DP_C) & DP_DETECTED)
- intel_dp_init(dev, DP_C);
+ intel_dp_init(dev, DP_C, PORT_C);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -6824,12 +7610,12 @@ static void intel_setup_outputs(struct drm_device *dev)
found = intel_sdvo_init(dev, SDVOB, true);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
- intel_hdmi_init(dev, SDVOB);
+ intel_hdmi_init(dev, SDVOB, PORT_B);
}
if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_B\n");
- intel_dp_init(dev, DP_B);
+ intel_dp_init(dev, DP_B, PORT_B);
}
}
@@ -6844,18 +7630,18 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
- intel_hdmi_init(dev, SDVOC);
+ intel_hdmi_init(dev, SDVOC, PORT_C);
}
if (SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_C\n");
- intel_dp_init(dev, DP_C);
+ intel_dp_init(dev, DP_C, PORT_C);
}
}
if (SUPPORTS_INTEGRATED_DP(dev) &&
(I915_READ(DP_D) & DP_DETECTED)) {
DRM_DEBUG_KMS("probing DP_D\n");
- intel_dp_init(dev, DP_D);
+ intel_dp_init(dev, DP_D, PORT_D);
}
} else if (IS_GEN2(dev))
intel_dvo_init(dev);
@@ -6866,12 +7652,9 @@ static void intel_setup_outputs(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
- intel_encoder_clones(dev, encoder->clone_mask);
+ intel_encoder_clones(encoder);
}
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ironlake_init_pch_refclk(dev);
}
@@ -6973,13 +7756,15 @@ static void intel_init_display(struct drm_device *dev)
/* We always want a DPMS function */
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->display.dpms = ironlake_crtc_dpms;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.crtc_enable = ironlake_crtc_enable;
+ dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_plane = ironlake_update_plane;
} else {
- dev_priv->display.dpms = i9xx_crtc_dpms;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.crtc_enable = i9xx_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_plane = i9xx_update_plane;
}
@@ -7023,7 +7808,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_HASWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.write_eld = haswell_write_eld;
} else
dev_priv->display.update_wm = NULL;
} else if (IS_G4X(dev)) {
@@ -7101,21 +7886,16 @@ static struct intel_quirk intel_quirks[] = {
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
- /* Thinkpad R31 needs pipe A force quirk */
- { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
- /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
- { 0x3577, 0x1014, 0x0513, quirk_pipea_force },
- /* ThinkPad X40 needs pipe A force quirk */
-
/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
/* 855 & before need to leave pipe A & dpll A up */
{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+ { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
/* Lenovo U160 cannot use SSC on LVDS */
{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
@@ -7231,10 +8011,251 @@ void intel_modeset_init(struct drm_device *dev)
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
+}
- INIT_WORK(&dev_priv->idle_work, intel_idle_update);
- setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
- (unsigned long)dev);
+static void
+intel_connector_break_all_links(struct intel_connector *connector)
+{
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ connector->encoder->connectors_active = false;
+ connector->encoder->base.crtc = NULL;
+}
+
+static void intel_enable_pipe_a(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+ struct drm_connector *crt = NULL;
+ struct intel_load_detect_pipe load_detect_temp;
+
+	/* We can't just switch on pipe A; we need to set things up with a
+ * proper mode and output configuration. As a gross hack, enable pipe A
+ * by enabling the load detect pipe once. */
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
+ crt = &connector->base;
+ break;
+ }
+ }
+
+ if (!crt)
+ return;
+
+ if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
+ intel_release_load_detect_pipe(crt, &load_detect_temp);
+}
+
+static void intel_sanitize_crtc(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg, val;
+
+ /* Clear any frame start delays used for debugging left by the BIOS */
+ reg = PIPECONF(crtc->pipe);
+ I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+
+ /* We need to sanitize the plane -> pipe mapping first because this will
+ * disable the crtc (and hence change the state) if it is wrong. */
+ if (!HAS_PCH_SPLIT(dev)) {
+ struct intel_connector *connector;
+ bool plane;
+
+ reg = DSPCNTR(crtc->plane);
+ val = I915_READ(reg);
+
+		if ((val & DISPLAY_PLANE_ENABLE) == 0 ||
+ (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
+ goto ok;
+
+ DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
+ crtc->base.base.id);
+
+ /* Pipe has the wrong plane attached and the plane is active.
+ * Temporarily change the plane mapping and disable everything
+ * ... */
+ plane = crtc->plane;
+ crtc->plane = !plane;
+ dev_priv->display.crtc_disable(&crtc->base);
+ crtc->plane = plane;
+
+ /* ... and break all links. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder->base.crtc != &crtc->base)
+ continue;
+
+ intel_connector_break_all_links(connector);
+ }
+
+ WARN_ON(crtc->active);
+ crtc->base.enabled = false;
+ }
+ok:
+
+ if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+ crtc->pipe == PIPE_A && !crtc->active) {
+		/* BIOS forgot to enable pipe A; this mostly happens after
+		 * resume. Force-enable the pipe to fix this; the update_dpms
+		 * call below will restore the pipe to the right state, but
+		 * leave the required bits on. */
+ intel_enable_pipe_a(dev);
+ }
+
+ /* Adjust the state of the output pipe according to whether we
+ * have active connectors/encoders. */
+ intel_crtc_update_dpms(&crtc->base);
+
+ if (crtc->active != crtc->base.enabled) {
+ struct intel_encoder *encoder;
+
+ /* This can happen either due to bugs in the get_hw_state
+ * functions or because the pipe is force-enabled due to the
+ * pipe A quirk. */
+ DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
+ crtc->base.base.id,
+ crtc->base.enabled ? "enabled" : "disabled",
+ crtc->active ? "enabled" : "disabled");
+
+ crtc->base.enabled = crtc->active;
+
+ /* Because we only establish the connector -> encoder ->
+ * crtc links if something is active, this means the
+		 * crtc is now deactivated. Break the links. Connector
+		 * -> encoder links are only established when things are
+		 * actually up, hence there is no need to break them. */
+ WARN_ON(crtc->active);
+
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+ WARN_ON(encoder->connectors_active);
+ encoder->base.crtc = NULL;
+ }
+ }
+}
+
+static void intel_sanitize_encoder(struct intel_encoder *encoder)
+{
+ struct intel_connector *connector;
+ struct drm_device *dev = encoder->base.dev;
+
+ /* We need to check both for a crtc link (meaning that the
+ * encoder is active and trying to read from a pipe) and the
+ * pipe itself being active. */
+ bool has_active_crtc = encoder->base.crtc &&
+ to_intel_crtc(encoder->base.crtc)->active;
+
+ if (encoder->connectors_active && !has_active_crtc) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+
+ /* Connector is active, but has no active pipe. This is
+ * fallout from our resume register restoring. Disable
+ * the encoder manually again. */
+ if (encoder->base.crtc) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+ encoder->disable(encoder);
+ }
+
+ /* Inconsistent output/port/pipe state happens presumably due to
+ * a bug in one of the get_hw_state functions. Or someplace else
+ * in our code, like the register restore mess on resume. Clamp
+ * things to off as a safer default. */
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ intel_connector_break_all_links(connector);
+ }
+ }
+ /* Enabled encoders without active connectors will be fixed in
+ * the crtc fixup. */
+}
+
+/* Scans out the current hw modeset state, sanitizes it and maps it into the
+ * drm and i915 state tracking structures. */
+void intel_modeset_setup_hw_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ u32 tmp;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ tmp = I915_READ(PIPECONF(pipe));
+ if (tmp & PIPECONF_ENABLE)
+ crtc->active = true;
+ else
+ crtc->active = false;
+
+ crtc->base.enabled = crtc->active;
+
+ DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
+ crtc->base.base.id,
+ crtc->active ? "enabled" : "disabled");
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ pipe = 0;
+
+ if (encoder->get_hw_state(encoder, &pipe)) {
+ encoder->base.crtc =
+ dev_priv->pipe_to_crtc_mapping[pipe];
+ } else {
+ encoder->base.crtc = NULL;
+ }
+
+ encoder->connectors_active = false;
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base),
+ encoder->base.crtc ? "enabled" : "disabled",
+ pipe);
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->get_hw_state(connector)) {
+ connector->base.dpms = DRM_MODE_DPMS_ON;
+ connector->encoder->connectors_active = true;
+ connector->base.encoder = &connector->encoder->base;
+ } else {
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ }
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base),
+ connector->base.encoder ? "enabled" : "disabled");
+ }
+
+ /* HW state is read out, now we need to sanitize this mess. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ intel_sanitize_encoder(encoder);
+ }
+
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ intel_sanitize_crtc(crtc);
+ }
+
+ intel_modeset_update_staged_output_state(dev);
+
+ intel_modeset_check_state(dev);
}
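
The readout order above matters: crtc state must be known before encoders
can be mapped to pipes, and encoder links must exist before connectors can
attach to them. In outline:

    /*
     * intel_modeset_setup_hw_state() in outline:
     *  1. crtcs:      PIPECONF says which pipes are running
     *  2. encoders:   ->get_hw_state() maps each encoder to a pipe (or none)
     *  3. connectors: ->get_hw_state() sets dpms and the encoder links
     *  4. sanitize encoders, then crtcs, fixing any inconsistent state
     *  5. sync the staged sw state and run intel_modeset_check_state()
     */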
void intel_modeset_gem_init(struct drm_device *dev)
@@ -7242,6 +8263,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
+
+ intel_modeset_setup_hw_state(dev);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -7280,19 +8303,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
* enqueue unpin/hotplug work. */
drm_irq_uninstall(dev);
cancel_work_sync(&dev_priv->hotplug_work);
- cancel_work_sync(&dev_priv->rps_work);
+ cancel_work_sync(&dev_priv->rps.work);
/* flush any delayed tasks or pending work */
flush_scheduled_work();
- /* Shut off idle work before the crtcs get freed. */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- intel_crtc = to_intel_crtc(crtc);
- del_timer_sync(&intel_crtc->idle_timer);
- }
- del_timer_sync(&dev_priv->idle_timer);
- cancel_work_sync(&dev_priv->idle_work);
-
drm_mode_config_cleanup(dev);
}
@@ -7338,7 +8353,7 @@ struct intel_display_error_state {
u32 position;
u32 base;
u32 size;
- } cursor[2];
+ } cursor[I915_MAX_PIPES];
struct intel_pipe_error_state {
u32 conf;
@@ -7350,7 +8365,7 @@ struct intel_display_error_state {
u32 vtotal;
u32 vblank;
u32 vsync;
- } pipe[2];
+ } pipe[I915_MAX_PIPES];
struct intel_plane_error_state {
u32 control;
@@ -7360,7 +8375,7 @@ struct intel_display_error_state {
u32 addr;
u32 surface;
u32 tile_offset;
- } plane[2];
+ } plane[I915_MAX_PIPES];
};
struct intel_display_error_state *
@@ -7374,7 +8389,7 @@ intel_display_capture_error_state(struct drm_device *dev)
if (error == NULL)
return NULL;
- for (i = 0; i < 2; i++) {
+ for_each_pipe(i) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
error->cursor[i].base = I915_READ(CURBASE(i));
@@ -7407,9 +8422,11 @@ intel_display_print_error_state(struct seq_file *m,
struct drm_device *dev,
struct intel_display_error_state *error)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
int i;
- for (i = 0; i < 2; i++) {
+ seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe);
+ for_each_pipe(i) {
seq_printf(m, "Pipe [%d]:\n", i);
seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index ace757af9133..1474f84fdbd0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -36,42 +36,10 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
-#include "drm_dp_helper.h"
-#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
-#define DP_LINK_CONFIGURATION_SIZE 9
-
-struct intel_dp {
- struct intel_encoder base;
- uint32_t output_reg;
- uint32_t DP;
- uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
- bool has_audio;
- enum hdmi_force_audio force_audio;
- uint32_t color_range;
- int dpms_mode;
- uint8_t link_bw;
- uint8_t lane_count;
- uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
- struct i2c_adapter adapter;
- struct i2c_algo_dp_aux_data algo;
- bool is_pch_edp;
- uint8_t train_set[4];
- int panel_power_up_delay;
- int panel_power_down_delay;
- int panel_power_cycle_delay;
- int backlight_on_delay;
- int backlight_off_delay;
- struct drm_display_mode *panel_fixed_mode; /* for eDP */
- struct delayed_work panel_vdd_work;
- bool want_panel_vdd;
- struct edid *edid; /* cached EDID for eDP */
- int edid_mode_count;
-};
-
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
@@ -840,9 +808,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
}
}
-static void ironlake_edp_pll_on(struct drm_encoder *encoder);
-static void ironlake_edp_pll_off(struct drm_encoder *encoder);
-
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -853,14 +818,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- /* Turn on the eDP PLL if needed */
- if (is_edp(intel_dp)) {
- if (!is_pch_edp(intel_dp))
- ironlake_edp_pll_on(encoder);
- else
- ironlake_edp_pll_off(encoder);
- }
-
/*
* There are four kinds of DP registers:
*
@@ -882,10 +839,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
* supposed to be read-only.
*/
intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
- intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
/* Handle DP bits in common between all three register formats */
-
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
switch (intel_dp->lane_count) {
@@ -932,7 +887,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= intel_crtc->pipe << 29;
/* don't miss out required setting for eDP */
- intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
@@ -954,7 +908,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (is_cpu_edp(intel_dp)) {
/* don't miss out required setting for eDP */
- intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
@@ -1225,27 +1178,49 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
msleep(intel_dp->backlight_off_delay);
}
-static void ironlake_edp_pll_on(struct drm_encoder *encoder)
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
- dpa_ctl |= DP_PLL_ENABLE;
- I915_WRITE(DP_A, dpa_ctl);
+ WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
+ WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+ /* We don't adjust intel_dp->DP while tearing down the link, to
+ * facilitate link retraining (e.g. after hotplug). Hence clear all
+ * enable bits here to ensure that we don't enable too much. */
+ intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ intel_dp->DP |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, intel_dp->DP);
POSTING_READ(DP_A);
udelay(200);
}
-static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
dpa_ctl = I915_READ(DP_A);
+ WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
+ "dp pll off, should be on\n");
+ WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+ /* We can't rely on the value tracked for the DP register in
+ * intel_dp->DP because link_down must not change that (otherwise link
+	 * re-training will fail). */
dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
POSTING_READ(DP_A);
@@ -1282,10 +1257,57 @@ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
}
}
-static void intel_dp_prepare(struct drm_encoder *encoder)
+static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ(intel_dp->output_reg);
+ if (!(tmp & DP_PORT_EN))
+ return false;
+
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+ *pipe = PORT_TO_PIPE(tmp);
+ } else {
+ u32 trans_sel;
+ u32 trans_dp;
+ int i;
+
+ switch (intel_dp->output_reg) {
+ case PCH_DP_B:
+ trans_sel = TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ trans_sel = TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ trans_sel = TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ return true;
+ }
+
+ for_each_pipe(i) {
+ trans_dp = I915_READ(TRANS_DP_CTL(i));
+ if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
+ *pipe = i;
+ return true;
+ }
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
+
+ return true;
+}
+
+static void intel_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
@@ -1293,14 +1315,31 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
ironlake_edp_panel_off(intel_dp);
- intel_dp_link_down(intel_dp);
+
+	/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
+ if (!is_cpu_edp(intel_dp))
+ intel_dp_link_down(intel_dp);
}
-static void intel_dp_commit(struct drm_encoder *encoder)
+static void intel_post_disable_dp(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = encoder->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (is_cpu_edp(intel_dp)) {
+ intel_dp_link_down(intel_dp);
+ ironlake_edp_pll_off(intel_dp);
+ }
+}
+
+static void intel_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+ if (WARN_ON(dp_reg & DP_PORT_EN))
+ return;
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -1309,47 +1348,14 @@ static void intel_dp_commit(struct drm_encoder *encoder)
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
-
- intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
-
- if (HAS_PCH_CPT(dev))
- intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
-static void
-intel_dp_dpms(struct drm_encoder *encoder, int mode)
+static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- if (mode != DRM_MODE_DPMS_ON) {
- /* Switching the panel off requires vdd. */
- ironlake_edp_panel_vdd_on(intel_dp);
- ironlake_edp_backlight_off(intel_dp);
- intel_dp_sink_dpms(intel_dp, mode);
- ironlake_edp_panel_off(intel_dp);
- intel_dp_link_down(intel_dp);
-
- if (is_cpu_edp(intel_dp))
- ironlake_edp_pll_off(encoder);
- } else {
- if (is_cpu_edp(intel_dp))
- ironlake_edp_pll_on(encoder);
-
- ironlake_edp_panel_vdd_on(intel_dp);
- intel_dp_sink_dpms(intel_dp, mode);
- if (!(dp_reg & DP_PORT_EN)) {
- intel_dp_start_link_train(intel_dp);
- ironlake_edp_panel_on(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, true);
- intel_dp_complete_link_train(intel_dp);
- } else
- ironlake_edp_panel_vdd_off(intel_dp, false);
- ironlake_edp_backlight_on(intel_dp);
- }
- intel_dp->dpms_mode = mode;
+ if (is_cpu_edp(intel_dp))
+ ironlake_edp_pll_on(intel_dp);
}
/*
@@ -1668,6 +1674,45 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+ dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
+
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ DRM_ERROR("DP training pattern 3 not supported\n");
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ }
+
+ } else {
+ dp_reg_value &= ~DP_LINK_TRAIN_MASK;
+
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ dp_reg_value |= DP_LINK_TRAIN_OFF;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ DRM_ERROR("DP training pattern 3 not supported\n");
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+ break;
+ }
+ }
+
I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg);
@@ -1675,12 +1720,15 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
- ret = intel_dp_aux_native_write(intel_dp,
- DP_TRAINING_LANE0_SET,
- intel_dp->train_set,
- intel_dp->lane_count);
- if (ret != intel_dp->lane_count)
- return false;
+ if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
+ DP_TRAINING_PATTERN_DISABLE) {
+ ret = intel_dp_aux_native_write(intel_dp,
+ DP_TRAINING_LANE0_SET,
+ intel_dp->train_set,
+ intel_dp->lane_count);
+ if (ret != intel_dp->lane_count)
+ return false;
+ }
return true;
}
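
With the pattern selection folded into intel_dp_set_link_train(), callers no
longer compute CPT-vs-non-CPT register values themselves; they just name the
training pattern. A hedged usage sketch mirroring the call sites touched
below:

    /* Clock recovery, then channel equalization, then training off. */
    intel_dp_set_link_train(intel_dp, DP,
                            DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);
    intel_dp_set_link_train(intel_dp, DP,
                            DP_TRAINING_PATTERN_2 | DP_LINK_SCRAMBLING_DISABLE);
    intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);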
@@ -1690,26 +1738,12 @@ static void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
int i;
uint8_t voltage;
bool clock_recovery = false;
int voltage_tries, loop_tries;
- u32 reg;
uint32_t DP = intel_dp->DP;
- /*
- * On CPT we have to enable the port in training pattern 1, which
- * will happen below in intel_dp_set_link_train. Otherwise, enable
- * the port and wait for it to become active.
- */
- if (!HAS_PCH_CPT(dev)) {
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- }
-
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
intel_dp->link_configuration,
@@ -1717,10 +1751,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- DP &= ~DP_LINK_TRAIN_MASK_CPT;
- else
- DP &= ~DP_LINK_TRAIN_MASK;
memset(intel_dp->train_set, 0, 4);
voltage = 0xff;
voltage_tries = 0;
@@ -1744,12 +1774,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
- else
- reg = DP | DP_LINK_TRAIN_PAT_1;
-
- if (!intel_dp_set_link_train(intel_dp, reg,
+ if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE))
break;
@@ -1804,10 +1829,8 @@ static void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
bool channel_eq = false;
int tries, cr_tries;
- u32 reg;
uint32_t DP = intel_dp->DP;
/* channel equalization */
@@ -1836,13 +1859,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
- else
- reg = DP | DP_LINK_TRAIN_PAT_2;
-
/* channel eq pattern */
- if (!intel_dp_set_link_train(intel_dp, reg,
+ if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_2 |
DP_LINK_SCRAMBLING_DISABLE))
break;
@@ -1877,15 +1895,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- reg = DP | DP_LINK_TRAIN_OFF_CPT;
- else
- reg = DP | DP_LINK_TRAIN_OFF;
-
- I915_WRITE(intel_dp->output_reg, reg);
- POSTING_READ(intel_dp->output_reg);
- intel_dp_aux_native_write_1(intel_dp,
- DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+ intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}
static void
@@ -1895,18 +1905,11 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
- if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+ if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
return;
DRM_DEBUG_KMS("\n");
- if (is_edp(intel_dp)) {
- DP &= ~DP_PLL_ENABLE;
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
- udelay(100);
- }
-
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
@@ -1918,13 +1921,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);
- if (is_edp(intel_dp)) {
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- DP |= DP_LINK_TRAIN_OFF_CPT;
- else
- DP |= DP_LINK_TRAIN_OFF;
- }
-
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
struct drm_crtc *crtc = intel_dp->base.base.crtc;
@@ -2033,10 +2029,10 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
- if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
+ if (!intel_dp->base.connectors_active)
return;
- if (!intel_dp->base.base.crtc)
+ if (WARN_ON(!intel_dp->base.base.crtc))
return;
/* Try to read receiver status if the link appears to be up */
@@ -2160,7 +2156,6 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
ret = drm_add_edid_modes(connector, intel_dp->edid);
drm_edid_to_eld(connector,
intel_dp->edid);
- connector->display_info.raw_edid = NULL;
return intel_dp->edid_mode_count;
}
@@ -2206,7 +2201,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
intel_dp->has_audio = drm_detect_monitor_audio(edid);
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
}
@@ -2271,8 +2265,6 @@ intel_dp_detect_audio(struct drm_connector *connector)
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
has_audio = drm_detect_monitor_audio(edid);
-
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -2326,9 +2318,8 @@ intel_dp_set_property(struct drm_connector *connector,
done:
if (intel_dp->base.base.crtc) {
struct drm_crtc *crtc = intel_dp->base.base.crtc;
- drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y,
- crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
}
return 0;
@@ -2362,15 +2353,13 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
- .dpms = intel_dp_dpms,
.mode_fixup = intel_dp_mode_fixup,
- .prepare = intel_dp_prepare,
.mode_set = intel_dp_mode_set,
- .commit = intel_dp_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_dp_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
@@ -2441,7 +2430,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
}
void
-intel_dp_init(struct drm_device *dev, int output_reg)
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
@@ -2456,7 +2445,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
return;
intel_dp->output_reg = output_reg;
- intel_dp->dpms_mode = -1;
+ intel_dp->port = port;
+ /* Preserve the current hw state. */
+ intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
@@ -2483,18 +2474,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
connector->polled = DRM_CONNECTOR_POLL_HPD;
- if (output_reg == DP_B || output_reg == PCH_DP_B)
- intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
- else if (output_reg == DP_C || output_reg == PCH_DP_C)
- intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
- else if (output_reg == DP_D || output_reg == PCH_DP_D)
- intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
+ intel_encoder->cloneable = false;
- if (is_edp(intel_dp)) {
- intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
- INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
- ironlake_panel_vdd_work);
- }
+ INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+ ironlake_panel_vdd_work);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
@@ -2508,29 +2491,33 @@ intel_dp_init(struct drm_device *dev, int output_reg)
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
+ intel_encoder->enable = intel_enable_dp;
+ intel_encoder->pre_enable = intel_pre_enable_dp;
+ intel_encoder->disable = intel_disable_dp;
+ intel_encoder->post_disable = intel_post_disable_dp;
+ intel_encoder->get_hw_state = intel_dp_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
/* Set up the DDC bus. */
- switch (output_reg) {
- case DP_A:
- name = "DPDDC-A";
- break;
- case DP_B:
- case PCH_DP_B:
- dev_priv->hotplug_supported_mask |=
- DPB_HOTPLUG_INT_STATUS;
- name = "DPDDC-B";
- break;
- case DP_C:
- case PCH_DP_C:
- dev_priv->hotplug_supported_mask |=
- DPC_HOTPLUG_INT_STATUS;
- name = "DPDDC-C";
- break;
- case DP_D:
- case PCH_DP_D:
- dev_priv->hotplug_supported_mask |=
- DPD_HOTPLUG_INT_STATUS;
- name = "DPDDC-D";
- break;
+ switch (port) {
+ case PORT_A:
+ name = "DPDDC-A";
+ break;
+ case PORT_B:
+ dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
+ name = "DPDDC-B";
+ break;
+ case PORT_C:
+ dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
+ name = "DPDDC-C";
+ break;
+ case PORT_D:
+ dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
+ name = "DPDDC-D";
+ break;
+ default:
+ WARN(1, "Invalid port %c\n", port_name(port));
+ break;
}
/* Cache some DPCD data in the eDP case */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd54cf88a28f..351fd7179cd7 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -31,6 +31,7 @@
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
+#include "drm_dp_helper.h"
#define _wait_for(COND, MS, W) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
@@ -40,7 +41,11 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if (W && drm_can_sleep()) msleep(W); \
+ if (W && drm_can_sleep()) { \
+ msleep(W); \
+ } else { \
+ cpu_relax(); \
+ } \
} \
ret__; \
})
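The reworked _wait_for() macro polls COND until it is true or MS milliseconds pass, sleeping W milliseconds between polls when sleeping is allowed and falling back to cpu_relax() busy-waiting otherwise. As a rough illustration, a minimal user-space sketch of the same poll-with-timeout shape (fake_status_read() and all other names here are illustrative stand-ins, not driver code):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Simulated "hardware" flag that becomes ready after a few polls. */
static int fake_status_reads;
static int fake_status_read(void)
{
	return ++fake_status_reads > 3;	/* "ready" on the 4th read */
}

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Poll cond() until it is true or timeout_ms expires, sleeping wait_ms
 * between polls. When wait_ms is 0 we just spin, which is where the
 * kernel macro instead inserts cpu_relax(). */
static int wait_for(int (*cond)(void), int timeout_ms, int wait_ms)
{
	long timeout = now_ms() + timeout_ms;

	while (!cond()) {
		if (now_ms() > timeout)
			return -1;	/* -ETIMEDOUT in the kernel macro */
		if (wait_ms)
			usleep(wait_ms * 1000);
	}
	return 0;
}

int main(void)
{
	printf("wait_for: %s\n",
	       wait_for(fake_status_read, 10, 1) ? "timed out" : "ok");
	return 0;
}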
@@ -90,25 +95,6 @@
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
-/* Intel Pipe Clone Bit */
-#define INTEL_HDMIB_CLONE_BIT 1
-#define INTEL_HDMIC_CLONE_BIT 2
-#define INTEL_HDMID_CLONE_BIT 3
-#define INTEL_HDMIE_CLONE_BIT 4
-#define INTEL_HDMIF_CLONE_BIT 5
-#define INTEL_SDVO_NON_TV_CLONE_BIT 6
-#define INTEL_SDVO_TV_CLONE_BIT 7
-#define INTEL_SDVO_LVDS_CLONE_BIT 8
-#define INTEL_ANALOG_CLONE_BIT 9
-#define INTEL_TV_CLONE_BIT 10
-#define INTEL_DP_B_CLONE_BIT 11
-#define INTEL_DP_C_CLONE_BIT 12
-#define INTEL_DP_D_CLONE_BIT 13
-#define INTEL_LVDS_CLONE_BIT 14
-#define INTEL_DVO_TMDS_CLONE_BIT 15
-#define INTEL_DVO_LVDS_CLONE_BIT 16
-#define INTEL_EDP_CLONE_BIT 17
-
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
@@ -151,16 +137,48 @@ struct intel_fbdev {
struct intel_encoder {
struct drm_encoder base;
+ /*
+ * The new crtc this encoder will be driven from. Only differs from
+ * base->crtc while a modeset is in progress.
+ */
+ struct intel_crtc *new_crtc;
+
int type;
bool needs_tv_clock;
+ /*
+	 * Intel hw has only one MUX where encoders could be cloned, hence a
+ * simple flag is enough to compute the possible_clones mask.
+ */
+ bool cloneable;
+ bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
+ void (*pre_enable)(struct intel_encoder *);
+ void (*enable)(struct intel_encoder *);
+ void (*disable)(struct intel_encoder *);
+ void (*post_disable)(struct intel_encoder *);
+	/* Read out the current hw state of this encoder, returning true if
+	 * the encoder is active. If the encoder is enabled it also sets the
+	 * pipe it is connected to in the pipe parameter. */
+ bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
int crtc_mask;
- int clone_mask;
};
struct intel_connector {
struct drm_connector base;
+ /*
+ * The fixed encoder this connector is connected to.
+ */
struct intel_encoder *encoder;
+
+ /*
+	 * The new encoder this connector will be driven by. Only differs from
+ * encoder while a modeset is in progress.
+ */
+ struct intel_encoder *new_encoder;
+
+	/* Reads out the current hw state, returning true if the connector is
+	 * enabled and active (i.e. dpms ON state). */
+ bool (*get_hw_state)(struct intel_connector *);
};
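The get_hw_state() hooks added above let the modeset code cross-check its software state against what the hardware actually has enabled: the encoder decodes its port register into an on/off answer plus a pipe, and the connector is considered active exactly when its fixed encoder reports an enabled port. A toy user-space model of that readout chain, with made-up register bits and names:

#include <stdbool.h>
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B };

struct encoder {
	unsigned int port_reg;	/* simulated port control register */
	bool (*get_hw_state)(struct encoder *, enum pipe *);
};

struct connector {
	struct encoder *encoder;	/* the fixed encoder */
};

#define PORT_ENABLE	(1u << 31)
#define PIPE_B_SELECT	(1u << 30)

static bool encoder_get_hw_state(struct encoder *enc, enum pipe *pipe)
{
	if (!(enc->port_reg & PORT_ENABLE))
		return false;
	*pipe = (enc->port_reg & PIPE_B_SELECT) ? PIPE_B : PIPE_A;
	return true;
}

/* Connector-level readout: active iff the encoder drives some pipe. */
static bool connector_get_hw_state(struct connector *conn)
{
	enum pipe pipe;

	return conn->encoder->get_hw_state(conn->encoder, &pipe);
}

int main(void)
{
	struct encoder enc = { PORT_ENABLE | PIPE_B_SELECT,
			       encoder_get_hw_state };
	struct connector conn = { &enc };

	printf("connector active: %d\n", connector_get_hw_state(&conn));
	return 0;
}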
struct intel_crtc {
@@ -168,11 +186,13 @@ struct intel_crtc {
enum pipe pipe;
enum plane plane;
u8 lut_r[256], lut_g[256], lut_b[256];
- int dpms_mode;
- bool active; /* is the crtc on? independent of the dpms mode */
+ /*
+	 * Whether the crtc and the connected output pipeline are active. Implies
+ * that crtc->enabled is set, i.e. the current mode configuration has
+ * some outputs connected to this crtc.
+ */
+ bool active;
bool primary_disabled; /* is the crtc obscured by a plane? */
- bool busy; /* is scanout buffer being updated frequently? */
- struct timer_list idle_timer;
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
@@ -311,6 +331,37 @@ struct intel_hdmi {
struct drm_display_mode *adjusted_mode);
};
+#define DP_RECEIVER_CAP_SIZE 0xf
+#define DP_LINK_CONFIGURATION_SIZE 9
+
+struct intel_dp {
+ struct intel_encoder base;
+ uint32_t output_reg;
+ uint32_t DP;
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ bool has_audio;
+ enum hdmi_force_audio force_audio;
+ enum port port;
+ uint32_t color_range;
+ uint8_t link_bw;
+ uint8_t lane_count;
+ uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+ struct i2c_adapter adapter;
+ struct i2c_algo_dp_aux_data algo;
+ bool is_pch_edp;
+ uint8_t train_set[4];
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ struct drm_display_mode *panel_fixed_mode; /* for eDP */
+ struct delayed_work panel_vdd_work;
+ bool want_panel_vdd;
+ struct edid *edid; /* cached EDID for eDP */
+ int edid_mode_count;
+};
+
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
@@ -350,17 +401,21 @@ extern void intel_attach_force_audio_property(struct drm_connector *connector);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
extern void intel_crt_init(struct drm_device *dev);
-extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
+extern void intel_hdmi_init(struct drm_device *dev,
+ int sdvox_reg, enum port port);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev,
- struct drm_i915_gem_object *obj);
+extern void intel_mark_busy(struct drm_device *dev);
+extern void intel_mark_idle(struct drm_device *dev);
+extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
+extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
extern bool intel_lvds_init(struct drm_device *dev);
-extern void intel_dp_init(struct drm_device *dev, int dp_reg);
+extern void intel_dp_init(struct drm_device *dev, int output_reg,
+ enum port port);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
@@ -373,8 +428,6 @@ extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
-void intel_sanitize_pm(struct drm_device *dev);
-
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
@@ -391,10 +444,27 @@ extern void intel_panel_disable_backlight(struct drm_device *dev);
extern void intel_panel_destroy_backlight(struct drm_device *dev);
extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+struct intel_set_config {
+ struct drm_encoder **save_connector_encoders;
+ struct drm_crtc **save_encoder_crtcs;
+
+ bool fb_changed;
+ bool mode_changed;
+};
+
+extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *old_fb);
+extern void intel_modeset_disable(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void intel_encoder_prepare(struct drm_encoder *encoder);
-extern void intel_encoder_commit(struct drm_encoder *encoder);
+extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
+extern void intel_encoder_noop(struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
+extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
+extern void intel_connector_dpms(struct drm_connector *, int mode);
+extern bool intel_connector_get_hw_state(struct intel_connector *connector);
+extern void intel_modeset_check_state(struct drm_device *dev);
+
static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
{
@@ -417,12 +487,10 @@ struct intel_load_detect_pipe {
bool load_detect_temp;
int dpms_mode;
};
-extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
- struct drm_connector *connector,
+extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old);
-extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
- struct drm_connector *connector,
+extern void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
extern void intelfb_restore(void);
@@ -503,7 +571,10 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
extern void ironlake_teardown_rc6(struct drm_device *dev);
-extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
+extern void intel_enable_ddi(struct intel_encoder *encoder);
+extern void intel_disable_ddi(struct intel_encoder *encoder);
+extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe);
extern void intel_ddi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 36c542e5036b..4f1fdcc44005 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -37,6 +37,7 @@
#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
#define TFP410_ADDR 0x38
+#define NS2501_ADDR 0x38
static const struct intel_dvo_device intel_dvo_devices[] = {
{
@@ -74,7 +75,14 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.slave_addr = 0x75,
.gpio = GMBUS_PORT_DPB,
.dev_ops = &ch7017_ops,
- }
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "ns2501",
+ .dvo_reg = DVOC,
+ .slave_addr = NS2501_ADDR,
+ .dev_ops = &ns2501_ops,
+ }
};
struct intel_dvo {
@@ -97,22 +105,91 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
struct intel_dvo, base);
}
-static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+
+ return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
+}
+
+static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+ if (!(tmp & DVO_ENABLE))
+ return false;
+
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_disable_dvo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ u32 dvo_reg = intel_dvo->dev.dvo_reg;
+ u32 temp = I915_READ(dvo_reg);
+
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+ I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
+ I915_READ(dvo_reg);
+}
+
+static void intel_enable_dvo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
+ I915_WRITE(dvo_reg, temp | DVO_ENABLE);
+ I915_READ(dvo_reg);
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+}
+
+static void intel_dvo_dpms(struct drm_connector *connector, int mode)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct drm_crtc *crtc;
+
+ /* dvo supports only 2 dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = intel_dvo->base.base.crtc;
+ if (!crtc) {
+ intel_dvo->base.connectors_active = false;
+ return;
+ }
+
if (mode == DRM_MODE_DPMS_ON) {
- I915_WRITE(dvo_reg, temp | DVO_ENABLE);
- I915_READ(dvo_reg);
- intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
+ intel_dvo->base.connectors_active = true;
+
+ intel_crtc_update_dpms(crtc);
+
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
} else {
- intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
- I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
- I915_READ(dvo_reg);
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+
+ intel_dvo->base.connectors_active = false;
+
+ intel_crtc_update_dpms(crtc);
}
+
+ intel_modeset_check_state(connector->dev);
}
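The connector-level dpms flow introduced here follows one shape: fold every mode other than ON down to OFF (the hardware knows only two states), filter redundant transitions, track connectors_active, and touch the hardware only when a crtc is actually attached. A compact sketch of that state machine under those assumptions, with simulated callbacks and illustrative names:

#include <stdbool.h>
#include <stdio.h>

enum dpms { DPMS_ON, DPMS_STANDBY, DPMS_SUSPEND, DPMS_OFF };

struct output {
	enum dpms dpms;
	bool has_crtc;
	bool connectors_active;
};

static void hw_set_active(bool on)
{
	printf("hw %s\n", on ? "enabled" : "disabled");
}

static void output_dpms(struct output *out, enum dpms mode)
{
	/* Only two hw states exist: fold everything else to OFF. */
	if (mode != DPMS_ON)
		mode = DPMS_OFF;

	if (mode == out->dpms)	/* nothing to do */
		return;
	out->dpms = mode;

	out->connectors_active = (mode == DPMS_ON);
	if (!out->has_crtc)	/* no pipe attached: track sw state only */
		return;

	hw_set_active(out->connectors_active);
}

int main(void)
{
	struct output out = { DPMS_OFF, true, false };

	output_dpms(&out, DPMS_ON);
	output_dpms(&out, DPMS_STANDBY);	/* treated as OFF */
	return 0;
}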
static int intel_dvo_mode_valid(struct drm_connector *connector,
@@ -267,15 +344,13 @@ static void intel_dvo_destroy(struct drm_connector *connector)
}
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
- .dpms = intel_dvo_dpms,
.mode_fixup = intel_dvo_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_dvo_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_dvo_dpms,
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -364,6 +439,11 @@ void intel_dvo_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->base,
&intel_dvo_enc_funcs, encoder_type);
+ intel_encoder->disable = intel_disable_dvo;
+ intel_encoder->enable = intel_enable_dvo;
+ intel_encoder->get_hw_state = intel_dvo_get_hw_state;
+ intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
+
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
struct drm_connector *connector = &intel_connector->base;
@@ -396,17 +476,14 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
- intel_encoder->clone_mask =
- (1 << INTEL_DVO_TMDS_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
+ intel_encoder->cloneable = true;
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
case INTEL_DVO_CHIP_LVDS:
- intel_encoder->clone_mask =
- (1 << INTEL_DVO_LVDS_CLONE_BIT);
+ intel_encoder->cloneable = false;
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 12dc3308ab8c..08f2b63d740a 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -151,6 +151,9 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VIDEO_DIP_DATA, 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
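Each *_write_infoframe() variant gains the same padding loop: after the payload, every remaining data-register slot is written with zeroes so the hardware computes the DIP ECC over deterministic bytes rather than whatever stale data was left behind. A self-contained sketch of the pattern, assuming a 32-byte data area as a stand-in for VIDEO_DIP_DATA_SIZE:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIP_DATA_SIZE 32	/* stand-in for VIDEO_DIP_DATA_SIZE */

/* Simulated 32-bit data-register writes land in this buffer. */
static uint8_t dip_fifo[DIP_DATA_SIZE];

static void write_dip_data(unsigned int off, uint32_t val)
{
	memcpy(&dip_fifo[off], &val, sizeof(val));
}

static void write_infoframe(const uint32_t *data, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i += 4)		/* payload */
		write_dip_data(i, data[i / 4]);
	for (; i < DIP_DATA_SIZE; i += 4)	/* zero the remainder */
		write_dip_data(i, 0);
}

int main(void)
{
	const uint32_t frame[] = { 0x82020d00, 0x12345678 };

	memset(dip_fifo, 0xff, sizeof(dip_fifo));	/* stale bytes */
	write_infoframe(frame, sizeof(frame));
	printf("byte 8 after padding: %#x\n",
	       (unsigned int)dip_fifo[8]);	/* prints 0 */
	return 0;
}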
@@ -186,6 +189,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -224,6 +230,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -259,6 +268,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -292,6 +304,9 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(data_reg + i, *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(data_reg + i, 0);
mmiowb();
val |= hsw_infoframe_enable(frame);
@@ -377,6 +392,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
port = VIDEO_DIP_PORT_C;
break;
default:
+ BUG();
return;
}
@@ -435,6 +451,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
port = VIDEO_DIP_PORT_D;
break;
default:
+ BUG();
return;
}
@@ -601,15 +618,36 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
-static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_hdmi->sdvox_reg);
+
+ if (!(tmp & SDVO_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_enable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
u32 enable_bits = SDVO_ENABLE;
- if (intel_hdmi->has_audio || mode != DRM_MODE_DPMS_ON)
+ if (intel_hdmi->has_audio)
enable_bits |= SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->sdvox_reg);
@@ -617,31 +655,12 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
/* HW workaround for IBX, we need to move the port to transcoder A
* before disabling it. */
if (HAS_PCH_IBX(dev)) {
- struct drm_crtc *crtc = encoder->crtc;
+ struct drm_crtc *crtc = encoder->base.crtc;
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
- if (mode != DRM_MODE_DPMS_ON) {
- if (temp & SDVO_PIPE_B_SELECT) {
- temp &= ~SDVO_PIPE_B_SELECT;
- I915_WRITE(intel_hdmi->sdvox_reg, temp);
- POSTING_READ(intel_hdmi->sdvox_reg);
-
- /* Again we need to write this twice. */
- I915_WRITE(intel_hdmi->sdvox_reg, temp);
- POSTING_READ(intel_hdmi->sdvox_reg);
-
- /* Transcoder selection bits only update
- * effectively on vblank. */
- if (crtc)
- intel_wait_for_vblank(dev, pipe);
- else
- msleep(50);
- }
- } else {
- /* Restore the transcoder select bit. */
- if (pipe == PIPE_B)
- enable_bits |= SDVO_PIPE_B_SELECT;
- }
+ /* Restore the transcoder select bit. */
+ if (pipe == PIPE_B)
+ enable_bits |= SDVO_PIPE_B_SELECT;
}
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
@@ -652,12 +671,64 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
POSTING_READ(intel_hdmi->sdvox_reg);
}
- if (mode != DRM_MODE_DPMS_ON) {
- temp &= ~enable_bits;
- } else {
- temp |= enable_bits;
+ temp |= enable_bits;
+
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+	/* HW workaround, need to write this twice for an issue that may result
+	 * in the first write getting masked.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+ }
+}
+
+static void intel_disable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 temp;
+ u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
+
+ temp = I915_READ(intel_hdmi->sdvox_reg);
+
+ /* HW workaround for IBX, we need to move the port to transcoder A
+ * before disabling it. */
+ if (HAS_PCH_IBX(dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ if (temp & SDVO_PIPE_B_SELECT) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* Again we need to write this twice. */
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* Transcoder selection bits only update
+ * effectively on vblank. */
+ if (crtc)
+ intel_wait_for_vblank(dev, pipe);
+ else
+ msleep(50);
+ }
+ }
+
+ /* HW workaround, need to toggle enable bit off and on for 12bpc, but
+	 * we do this anyway as it proves more stable in testing.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(intel_hdmi->sdvox_reg);
}
+ temp &= ~enable_bits;
+
I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
@@ -737,7 +808,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
drm_detect_hdmi_monitor(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
}
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -778,8 +848,6 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
-
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -833,9 +901,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
done:
if (intel_hdmi->base.base.crtc) {
struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
- drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y,
- crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
}
return 0;
@@ -849,23 +916,19 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
}
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
- .dpms = intel_ddi_dpms,
.mode_fixup = intel_hdmi_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_ddi_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
- .dpms = intel_hdmi_dpms,
.mode_fixup = intel_hdmi_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_hdmi_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_hdmi_set_property,
@@ -889,7 +952,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_broadcast_rgb_property(connector);
}
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
@@ -923,48 +986,25 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
connector->doublescan_allowed = 0;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- /* Set up the DDC bus. */
- if (sdvox_reg == SDVOB) {
- intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == SDVOC) {
- intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == HDMIB) {
- intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == HDMIC) {
- intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == HDMID) {
- intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
- intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
- dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
- DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
- intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+ intel_encoder->cloneable = false;
+
+ intel_hdmi->ddi_port = port;
+ switch (port) {
+ case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
- intel_hdmi->ddi_port = PORT_B;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
- DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
- intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+ break;
+ case PORT_C:
intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
- intel_hdmi->ddi_port = PORT_C;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
- } else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
- DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
- intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+ break;
+ case PORT_D:
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
- intel_hdmi->ddi_port = PORT_D;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
- } else {
- /* If we got an unknown sdvox_reg, things are pretty much broken
- * in a way that we should let the kernel know about it */
+ break;
+ case PORT_A:
+ /* Internal port only for eDP. */
+ default:
BUG();
}
@@ -987,10 +1027,21 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_hdmi->set_infoframes = cpt_set_infoframes;
}
- if (IS_HASWELL(dev))
- drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw);
- else
- drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+ if (IS_HASWELL(dev)) {
+ intel_encoder->enable = intel_enable_ddi;
+ intel_encoder->disable = intel_disable_ddi;
+ intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+ drm_encoder_helper_add(&intel_encoder->base,
+ &intel_hdmi_helper_funcs_hsw);
+ } else {
+ intel_encoder->enable = intel_enable_hdmi;
+ intel_encoder->disable = intel_disable_hdmi;
+ intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+ drm_encoder_helper_add(&intel_encoder->base,
+ &intel_hdmi_helper_funcs);
+ }
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
intel_hdmi_add_properties(intel_hdmi, connector);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e9a6f6aaed85..40d72bd64e11 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -65,13 +65,40 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
struct intel_lvds, base);
}
+static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 lvds_reg, tmp;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ lvds_reg = PCH_LVDS;
+ } else {
+ lvds_reg = LVDS;
+ }
+
+ tmp = I915_READ(lvds_reg);
+
+ if (!(tmp & LVDS_PORT_EN))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
/**
* Sets the power state for the panel.
*/
-static void intel_lvds_enable(struct intel_lvds *intel_lvds)
+static void intel_enable_lvds(struct intel_encoder *encoder)
{
- struct drm_device *dev = intel_lvds->base.base.dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_lvds->base.base.crtc);
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -111,9 +138,10 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
intel_panel_enable_backlight(dev, intel_crtc->pipe);
}
-static void intel_lvds_disable(struct intel_lvds *intel_lvds)
+static void intel_disable_lvds(struct intel_encoder *encoder)
{
- struct drm_device *dev = intel_lvds->base.base.dev;
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -142,18 +170,6 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
POSTING_READ(lvds_reg);
}
-static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
-{
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
- if (mode == DRM_MODE_DPMS_ON)
- intel_lvds_enable(intel_lvds);
- else
- intel_lvds_disable(intel_lvds);
-
- /* XXX: We never power down the LVDS pairs. */
-}
-
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -234,9 +250,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- struct intel_encoder *tmp_encoder;
+ struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
int pipe;
@@ -246,14 +261,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return false;
}
- /* Should never happen!! */
- for_each_encoder_on_crtc(dev, encoder->crtc, tmp_encoder) {
- if (&tmp_encoder->base != encoder) {
- DRM_ERROR("Can't enable LVDS and another "
- "encoder on the same pipe\n");
- return false;
- }
- }
+ if (intel_encoder_check_is_cloned(&intel_lvds->base))
+ return false;
/*
* We have timings from the BIOS for the panel, put them in
@@ -405,23 +414,6 @@ out:
return true;
}
-static void intel_lvds_prepare(struct drm_encoder *encoder)
-{
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
- intel_lvds_disable(intel_lvds);
-}
-
-static void intel_lvds_commit(struct drm_encoder *encoder)
-{
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
- /* Always do a full power on as we do not know what state
- * we were left in.
- */
- intel_lvds_enable(intel_lvds);
-}
-
static void intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -535,7 +527,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
dev_priv->modeset_on_lid = 0;
mutex_lock(&dev->mode_config.mutex);
- drm_helper_resume_force_mode(dev);
+ intel_modeset_check_state(dev);
mutex_unlock(&dev->mode_config.mutex);
return NOTIFY_OK;
@@ -587,8 +579,8 @@ static int intel_lvds_set_property(struct drm_connector *connector,
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
*/
- drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y, crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
}
}
@@ -596,11 +588,9 @@ static int intel_lvds_set_property(struct drm_connector *connector,
}
static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
- .dpms = intel_lvds_dpms,
.mode_fixup = intel_lvds_mode_fixup,
- .prepare = intel_lvds_prepare,
.mode_set = intel_lvds_mode_set,
- .commit = intel_lvds_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -610,7 +600,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
@@ -972,10 +962,15 @@ bool intel_lvds_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
+ intel_encoder->enable = intel_enable_lvds;
+ intel_encoder->disable = intel_disable_lvds;
+ intel_encoder->get_hw_state = intel_lvds_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS;
- intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
+ intel_encoder->cloneable = false;
if (HAS_PCH_SPLIT(dev))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
else if (IS_GEN4(dev))
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 29b72593fbb2..4bc1c0fc342a 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
- connector->display_info.raw_edid = NULL;
kfree(edid);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 18bd0af855dc..e27c17012628 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -427,6 +427,25 @@ blind_set:
goto end;
}
+static void intel_setup_cadls(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ int i = 0;
+ u32 disp_id;
+
+ /* Initialize the CADL field by duplicating the DIDL values.
+	 * Technically, this is not always correct as display outputs may exist
+	 * but not be active. This initialization is necessary for some Clevo
+	 * laptops that check this field before processing the brightness and
+	 * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
+	 * there are fewer than eight devices. */
+ do {
+ disp_id = ioread32(&opregion->acpi->didl[i]);
+ iowrite32(disp_id, &opregion->acpi->cadl[i]);
+ } while (++i < 8 && disp_id != 0);
+}
+
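A small user-space model of the CADL initialization above: DIDL entries are copied one by one until either all eight slots are filled or the zero terminator is reached, and the terminator itself is copied so CADL stays NULL-terminated too. Array contents and names are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Three outputs, then a zero terminator. */
	uint32_t didl[8] = { 0x0100, 0x0200, 0x0300, 0 };
	uint32_t cadl[8] = { 0 };
	uint32_t disp_id;
	int i = 0;

	/* Mirrors the do/while above: stop after slot 8 or just past the
	 * terminator, whichever comes first. */
	do {
		disp_id = didl[i];
		cadl[i] = disp_id;
	} while (++i < 8 && disp_id != 0);

	for (i = 0; i < 8; i++)
		printf("cadl[%d] = %#x\n", i, (unsigned int)cadl[i]);
	return 0;
}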
void intel_opregion_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -436,8 +455,10 @@ void intel_opregion_init(struct drm_device *dev)
return;
if (opregion->acpi) {
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_didl_outputs(dev);
+ intel_setup_cadls(dev);
+ }
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 830d0dd610e1..afd0f30ab882 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -235,54 +235,6 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
return 0;
}
-/* Workaround for i830 bug where pipe a must be enable to change control regs */
-static int
-i830_activate_pipe_a(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_crtc *crtc;
- struct drm_crtc_helper_funcs *crtc_funcs;
- struct drm_display_mode vesa_640x480 = {
- DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 489, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
- }, *mode;
-
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
- if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
- return 0;
-
- /* most i8xx have pipe a forced on, so don't trust dpms mode */
- if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE)
- return 0;
-
- crtc_funcs = crtc->base.helper_private;
- if (crtc_funcs->dpms == NULL)
- return 0;
-
- DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
-
- mode = drm_mode_duplicate(dev, &vesa_640x480);
-
- if (!drm_crtc_helper_set_mode(&crtc->base, mode,
- crtc->base.x, crtc->base.y,
- crtc->base.fb))
- return 0;
-
- crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
- return 1;
-}
-
-static void
-i830_deactivate_pipe_a(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
/* overlay needs to be disable in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
@@ -290,17 +242,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
struct drm_i915_gem_request *request;
- int pipe_a_quirk = 0;
int ret;
BUG_ON(overlay->active);
overlay->active = 1;
- if (IS_I830(dev)) {
- pipe_a_quirk = i830_activate_pipe_a(dev);
- if (pipe_a_quirk < 0)
- return pipe_a_quirk;
- }
+ WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL) {
@@ -322,9 +269,6 @@ static int intel_overlay_on(struct intel_overlay *overlay)
ret = intel_overlay_do_wait_request(overlay, request, NULL);
out:
- if (pipe_a_quirk)
- i830_deactivate_pipe_a(dev);
-
return ret;
}
@@ -1439,7 +1383,7 @@ void intel_setup_overlay(struct drm_device *dev)
}
overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
if (ret) {
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ba8a27b1757a..d69f8f49beb5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -31,6 +31,8 @@
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
+#define FORCEWAKE_ACK_TIMEOUT_MS 2
+
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
* framebuffer contents in-memory, aiming at reducing the required bandwidth
* during in-memory transfers and, therefore, reducing the power consumption.
@@ -593,7 +595,7 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
break;
}
- dev_priv->r_t = dev_priv->mem_freq;
+ dev_priv->ips.r_t = dev_priv->mem_freq;
switch (csipll & 0x3ff) {
case 0x00c:
@@ -625,11 +627,11 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
}
if (dev_priv->fsb_freq == 3200) {
- dev_priv->c_m = 0;
+ dev_priv->ips.c_m = 0;
} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
- dev_priv->c_m = 1;
+ dev_priv->ips.c_m = 1;
} else {
- dev_priv->c_m = 2;
+ dev_priv->ips.c_m = 2;
}
}
@@ -2138,7 +2140,7 @@ intel_alloc_context_page(struct drm_device *dev)
return NULL;
}
- ret = i915_gem_object_pin(ctx, 4096, true);
+ ret = i915_gem_object_pin(ctx, 4096, true, false);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
@@ -2160,11 +2162,22 @@ err_unref:
return NULL;
}
+/**
+ * Lock protecting IPS-related data structures
+ */
+DEFINE_SPINLOCK(mchdev_lock);
+
+/* Global for IPS driver to get at the current i915 device. Protected by
+ * mchdev_lock. */
+static struct drm_i915_private *i915_mch_dev;
+
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
+ assert_spin_locked(&mchdev_lock);
+
rgvswctl = I915_READ16(MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) {
DRM_DEBUG("gpu busy, RCS change rejected\n");
@@ -2188,6 +2201,8 @@ static void ironlake_enable_drps(struct drm_device *dev)
u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
+ spin_lock_irq(&mchdev_lock);
+
/* Enable temp reporting */
I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
@@ -2211,12 +2226,12 @@ static void ironlake_enable_drps(struct drm_device *dev)
vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
- dev_priv->fmax = fmax; /* IPS callback will increase this */
- dev_priv->fstart = fstart;
+ dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
+ dev_priv->ips.fstart = fstart;
- dev_priv->max_delay = fstart;
- dev_priv->min_delay = fmin;
- dev_priv->cur_delay = fstart;
+ dev_priv->ips.max_delay = fstart;
+ dev_priv->ips.min_delay = fmin;
+ dev_priv->ips.cur_delay = fstart;
DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
fmax, fmin, fstart);
@@ -2233,23 +2248,29 @@ static void ironlake_enable_drps(struct drm_device *dev)
rgvmodectl |= MEMMODE_SWMODE_EN;
I915_WRITE(MEMMODECTL, rgvmodectl);
- if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+ if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
DRM_ERROR("stuck trying to change perf mode\n");
- msleep(1);
+ mdelay(1);
ironlake_set_drps(dev, fstart);
- dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+ dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
I915_READ(0x112e0);
- dev_priv->last_time1 = jiffies_to_msecs(jiffies);
- dev_priv->last_count2 = I915_READ(0x112f4);
- getrawmonotonic(&dev_priv->last_time2);
+ dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
+ dev_priv->ips.last_count2 = I915_READ(0x112f4);
+ getrawmonotonic(&dev_priv->ips.last_time2);
+
+ spin_unlock_irq(&mchdev_lock);
}
static void ironlake_disable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u16 rgvswctl = I915_READ16(MEMSWCTL);
+ u16 rgvswctl;
+
+ spin_lock_irq(&mchdev_lock);
+
+ rgvswctl = I915_READ16(MEMSWCTL);
/* Ack interrupts, disable EFC interrupt */
I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
@@ -2259,31 +2280,54 @@ static void ironlake_disable_drps(struct drm_device *dev)
I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
/* Go back to the starting frequency */
- ironlake_set_drps(dev, dev_priv->fstart);
- msleep(1);
+ ironlake_set_drps(dev, dev_priv->ips.fstart);
+ mdelay(1);
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE(MEMSWCTL, rgvswctl);
- msleep(1);
+ mdelay(1);
+ spin_unlock_irq(&mchdev_lock);
}
-void gen6_set_rps(struct drm_device *dev, u8 val)
+/* There's a funny hw issue where the hw returns all 0 when reading from
+ * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
+ * ourselves, instead of doing an rmw cycle (which might result in us clearing
+ * all limits and the gpu getting stuck at whatever frequency it is at atm).
+ */
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 limits;
limits = 0;
- if (val >= dev_priv->max_delay)
- val = dev_priv->max_delay;
- else
- limits |= dev_priv->max_delay << 24;
- if (val <= dev_priv->min_delay)
- val = dev_priv->min_delay;
- else
- limits |= dev_priv->min_delay << 16;
+ if (*val >= dev_priv->rps.max_delay)
+ *val = dev_priv->rps.max_delay;
+ limits |= dev_priv->rps.max_delay << 24;
+
+ /* Only set the down limit when we've reached the lowest level to avoid
+ * getting more interrupts, otherwise leave this clear. This prevents a
+ * race in the hw when coming out of rc6: There's a tiny window where
+ * the hw runs at the minimal clock before selecting the desired
+	 * frequency; if the down threshold expires in that window we will not
+ * receive a down interrupt. */
+ if (*val <= dev_priv->rps.min_delay) {
+ *val = dev_priv->rps.min_delay;
+ limits |= dev_priv->rps.min_delay << 16;
+ }
- if (val == dev_priv->cur_delay)
+ return limits;
+}
+
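The limits word is thus rebuilt from scratch on every request: the top byte always carries the upper limit, while the lower limit is armed only once the requested value has been clamped to the floor, avoiding both the all-zero readback problem and the rc6-exit race described in the comments above. A self-contained model of that computation, with illustrative min/max values:

#include <stdint.h>
#include <stdio.h>

#define RPS_MIN	3	/* illustrative min/max delay values */
#define RPS_MAX	12

/* Clamp *val to [RPS_MIN, RPS_MAX] and build the limits word:
 * bits 31:24 hold the max; bits 23:16 hold the min, but only once the
 * request has been clamped to the floor. */
static uint32_t rps_limits(uint8_t *val)
{
	uint32_t limits = 0;

	if (*val >= RPS_MAX)
		*val = RPS_MAX;
	limits |= (uint32_t)RPS_MAX << 24;

	if (*val <= RPS_MIN) {
		*val = RPS_MIN;
		limits |= (uint32_t)RPS_MIN << 16;
	}
	return limits;
}

int main(void)
{
	uint8_t val = 1;	/* below the floor: gets clamped */
	uint32_t limits = rps_limits(&val);

	printf("val=%u limits=%#010x\n", val, (unsigned int)limits);
	return 0;
}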
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 limits = gen6_rps_limits(dev_priv, &val);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(val > dev_priv->rps.max_delay);
+ WARN_ON(val < dev_priv->rps.min_delay);
+
+ if (val == dev_priv->rps.cur_delay)
return;
I915_WRITE(GEN6_RPNSWREQ,
@@ -2296,7 +2340,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
*/
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
- dev_priv->cur_delay = val;
+ POSTING_READ(GEN6_RPNSWREQ);
+
+ dev_priv->rps.cur_delay = val;
+
+ trace_intel_gpu_freq_change(val * 50);
}
static void gen6_disable_rps(struct drm_device *dev)
@@ -2312,40 +2360,40 @@ static void gen6_disable_rps(struct drm_device *dev)
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
- spin_lock_irq(&dev_priv->rps_lock);
- dev_priv->pm_iir = 0;
- spin_unlock_irq(&dev_priv->rps_lock);
+ spin_lock_irq(&dev_priv->rps.lock);
+ dev_priv->rps.pm_iir = 0;
+ spin_unlock_irq(&dev_priv->rps.lock);
I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
int intel_enable_rc6(const struct drm_device *dev)
{
- /*
- * Respect the kernel parameter if it is set
- */
+ /* Respect the kernel parameter if it is set */
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
- /*
- * Disable RC6 on Ironlake
- */
- if (INTEL_INFO(dev)->gen == 5)
- return 0;
+ if (INTEL_INFO(dev)->gen == 5) {
+#ifdef CONFIG_INTEL_IOMMU
+ /* Disable rc6 on ilk if VT-d is on. */
+ if (intel_iommu_gfx_mapped)
+ return false;
+#endif
+ DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
+ return INTEL_RC6_ENABLE;
+ }
- /* On Haswell, only RC6 is available. So let's enable it by default to
- * provide better testing and coverage since the beginning.
- */
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev)) {
+ DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
return INTEL_RC6_ENABLE;
+ }
- /*
- * Disable rc6 on Sandybridge
- */
+ /* snb/ivb have more than one rc6 state. */
if (INTEL_INFO(dev)->gen == 6) {
DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
return INTEL_RC6_ENABLE;
}
+
DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
@@ -2383,9 +2431,9 @@ static void gen6_enable_rps(struct drm_device *dev)
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
/* In units of 100MHz */
- dev_priv->max_delay = rp_state_cap & 0xff;
- dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
- dev_priv->cur_delay = 0;
+ dev_priv->rps.max_delay = rp_state_cap & 0xff;
+ dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+ dev_priv->rps.cur_delay = 0;
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -2438,8 +2486,8 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- dev_priv->max_delay << 24 |
- dev_priv->min_delay << 16);
+ dev_priv->rps.max_delay << 24 |
+ dev_priv->rps.min_delay << 16);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
@@ -2477,7 +2525,7 @@ static void gen6_enable_rps(struct drm_device *dev)
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
if (pcu_mbox & (1<<31)) { /* OC supported */
- dev_priv->max_delay = pcu_mbox & 0xff;
+ dev_priv->rps.max_delay = pcu_mbox & 0xff;
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
@@ -2485,10 +2533,10 @@ static void gen6_enable_rps(struct drm_device *dev)
/* requires MSI enabled */
I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
- spin_lock_irq(&dev_priv->rps_lock);
- WARN_ON(dev_priv->pm_iir != 0);
+ spin_lock_irq(&dev_priv->rps.lock);
+ WARN_ON(dev_priv->rps.pm_iir != 0);
I915_WRITE(GEN6_PMIMR, 0);
- spin_unlock_irq(&dev_priv->rps_lock);
+ spin_unlock_irq(&dev_priv->rps.lock);
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
@@ -2520,9 +2568,9 @@ static void gen6_update_ring_freq(struct drm_device *dev)
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
- for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+ for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
gpu_freq--) {
- int diff = dev_priv->max_delay - gpu_freq;
+ int diff = dev_priv->rps.max_delay - gpu_freq;
/*
* For GPU frequencies less than 750MHz, just use the lowest
@@ -2686,14 +2734,16 @@ static const struct cparams {
{ 0, 800, 231, 23784 },
};
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
u64 total_count, diff, ret;
u32 count1, count2, count3, m = 0, c = 0;
unsigned long now = jiffies_to_msecs(jiffies), diff1;
int i;
- diff1 = now - dev_priv->last_time1;
+ assert_spin_locked(&mchdev_lock);
+
+ diff1 = now - dev_priv->ips.last_time1;
/* Prevent division-by-zero if we are asking too fast.
* Also, we don't get interesting results if we are polling
@@ -2701,7 +2751,7 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
* in such cases.
*/
if (diff1 <= 10)
- return dev_priv->chipset_power;
+ return dev_priv->ips.chipset_power;
count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
@@ -2710,16 +2760,16 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
total_count = count1 + count2 + count3;
/* FIXME: handle per-counter overflow */
- if (total_count < dev_priv->last_count1) {
- diff = ~0UL - dev_priv->last_count1;
+ if (total_count < dev_priv->ips.last_count1) {
+ diff = ~0UL - dev_priv->ips.last_count1;
diff += total_count;
} else {
- diff = total_count - dev_priv->last_count1;
+ diff = total_count - dev_priv->ips.last_count1;
}
for (i = 0; i < ARRAY_SIZE(cparams); i++) {
- if (cparams[i].i == dev_priv->c_m &&
- cparams[i].t == dev_priv->r_t) {
+ if (cparams[i].i == dev_priv->ips.c_m &&
+ cparams[i].t == dev_priv->ips.r_t) {
m = cparams[i].m;
c = cparams[i].c;
break;
@@ -2730,14 +2780,30 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
ret = ((m * diff) + c);
ret = div_u64(ret, 10);
- dev_priv->last_count1 = total_count;
- dev_priv->last_time1 = now;
+ dev_priv->ips.last_count1 = total_count;
+ dev_priv->ips.last_time1 = now;
- dev_priv->chipset_power = ret;
+ dev_priv->ips.chipset_power = ret;
return ret;
}
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long val;
+
+ if (dev_priv->info->gen != 5)
+ return 0;
+
+ spin_lock_irq(&mchdev_lock);
+
+ val = __i915_chipset_val(dev_priv);
+
+ spin_unlock_irq(&mchdev_lock);
+
+ return val;
+}
+
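i915_chipset_val() is now split into a __-prefixed worker that expects mchdev_lock to be held and a thin wrapper that takes the lock itself, so callers like i915_read_mch_val() can batch several reads under a single acquisition. A pthread-based sketch of that locked-accessor split (a user-space analogy; all names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mch_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long chipset_power;	/* protected by mch_lock */

/* Worker: the caller must hold mch_lock (the kernel version documents
 * this with assert_spin_locked()). */
static unsigned long __chipset_val(void)
{
	return ++chipset_power;
}

/* Public wrapper: takes the lock around a single read. */
static unsigned long chipset_val(void)
{
	unsigned long val;

	pthread_mutex_lock(&mch_lock);
	val = __chipset_val();
	pthread_mutex_unlock(&mch_lock);
	return val;
}

/* A batched caller acquires the lock once for several reads. */
static unsigned long read_mch_val(void)
{
	unsigned long ret;

	pthread_mutex_lock(&mch_lock);
	ret = __chipset_val() + __chipset_val();
	pthread_mutex_unlock(&mch_lock);
	return ret;
}

int main(void)
{
	printf("single:  %lu\n", chipset_val());
	printf("batched: %lu\n", read_mch_val());
	return 0;
}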
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
unsigned long m, x, b;
@@ -2894,18 +2960,17 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
return v_table[pxvid].vd;
}
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
struct timespec now, diff1;
u64 diff;
unsigned long diffms;
u32 count;
- if (dev_priv->info->gen != 5)
- return;
+ assert_spin_locked(&mchdev_lock);
getrawmonotonic(&now);
- diff1 = timespec_sub(now, dev_priv->last_time2);
+ diff1 = timespec_sub(now, dev_priv->ips.last_time2);
/* Don't divide by 0 */
diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
@@ -2914,28 +2979,42 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
count = I915_READ(GFXEC);
- if (count < dev_priv->last_count2) {
- diff = ~0UL - dev_priv->last_count2;
+ if (count < dev_priv->ips.last_count2) {
+ diff = ~0UL - dev_priv->ips.last_count2;
diff += count;
} else {
- diff = count - dev_priv->last_count2;
+ diff = count - dev_priv->ips.last_count2;
}
- dev_priv->last_count2 = count;
- dev_priv->last_time2 = now;
+ dev_priv->ips.last_count2 = count;
+ dev_priv->ips.last_time2 = now;
/* More magic constants... */
diff = diff * 1181;
diff = div_u64(diff, diffms * 10);
- dev_priv->gfx_power = diff;
+ dev_priv->ips.gfx_power = diff;
}
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->info->gen != 5)
+ return;
+
+ spin_lock_irq(&mchdev_lock);
+
+ __i915_update_gfx_val(dev_priv);
+
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
unsigned long t, corr, state1, corr2, state2;
u32 pxvid, ext_v;
- pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+ assert_spin_locked(&mchdev_lock);
+
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(dev_priv, pxvid);
@@ -2955,27 +3034,31 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
corr = corr * ((150142 * state1) / 10000 - 78642);
corr /= 100000;
- corr2 = (corr * dev_priv->corr);
+ corr2 = (corr * dev_priv->ips.corr);
state2 = (corr2 * state1) / 10000;
state2 /= 100; /* convert to mW */
- i915_update_gfx_val(dev_priv);
+ __i915_update_gfx_val(dev_priv);
- return dev_priv->gfx_power + state2;
+ return dev_priv->ips.gfx_power + state2;
}
-/* Global for IPS driver to get at the current i915 device */
-static struct drm_i915_private *i915_mch_dev;
-/*
- * Lock protecting IPS related data structures
- * - i915_mch_dev
- * - dev_priv->max_delay
- * - dev_priv->min_delay
- * - dev_priv->fmax
- * - dev_priv->gpu_busy
- */
-static DEFINE_SPINLOCK(mchdev_lock);
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long val;
+
+ if (dev_priv->info->gen != 5)
+ return 0;
+
+ spin_lock_irq(&mchdev_lock);
+
+ val = __i915_gfx_val(dev_priv);
+
+ spin_unlock_irq(&mchdev_lock);
+
+ return val;
+}
/**
* i915_read_mch_val - return value for IPS use
@@ -2988,18 +3071,18 @@ unsigned long i915_read_mch_val(void)
struct drm_i915_private *dev_priv;
unsigned long chipset_val, graphics_val, ret = 0;
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
- chipset_val = i915_chipset_val(dev_priv);
- graphics_val = i915_gfx_val(dev_priv);
+ chipset_val = __i915_chipset_val(dev_priv);
+ graphics_val = __i915_gfx_val(dev_priv);
ret = chipset_val + graphics_val;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3015,18 +3098,18 @@ bool i915_gpu_raise(void)
struct drm_i915_private *dev_priv;
bool ret = true;
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
- if (dev_priv->max_delay > dev_priv->fmax)
- dev_priv->max_delay--;
+ if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
+ dev_priv->ips.max_delay--;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3043,18 +3126,18 @@ bool i915_gpu_lower(void)
struct drm_i915_private *dev_priv;
bool ret = true;
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
- if (dev_priv->max_delay < dev_priv->min_delay)
- dev_priv->max_delay++;
+ if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
+ dev_priv->ips.max_delay++;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3068,17 +3151,20 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
bool i915_gpu_busy(void)
{
struct drm_i915_private *dev_priv;
+ struct intel_ring_buffer *ring;
bool ret = false;
+ int i;
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
- ret = dev_priv->busy;
+ for_each_ring(ring, dev_priv, i)
+ ret |= !list_empty(&ring->request_list);
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3095,20 +3181,20 @@ bool i915_gpu_turbo_disable(void)
struct drm_i915_private *dev_priv;
bool ret = true;
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
- dev_priv->max_delay = dev_priv->fstart;
+ dev_priv->ips.max_delay = dev_priv->ips.fstart;
- if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+ if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
ret = false;
out_unlock:
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
return ret;
}
@@ -3136,19 +3222,20 @@ ips_ping_for_i915_load(void)
void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
- spin_lock(&mchdev_lock);
+ /* We only register the i915 ips part with intel-ips once everything is
+ * set up, to avoid intel-ips sneaking in and reading bogus values. */
+ spin_lock_irq(&mchdev_lock);
i915_mch_dev = dev_priv;
- dev_priv->mchdev_lock = &mchdev_lock;
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
ips_ping_for_i915_load();
}
void intel_gpu_ips_teardown(void)
{
- spin_lock(&mchdev_lock);
+ spin_lock_irq(&mchdev_lock);
i915_mch_dev = NULL;
- spin_unlock(&mchdev_lock);
+ spin_unlock_irq(&mchdev_lock);
}
static void intel_init_emon(struct drm_device *dev)
{
@@ -3218,7 +3305,7 @@ static void intel_init_emon(struct drm_device *dev)
lcfuse = I915_READ(LCFUSE02);
- dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+ dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
void intel_disable_gt_powersave(struct drm_device *dev)
@@ -3731,42 +3818,6 @@ void intel_init_clock_gating(struct drm_device *dev)
dev_priv->display.init_pch_clock_gating(dev);
}
-static void gen6_sanitize_pm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 limits, delay, old;
-
- gen6_gt_force_wake_get(dev_priv);
-
- old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
- /* Make sure we continue to get interrupts
- * until we hit the minimum or maximum frequencies.
- */
- limits &= ~(0x3f << 16 | 0x3f << 24);
- delay = dev_priv->cur_delay;
- if (delay < dev_priv->max_delay)
- limits |= (dev_priv->max_delay & 0x3f) << 24;
- if (delay > dev_priv->min_delay)
- limits |= (dev_priv->min_delay & 0x3f) << 16;
-
- if (old != limits) {
- /* Note that the known failure case is to read back 0. */
- DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
- "expected %08x, was %08x\n", limits, old);
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
- }
-
- gen6_gt_force_wake_put(dev_priv);
-}
-
-void intel_sanitize_pm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->display.sanitize_pm)
- dev_priv->display.sanitize_pm(dev);
-}
-
/* Starting with Haswell, we have different power wells for
* different parts of the GPU. This attempts to enable them all.
*/
@@ -3852,7 +3903,6 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
- dev_priv->display.sanitize_pm = gen6_sanitize_pm;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
if (SNB_READ_WM0_LATENCY()) {
@@ -3864,7 +3914,6 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
- dev_priv->display.sanitize_pm = gen6_sanitize_pm;
} else if (IS_HASWELL(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
@@ -3876,7 +3925,6 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
- dev_priv->display.sanitize_pm = gen6_sanitize_pm;
} else
dev_priv->display.update_wm = NULL;
} else if (IS_VALLEYVIEW(dev)) {
@@ -3955,14 +4003,16 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
else
forcewake_ack = FORCEWAKE_ACK;
- if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
- DRM_ERROR("Force wake wait timed out\n");
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
I915_WRITE_NOTRACE(FORCEWAKE, 1);
- POSTING_READ(FORCEWAKE);
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
- if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
- DRM_ERROR("Force wake wait timed out\n");
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
__gen6_gt_wait_for_thread_c0(dev_priv);
}
@@ -3976,14 +4026,16 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
else
forcewake_ack = FORCEWAKE_MT_ACK;
- if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
- DRM_ERROR("Force wake wait timed out\n");
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
- POSTING_READ(FORCEWAKE_MT);
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
- if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
- DRM_ERROR("Force wake wait timed out\n");
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
__gen6_gt_wait_for_thread_c0(dev_priv);
}
@@ -4016,14 +4068,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
- POSTING_READ(FORCEWAKE);
+ /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
- POSTING_READ(FORCEWAKE_MT);
+ /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
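
FORCEWAKE_MT and (with this patch) FORCEWAKE_VLV are masked-bit registers: the high 16 bits of a write select which of the low 16 bits take effect, so concurrent writers never need a read-modify-write cycle. The _MASKED_BIT_* helpers used above reduce to simple shifts; a sketch of their expected definitions, assumed here to match their use in this patch:

    /* Bits 31:16 are the write-enable mask, bits 15:0 the payload. */
    #define _MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))  /* select bit, set it */
    #define _MASKED_BIT_DISABLE(a)  ((a) << 16)          /* select bit, clear it */

So _MASKED_BIT_ENABLE(1) writes 0x00010001 (touch bit 0, set it) and _MASKED_BIT_DISABLE(1) writes 0x00010000 (touch bit 0, clear it); all other bits are left alone regardless of their current value.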
@@ -4062,24 +4114,24 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
- /* Already awake? */
- if ((I915_READ(0x130094) & 0xa1) == 0xa1)
- return;
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
- POSTING_READ(FORCEWAKE_VLV);
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
- if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500))
- DRM_ERROR("Force wake wait timed out\n");
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
- /* FIXME: confirm VLV behavior with Punit folks */
- POSTING_READ(FORCEWAKE_VLV);
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
+ /* The below doubles as a POSTING_READ */
+ gen6_gt_check_fifodbg(dev_priv);
}
void intel_gt_init(struct drm_device *dev)
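
The forcewake waits above move from wait_for_atomic_us(cond, 500) to wait_for_atomic(cond, FORCEWAKE_ACK_TIMEOUT_MS), trading a bare 500 µs budget for a named millisecond timeout, and give the two failure points distinct error messages. A rough open-coded equivalent of such a bounded busy-wait, as a sketch only (the driver's real macro lives elsewhere and differs in detail):

    /* Poll COND for up to timeout_ms; returns 0 on success, -ETIMEDOUT
     * otherwise. Busy-waits, so it is usable from atomic context but
     * should only ever be given short timeouts. */
    #define wait_for_atomic_sketch(COND, timeout_ms) ({                     \
            unsigned long timeout__ = jiffies + msecs_to_jiffies(timeout_ms); \
            int ret__ = 0;                                                  \
            while (!(COND)) {                                               \
                    if (time_after(jiffies, timeout__)) {                   \
                            ret__ = -ETIMEDOUT;                             \
                            break;                                          \
                    }                                                       \
                    cpu_relax();                                            \
            }                                                               \
            ret__;                                                          \
    })

Note the condition is tested before the deadline, so a condition that becomes true on the final iteration still counts as success.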
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e2a73b38abe9..984a0c5fbf5d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -262,6 +262,83 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
return 0;
}
+static int
+gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen7_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains, u32 flush_domains)
+{
+ u32 flags = 0;
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ /*
+ * Ensure that any following seqno writes only happen when the render
+ * cache is indeed flushed.
+ *
+ * Workaround: 4th PIPE_CONTROL command (except the ones with only
+ * read-cache invalidate bits set) must have the CS_STALL bit set. We
+ * don't try to be clever and just set it unconditionally.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ /* Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ /* Workaround: we must issue a pipe_control with CS-stall bit
+ * set before a pipe_control command that has the state cache
+ * invalidate bit set. */
+ gen7_render_ring_cs_stall_wa(ring);
+ }
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
static void ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
@@ -382,12 +459,12 @@ init_pipe_control(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- ret = i915_gem_object_pin(obj, 4096, true);
+ ret = i915_gem_object_pin(obj, 4096, true, false);
if (ret)
goto err_unref;
pc->gtt_offset = obj->gtt_offset;
- pc->cpu_page = kmap(obj->pages[0]);
+ pc->cpu_page = kmap(sg_page(obj->pages->sgl));
if (pc->cpu_page == NULL)
goto err_unpin;
@@ -414,7 +491,8 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
return;
obj = pc->obj;
- kunmap(obj->pages[0]);
+
+ kunmap(sg_page(obj->pages->sgl));
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
@@ -462,7 +540,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- if (IS_IVYBRIDGE(dev))
+ if (HAS_L3_GPU_CACHE(dev))
I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
return ret;
@@ -628,26 +706,24 @@ pc_render_add_request(struct intel_ring_buffer *ring,
}
static u32
-gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
- struct drm_device *dev = ring->dev;
-
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
* ACTHD) before reading the status page. */
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if (!lazy_coherency)
intel_ring_get_active_head(ring);
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
static u32
-ring_get_seqno(struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
static u32
-pc_render_get_seqno(struct intel_ring_buffer *ring)
+pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
struct pipe_control *pc = ring->private;
return pc->cpu_page[0];
@@ -852,7 +928,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
- if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+ if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
GEN6_RENDER_L3_PARITY_ERROR));
else
@@ -875,7 +951,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
- if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+ if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
else
I915_WRITE_IMR(ring, ~0);
@@ -951,7 +1027,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
if (obj == NULL)
return;
- kunmap(obj->pages[0]);
+ kunmap(sg_page(obj->pages->sgl));
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
@@ -972,13 +1048,13 @@ static int init_status_page(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- ret = i915_gem_object_pin(obj, 4096, true);
+ ret = i915_gem_object_pin(obj, 4096, true, false);
if (ret != 0) {
goto err_unref;
}
ring->status_page.gfx_addr = obj->gtt_offset;
- ring->status_page.page_addr = kmap(obj->pages[0]);
+ ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM;
goto err_unpin;
@@ -1010,7 +1086,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->gpu_write_list);
ring->size = 32 * PAGE_SIZE;
init_waitqueue_head(&ring->irq_queue);
@@ -1030,7 +1105,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->obj = obj;
- ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
if (ret)
goto err_unref;
@@ -1379,7 +1454,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
- ring->flush = gen6_render_ring_flush;
+ ring->flush = gen7_render_ring_flush;
+ if (INTEL_INFO(dev)->gen == 6)
+ ring->flush = gen6_render_ring_flush;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT;
@@ -1481,7 +1558,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->gpu_write_list);
ring->size = size;
ring->effective_size = ring->size;
@@ -1574,3 +1650,41 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
return intel_init_ring_buffer(dev, ring);
}
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ if (!ring->gpu_caches_dirty)
+ return 0;
+
+ ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+ trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+ uint32_t flush_domains;
+ int ret;
+
+ flush_domains = 0;
+ if (ring->gpu_caches_dirty)
+ flush_domains = I915_GEM_GPU_DOMAINS;
+
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+ if (ret)
+ return ret;
+
+ trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
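
These two helpers replace the old per-object gpu_write_list tracking (removed from the ring structure below): the submission path invalidates caches before a batch runs, then merely marks gpu_caches_dirty, and the flush is emitted lazily when the next request or CPU access actually needs it. An illustrative, simplified submission sequence (hypothetical function; the real execbuffer path does considerably more):

    static int submit_batch_sketch(struct intel_ring_buffer *ring,
                                   u32 batch_offset, u32 batch_len)
    {
            int ret;

            /* Drop stale read caches before the batch samples memory. */
            ret = intel_ring_invalidate_all_caches(ring);
            if (ret)
                    return ret;

            ret = ring->dispatch_execbuffer(ring, batch_offset, batch_len);
            if (ret)
                    return ret;

            /* The batch may have written through GPU caches; defer the
             * flush until something needs it. */
            ring->gpu_caches_dirty = true;
            return 0;
    }

A later intel_ring_flush_all_caches(ring) then emits a single flush only if gpu_caches_dirty is still set.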
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 1d3c81fdad92..2ea7a311a1f0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -72,7 +72,14 @@ struct intel_ring_buffer {
u32 flush_domains);
int (*add_request)(struct intel_ring_buffer *ring,
u32 *seqno);
- u32 (*get_seqno)(struct intel_ring_buffer *ring);
+ /* Some chipsets are not quite as coherent as advertised and need
+ * an expensive kick to force a true read of the up-to-date seqno.
+ * However, the up-to-date seqno is not always required and the last
+ * seen value is good enough. Note that the seqno will always be
+ * monotonic, even if not coherent.
+ */
+ u32 (*get_seqno)(struct intel_ring_buffer *ring,
+ bool lazy_coherency);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length);
void (*cleanup)(struct intel_ring_buffer *ring);
@@ -101,15 +108,6 @@ struct intel_ring_buffer {
struct list_head request_list;
/**
- * List of objects currently pending a GPU write flush.
- *
- * All elements on this list will belong to either the
- * active_list or flushing_list, last_rendering_seqno can
- * be used to differentiate between the two elements.
- */
- struct list_head gpu_write_list;
-
- /**
* Do we have some not yet emitted requests outstanding?
*/
u32 outstanding_lazy_request;
@@ -204,6 +202,8 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
void intel_ring_advance(struct intel_ring_buffer *ring);
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
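
The new lazy_coherency flag (see the struct comment above) lets hot paths accept a possibly stale, but still monotonic, seqno instead of paying for the coherency kick on every read. A sketch of how a caller might exploit that, using the driver's wraparound-safe i915_seqno_passed() comparison (the wrapper name here is hypothetical):

    static bool seqno_completed_sketch(struct intel_ring_buffer *ring, u32 seqno)
    {
            /* Cheap, possibly stale read first; monotonicity makes a
             * positive answer trustworthy. */
            if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
                    return true;

            /* "Not yet" may just be staleness: retry coherently. */
            return i915_seqno_passed(ring->get_seqno(ring, false), seqno);
    }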
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 123afd357611..39c319827f91 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -97,7 +97,7 @@ struct intel_sdvo {
/*
* Hotplug activation bits for this device
*/
- uint8_t hotplug_active[2];
+ uint16_t hotplug_active;
/**
* This is used to select the color range of RBG outputs in HDMI mode.
@@ -628,6 +628,14 @@ static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
&outputs, sizeof(outputs));
}
+static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo,
+ u16 *outputs)
+{
+ return intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_ACTIVE_OUTPUTS,
+ outputs, sizeof(*outputs));
+}
+
static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
int mode)
{
@@ -1142,51 +1150,132 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
-static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
{
- struct drm_device *dev = encoder->dev;
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(&connector->base);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+ u16 active_outputs;
+
+ intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+
+ if (active_outputs & intel_sdvo_connector->output_flag)
+ return true;
+ else
+ return false;
+}
+
+static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_sdvo->sdvo_reg);
+
+ if (!(tmp & SDVO_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
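
intel_sdvo_get_hw_state() exists so the new cross-checking infrastructure (intel_modeset_check_state(), invoked from the dpms path below) can compare what the hardware is actually doing against the software state. A sketch of the kind of assertion this readback enables (simplified and hypothetical, not the actual checker):

    static void check_encoder_state_sketch(struct intel_encoder *encoder)
    {
            enum pipe pipe;
            bool hw_enabled = encoder->get_hw_state(encoder, &pipe);

            /* An encoder should be live in hardware iff software has
             * bound it to a crtc. */
            WARN(hw_enabled != (encoder->base.crtc != NULL),
                 "encoder hw state (%d) disagrees with sw state\n",
                 hw_enabled);
    }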
+
+static void intel_disable_sdvo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ u32 temp;
+
+ intel_sdvo_set_active_outputs(intel_sdvo, 0);
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo,
+ DRM_MODE_DPMS_OFF);
+
+ temp = I915_READ(intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) != 0) {
+ intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
+ }
+}
+
+static void intel_enable_sdvo(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
+ bool input1, input2;
+ int i;
+ u8 status;
+
+ temp = I915_READ(intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) == 0)
+ intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+ for (i = 0; i < 2; i++)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
+ /* Warn if the device reported failure to sync.
+ * A lot of SDVO devices fail to notify of sync, but it's
+ * a given that if the status is a success, we succeeded.
+ */
+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+ DRM_DEBUG_KMS("First %s output reported failure to "
+ "sync\n", SDVO_NAME(intel_sdvo));
+ }
+
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo,
+ DRM_MODE_DPMS_ON);
+ intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+}
+
+static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_crtc *crtc;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+
+ /* SDVO supports only 2 dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = intel_sdvo->base.base.crtc;
+ if (!crtc) {
+ intel_sdvo->base.connectors_active = false;
+ return;
+ }
if (mode != DRM_MODE_DPMS_ON) {
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
- if (mode == DRM_MODE_DPMS_OFF) {
- temp = I915_READ(intel_sdvo->sdvo_reg);
- if ((temp & SDVO_ENABLE) != 0) {
- intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
- }
- }
+ intel_sdvo->base.connectors_active = false;
+
+ intel_crtc_update_dpms(crtc);
} else {
- bool input1, input2;
- int i;
- u8 status;
-
- temp = I915_READ(intel_sdvo->sdvo_reg);
- if ((temp & SDVO_ENABLE) == 0)
- intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
- for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
- status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
- /* Warn if the device reported failure to sync.
- * A lot of SDVO devices fail to notify of sync, but it's
- * a given it the status is a success, we succeeded.
- */
- if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
- DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(intel_sdvo));
- }
+ intel_sdvo->base.connectors_active = true;
+
+ intel_crtc_update_dpms(crtc);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
}
- return;
+
+ intel_modeset_check_state(connector->dev);
}
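
With enable/disable moved into intel_encoder hooks, DPMS shifts from the encoder helper to the connector: the callback clamps the request to on/off, records connectors_active, and defers the actual hardware work to the crtc-level recompute. Stripped to its skeleton, the shape shared by the new .dpms callbacks in this series looks roughly like this (sketch; intel_connector_dpms, used by the TV connector below, is the generic version):

    static void connector_dpms_sketch(struct drm_connector *connector, int mode)
    {
            struct intel_encoder *encoder = intel_attached_encoder(connector);
            struct drm_crtc *crtc = encoder->base.crtc;

            /* Only on and off are meaningful; fold the intermediate
             * states into off. */
            if (mode != DRM_MODE_DPMS_ON)
                    mode = DRM_MODE_DPMS_OFF;

            connector->dpms = mode;

            /* Without a crtc nothing is driving this connector. */
            if (!crtc) {
                    encoder->connectors_active = false;
                    return;
            }

            encoder->connectors_active = (mode == DRM_MODE_DPMS_ON);
            intel_crtc_update_dpms(crtc);
    }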
static int intel_sdvo_mode_valid(struct drm_connector *connector,
@@ -1251,25 +1340,29 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
return true;
}
-static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
+static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
{
struct drm_device *dev = intel_sdvo->base.base.dev;
- u8 response[2];
+ uint16_t hotplug;
/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
* on the line. */
if (IS_I945G(dev) || IS_I945GM(dev))
- return false;
+ return 0;
- return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
- &response, 2) && response[0];
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+ &hotplug, sizeof(hotplug)))
+ return 0;
+
+ return hotplug;
}
static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+ &intel_sdvo->hotplug_active, 2);
}
static bool
@@ -1345,7 +1438,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
}
} else
status = connector_status_disconnected;
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -1419,7 +1511,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
else
ret = connector_status_disconnected;
- connector->display_info.raw_edid = NULL;
kfree(edid);
} else
ret = connector_status_connected;
@@ -1465,7 +1556,6 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
drm_add_edid_modes(connector, edid);
}
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
}
@@ -1837,8 +1927,8 @@ set_value:
done:
if (intel_sdvo->base.base.crtc) {
struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
- drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
- crtc->y, crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
}
return 0;
@@ -1846,15 +1936,13 @@ done:
}
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
- .dpms = intel_sdvo_dpms,
.mode_fixup = intel_sdvo_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_sdvo_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_sdvo_dpms,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
@@ -2026,6 +2114,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
connector->base.base.interlace_allowed = 1;
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
intel_connector_attach_encoder(&connector->base, &encoder->base);
drm_sysfs_connector_add(&connector->base.base);
@@ -2064,17 +2153,18 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
- if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
+ if (intel_sdvo_get_hotplug_support(intel_sdvo) &
+ intel_sdvo_connector->output_flag) {
connector->polled = DRM_CONNECTOR_POLL_HPD;
- intel_sdvo->hotplug_active[0] |= 1 << device;
+ intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
/* Some SDVO devices have one-shot hotplug interrupts.
* Ensure that they get re-enabled when an interrupt happens.
*/
intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
intel_sdvo_enable_hotplug(intel_encoder);
- }
- else
+ } else {
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ }
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
@@ -2082,8 +2172,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo->is_hdmi = true;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT));
+ intel_sdvo->base.cloneable = true;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo->is_hdmi)
@@ -2114,7 +2203,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
intel_sdvo->is_tv = true;
intel_sdvo->base.needs_tv_clock = true;
- intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+ intel_sdvo->base.cloneable = false;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
@@ -2157,8 +2246,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT));
+ intel_sdvo->base.cloneable = true;
intel_sdvo_connector_init(intel_sdvo_connector,
intel_sdvo);
@@ -2190,8 +2278,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+ /* SDVO LVDS is cloneable because the SDVO encoder does the upscaling,
+ * as opposed to native LVDS, where we upscale with the panel-fitter
+ * (and hence only the native LVDS resolution could be cloned). */
+ intel_sdvo->base.cloneable = true;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
@@ -2576,6 +2666,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
+ intel_encoder->disable = intel_disable_sdvo;
+ intel_encoder->enable = intel_enable_sdvo;
+ intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
+
/* In default case sdvo lvds is false */
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
goto err;
@@ -2590,7 +2684,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
/* Only enable the hotplug irq if we need it, to work around noisy
* hotplug lines.
*/
- if (intel_sdvo->hotplug_active[0])
+ if (intel_sdvo->hotplug_active)
dev_priv->hotplug_supported_mask |= hotplug_mask;
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index befce6c49704..d2c5c8f3baf3 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -836,22 +836,37 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
base);
}
+static bool
+intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ(TV_CTL);
+
+ if (!(tmp & TV_ENC_ENABLE))
+ return false;
+
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
static void
-intel_tv_dpms(struct drm_encoder *encoder, int mode)
+intel_enable_tv(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
- break;
- }
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+}
+
+static void
+intel_disable_tv(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
}
static const struct tv_mode *
@@ -895,17 +910,14 @@ intel_tv_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
- struct intel_encoder *other_encoder;
if (!tv_mode)
return false;
- for_each_encoder_on_crtc(dev, encoder->crtc, other_encoder)
- if (&other_encoder->base != encoder)
- return false;
+ if (intel_encoder_check_is_cloned(&intel_tv->base))
+ return false;
adjusted_mode->clock = tv_mode->clock;
return true;
@@ -1303,12 +1315,9 @@ intel_tv_detect(struct drm_connector *connector, bool force)
if (force) {
struct intel_load_detect_pipe tmp;
- if (intel_get_load_detect_pipe(&intel_tv->base, connector,
- &mode, &tmp)) {
+ if (intel_get_load_detect_pipe(connector, &mode, &tmp)) {
type = intel_tv_detect_type(intel_tv, connector);
- intel_release_load_detect_pipe(&intel_tv->base,
- connector,
- &tmp);
+ intel_release_load_detect_pipe(connector, &tmp);
} else
return connector_status_unknown;
} else
@@ -1474,22 +1483,20 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
}
if (changed && crtc)
- drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
- crtc->y, crtc->fb);
+ intel_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
out:
return ret;
}
static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
- .dpms = intel_tv_dpms,
.mode_fixup = intel_tv_mode_fixup,
- .prepare = intel_encoder_prepare,
.mode_set = intel_tv_mode_set,
- .commit = intel_encoder_commit,
+ .disable = intel_encoder_noop,
};
static const struct drm_connector_funcs intel_tv_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_tv_detect,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
@@ -1619,10 +1626,15 @@ intel_tv_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC);
+ intel_encoder->enable = intel_enable_tv;
+ intel_encoder->disable = intel_disable_tv;
+ intel_encoder->get_hw_state = intel_tv_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
- intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
+ intel_encoder->cloneable = false;
intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 6f13b3563234..d22cbbf3a202 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -195,7 +195,6 @@ struct mga_device {
struct drm_global_reference mem_global_ref;
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
- atomic_t validate_sequence;
} ttm;
u32 reg_1e24; /* SE model number */
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index b69642d5d850..c7420e83c0b9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1399,7 +1399,6 @@ static int mga_vga_get_modes(struct drm_connector *connector)
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
- connector->display_info.raw_edid = NULL;
kfree(edid);
}
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index e1387bd73d76..72266ae91fa1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -166,9 +166,7 @@ dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay)
if (ret)
return ret;
- NV_DEBUG(drm, "status %02x %02x %02x %02x %02x %02x\n",
- dp->stat[0], dp->stat[1], dp->stat[2], dp->stat[3],
- dp->stat[4], dp->stat[5]);
+ NV_DEBUG(drm, "status %*ph\n", 6, dp->stat);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 2817101fb167..96184d02c8d9 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -83,25 +83,19 @@ static void atombios_scaler_setup(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
ENABLE_SCALER_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
-
+ struct radeon_encoder *radeon_encoder =
+ to_radeon_encoder(radeon_crtc->encoder);
/* fixme - fill in enc_priv for atom dac */
enum radeon_tv_std tv_std = TV_STD_NTSC;
bool is_tv = false, is_cv = false;
- struct drm_encoder *encoder;
if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
return;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- /* find tv std */
- if (encoder->crtc == crtc) {
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
- struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
- tv_std = tv_dac->tv_std;
- is_tv = true;
- }
- }
+ if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
+ struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
+ tv_std = tv_dac->tv_std;
+ is_tv = true;
}
memset(&args, 0, sizeof(args));
@@ -533,99 +527,87 @@ union adjust_pixel_clock {
};
static u32 atombios_adjust_pll(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct radeon_pll *pll,
- bool ss_enabled,
- struct radeon_atom_ss *ss)
+ struct drm_display_mode *mode)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *encoder = NULL;
- struct radeon_encoder *radeon_encoder = NULL;
- struct drm_connector *connector = NULL;
+ struct drm_encoder *encoder = radeon_crtc->encoder;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
u32 adjusted_clock = mode->clock;
- int encoder_mode = 0;
+ int encoder_mode = atombios_get_encoder_mode(encoder);
u32 dp_clock = mode->clock;
- int bpc = 8;
- bool is_duallink = false;
+ int bpc = radeon_get_monitor_bpc(connector);
+ bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
/* reset the pll flags */
- pll->flags = 0;
+ radeon_crtc->pll_flags = 0;
if (ASIC_IS_AVIVO(rdev)) {
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
- pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
- RADEON_PLL_PREFER_CLOSEST_LOWER);
+ radeon_crtc->pll_flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
+ RADEON_PLL_PREFER_CLOSEST_LOWER);
if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
- pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
- pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
if (rdev->family < CHIP_RV770)
- pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
/* use frac fb div on APUs */
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
- pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
} else {
- pll->flags |= RADEON_PLL_LEGACY;
+ radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
if (mode->clock > 200000) /* range limits??? */
- pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
- pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
}
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- radeon_encoder = to_radeon_encoder(encoder);
- connector = radeon_get_connector_for_encoder(encoder);
- bpc = radeon_get_monitor_bpc(connector);
- encoder_mode = atombios_get_encoder_mode(encoder);
- is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
- if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
- (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
- if (connector) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector =
- radeon_connector->con_priv;
-
- dp_clock = dig_connector->dp_clock;
- }
- }
+ if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+ (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
- /* use recommended ref_div for ss */
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (ss_enabled) {
- if (ss->refdiv) {
- pll->flags |= RADEON_PLL_USE_REF_DIV;
- pll->reference_div = ss->refdiv;
- if (ASIC_IS_AVIVO(rdev))
- pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
- }
- }
- }
+ dp_clock = dig_connector->dp_clock;
+ }
+ }
- if (ASIC_IS_AVIVO(rdev)) {
- /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
- if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
- adjusted_clock = mode->clock * 2;
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- pll->flags |= RADEON_PLL_IS_LCD;
- } else {
- if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
- pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
- if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
- pll->flags |= RADEON_PLL_USE_REF_DIV;
+ /* use recommended ref_div for ss */
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (radeon_crtc->ss_enabled) {
+ if (radeon_crtc->ss.refdiv) {
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+ radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
+ if (ASIC_IS_AVIVO(rdev))
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
}
- break;
}
}
+ if (ASIC_IS_AVIVO(rdev)) {
+ /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
+ adjusted_clock = mode->clock * 2;
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ radeon_crtc->pll_flags |= RADEON_PLL_IS_LCD;
+ } else {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
+ radeon_crtc->pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
+ if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+ }
+
/* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
* accordingly based on the encoder/transmitter to work around
* special hw requirements.
@@ -650,7 +632,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
args.v1.ucEncodeMode = encoder_mode;
- if (ss_enabled && ss->percentage)
+ if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
args.v1.ucConfig |=
ADJUST_DISPLAY_CONFIG_SS_ENABLE;
@@ -663,7 +645,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
args.v3.sInput.ucEncodeMode = encoder_mode;
args.v3.sInput.ucDispPllConfig = 0;
- if (ss_enabled && ss->percentage)
+ if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_SS_ENABLE;
if (ENCODER_MODE_IS_DP(encoder_mode)) {
@@ -695,14 +677,14 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
index, (uint32_t *)&args);
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
if (args.v3.sOutput.ucRefDiv) {
- pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
- pll->flags |= RADEON_PLL_USE_REF_DIV;
- pll->reference_div = args.v3.sOutput.ucRefDiv;
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+ radeon_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv;
}
if (args.v3.sOutput.ucPostDiv) {
- pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
- pll->flags |= RADEON_PLL_USE_POST_DIV;
- pll->post_div = args.v3.sOutput.ucPostDiv;
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_POST_DIV;
+ radeon_crtc->pll_post_div = args.v3.sOutput.ucPostDiv;
}
break;
default:
@@ -837,7 +819,10 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
args.v3.ucFracFbDiv = frac_fb_div;
args.v3.ucPostDiv = post_div;
args.v3.ucPpll = pll_id;
- args.v3.ucMiscInfo = (pll_id << 2);
+ if (crtc_id == ATOM_CRTC2)
+ args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
+ else
+ args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1;
if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
args.v3.ucTransmitterId = encoder_id;
@@ -907,58 +892,29 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
-static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *encoder = NULL;
- struct radeon_encoder *radeon_encoder = NULL;
- u32 pll_clock = mode->clock;
- u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
- struct radeon_pll *pll;
- u32 adjusted_clock;
- int encoder_mode = 0;
- struct radeon_atom_ss ss;
- bool ss_enabled = false;
- int bpc = 8;
+ struct radeon_encoder *radeon_encoder =
+ to_radeon_encoder(radeon_crtc->encoder);
+ int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- radeon_encoder = to_radeon_encoder(encoder);
- encoder_mode = atombios_get_encoder_mode(encoder);
- break;
- }
- }
-
- if (!radeon_encoder)
- return;
-
- switch (radeon_crtc->pll_id) {
- case ATOM_PPLL1:
- pll = &rdev->clock.p1pll;
- break;
- case ATOM_PPLL2:
- pll = &rdev->clock.p2pll;
- break;
- case ATOM_DCPLL:
- case ATOM_PPLL_INVALID:
- default:
- pll = &rdev->clock.dcpll;
- break;
- }
+ radeon_crtc->bpc = 8;
+ radeon_crtc->ss_enabled = false;
if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
- (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
+ (radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector =
- radeon_get_connector_for_encoder(encoder);
+ radeon_get_connector_for_encoder(radeon_crtc->encoder);
struct radeon_connector *radeon_connector =
to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
int dp_clock;
- bpc = radeon_get_monitor_bpc(connector);
+ radeon_crtc->bpc = radeon_get_monitor_bpc(connector);
switch (encoder_mode) {
case ATOM_ENCODER_MODE_DP_MST:
@@ -966,45 +922,54 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
/* DP/eDP */
dp_clock = dig_connector->dp_clock / 10;
if (ASIC_IS_DCE4(rdev))
- ss_enabled =
- radeon_atombios_get_asic_ss_info(rdev, &ss,
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
ASIC_INTERNAL_SS_ON_DP,
dp_clock);
else {
if (dp_clock == 16200) {
- ss_enabled =
- radeon_atombios_get_ppll_ss_info(rdev, &ss,
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev,
+ &radeon_crtc->ss,
ATOM_DP_SS_ID2);
- if (!ss_enabled)
- ss_enabled =
- radeon_atombios_get_ppll_ss_info(rdev, &ss,
+ if (!radeon_crtc->ss_enabled)
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev,
+ &radeon_crtc->ss,
ATOM_DP_SS_ID1);
} else
- ss_enabled =
- radeon_atombios_get_ppll_ss_info(rdev, &ss,
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev,
+ &radeon_crtc->ss,
ATOM_DP_SS_ID1);
}
break;
case ATOM_ENCODER_MODE_LVDS:
if (ASIC_IS_DCE4(rdev))
- ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
- dig->lcd_ss_id,
- mode->clock / 10);
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev,
+ &radeon_crtc->ss,
+ dig->lcd_ss_id,
+ mode->clock / 10);
else
- ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss,
- dig->lcd_ss_id);
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev,
+ &radeon_crtc->ss,
+ dig->lcd_ss_id);
break;
case ATOM_ENCODER_MODE_DVI:
if (ASIC_IS_DCE4(rdev))
- ss_enabled =
- radeon_atombios_get_asic_ss_info(rdev, &ss,
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev,
+ &radeon_crtc->ss,
ASIC_INTERNAL_SS_ON_TMDS,
mode->clock / 10);
break;
case ATOM_ENCODER_MODE_HDMI:
if (ASIC_IS_DCE4(rdev))
- ss_enabled =
- radeon_atombios_get_asic_ss_info(rdev, &ss,
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev,
+ &radeon_crtc->ss,
ASIC_INTERNAL_SS_ON_HDMI,
mode->clock / 10);
break;
@@ -1014,43 +979,80 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
}
/* adjust pixel clock as needed */
- adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
+ radeon_crtc->adjusted_clock = atombios_adjust_pll(crtc, mode);
+
+ return true;
+}
+
+static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder =
+ to_radeon_encoder(radeon_crtc->encoder);
+ u32 pll_clock = mode->clock;
+ u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
+ struct radeon_pll *pll;
+ int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
+
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
+ pll = &rdev->clock.p1pll;
+ break;
+ case ATOM_PPLL2:
+ pll = &rdev->clock.p2pll;
+ break;
+ case ATOM_DCPLL:
+ case ATOM_PPLL_INVALID:
+ default:
+ pll = &rdev->clock.dcpll;
+ break;
+ }
+
+ /* update pll params */
+ pll->flags = radeon_crtc->pll_flags;
+ pll->reference_div = radeon_crtc->pll_reference_div;
+ pll->post_div = radeon_crtc->pll_post_div;
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
/* TV seems to prefer the legacy algo on some boards */
- radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
- &ref_div, &post_div);
+ radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
+ &fb_div, &frac_fb_div, &ref_div, &post_div);
else if (ASIC_IS_AVIVO(rdev))
- radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
- &ref_div, &post_div);
+ radeon_compute_pll_avivo(pll, radeon_crtc->adjusted_clock, &pll_clock,
+ &fb_div, &frac_fb_div, &ref_div, &post_div);
else
- radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
- &ref_div, &post_div);
+ radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
+ &fb_div, &frac_fb_div, &ref_div, &post_div);
- atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
+ atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id,
+ radeon_crtc->crtc_id, &radeon_crtc->ss);
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
encoder_mode, radeon_encoder->encoder_id, mode->clock,
- ref_div, fb_div, frac_fb_div, post_div, bpc, ss_enabled, &ss);
+ ref_div, fb_div, frac_fb_div, post_div,
+ radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);
- if (ss_enabled) {
+ if (radeon_crtc->ss_enabled) {
/* calculate ss amount and step size */
if (ASIC_IS_DCE4(rdev)) {
u32 step_size;
- u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000;
- ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
- ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+ u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000;
+ radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
+ radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
- if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
- step_size = (4 * amount * ref_div * (ss.rate * 2048)) /
+ if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
+ step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
(125 * 25 * pll->reference_freq / 100);
else
- step_size = (2 * amount * ref_div * (ss.rate * 2048)) /
+ step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
(125 * 25 * pll->reference_freq / 100);
- ss.step = step_size;
+ radeon_crtc->ss.step = step_size;
}
- atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
+ atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id,
+ radeon_crtc->crtc_id, &radeon_crtc->ss);
}
}
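
PLL setup is now split into a gather phase and a program phase: atombios_crtc_prepare_pll() caches bpc, spread-spectrum state, pll flags and the adjusted clock on radeon_crtc, and atombios_crtc_set_pll() later consumes them; those cached values are also what lets the new sharing helpers below compare crtcs. One plausible ordering from the crtc modeset path, as a sketch only (error handling elided; the patch itself wires these into the existing fixup/mode_set hooks):

    static int crtc_mode_set_sketch(struct drm_crtc *crtc,
                                    struct drm_display_mode *mode)
    {
            struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

            /* 1. Cache per-crtc pll parameters (ss, bpc, adjusted clock). */
            if (!atombios_crtc_prepare_pll(crtc, mode))
                    return -EINVAL;

            /* 2. Pick a PPLL, possibly shared, using the cached values. */
            radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);

            /* 3. Program the chosen PPLL from the cached state. */
            atombios_crtc_set_pll(crtc, mode);

            return 0;
    }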
@@ -1479,85 +1481,251 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
}
}
+/**
+ * radeon_get_pll_use_mask - look up a mask of which PPLLs are in use
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the mask of which PPLLs (Pixel PLLs) are in use.
+ */
+static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *test_crtc;
+ struct radeon_crtc *test_radeon_crtc;
+ u32 pll_in_use = 0;
+
+ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc == test_crtc)
+ continue;
+
+ test_radeon_crtc = to_radeon_crtc(test_crtc);
+ if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+ pll_in_use |= (1 << test_radeon_crtc->pll_id);
+ }
+ return pll_in_use;
+}
+
+/**
+ * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
+ * also in DP mode. For DP, a single PPLL can be used for all DP
+ * crtcs/encoders.
+ */
+static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *test_crtc;
+ struct radeon_crtc *test_radeon_crtc;
+
+ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc == test_crtc)
+ continue;
+ test_radeon_crtc = to_radeon_crtc(test_crtc);
+ if (test_radeon_crtc->encoder &&
+ ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+ /* for DP use the same PLL for all */
+ if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+ return test_radeon_crtc->pll_id;
+ }
+ }
+ return ATOM_PPLL_INVALID;
+}
+
+/**
+ * radeon_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
+ *
+ * @crtc: drm crtc
+ * @encoder: drm encoder
+ *
+ * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
+ * be shared (i.e., same clock).
+ */
+static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *test_crtc;
+ struct radeon_crtc *test_radeon_crtc;
+ u32 adjusted_clock, test_adjusted_clock;
+
+ adjusted_clock = radeon_crtc->adjusted_clock;
+
+ if (adjusted_clock == 0)
+ return ATOM_PPLL_INVALID;
+
+ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc == test_crtc)
+ continue;
+ test_radeon_crtc = to_radeon_crtc(test_crtc);
+ if (test_radeon_crtc->encoder &&
+ !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+ /* check if we are already driving this connector with another crtc */
+ if (test_radeon_crtc->connector == radeon_crtc->connector) {
+ /* if we are, return that pll */
+ if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+ return test_radeon_crtc->pll_id;
+ }
+ /* for non-DP check the clock */
+ test_adjusted_clock = test_radeon_crtc->adjusted_clock;
+ if ((crtc->mode.clock == test_crtc->mode.clock) &&
+ (adjusted_clock == test_adjusted_clock) &&
+ (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
+ (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
+ return test_radeon_crtc->pll_id;
+ }
+ }
+ return ATOM_PPLL_INVALID;
+}
+
+/**
+ * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders. For non-DP
+ * monitors a dedicated PPLL must be used. If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself. If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ *
+ * ASIC-specific PLL information
+ *
+ * DCE 6.1
+ * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
+ * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
+ *
+ * DCE 6.0
+ * - PPLL0 is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 5.0
+ * - DCPLL is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 3.0/4.0/4.1
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ */
static int radeon_atom_pick_pll(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *test_encoder;
- struct drm_crtc *test_crtc;
- uint32_t pll_in_use = 0;
+ struct radeon_encoder *radeon_encoder =
+ to_radeon_encoder(radeon_crtc->encoder);
+ u32 pll_in_use;
+ int pll;
if (ASIC_IS_DCE61(rdev)) {
- list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
- if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
- struct radeon_encoder *test_radeon_encoder =
- to_radeon_encoder(test_encoder);
- struct radeon_encoder_atom_dig *dig =
- test_radeon_encoder->enc_priv;
-
- if ((test_radeon_encoder->encoder_id ==
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
- (dig->linkb == false)) /* UNIPHY A uses PPLL2 */
- return ATOM_PPLL2;
+ struct radeon_encoder_atom_dig *dig =
+ radeon_encoder->enc_priv;
+
+ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
+ (dig->linkb == false))
+ /* UNIPHY A uses PPLL2 */
+ return ATOM_PPLL2;
+ else if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+ /* UNIPHY B/C/D/E/F */
+ if (rdev->clock.dp_extclk)
+ /* skip PPLL programming if using ext clock */
+ return ATOM_PPLL_INVALID;
+ else {
+ /* use the same PPLL for all DP monitors */
+ pll = radeon_get_shared_dp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
}
+ } else {
+ /* use the same PPLL for all monitors with the same clock */
+ pll = radeon_get_shared_nondp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
}
/* UNIPHY B/C/D/E/F */
- list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
- struct radeon_crtc *radeon_test_crtc;
-
- if (crtc == test_crtc)
- continue;
-
- radeon_test_crtc = to_radeon_crtc(test_crtc);
- if ((radeon_test_crtc->pll_id == ATOM_PPLL0) ||
- (radeon_test_crtc->pll_id == ATOM_PPLL1))
- pll_in_use |= (1 << radeon_test_crtc->pll_id);
- }
- if (!(pll_in_use & 4))
+ pll_in_use = radeon_get_pll_use_mask(crtc);
+ if (!(pll_in_use & (1 << ATOM_PPLL0)))
return ATOM_PPLL0;
- return ATOM_PPLL1;
+ if (!(pll_in_use & (1 << ATOM_PPLL1)))
+ return ATOM_PPLL1;
+ DRM_ERROR("unable to allocate a PPLL\n");
+ return ATOM_PPLL_INVALID;
} else if (ASIC_IS_DCE4(rdev)) {
- list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
- if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
- /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
- * depending on the asic:
- * DCE4: PPLL or ext clock
- * DCE5: DCPLL or ext clock
- *
- * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
- * PPLL/DCPLL programming and only program the DP DTO for the
- * crtc virtual pixel clock.
- */
- if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
- if (rdev->clock.dp_extclk)
- return ATOM_PPLL_INVALID;
- else if (ASIC_IS_DCE6(rdev))
- return ATOM_PPLL0;
- else if (ASIC_IS_DCE5(rdev))
- return ATOM_DCPLL;
- }
+ /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
+ * depending on the asic:
+ * DCE4: PPLL or ext clock
+ * DCE5: PPLL, DCPLL, or ext clock
+ * DCE6: PPLL, PPLL0, or ext clock
+ *
+ * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
+ * PPLL/DCPLL programming and only program the DP DTO for the
+ * crtc virtual pixel clock.
+ */
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+ if (rdev->clock.dp_extclk)
+ /* skip PPLL programming if using ext clock */
+ return ATOM_PPLL_INVALID;
+ else if (ASIC_IS_DCE6(rdev))
+ /* use PPLL0 for all DP */
+ return ATOM_PPLL0;
+ else if (ASIC_IS_DCE5(rdev))
+ /* use DCPLL for all DP */
+ return ATOM_DCPLL;
+ else {
+ /* use the same PPLL for all DP monitors */
+ pll = radeon_get_shared_dp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
}
+ } else {
+ /* use the same PPLL for all monitors with the same clock */
+ pll = radeon_get_shared_nondp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
}
-
- /* otherwise, pick one of the plls */
- list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
- struct radeon_crtc *radeon_test_crtc;
-
- if (crtc == test_crtc)
- continue;
-
- radeon_test_crtc = to_radeon_crtc(test_crtc);
- if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
- (radeon_test_crtc->pll_id <= ATOM_PPLL2))
- pll_in_use |= (1 << radeon_test_crtc->pll_id);
- }
- if (!(pll_in_use & 1))
+ /* all other cases */
+ pll_in_use = radeon_get_pll_use_mask(crtc);
+ if (!(pll_in_use & (1 << ATOM_PPLL2)))
+ return ATOM_PPLL2;
+ if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
- return ATOM_PPLL2;
- } else
- return radeon_crtc->crtc_id;
-
+ DRM_ERROR("unable to allocate a PPLL\n");
+ return ATOM_PPLL_INVALID;
+ } else {
+ if (ASIC_IS_AVIVO(rdev)) {
+ /* in DP mode, the DP ref clock can come from either PPLL
+ * depending on the asic:
+ * DCE3: PPLL1 or PPLL2
+ */
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+ /* use the same PPLL for all DP monitors */
+ pll = radeon_get_shared_dp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
+ } else {
+ /* use the same PPLL for all monitors with the same clock */
+ pll = radeon_get_shared_nondp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
+ }
+ /* all other cases */
+ pll_in_use = radeon_get_pll_use_mask(crtc);
+ if (!(pll_in_use & (1 << ATOM_PPLL2)))
+ return ATOM_PPLL2;
+ if (!(pll_in_use & (1 << ATOM_PPLL1)))
+ return ATOM_PPLL1;
+ DRM_ERROR("unable to allocate a PPLL\n");
+ return ATOM_PPLL_INVALID;
+ } else {
+ /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
+ return radeon_crtc->crtc_id;
+ }
+ }
}
void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
@@ -1588,18 +1756,13 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder =
+ to_radeon_encoder(radeon_crtc->encoder);
bool is_tvcv = false;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- /* find tv std */
- if (encoder->crtc == crtc) {
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- if (radeon_encoder->active_device &
- (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
- is_tvcv = true;
- }
- }
+ if (radeon_encoder->active_device &
+ (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ is_tvcv = true;
atombios_crtc_set_pll(crtc, adjusted_mode);
@@ -1626,8 +1789,34 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ /* assign the encoder to the radeon crtc to avoid repeated lookups later */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ radeon_crtc->encoder = encoder;
+ radeon_crtc->connector = radeon_get_connector_for_encoder(encoder);
+ break;
+ }
+ }
+ if ((radeon_crtc->encoder == NULL) || (radeon_crtc->connector == NULL)) {
+ radeon_crtc->encoder = NULL;
+ radeon_crtc->connector = NULL;
+ return false;
+ }
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
+ if (!atombios_crtc_prepare_pll(crtc, adjusted_mode))
+ return false;
+ /* pick pll */
+ radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
+ /* if we can't get a PPLL for a non-DP encoder, fail */
+ if ((radeon_crtc->pll_id == ATOM_PPLL_INVALID) &&
+ !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder)))
+ return false;
+
return true;
}
@@ -1638,8 +1827,6 @@ static void atombios_crtc_prepare(struct drm_crtc *crtc)
struct radeon_device *rdev = dev->dev_private;
radeon_crtc->in_mode_set = true;
- /* pick pll */
- radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
/* disable crtc pair power gating before programming */
if (ASIC_IS_DCE6(rdev))
@@ -1697,7 +1884,10 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
break;
}
done:
- radeon_crtc->pll_id = -1;
+ radeon_crtc->pll_id = ATOM_PPLL_INVALID;
+ radeon_crtc->adjusted_clock = 0;
+ radeon_crtc->encoder = NULL;
+ radeon_crtc->connector = NULL;
}
static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
@@ -1746,6 +1936,9 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
else
radeon_crtc->crtc_offset = 0;
}
- radeon_crtc->pll_id = -1;
+ radeon_crtc->pll_id = ATOM_PPLL_INVALID;
+ radeon_crtc->adjusted_clock = 0;
+ radeon_crtc->encoder = NULL;
+ radeon_crtc->connector = NULL;
drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
}
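
The PLL allocation above relies on radeon_get_pll_use_mask(), which this patch introduces in an earlier hunk not shown here. A minimal sketch consistent with how it is used (collect a bitmask of every PPLL already claimed by another crtc); the exact upstream helper may differ:

static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct radeon_crtc *test_radeon_crtc;
	u32 pll_in_use = 0;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_radeon_crtc = to_radeon_crtc(test_crtc);
		/* every valid pll_id held by another crtc marks that PPLL busy */
		if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
			pll_in_use |= (1 << test_radeon_crtc->pll_id);
	}
	return pll_in_use;
}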
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 3623b98ed3fe..d48224b0d6b4 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -653,9 +653,7 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
return false;
}
- DRM_DEBUG_KMS("link status %02x %02x %02x %02x %02x %02x\n",
- link_status[0], link_status[1], link_status[2],
- link_status[3], link_status[4], link_status[5]);
+ DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
return true;
}
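
%*ph is the kernel's compact hex-dump format extension (see Documentation/printk-formats.txt): the '*' consumes an int giving the number of bytes to print (up to 64), so the one-liner above is equivalent to the six explicit %02x conversions it replaces. For example:

	u8 link_status[6] = { 0x00, 0x01, 0x77, 0x00, 0x00, 0x00 };

	/* prints: link status 00 01 77 00 00 00 */
	DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);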
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 6e8803a1170c..806cbcc94fdd 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -28,9 +28,251 @@
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"
+#include <linux/backlight.h>
extern int atom_debug;
+static u8
+radeon_atom_get_backlight_level_from_reg(struct radeon_device *rdev)
+{
+ u8 backlight_level;
+ u32 bios_2_scratch;
+
+ if (rdev->family >= CHIP_R600)
+ bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+ else
+ bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+ backlight_level = ((bios_2_scratch & ATOM_S2_CURRENT_BL_LEVEL_MASK) >>
+ ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+ return backlight_level;
+}
+
+static void
+radeon_atom_set_backlight_level_to_reg(struct radeon_device *rdev,
+ u8 backlight_level)
+{
+ u32 bios_2_scratch;
+
+ if (rdev->family >= CHIP_R600)
+ bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+ else
+ bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+ bios_2_scratch &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+ bios_2_scratch |= ((backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
+ ATOM_S2_CURRENT_BL_LEVEL_MASK);
+
+ if (rdev->family >= CHIP_R600)
+ WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+ else
+ WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+}
+
+u8
+atombios_get_backlight_level(struct radeon_encoder *radeon_encoder)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+ return 0;
+
+ return radeon_atom_get_backlight_level_from_reg(rdev);
+}
+
+void
+atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
+{
+ struct drm_encoder *encoder = &radeon_encoder->base;
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder_atom_dig *dig;
+ DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
+ int index;
+
+ if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+ return;
+
+ if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+ radeon_encoder->enc_priv) {
+ dig = radeon_encoder->enc_priv;
+ dig->backlight_level = level;
+ radeon_atom_set_backlight_level_to_reg(rdev, dig->backlight_level);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+ if (dig->backlight_level == 0) {
+ args.ucAction = ATOM_LCD_BLOFF;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ } else {
+ args.ucAction = ATOM_LCD_BL_BRIGHTNESS_CONTROL;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ args.ucAction = ATOM_LCD_BLON;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig->backlight_level == 0)
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+ else {
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static u8 radeon_atom_bl_level(struct backlight_device *bd)
+{
+ u8 level;
+
+ /* Convert brightness to hardware level */
+ if (bd->props.brightness < 0)
+ level = 0;
+ else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
+ level = RADEON_MAX_BL_LEVEL;
+ else
+ level = bd->props.brightness;
+
+ return level;
+}
+
+static int radeon_atom_backlight_update_status(struct backlight_device *bd)
+{
+ struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+ struct radeon_encoder *radeon_encoder = pdata->encoder;
+
+ atombios_set_backlight_level(radeon_encoder, radeon_atom_bl_level(bd));
+
+ return 0;
+}
+
+static int radeon_atom_backlight_get_brightness(struct backlight_device *bd)
+{
+ struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+ struct radeon_encoder *radeon_encoder = pdata->encoder;
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ return radeon_atom_get_backlight_level_from_reg(rdev);
+}
+
+static const struct backlight_ops radeon_atom_backlight_ops = {
+ .get_brightness = radeon_atom_backlight_get_brightness,
+ .update_status = radeon_atom_backlight_update_status,
+};
+
+void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+ struct drm_connector *drm_connector)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct backlight_device *bd;
+ struct backlight_properties props;
+ struct radeon_backlight_privdata *pdata;
+ struct radeon_encoder_atom_dig *dig;
+ u8 backlight_level;
+
+ if (!radeon_encoder->enc_priv)
+ return;
+
+ if (!rdev->is_atom_bios)
+ return;
+
+ if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+ return;
+
+ pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
+ if (!pdata) {
+ DRM_ERROR("Memory allocation failed\n");
+ goto error;
+ }
+
+ memset(&props, 0, sizeof(props));
+ props.max_brightness = RADEON_MAX_BL_LEVEL;
+ props.type = BACKLIGHT_RAW;
+ bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+ pdata, &radeon_atom_backlight_ops, &props);
+ if (IS_ERR(bd)) {
+ DRM_ERROR("Backlight registration failed\n");
+ goto error;
+ }
+
+ pdata->encoder = radeon_encoder;
+
+ backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
+
+ dig = radeon_encoder->enc_priv;
+ dig->bl_dev = bd;
+
+ bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+
+ DRM_INFO("radeon atom DIG backlight initialized\n");
+
+ return;
+
+error:
+ kfree(pdata);
+ return;
+}
+
+static void radeon_atom_backlight_exit(struct radeon_encoder *radeon_encoder)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct backlight_device *bd = NULL;
+ struct radeon_encoder_atom_dig *dig;
+
+ if (!radeon_encoder->enc_priv)
+ return;
+
+ if (!rdev->is_atom_bios)
+ return;
+
+ if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+ return;
+
+ dig = radeon_encoder->enc_priv;
+ bd = dig->bl_dev;
+ dig->bl_dev = NULL;
+
+ if (bd) {
+ struct radeon_backlight_privdata *pdata;
+
+ pdata = bl_get_data(bd);
+ backlight_device_unregister(bd);
+ kfree(pdata);
+
+ DRM_INFO("radeon atom LVDS backlight unloaded\n");
+ }
+}
+
+#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+ struct drm_connector *drm_connector)
+{
+}
+
+static void radeon_atom_backlight_exit(struct radeon_encoder *encoder)
+{
+}
+
+#endif
+
/* evil but including atombios.h is much worse */
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
struct drm_display_mode *mode);
@@ -209,6 +451,32 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
}
+static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
+{
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ int bpc = 8;
+
+ if (connector)
+ bpc = radeon_get_monitor_bpc(connector);
+
+ switch (bpc) {
+ case 0:
+ return PANEL_BPC_UNDEFINE;
+ case 6:
+ return PANEL_6BIT_PER_COLOR;
+ case 8:
+ default:
+ return PANEL_8BIT_PER_COLOR;
+ case 10:
+ return PANEL_10BIT_PER_COLOR;
+ case 12:
+ return PANEL_12BIT_PER_COLOR;
+ case 16:
+ return PANEL_16BIT_PER_COLOR;
+ }
+}
+
+
union dvo_encoder_control {
ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
@@ -406,7 +674,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
return ATOM_ENCODER_MODE_DP;
/* DVO is always DVO */
- if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO)
+ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) ||
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
return ATOM_ENCODER_MODE_DVO;
connector = radeon_get_connector_for_encoder(encoder);
@@ -535,7 +804,6 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
int dp_clock = 0;
int dp_lane_count = 0;
int hpd_id = RADEON_HPD_NONE;
- int bpc = 8;
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -545,7 +813,6 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
hpd_id = radeon_connector->hpd.hpd;
- bpc = radeon_get_monitor_bpc(connector);
}
/* no dig encoder assigned */
@@ -612,37 +879,17 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
else
args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder);
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+ if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode))
args.v3.ucLaneNum = dp_lane_count;
else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v3.ucLaneNum = 8;
else
args.v3.ucLaneNum = 4;
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+ if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
args.v3.acConfig.ucDigSel = dig->dig_encoder;
- switch (bpc) {
- case 0:
- args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
- break;
- case 6:
- args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
- break;
- case 8:
- default:
- args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
- break;
- case 10:
- args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
- break;
- case 12:
- args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
- break;
- case 16:
- args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
- break;
- }
+ args.v3.ucBitPerColor = radeon_atom_get_bpc(encoder);
break;
case 4:
args.v4.ucAction = action;
@@ -652,41 +899,21 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
else
args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder);
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+ if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode))
args.v4.ucLaneNum = dp_lane_count;
else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v4.ucLaneNum = 8;
else
args.v4.ucLaneNum = 4;
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) {
+ if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) {
if (dp_clock == 270000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
else if (dp_clock == 540000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
}
args.v4.acConfig.ucDigSel = dig->dig_encoder;
- switch (bpc) {
- case 0:
- args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
- break;
- case 6:
- args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
- break;
- case 8:
- default:
- args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
- break;
- case 10:
- args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
- break;
- case 12:
- args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
- break;
- case 16:
- args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
- break;
- }
+ args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
if (hpd_id == RADEON_HPD_NONE)
args.v4.ucHPD_ID = 0;
else
@@ -799,8 +1026,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v1.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
- args.v1.usPixelClock =
- cpu_to_le16(dp_clock / 10);
+ args.v1.usPixelClock = cpu_to_le16(dp_clock / 10);
else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
@@ -857,8 +1083,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v2.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
- args.v2.usPixelClock =
- cpu_to_le16(dp_clock / 10);
+ args.v2.usPixelClock = cpu_to_le16(dp_clock / 10);
else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
@@ -900,8 +1125,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v3.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
- args.v3.usPixelClock =
- cpu_to_le16(dp_clock / 10);
+ args.v3.usPixelClock = cpu_to_le16(dp_clock / 10);
else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
@@ -960,8 +1184,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v4.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
- args.v4.usPixelClock =
- cpu_to_le16(dp_clock / 10);
+ args.v4.usPixelClock = cpu_to_le16(dp_clock / 10);
else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
@@ -1147,7 +1370,6 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
int dp_lane_count = 0;
int connector_object_id = 0;
u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
- int bpc = 8;
if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
connector = radeon_get_connector_for_encoder_init(encoder);
@@ -1163,7 +1385,6 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
dp_lane_count = dig_connector->dp_lane_count;
connector_object_id =
(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- bpc = radeon_get_monitor_bpc(connector);
}
memset(&args, 0, sizeof(args));
@@ -1221,27 +1442,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
break;
}
- switch (bpc) {
- case 0:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
- break;
- case 6:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
- break;
- case 8:
- default:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
- break;
- case 10:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
- break;
- case 12:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
- break;
- case 16:
- args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
- break;
- }
+ args.v3.sExtEncoder.ucBitPerColor = radeon_atom_get_bpc(encoder);
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
@@ -2286,6 +2487,8 @@ static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
void radeon_enc_destroy(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ radeon_atom_backlight_exit(radeon_encoder);
kfree(radeon_encoder->enc_priv);
drm_encoder_cleanup(encoder);
kfree(radeon_encoder);
@@ -2295,7 +2498,7 @@ static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
.destroy = radeon_enc_destroy,
};
-struct radeon_encoder_atom_dac *
+static struct radeon_encoder_atom_dac *
radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
{
struct drm_device *dev = radeon_encoder->base.dev;
@@ -2309,7 +2512,7 @@ radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
return dac;
}
-struct radeon_encoder_atom_dig *
+static struct radeon_encoder_atom_dig *
radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
{
int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
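
The new atom backlight code is an instance of the stock backlight class contract: register a backlight_device with a get_brightness/update_status pair plus private data, after which changes to bd->props.brightness (e.g. via /sys/class/backlight/radeon_bl*/brightness) are pushed to the hardware through update_status. A distilled sketch of that contract, with illustrative my_* names standing in for the driver specifics:

static int my_bl_update_status(struct backlight_device *bd)
{
	/* push the requested level to the hardware */
	my_hw_set_level(bl_get_data(bd), bd->props.brightness);
	return 0;
}

static int my_bl_get_brightness(struct backlight_device *bd)
{
	/* read the current level back from the hardware */
	return my_hw_get_level(bl_get_data(bd));
}

static const struct backlight_ops my_bl_ops = {
	.get_brightness = my_bl_get_brightness,
	.update_status  = my_bl_update_status,
};

static int my_bl_init(struct device *dev, void *driver_data)
{
	struct backlight_properties props;
	struct backlight_device *bd;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = RADEON_MAX_BL_LEVEL;
	bd = backlight_device_register("radeon_bl", dev, driver_data,
				       &my_bl_ops, &props);
	if (IS_ERR(bd))
		return PTR_ERR(bd);

	bd->props.brightness = bd->ops->get_brightness(bd);
	bd->props.power = FB_BLANK_UNBLANK;
	backlight_update_status(bd);	/* invokes .update_status once */
	return 0;
}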
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e93b80a6d4e9..c4ded396b78d 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -37,6 +37,16 @@
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
+static const u32 crtc_offsets[6] =
+{
+ EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_CRTC5_REGISTER_OFFSET
+};
+
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
@@ -109,17 +119,19 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
*/
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
int i;
- if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) {
+ if (crtc >= rdev->num_crtc)
+ return;
+
+ if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK))
+ if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
break;
udelay(1);
}
for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)
+ if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
break;
udelay(1);
}
@@ -314,6 +326,64 @@ void sumo_pm_init_profile(struct radeon_device *rdev)
}
/**
+ * btc_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (BTC, cayman).
+ * Used for profile mode only.
+ */
+void btc_pm_init_profile(struct radeon_device *rdev)
+{
+ int idx;
+
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+ /* starting with BTC, there is one state that is used for both
+ * MH and SH. The difference is that we always use the high clock
+ * index for mclk.
+ */
+ if (rdev->flags & RADEON_IS_MOBILITY)
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ else
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+}
+
+/**
* evergreen_pm_misc - set additional pm hw parameters callback.
*
* @rdev: radeon_device pointer
@@ -1109,7 +1179,7 @@ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
}
}
-int evergreen_pcie_gart_enable(struct radeon_device *rdev)
+static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r;
@@ -1168,7 +1238,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
return 0;
}
-void evergreen_pcie_gart_disable(struct radeon_device *rdev)
+static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
@@ -1193,7 +1263,7 @@ void evergreen_pcie_gart_disable(struct radeon_device *rdev)
radeon_gart_table_vram_unpin(rdev);
}
-void evergreen_pcie_gart_fini(struct radeon_device *rdev)
+static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
evergreen_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
@@ -1201,7 +1271,7 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev)
}
-void evergreen_agp_enable(struct radeon_device *rdev)
+static void evergreen_agp_enable(struct radeon_device *rdev)
{
u32 tmp;
@@ -1229,116 +1299,103 @@ void evergreen_agp_enable(struct radeon_device *rdev)
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
+ u32 crtc_enabled, tmp, frame_count, blackout;
+ int i, j;
+
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
- /* Stop all video */
+ /* disable VGA render */
WREG32(VGA_RENDER_CONTROL, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
- }
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
- }
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ /* blank the display controllers */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+ if (crtc_enabled) {
+ save->crtc_enabled[i] = true;
+ if (ASIC_IS_DCE6(rdev)) {
+ tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+ if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+ radeon_wait_for_vblank(rdev, i);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ }
+ } else {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
+ radeon_wait_for_vblank(rdev, i);
+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ }
+ }
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (radeon_get_vblank_counter(rdev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+ }
}
- WREG32(D1VGA_CONTROL, 0);
- WREG32(D2VGA_CONTROL, 0);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_D3VGA_CONTROL, 0);
- WREG32(EVERGREEN_D4VGA_CONTROL, 0);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_D5VGA_CONTROL, 0);
- WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+ radeon_mc_wait_for_idle(rdev);
+
+ blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+ if ((blackout & BLACKOUT_MODE_MASK) != 1) {
+ /* Block CPU access */
+ WREG32(BIF_FB_EN, 0);
+ /* blackout the MC */
+ blackout &= ~BLACKOUT_MODE_MASK;
+ WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
}
}
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
-
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
-
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
-
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
+ u32 tmp, frame_count;
+ int i, j;
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ /* update crtc base addresses */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)rdev->mc.vram_start);
}
-
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
- /* Unlock host access */
+
+ /* unblackout the MC */
+ tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
+ tmp &= ~BLACKOUT_MODE_MASK;
+ WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+ /* allow CPU access */
+ WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ if (ASIC_IS_DCE6(rdev)) {
+ tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ } else {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ }
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (radeon_get_vblank_counter(rdev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ /* Unlock vga access */
WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
@@ -1557,7 +1614,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
return 0;
}
-int evergreen_cp_resume(struct radeon_device *rdev)
+static int evergreen_cp_resume(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
@@ -2333,22 +2390,10 @@ int evergreen_asic_reset(struct radeon_device *rdev)
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
- switch (crtc) {
- case 0:
- return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
- case 1:
- return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
- case 2:
- return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
- case 3:
- return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
- case 4:
- return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
- case 5:
- return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
- default:
+ if (crtc >= rdev->num_crtc)
return 0;
- }
+ else
+ return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}
void evergreen_disable_interrupt_state(struct radeon_device *rdev)
@@ -2541,10 +2586,6 @@ int evergreen_irq_set(struct radeon_device *rdev)
DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
}
- if (rdev->irq.gui_idle) {
- DRM_DEBUG("gui idle\n");
- grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
- }
if (rdev->family >= CHIP_CAYMAN) {
cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
@@ -2726,7 +2767,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
}
}
-void evergreen_irq_disable(struct radeon_device *rdev)
+static void evergreen_irq_disable(struct radeon_device *rdev)
{
r600_disable_interrupts(rdev);
/* Wait and acknowledge irq */
@@ -3079,7 +3120,6 @@ restart_ih:
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
- wake_up(&rdev->irq.idle_queue);
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index e44a62a07fe3..2ceab2b52d69 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -846,6 +846,16 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
return -EINVAL;
}
+ if (!mipmap) {
+ if (llevel) {
+ dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ } else {
+ return 0; /* everything's ok */
+ }
+ }
+
/* check mipmap size */
for (i = 1; i <= llevel; i++) {
unsigned w, h, d;
@@ -995,7 +1005,7 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
* Assume that chunk_ib_index is properly set. Will return -EINVAL
* if packet is bigger than remaining ib size. or if packets is unknown.
**/
-int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
+static int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx)
{
@@ -1081,6 +1091,27 @@ static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
}
/**
+ * evergreen_cs_packet_next_is_pkt3_nop() - test if the next packet is NOP
+ * @p: structure holding the parser context.
+ *
+ * Check if the next packet is a packet3 NOP, which is how relocations
+ * are carried in the command stream.
+ **/
+static bool evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet p3reloc;
+ int r;
+
+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return false;
+ }
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ return false;
+ }
+ return true;
+}
+
+/**
* evergreen_cs_packet_next_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
*
@@ -2330,7 +2361,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
for (i = 0; i < (pkt->count / 8); i++) {
struct radeon_bo *texture, *mipmap;
u32 toffset, moffset;
- u32 size, offset;
+ u32 size, offset, mip_address, tex_dim;
switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
case SQ_TEX_VTX_VALID_TEXTURE:
@@ -2359,14 +2390,28 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
texture = reloc->robj;
toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+
/* tex mip base */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad SET_RESOURCE (tex)\n");
- return -EINVAL;
+ tex_dim = ib[idx+1+(i*8)+0] & 0x7;
+ mip_address = ib[idx+1+(i*8)+3];
+
+ if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
+ !mip_address &&
+ !evergreen_cs_packet_next_is_pkt3_nop(p)) {
+ /* MIP_ADDRESS should point to FMASK for an MSAA texture.
+ * It should be 0 if FMASK is disabled. */
+ moffset = 0;
+ mipmap = NULL;
+ } else {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ return -EINVAL;
+ }
+ moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ mipmap = reloc->robj;
}
- moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- mipmap = reloc->robj;
+
r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 8beac1065025..034f4c22e5db 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -218,6 +218,8 @@
#define EVERGREEN_CRTC_CONTROL 0x6e70
# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL 0x6e74
+# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
#define EVERGREEN_CRTC_STATUS 0x6e8c
# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 79347855d9bf..df542f1a5dfb 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -87,6 +87,10 @@
#define CONFIG_MEMSIZE 0x5428
+#define BIF_FB_EN 0x5490
+#define FB_READ_EN (1 << 0)
+#define FB_WRITE_EN (1 << 1)
+
#define CP_COHER_BASE 0x85F8
#define CP_STALLED_STAT1 0x8674
#define CP_STALLED_STAT2 0x8678
@@ -430,6 +434,9 @@
#define NOOFCHAN_MASK 0x00003000
#define MC_SHARED_CHREMAP 0x2008
+#define MC_SHARED_BLACKOUT_CNTL 0x20ac
+#define BLACKOUT_MODE_MASK 0x00000007
+
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000003
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 853800e8582f..9a46f7d4e61f 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -726,7 +726,7 @@ void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
WREG32(VM_INVALIDATE_REQUEST, 1);
}
-int cayman_pcie_gart_enable(struct radeon_device *rdev)
+static int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
int i, r;
@@ -782,7 +782,7 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
(u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
- WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
cayman_pcie_gart_tlb_flush(rdev);
@@ -793,7 +793,7 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
return 0;
}
-void cayman_pcie_gart_disable(struct radeon_device *rdev)
+static void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
@@ -813,7 +813,7 @@ void cayman_pcie_gart_disable(struct radeon_device *rdev)
radeon_gart_table_vram_unpin(rdev);
}
-void cayman_pcie_gart_fini(struct radeon_device *rdev)
+static void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
cayman_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
@@ -879,12 +879,13 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
- radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
+ radeon_ring_write(ring, ib->length_dw |
+ (ib->vm ? (ib->vm->id << 24) : 0));
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, ib->vm_id);
+ radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
@@ -1004,7 +1005,7 @@ static void cayman_cp_fini(struct radeon_device *rdev)
radeon_scratch_free(rdev, ring->rptr_save_reg);
}
-int cayman_cp_resume(struct radeon_device *rdev)
+static int cayman_cp_resume(struct radeon_device *rdev)
{
static const int ridx[] = {
RADEON_RING_TYPE_GFX_INDEX,
@@ -1496,53 +1497,16 @@ void cayman_vm_fini(struct radeon_device *rdev)
{
}
-int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
-{
- WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
- WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
- /* flush hdp cache */
- WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
- /* bits 0-7 are the VM contexts0-7 */
- WREG32(VM_INVALIDATE_REQUEST, 1 << id);
- return 0;
-}
-
-void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
-{
- WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
- WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
- /* flush hdp cache */
- WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
- /* bits 0-7 are the VM contexts0-7 */
- WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
-}
-
-void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
-{
- if (vm->id == -1)
- return;
-
- /* flush hdp cache */
- WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
- /* bits 0-7 are the VM contexts0-7 */
- WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
-}
-
-#define R600_PTE_VALID (1 << 0)
+#define R600_ENTRY_VALID (1 << 0)
#define R600_PTE_SYSTEM (1 << 1)
#define R600_PTE_SNOOPED (1 << 2)
#define R600_PTE_READABLE (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
- struct radeon_vm *vm,
- uint32_t flags)
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
{
uint32_t r600_flags = 0;
-
- r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+ r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
if (flags & RADEON_VM_PAGE_SYSTEM) {
@@ -1552,12 +1516,76 @@ uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
return r600_flags;
}
-void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
- unsigned pfn, uint64_t addr, uint32_t flags)
+/**
+ * cayman_vm_set_page - update the page tables using the CP
+ *
+ * @rdev: radeon_device pointer
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the CP (cayman-si).
+ */
+void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
+ uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+ int i;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + count * 2));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (i = 0; i < count; ++i) {
+ uint64_t value = 0;
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ addr += incr;
+
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ addr += incr;
+ }
+
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+}
+
+/**
+ * cayman_vm_flush - vm flush using the CP
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Update the page table base and flush the VM TLB
+ * using the CP (cayman-si).
+ */
+void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
- void __iomem *ptr = (void *)vm->pt;
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0));
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0));
+ radeon_ring_write(ring, vm->last_pfn);
- addr = addr & 0xFFFFFFFFFFFFF000ULL;
- addr |= flags;
- writeq(addr, ptr + (pfn * 8));
+ radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ radeon_ring_write(ring, 0x1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
+ radeon_ring_write(ring, 1 << vm->id);
}
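
The count field of a PACKET3 header encodes the number of payload dwords minus one (compare PACKET3_SET_CONFIG_REG above, which carries two payload dwords with a count of 1). That is where the "1 + count * 2" in cayman_vm_set_page() comes from; as a sketch:

/* ME_WRITE payload: 64-bit page-entry address (2 dwords) plus one
 * 64-bit value per entry (2 dwords each); the header count field is
 * the payload size minus one. Hypothetical helper for illustration. */
static inline u32 me_write_count_field(unsigned count)
{
	return (2 + 2 * count) - 1;	/* == 1 + count * 2 */
}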
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 870db340d377..2423d1b5d385 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -585,6 +585,7 @@
#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
#define PACKET3_SET_RESOURCE_INDIRECT 0x74
#define PACKET3_SET_APPEND_CNT 0x75
+#define PACKET3_ME_WRITE 0x7A
#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8d7e33a0b243..b41237bf884b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -80,10 +80,12 @@ MODULE_FIRMWARE(FIRMWARE_R520);
*/
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
int i;
- if (radeon_crtc->crtc_id == 0) {
+ if (crtc >= rdev->num_crtc)
+ return;
+
+ if (crtc == 0) {
if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
for (i = 0; i < rdev->usec_timeout; i++) {
if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
@@ -698,9 +700,6 @@ int r100_irq_set(struct radeon_device *rdev)
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
tmp |= RADEON_SW_INT_ENABLE;
}
- if (rdev->irq.gui_idle) {
- tmp |= RADEON_GUI_IDLE_MASK;
- }
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
tmp |= RADEON_CRTC_VBLANK_MASK;
@@ -737,12 +736,6 @@ static uint32_t r100_irq_ack(struct radeon_device *rdev)
RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
- /* the interrupt works, but the status bit is permanently asserted */
- if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
- if (!rdev->irq.gui_idle_acked)
- irq_mask |= RADEON_GUI_IDLE_STAT;
- }
-
if (irqs) {
WREG32(RADEON_GEN_INT_STATUS, irqs);
}
@@ -754,9 +747,6 @@ int r100_irq_process(struct radeon_device *rdev)
uint32_t status, msi_rearm;
bool queue_hotplug = false;
- /* reset gui idle ack. the status bit is broken */
- rdev->irq.gui_idle_acked = false;
-
status = r100_irq_ack(rdev);
if (!status) {
return IRQ_NONE;
@@ -769,11 +759,6 @@ int r100_irq_process(struct radeon_device *rdev)
if (status & RADEON_SW_INT_TEST) {
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
- /* gui idle interrupt */
- if (status & RADEON_GUI_IDLE_STAT) {
- rdev->irq.gui_idle_acked = true;
- wake_up(&rdev->irq.idle_queue);
- }
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
if (rdev->irq.crtc_vblank_int[0]) {
@@ -803,8 +788,6 @@ int r100_irq_process(struct radeon_device *rdev)
}
status = r100_irq_ack(rdev);
}
- /* reset gui idle ack. the status bit is broken */
- rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (rdev->msi_enabled) {
@@ -2530,7 +2513,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
/*
* Global GPU functions
*/
-void r100_errata(struct radeon_device *rdev)
+static void r100_errata(struct radeon_device *rdev)
{
rdev->pll_errata = 0;
@@ -2545,51 +2528,7 @@ void r100_errata(struct radeon_device *rdev)
}
}
-/* Wait for vertical sync on primary CRTC */
-void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
-{
- uint32_t crtc_gen_cntl, tmp;
- int i;
-
- crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
- if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
- !(crtc_gen_cntl & RADEON_CRTC_EN)) {
- return;
- }
- /* Clear the CRTC_VBLANK_SAVE bit */
- WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_CRTC_STATUS);
- if (tmp & RADEON_CRTC_VBLANK_SAVE) {
- return;
- }
- DRM_UDELAY(1);
- }
-}
-
-/* Wait for vertical sync on secondary CRTC */
-void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
-{
- uint32_t crtc2_gen_cntl, tmp;
- int i;
-
- crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
- if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
- !(crtc2_gen_cntl & RADEON_CRTC2_EN))
- return;
-
- /* Clear the CRTC_VBLANK_SAVE bit */
- WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_CRTC2_STATUS);
- if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
- return;
- }
- DRM_UDELAY(1);
- }
-}
-
-int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
+static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
{
unsigned i;
uint32_t tmp;
@@ -2950,7 +2889,7 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
WREG32(RADEON_CONFIG_CNTL, temp);
}
-void r100_mc_init(struct radeon_device *rdev)
+static void r100_mc_init(struct radeon_device *rdev)
{
u64 base;
@@ -3022,7 +2961,7 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
r100_pll_errata_after_data(rdev);
}
-void r100_set_safe_registers(struct radeon_device *rdev)
+static void r100_set_safe_registers(struct radeon_device *rdev)
{
if (ASIC_IS_RN50(rdev)) {
rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
@@ -3817,9 +3756,10 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256);
+ r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
if (r) {
- return r;
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ goto free_scratch;
}
ib.ptr[0] = PACKET0(scratch, 0);
ib.ptr[1] = 0xDEADBEEF;
@@ -3832,13 +3772,13 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.length_dw = 8;
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
- radeon_scratch_free(rdev, scratch);
- radeon_ib_free(rdev, &ib);
- return r;
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ goto free_ib;
}
r = radeon_fence_wait(ib.fence, false);
if (r) {
- return r;
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ goto free_ib;
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
@@ -3854,8 +3794,10 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
scratch, tmp);
r = -EINVAL;
}
- radeon_scratch_free(rdev, scratch);
+free_ib:
radeon_ib_free(rdev, &ib);
+free_scratch:
+ radeon_scratch_free(rdev, scratch);
return r;
}
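
The error paths in r100_ib_test() are reworked here into the kernel's usual goto-unwind idiom (the r600_ib_test() hunk further down gets the same treatment): acquire resources in order, jump on failure to the label that releases everything acquired so far, and fall through the labels in reverse order on success. A minimal standalone sketch of the pattern, with hypothetical helpers standing in for the scratch register and IB allocation:

    /* Hedged sketch of the goto-based unwind; get_scratch()/get_ib()/submit()
     * are made-up stand-ins, not radeon APIs. */
    #include <stdlib.h>

    static int get_scratch(void **p) { *p = malloc(16); return *p ? 0 : -1; }
    static int get_ib(void **p)      { *p = malloc(64); return *p ? 0 : -1; }
    static int submit(void *ib)      { (void)ib; return 0; }

    static int ib_test(void)
    {
            void *scratch, *ib;
            int r;

            r = get_scratch(&scratch);
            if (r)
                    return r;               /* nothing acquired yet */
            r = get_ib(&ib);
            if (r)
                    goto free_scratch;      /* release only what we hold */
            r = submit(ib);
            if (r)
                    goto free_ib;           /* both resources live here */
            /* success also falls through both labels */
    free_ib:
            free(ib);
    free_scratch:
            free(scratch);
            return r;
    }
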
@@ -3964,7 +3906,7 @@ static void r100_mc_program(struct radeon_device *rdev)
r100_mc_resume(rdev, &save);
}
-void r100_clock_startup(struct radeon_device *rdev)
+static void r100_clock_startup(struct radeon_device *rdev)
{
u32 tmp;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 646a1927dda7..4949bfc14b58 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -296,7 +296,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_unlock_commit(rdev, ring);
}
-void r300_errata(struct radeon_device *rdev)
+static void r300_errata(struct radeon_device *rdev)
{
rdev->pll_errata = 0;
@@ -322,7 +322,7 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void r300_gpu_init(struct radeon_device *rdev)
+static void r300_gpu_init(struct radeon_device *rdev)
{
uint32_t gb_tile_config, tmp;
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 079d3c52c08a..28b4f871aaf4 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -119,7 +119,7 @@ static void r520_vram_get_type(struct radeon_device *rdev)
rdev->mc.vram_width *= 2;
}
-void r520_mc_init(struct radeon_device *rdev)
+static void r520_mc_init(struct radeon_device *rdev)
{
r520_vram_get_type(rdev);
@@ -131,7 +131,7 @@ void r520_mc_init(struct radeon_device *rdev)
radeon_update_bandwidth_info(rdev);
}
-void r520_mc_program(struct radeon_device *rdev)
+static void r520_mc_program(struct radeon_device *rdev)
{
struct rv515_mc_save save;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index d79c639ae739..39b743fff791 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -98,7 +98,7 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev);
/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
-void r600_gpu_init(struct radeon_device *rdev);
+static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
@@ -881,7 +881,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
return radeon_gart_table_vram_alloc(rdev);
}
-int r600_pcie_gart_enable(struct radeon_device *rdev)
+static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r, i;
@@ -938,7 +938,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
return 0;
}
-void r600_pcie_gart_disable(struct radeon_device *rdev)
+static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
int i;
@@ -971,14 +971,14 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
radeon_gart_table_vram_unpin(rdev);
}
-void r600_pcie_gart_fini(struct radeon_device *rdev)
+static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
r600_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
}
-void r600_agp_enable(struct radeon_device *rdev)
+static void r600_agp_enable(struct radeon_device *rdev)
{
u32 tmp;
int i;
@@ -1158,7 +1158,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
}
}
-int r600_mc_init(struct radeon_device *rdev)
+static int r600_mc_init(struct radeon_device *rdev)
{
u32 tmp;
int chansize, numchan;
@@ -1258,7 +1258,7 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
* reset, it's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
*/
-int r600_gpu_soft_reset(struct radeon_device *rdev)
+static int r600_gpu_soft_reset(struct radeon_device *rdev)
{
struct rv515_mc_save save;
u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
@@ -1433,7 +1433,7 @@ int r600_count_pipe_bits(uint32_t val)
return ret;
}
-void r600_gpu_init(struct radeon_device *rdev)
+static void r600_gpu_init(struct radeon_device *rdev)
{
u32 tiling_config;
u32 ramcfg;
@@ -2347,7 +2347,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
/* FIXME: implement */
}
-int r600_startup(struct radeon_device *rdev)
+static int r600_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
@@ -2635,10 +2635,10 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ib_get(rdev, ring->idx, &ib, 256);
+ r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
if (r) {
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
- return r;
+ goto free_scratch;
}
ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
@@ -2646,15 +2646,13 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.length_dw = 3;
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
- radeon_scratch_free(rdev, scratch);
- radeon_ib_free(rdev, &ib);
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
- return r;
+ goto free_ib;
}
r = radeon_fence_wait(ib.fence, false);
if (r) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
- return r;
+ goto free_ib;
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
@@ -2669,8 +2667,10 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
scratch, tmp);
r = -EINVAL;
}
- radeon_scratch_free(rdev, scratch);
+free_ib:
radeon_ib_free(rdev, &ib);
+free_scratch:
+ radeon_scratch_free(rdev, scratch);
return r;
}
@@ -3088,10 +3088,6 @@ int r600_irq_set(struct radeon_device *rdev)
DRM_DEBUG("r600_irq_set: hdmi 0\n");
hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
}
- if (rdev->irq.gui_idle) {
- DRM_DEBUG("gui idle\n");
- grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
- }
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
@@ -3475,7 +3471,6 @@ restart_ih:
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
- wake_up(&rdev->irq.idle_queue);
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 3c031a48205d..661fec2a2cc1 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -489,31 +489,36 @@ set_default_state(drm_radeon_private_t *dev_priv)
ADVANCE_RING();
}
-static uint32_t i2f(uint32_t input)
+/* 23 bits of float fractional data */
+#define I2F_FRAC_BITS 23
+#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
+
+/*
+ * Converts an unsigned integer into its 32-bit IEEE floating point representation.
+ * Will be exact from 0 to 2^24. Above that, we round towards zero
+ * as the fractional bits will not fit in a float. (It would be better to
+ * round towards even as the fpu does, but that is slower.)
+ */
+__pure uint32_t int2float(uint32_t x)
{
- u32 result, i, exponent, fraction;
-
- if ((input & 0x3fff) == 0)
- result = 0; /* 0 is a special case */
- else {
- exponent = 140; /* exponent biased by 127; */
- fraction = (input & 0x3fff) << 10; /* cheat and only
- handle numbers below 2^^15 */
- for (i = 0; i < 14; i++) {
- if (fraction & 0x800000)
- break;
- else {
- fraction = fraction << 1; /* keep
- shifting left until top bit = 1 */
- exponent = exponent - 1;
- }
- }
- result = exponent << 23 | (fraction & 0x7fffff); /* mask
- off top bit; assumed 1 */
- }
- return result;
-}
+ uint32_t msb, exponent, fraction;
+
+ /* Zero is special */
+ if (!x) return 0;
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+ /*
+ * Use a rotate instead of a shift because that works both leftwards
+ * and rightwards due to the mod(32) behaviour. This means we don't
+ * need to check to see if we are above 2^24 or not.
+ */
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return fraction + exponent;
+}
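
A quick userspace check of the rotate-based conversion above, under two stated assumptions: __builtin_clz (GCC/Clang) stands in for the kernel's __fls(), and ror32() is hand-rolled. The rotate amount (msb - 23) & 0x1f wraps modulo 32, so when msb < 23 it behaves as a left shift and when msb > 23 as a right shift, which is why one expression covers inputs on both sides of 2^23. Up to 2^24 the result matches the FPU bit-for-bit:

    /* Userspace sketch, not the kernel build; __builtin_clz and the local
     * ror32() are assumptions replacing the kernel helpers. */
    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define FRAC_BITS 23
    #define FRAC_MASK ((1u << FRAC_BITS) - 1)

    static uint32_t ror32(uint32_t w, unsigned s)
    {
            s &= 31;
            return s ? (w >> s) | (w << (32 - s)) : w;
    }

    static uint32_t int2float(uint32_t x)
    {
            uint32_t msb, exponent, fraction;

            if (!x)
                    return 0;
            msb = 31 - __builtin_clz(x);            /* like __fls() */
            fraction = ror32(x, (msb - FRAC_BITS) & 0x1f) & FRAC_MASK;
            exponent = (127 + msb) << FRAC_BITS;
            return fraction + exponent;
    }

    int main(void)
    {
            for (uint32_t i = 0; i <= (1u << 24); i += 97) {
                    float f = (float)i;             /* exact for i <= 2^24 */
                    uint32_t bits;

                    memcpy(&bits, &f, sizeof(bits));
                    assert(int2float(i) == bits);
            }
            return 0;
    }
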
static int r600_nomm_get_vb(struct drm_device *dev)
{
@@ -632,20 +637,20 @@ r600_blit_copy(struct drm_device *dev,
vb = r600_nomm_get_vb_ptr(dev);
}
- vb[0] = i2f(dst_x);
+ vb[0] = int2float(dst_x);
vb[1] = 0;
- vb[2] = i2f(src_x);
+ vb[2] = int2float(src_x);
vb[3] = 0;
- vb[4] = i2f(dst_x);
- vb[5] = i2f(h);
- vb[6] = i2f(src_x);
- vb[7] = i2f(h);
+ vb[4] = int2float(dst_x);
+ vb[5] = int2float(h);
+ vb[6] = int2float(src_x);
+ vb[7] = int2float(h);
- vb[8] = i2f(dst_x + cur_size);
- vb[9] = i2f(h);
- vb[10] = i2f(src_x + cur_size);
- vb[11] = i2f(h);
+ vb[8] = int2float(dst_x + cur_size);
+ vb[9] = int2float(h);
+ vb[10] = int2float(src_x + cur_size);
+ vb[11] = int2float(h);
/* src */
set_tex_resource(dev_priv, FMT_8,
@@ -721,20 +726,20 @@ r600_blit_copy(struct drm_device *dev,
vb = r600_nomm_get_vb_ptr(dev);
}
- vb[0] = i2f(dst_x / 4);
+ vb[0] = int2float(dst_x / 4);
vb[1] = 0;
- vb[2] = i2f(src_x / 4);
+ vb[2] = int2float(src_x / 4);
vb[3] = 0;
- vb[4] = i2f(dst_x / 4);
- vb[5] = i2f(h);
- vb[6] = i2f(src_x / 4);
- vb[7] = i2f(h);
+ vb[4] = int2float(dst_x / 4);
+ vb[5] = int2float(h);
+ vb[6] = int2float(src_x / 4);
+ vb[7] = int2float(h);
- vb[8] = i2f((dst_x + cur_size) / 4);
- vb[9] = i2f(h);
- vb[10] = i2f((src_x + cur_size) / 4);
- vb[11] = i2f(h);
+ vb[8] = int2float((dst_x + cur_size) / 4);
+ vb[9] = int2float(h);
+ vb[10] = int2float((src_x + cur_size) / 4);
+ vb[11] = int2float(h);
/* src */
set_tex_resource(dev_priv, FMT_8_8_8_8,
@@ -804,20 +809,20 @@ r600_blit_swap(struct drm_device *dev,
dx2 = dx + w;
dy2 = dy + h;
- vb[0] = i2f(dx);
- vb[1] = i2f(dy);
- vb[2] = i2f(sx);
- vb[3] = i2f(sy);
+ vb[0] = int2float(dx);
+ vb[1] = int2float(dy);
+ vb[2] = int2float(sx);
+ vb[3] = int2float(sy);
- vb[4] = i2f(dx);
- vb[5] = i2f(dy2);
- vb[6] = i2f(sx);
- vb[7] = i2f(sy2);
+ vb[4] = int2float(dx);
+ vb[5] = int2float(dy2);
+ vb[6] = int2float(sx);
+ vb[7] = int2float(sy2);
- vb[8] = i2f(dx2);
- vb[9] = i2f(dy2);
- vb[10] = i2f(sx2);
- vb[11] = i2f(sy2);
+ vb[8] = int2float(dx2);
+ vb[9] = int2float(dy2);
+ vb[10] = int2float(sx2);
+ vb[11] = int2float(sy2);
switch(cpp) {
case 4:
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 2bef8549ddfe..1c7ed3a5d045 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -455,46 +455,6 @@ set_default_state(struct radeon_device *rdev)
radeon_ring_write(ring, sq_stack_resource_mgmt_2);
}
-#define I2F_MAX_BITS 15
-#define I2F_MAX_INPUT ((1 << I2F_MAX_BITS) - 1)
-#define I2F_SHIFT (24 - I2F_MAX_BITS)
-
-/*
- * Converts unsigned integer into 32-bit IEEE floating point representation.
- * Conversion is not universal and only works for the range from 0
- * to 2^I2F_MAX_BITS-1. Currently we only use it with inputs between
- * 0 and 16384 (inclusive), so I2F_MAX_BITS=15 is enough. If necessary,
- * I2F_MAX_BITS can be increased, but that will add to the loop iterations
- * and slow us down. Conversion is done by shifting the input and counting
- * down until the first 1 reaches bit position 23. The resulting counter
- * and the shifted input are, respectively, the exponent and the fraction.
- * The sign is always zero.
- */
-static uint32_t i2f(uint32_t input)
-{
- u32 result, i, exponent, fraction;
-
- WARN_ON_ONCE(input > I2F_MAX_INPUT);
-
- if ((input & I2F_MAX_INPUT) == 0)
- result = 0;
- else {
- exponent = 126 + I2F_MAX_BITS;
- fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT;
-
- for (i = 0; i < I2F_MAX_BITS; i++) {
- if (fraction & 0x800000)
- break;
- else {
- fraction = fraction << 1;
- exponent = exponent - 1;
- }
- }
- result = exponent << 23 | (fraction & 0x7fffff);
- }
- return result;
-}
-
int r600_blit_init(struct radeon_device *rdev)
{
u32 obj_size;
@@ -766,14 +726,14 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
vb_cpu_addr[3] = 0;
vb_cpu_addr[4] = 0;
- vb_cpu_addr[5] = i2f(h);
+ vb_cpu_addr[5] = int2float(h);
vb_cpu_addr[6] = 0;
- vb_cpu_addr[7] = i2f(h);
+ vb_cpu_addr[7] = int2float(h);
- vb_cpu_addr[8] = i2f(w);
- vb_cpu_addr[9] = i2f(h);
- vb_cpu_addr[10] = i2f(w);
- vb_cpu_addr[11] = i2f(h);
+ vb_cpu_addr[8] = int2float(w);
+ vb_cpu_addr[9] = int2float(h);
+ vb_cpu_addr[10] = int2float(w);
+ vb_cpu_addr[11] = int2float(h);
rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
w, h, w, src_gpu_addr, size_in_bytes);
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
index f437d36dd98c..2f3ce7a75976 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -35,4 +35,5 @@ extern const u32 r6xx_default_state[];
extern const u32 r6xx_ps_size, r6xx_vs_size;
extern const u32 r6xx_default_size, r7xx_default_size;
+__pure uint32_t int2float(uint32_t x);
#endif
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index f37676d7f217..853f05ced1b1 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -847,7 +847,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
* Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet is unknown.
**/
-int r600_cs_packet_parse(struct radeon_cs_parser *p,
+static int r600_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx)
{
@@ -2180,7 +2180,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
break;
case PACKET3_STRMOUT_BASE_UPDATE:
- if (p->family < CHIP_RV770) {
+ /* RS780 and RS880 also need this */
+ if (p->family < CHIP_RS780) {
DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e3558c3ef24a..857a7d7862fb 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -53,7 +53,7 @@ enum r600_hdmi_iec_status_bits {
AUDIO_STATUS_LEVEL = 0x80
};
-struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
+static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
/* 32kHz 44.1kHz 48kHz */
/* Clock N CTS N CTS N CTS */
{ 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 59a15315ae9f..b04c06444d8b 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -123,6 +123,7 @@ extern int radeon_lockup_timeout;
#define CAYMAN_RING_TYPE_CP2_INDEX 2
/* hardcode those limit for now */
+#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
#define RADEON_IB_VM_MAX_SIZE (64 << 10)
@@ -253,6 +254,22 @@ static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
}
}
+static inline bool radeon_fence_is_earlier(struct radeon_fence *a,
+ struct radeon_fence *b)
+{
+ if (!a) {
+ return false;
+ }
+
+ if (!b) {
+ return true;
+ }
+
+ BUG_ON(a->ring != b->ring);
+
+ return a->seq < b->seq;
+}
+
/*
* Tiling registers
*/
@@ -275,18 +292,20 @@ struct radeon_mman {
/* bo virtual address in a specific vm */
struct radeon_bo_va {
- /* bo list is protected by bo being reserved */
+ /* protected by bo being reserved */
struct list_head bo_list;
- /* vm list is protected by vm mutex */
- struct list_head vm_list;
- /* constant after initialization */
- struct radeon_vm *vm;
- struct radeon_bo *bo;
uint64_t soffset;
uint64_t eoffset;
uint32_t flags;
- struct radeon_fence *fence;
bool valid;
+ unsigned ref_count;
+
+ /* protected by vm mutex */
+ struct list_head vm_list;
+
+ /* constant after initialization */
+ struct radeon_vm *vm;
+ struct radeon_bo *bo;
};
struct radeon_bo {
@@ -566,9 +585,6 @@ struct radeon_irq {
atomic_t pflip[RADEON_MAX_CRTCS];
wait_queue_head_t vblank_queue;
bool hpd[RADEON_MAX_HPD_PINS];
- bool gui_idle;
- bool gui_idle_acked;
- wait_queue_head_t idle_queue;
bool afmt[RADEON_MAX_AFMT_BLOCKS];
union radeon_irq_stat_regs stat_regs;
};
@@ -583,7 +599,6 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
-int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev);
/*
* CP & rings.
@@ -596,7 +611,7 @@ struct radeon_ib {
uint32_t *ptr;
int ring;
struct radeon_fence *fence;
- unsigned vm_id;
+ struct radeon_vm *vm;
bool is_const_ib;
struct radeon_fence *sync_to[RADEON_NUM_RINGS];
struct radeon_semaphore *semaphore;
@@ -632,41 +647,38 @@ struct radeon_ring {
/*
* VM
*/
+
+/* maximum number of VMIDs */
+#define RADEON_NUM_VM 16
+
+/* defines number of bits in page table versus page directory,
+ * a page is 4KB so we have 12 bits offset, 9 bits in the page
+ * table and the remaining 19 bits are in the page directory */
+#define RADEON_VM_BLOCK_SIZE 9
+
+/* number of entries in page table */
+#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
+
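+
With these constants a GPU virtual address splits into a 12-bit page offset, a 9-bit page-table index, and high bits that index the page directory, as the comment above describes. A hedged sketch of that decomposition (helper names are made up; the sample address just reuses RADEON_VA_IB_OFFSET from the hunk above):

    /* Sketch of the VA split described above: 12-bit offset, 9-bit PT
     * index, remaining bits select the page directory entry. */
    #include <stdint.h>
    #include <stdio.h>

    #define VM_BLOCK_SIZE 9                         /* RADEON_VM_BLOCK_SIZE */
    #define VM_PTE_COUNT  (1u << VM_BLOCK_SIZE)     /* 512 PTEs per table */
    #define PAGE_SHIFT    12                        /* 4KB pages */

    static uint64_t pd_index(uint64_t va)  { return va >> (PAGE_SHIFT + VM_BLOCK_SIZE); }
    static uint64_t pt_index(uint64_t va)  { return (va >> PAGE_SHIFT) & (VM_PTE_COUNT - 1); }
    static uint64_t pg_offset(uint64_t va) { return va & ((1u << PAGE_SHIFT) - 1); }

    int main(void)
    {
            uint64_t va = (1ull << 20) + 0x3456;    /* sample: IB offset + 0x3456 */

            printf("pd=%llu pt=%llu off=0x%llx\n",
                   (unsigned long long)pd_index(va),
                   (unsigned long long)pt_index(va),
                   (unsigned long long)pg_offset(va));
            return 0;                               /* prints pd=0 pt=259 off=0x456 */
    }
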
struct radeon_vm {
struct list_head list;
struct list_head va;
- int id;
+ unsigned id;
unsigned last_pfn;
- u64 pt_gpu_addr;
- u64 *pt;
+ u64 pd_gpu_addr;
struct radeon_sa_bo *sa_bo;
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
-};
-
-struct radeon_vm_funcs {
- int (*init)(struct radeon_device *rdev);
- void (*fini)(struct radeon_device *rdev);
- /* cs mutex must be lock for schedule_ib */
- int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
- void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
- void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
- uint32_t (*page_flags)(struct radeon_device *rdev,
- struct radeon_vm *vm,
- uint32_t flags);
- void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
- unsigned pfn, uint64_t addr, uint32_t flags);
+ /* last flush or NULL if we still need to flush */
+ struct radeon_fence *last_flush;
};
struct radeon_vm_manager {
struct mutex lock;
struct list_head lru_vm;
- uint32_t use_bitmap;
+ struct radeon_fence *active[RADEON_NUM_VM];
struct radeon_sa_manager sa_manager;
uint32_t max_pfn;
- /* fields constant after init */
- const struct radeon_vm_funcs *funcs;
/* number of VMIDs */
unsigned nvm;
/* vram base address for page table entry */
@@ -738,7 +750,8 @@ struct si_rlc {
};
int radeon_ib_get(struct radeon_device *rdev, int ring,
- struct radeon_ib *ib, unsigned size);
+ struct radeon_ib *ib, struct radeon_vm *vm,
+ unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib);
@@ -1131,6 +1144,15 @@ struct radeon_asic {
void (*tlb_flush)(struct radeon_device *rdev);
int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
} gart;
+ struct {
+ int (*init)(struct radeon_device *rdev);
+ void (*fini)(struct radeon_device *rdev);
+
+ u32 pt_ring_index;
+ void (*set_page)(struct radeon_device *rdev, uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+ } vm;
/* ring specific callbacks */
struct {
void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -1143,6 +1165,7 @@ struct radeon_asic {
int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
+ void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
} ring[RADEON_NUM_RINGS];
/* irqs */
struct {
@@ -1157,6 +1180,10 @@ struct radeon_asic {
u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
/* wait for vblank */
void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
+ /* set backlight level */
+ void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
+ /* get backlight level */
+ u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
} display;
/* copy functions for bo handling */
struct {
@@ -1428,6 +1455,56 @@ struct r600_vram_scratch {
u64 gpu_addr;
};
+/*
+ * ACPI
+ */
+struct radeon_atif_notification_cfg {
+ bool enabled;
+ int command_code;
+};
+
+struct radeon_atif_notifications {
+ bool display_switch;
+ bool expansion_mode_change;
+ bool thermal_state;
+ bool forced_power_state;
+ bool system_power_state;
+ bool display_conf_change;
+ bool px_gfx_switch;
+ bool brightness_change;
+ bool dgpu_display_event;
+};
+
+struct radeon_atif_functions {
+ bool system_params;
+ bool sbios_requests;
+ bool select_active_disp;
+ bool lid_state;
+ bool get_tv_standard;
+ bool set_tv_standard;
+ bool get_panel_expansion_mode;
+ bool set_panel_expansion_mode;
+ bool temperature_change;
+ bool graphics_device_types;
+};
+
+struct radeon_atif {
+ struct radeon_atif_notifications notifications;
+ struct radeon_atif_functions functions;
+ struct radeon_atif_notification_cfg notification_cfg;
+ struct radeon_encoder *encoder_for_bl;
+};
+
+struct radeon_atcs_functions {
+ bool get_ext_state;
+ bool pcie_perf_req;
+ bool pcie_dev_rdy;
+ bool pcie_bus_width;
+};
+
+struct radeon_atcs {
+ struct radeon_atcs_functions functions;
+};
/*
* Core structure, functions and helpers.
@@ -1520,6 +1597,9 @@ struct radeon_device {
/* virtual memory */
struct radeon_vm_manager vm_manager;
struct mutex gpu_clock_mutex;
+ /* ACPI interface */
+ struct radeon_atif atif;
+ struct radeon_atcs atcs;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -1683,15 +1763,21 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
+#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
+#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
+#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
+#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
+#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
+#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
@@ -1759,22 +1845,30 @@ int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
-int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
-void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
+struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
+ struct radeon_vm *vm, int ring);
+void radeon_vm_fence(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_fence *fence);
+uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo);
-int radeon_vm_bo_add(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
- uint64_t offset,
- uint32_t flags);
+struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
+ struct radeon_bo *bo);
+struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo);
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va,
+ uint64_t offset,
+ uint32_t flags);
int radeon_vm_bo_rmv(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo);
+ struct radeon_bo_va *bo_va);
/* audio */
void r600_audio_update_hdmi(struct work_struct *work);
@@ -1832,12 +1926,14 @@ extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_displ
extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);
-/* radeon_acpi.c */
-#if defined(CONFIG_ACPI)
-extern int radeon_acpi_init(struct radeon_device *rdev);
-#else
-static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
-#endif
+/* radeon_acpi.c */
+#if defined(CONFIG_ACPI)
+extern int radeon_acpi_init(struct radeon_device *rdev);
+extern void radeon_acpi_fini(struct radeon_device *rdev);
+#else
+static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
+static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
+#endif
#include "radeon_object.h"
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 3516a6081dcf..c3976eb341bf 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -1,35 +1,120 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/slab.h>
+#include <linux/power_supply.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>
+#include <acpi/video.h>
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "drm_crtc_helper.h"
#include "radeon.h"
+#include "radeon_acpi.h"
+#include "atom.h"
#include <linux/vga_switcheroo.h>
+#define ACPI_AC_CLASS "ac_adapter"
+
+extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev);
+
+struct atif_verify_interface {
+ u16 size; /* structure size in bytes (includes size field) */
+ u16 version; /* version */
+ u32 notification_mask; /* supported notifications mask */
+ u32 function_bits; /* supported functions bit vector */
+} __packed;
+
+struct atif_system_params {
+ u16 size; /* structure size in bytes (includes size field) */
+ u32 valid_mask; /* valid flags mask */
+ u32 flags; /* flags */
+ u8 command_code; /* notify command code */
+} __packed;
+
+struct atif_sbios_requests {
+ u16 size; /* structure size in bytes (includes size field) */
+ u32 pending; /* pending sbios requests */
+ u8 panel_exp_mode; /* panel expansion mode */
+ u8 thermal_gfx; /* thermal state: target gfx controller */
+ u8 thermal_state; /* thermal state: state id (0: exit state, non-0: state) */
+ u8 forced_power_gfx; /* forced power state: target gfx controller */
+ u8 forced_power_state; /* forced power state: state id */
+ u8 system_power_src; /* system power source */
+ u8 backlight_level; /* panel backlight level (0-255) */
+} __packed;
+
+#define ATIF_NOTIFY_MASK 0x3
+#define ATIF_NOTIFY_NONE 0
+#define ATIF_NOTIFY_81 1
+#define ATIF_NOTIFY_N 2
+
+struct atcs_verify_interface {
+ u16 size; /* structure size in bytes (includes size field) */
+ u16 version; /* version */
+ u32 function_bits; /* supported functions bit vector */
+} __packed;
+
/* Call the ATIF method
+ */
+/**
+ * radeon_atif_call - call an ATIF method
*
- * Note: currently we discard the output
+ * @handle: acpi handle
+ * @function: the ATIF function to execute
+ * @params: ATIF function params
+ *
+ * Executes the requested ATIF function (all asics).
+ * Returns a pointer to the acpi output buffer.
*/
-static int radeon_atif_call(acpi_handle handle)
+static union acpi_object *radeon_atif_call(acpi_handle handle, int function,
+ struct acpi_buffer *params)
{
acpi_status status;
union acpi_object atif_arg_elements[2];
struct acpi_object_list atif_arg;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
atif_arg.count = 2;
atif_arg.pointer = &atif_arg_elements[0];
atif_arg_elements[0].type = ACPI_TYPE_INTEGER;
- atif_arg_elements[0].integer.value = 0;
- atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
- atif_arg_elements[1].integer.value = 0;
+ atif_arg_elements[0].integer.value = function;
+
+ if (params) {
+ atif_arg_elements[1].type = ACPI_TYPE_BUFFER;
+ atif_arg_elements[1].buffer.length = params->length;
+ atif_arg_elements[1].buffer.pointer = params->pointer;
+ } else {
+ /* We need a second fake parameter */
+ atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ atif_arg_elements[1].integer.value = 0;
+ }
status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
@@ -38,17 +123,434 @@ static int radeon_atif_call(acpi_handle handle)
DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
acpi_format_exception(status));
kfree(buffer.pointer);
- return 1;
+ return NULL;
}
- kfree(buffer.pointer);
- return 0;
+ return buffer.pointer;
+}
+
+/**
+ * radeon_atif_parse_notification - parse supported notifications
+ *
+ * @n: supported notifications struct
+ * @mask: supported notifications mask from ATIF
+ *
+ * Use the supported notifications mask from ATIF function
+ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications
+ * are supported (all asics).
+ */
+static void radeon_atif_parse_notification(struct radeon_atif_notifications *n, u32 mask)
+{
+ n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
+ n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
+ n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
+ n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
+ n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
+ n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
+ n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
+ n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
+ n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
+}
+
+/**
+ * radeon_atif_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATIF
+ *
+ * Use the supported functions mask from ATIF function
+ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atif_parse_functions(struct radeon_atif_functions *f, u32 mask)
+{
+ f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
+ f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
+ f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
+ f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
+ f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
+ f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
+ f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
+ f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
+ f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
+ f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
+}
+
+/**
+ * radeon_atif_verify_interface - verify ATIF
+ *
+ * @handle: acpi handle
+ * @atif: radeon atif struct
+ *
+ * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function
+ * to initialize ATIF and determine what features are supported
+ * (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int radeon_atif_verify_interface(acpi_handle handle,
+ struct radeon_atif *atif)
+{
+ union acpi_object *info;
+ struct atif_verify_interface output;
+ size_t size;
+ int err = 0;
+
+ info = radeon_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+ if (!info)
+ return -EIO;
+
+ memset(&output, 0, sizeof(output));
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 12) {
+		DRM_INFO("ATIF buffer is too small: %zu\n", size);
+ err = -EINVAL;
+ goto out;
+ }
+ size = min(sizeof(output), size);
+
+ memcpy(&output, info->buffer.pointer, size);
+
+ /* TODO: check version? */
+ DRM_DEBUG_DRIVER("ATIF version %u\n", output.version);
+
+ radeon_atif_parse_notification(&atif->notifications, output.notification_mask);
+ radeon_atif_parse_functions(&atif->functions, output.function_bits);
+
+out:
+ kfree(info);
+ return err;
+}
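
radeon_atif_verify_interface() shows the parsing pattern used for every ATIF/ATCS output buffer: read the leading WORD size, reject it if it cannot hold the fields we need, then copy min(reported size, our struct size) so neither a short firmware buffer nor a newer, larger one can overrun. A hedged standalone sketch of the same pattern (the struct and names are illustrative, and little-endian firmware data is assumed, as the driver assumes):

    /* Sketch of the size-clamped copy; verify_out mirrors the
     * atif_verify_interface layout but is a local illustration. */
    #include <stdint.h>
    #include <string.h>

    struct verify_out {
            uint16_t size;              /* includes this field */
            uint16_t version;
            uint32_t notification_mask;
            uint32_t function_bits;
    } __attribute__((packed));

    static int parse_verify(const uint8_t *buf, size_t buf_len,
                            struct verify_out *out)
    {
            uint16_t reported;
            size_t n;

            if (buf_len < sizeof(reported))
                    return -1;
            memcpy(&reported, buf, sizeof(reported));
            if (reported < 12)          /* must carry all four fields */
                    return -1;

            n = reported;
            if (n > buf_len)
                    n = buf_len;        /* never read past the buffer */
            if (n > sizeof(*out))
                    n = sizeof(*out);   /* never write past our struct */

            memset(out, 0, sizeof(*out));
            memcpy(out, buf, n);
            return 0;
    }
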
+
+/**
+ * radeon_atif_get_notification_params - determine notify configuration
+ *
+ * @handle: acpi handle
+ * @n: atif notification configuration struct
+ *
+ * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function
+ * to determine if a notifier is used and if so which one
+ * (all asics). This is either Notify(VGA, 0x81) or Notify(VGA, n)
+ * where n is specified in the result if a notifier is used.
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atif_get_notification_params(acpi_handle handle,
+ struct radeon_atif_notification_cfg *n)
+{
+ union acpi_object *info;
+ struct atif_system_params params;
+ size_t size;
+ int err = 0;
+
+ info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+ if (!info) {
+ err = -EIO;
+ goto out;
+ }
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 10) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ memset(&params, 0, sizeof(params));
+ size = min(sizeof(params), size);
+ memcpy(&params, info->buffer.pointer, size);
+
+ DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n",
+			params.valid_mask, params.flags);
+ params.flags = params.flags & params.valid_mask;
+
+ if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) {
+ n->enabled = false;
+ n->command_code = 0;
+ } else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) {
+ n->enabled = true;
+ n->command_code = 0x81;
+ } else {
+ if (size < 11) {
+ err = -EINVAL;
+ goto out;
+ }
+ n->enabled = true;
+ n->command_code = params.command_code;
+ }
+
+out:
+ DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n",
+ (n->enabled ? "enabled" : "disabled"),
+ n->command_code);
+ kfree(info);
+ return err;
+}
+
+/**
+ * radeon_atif_get_sbios_requests - get requested sbios event
+ *
+ * @handle: acpi handle
+ * @req: atif sbios request struct
+ *
+ * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function
+ * to determine what requests the sbios is making to the driver
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atif_get_sbios_requests(acpi_handle handle,
+ struct atif_sbios_requests *req)
+{
+ union acpi_object *info;
+ size_t size;
+ int count = 0;
+
+ info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+ if (!info)
+ return -EIO;
+
+ size = *(u16 *)info->buffer.pointer;
+ if (size < 0xd) {
+ count = -EINVAL;
+ goto out;
+ }
+ memset(req, 0, sizeof(*req));
+
+ size = min(sizeof(*req), size);
+ memcpy(req, info->buffer.pointer, size);
+ DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending);
+
+ count = hweight32(req->pending);
+
+out:
+ kfree(info);
+ return count;
+}
+
+/**
+ * radeon_atif_handler - handle ATIF notify requests
+ *
+ * @rdev: radeon_device pointer
+ * @event: atif sbios request struct
+ *
+ * Checks the acpi event and if it matches an atif event,
+ * handles it.
+ * Returns NOTIFY code
+ */
+int radeon_atif_handler(struct radeon_device *rdev,
+ struct acpi_bus_event *event)
+{
+ struct radeon_atif *atif = &rdev->atif;
+ struct atif_sbios_requests req;
+ acpi_handle handle;
+ int count;
+
+ DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
+ event->device_class, event->type);
+
+ if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+ return NOTIFY_DONE;
+
+ if (!atif->notification_cfg.enabled ||
+ event->type != atif->notification_cfg.command_code)
+ /* Not our event */
+ return NOTIFY_DONE;
+
+ /* Check pending SBIOS requests */
+ handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ count = radeon_atif_get_sbios_requests(handle, &req);
+
+ if (count <= 0)
+ return NOTIFY_DONE;
+
+ DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+
+ if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
+ struct radeon_encoder *enc = atif->encoder_for_bl;
+
+ if (enc) {
+ DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+ req.backlight_level);
+
+ radeon_set_backlight_level(rdev, enc, req.backlight_level);
+
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *dig = enc->enc_priv;
+ backlight_force_update(dig->bl_dev,
+ BACKLIGHT_UPDATE_HOTKEY);
+ } else {
+ struct radeon_encoder_lvds *dig = enc->enc_priv;
+ backlight_force_update(dig->bl_dev,
+ BACKLIGHT_UPDATE_HOTKEY);
+ }
+ }
+ }
+ /* TODO: check other events */
+
+ /* We've handled the event, stop the notifier chain. The ACPI interface
+ * overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
+ * userspace if the event was generated only to signal a SBIOS
+ * request.
+ */
+ return NOTIFY_BAD;
+}
+
+/* Call the ATCS method
+ */
+/**
+ * radeon_atcs_call - call an ATCS method
+ *
+ * @handle: acpi handle
+ * @function: the ATCS function to execute
+ * @params: ATCS function params
+ *
+ * Executes the requested ATCS function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *radeon_atcs_call(acpi_handle handle, int function,
+ struct acpi_buffer *params)
+{
+ acpi_status status;
+ union acpi_object atcs_arg_elements[2];
+ struct acpi_object_list atcs_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ atcs_arg.count = 2;
+ atcs_arg.pointer = &atcs_arg_elements[0];
+
+ atcs_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atcs_arg_elements[0].integer.value = function;
+
+ if (params) {
+ atcs_arg_elements[1].type = ACPI_TYPE_BUFFER;
+ atcs_arg_elements[1].buffer.length = params->length;
+ atcs_arg_elements[1].buffer.pointer = params->pointer;
+ } else {
+ /* We need a second fake parameter */
+ atcs_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ atcs_arg_elements[1].integer.value = 0;
+ }
+
+ status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer);
+
+	/* Fail only if calling the method fails and ATCS is supported */
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n",
+ acpi_format_exception(status));
+ kfree(buffer.pointer);
+ return NULL;
+ }
+
+ return buffer.pointer;
+}
+
+/**
+ * radeon_atcs_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATCS
+ *
+ * Use the supported functions mask from ATCS function
+ * ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atcs_parse_functions(struct radeon_atcs_functions *f, u32 mask)
+{
+ f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED;
+ f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
+ f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
+ f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
+}
+
+/**
+ * radeon_atcs_verify_interface - verify ATCS
+ *
+ * @handle: acpi handle
+ * @atcs: radeon atcs struct
+ *
+ * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
+ * to initialize ATCS and determine what features are supported
+ * (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int radeon_atcs_verify_interface(acpi_handle handle,
+ struct radeon_atcs *atcs)
+{
+ union acpi_object *info;
+ struct atcs_verify_interface output;
+ size_t size;
+ int err = 0;
+
+ info = radeon_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
+ if (!info)
+ return -EIO;
+
+ memset(&output, 0, sizeof(output));
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 8) {
+		DRM_INFO("ATCS buffer is too small: %zu\n", size);
+ err = -EINVAL;
+ goto out;
+ }
+ size = min(sizeof(output), size);
+
+ memcpy(&output, info->buffer.pointer, size);
+
+ /* TODO: check version? */
+ DRM_DEBUG_DRIVER("ATCS version %u\n", output.version);
+
+ radeon_atcs_parse_functions(&atcs->functions, output.function_bits);
+
+out:
+ kfree(info);
+ return err;
+}
+
+/**
+ * radeon_acpi_event - handle notify events
+ *
+ * @nb: notifier block
+ * @val: val
+ * @data: acpi event
+ *
+ * Calls relevant radeon functions in response to various
+ * acpi events.
+ * Returns NOTIFY code
+ */
+static int radeon_acpi_event(struct notifier_block *nb,
+ unsigned long val,
+ void *data)
+{
+ struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
+ struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
+
+ if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
+ if (power_supply_is_system_supplied() > 0)
+ DRM_DEBUG_DRIVER("pm: AC\n");
+ else
+ DRM_DEBUG_DRIVER("pm: DC\n");
+
+ radeon_pm_acpi_event_handler(rdev);
+ }
+
+ /* Check for pending SBIOS requests */
+ return radeon_atif_handler(rdev, entry);
}
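
radeon_acpi_event() is wired into the ACPI notifier chain in radeon_acpi_init() below; its return value tells the chain whether to keep walking (NOTIFY_DONE) or stop (NOTIFY_BAD, which radeon_atif_handler() uses to swallow events that only signal SBIOS requests). A toy model of that callback-chain contract, with made-up names and the NOTIFY_* values reduced to their walk/stop meaning:

    /* Toy model of the notifier-chain contract; the chain and the
     * NOTIFY_* values are simplified stand-ins for the kernel's. */
    #include <stdio.h>

    #define NOTIFY_DONE 0   /* not interested, keep walking the chain */
    #define NOTIFY_BAD  2   /* handled, stop the chain */

    struct notifier_block {
            int (*notifier_call)(struct notifier_block *nb, void *data);
            struct notifier_block *next;
    };

    static int call_chain(struct notifier_block *head, void *data)
    {
            for (; head; head = head->next)
                    if (head->notifier_call(head, data) == NOTIFY_BAD)
                            return NOTIFY_BAD;      /* a handler consumed it */
            return NOTIFY_DONE;
    }

    static int my_event(struct notifier_block *nb, void *data)
    {
            (void)nb;
            printf("event: %s\n", (const char *)data);
            return NOTIFY_BAD;                      /* swallow the event */
    }

    int main(void)
    {
            struct notifier_block nb = { my_event, 0 };

            return call_chain(&nb, "ac_adapter") == NOTIFY_BAD ? 0 : 1;
    }
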
/* Call all ACPI methods here */
+/**
+ * radeon_acpi_init - init driver acpi support
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Verifies the AMD ACPI interfaces and registers with the acpi
+ * notifier chain (all asics).
+ * Returns 0 on success, error on failure.
+ */
int radeon_acpi_init(struct radeon_device *rdev)
{
acpi_handle handle;
+ struct radeon_atif *atif = &rdev->atif;
+ struct radeon_atcs *atcs = &rdev->atcs;
int ret;
/* Get the device handle */
@@ -58,11 +560,90 @@ int radeon_acpi_init(struct radeon_device *rdev)
if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
return 0;
+ /* Call the ATCS method */
+ ret = radeon_atcs_verify_interface(handle, atcs);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
+ }
+
/* Call the ATIF method */
- ret = radeon_atif_call(handle);
- if (ret)
- return ret;
+ ret = radeon_atif_verify_interface(handle, atif);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
+ goto out;
+ }
+
+ if (atif->notifications.brightness_change) {
+ struct drm_encoder *tmp;
+ struct radeon_encoder *target = NULL;
+
+ /* Find the encoder controlling the brightness */
+ list_for_each_entry(tmp, &rdev->ddev->mode_config.encoder_list,
+ head) {
+ struct radeon_encoder *enc = to_radeon_encoder(tmp);
+
+ if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+ enc->enc_priv) {
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *dig = enc->enc_priv;
+ if (dig->bl_dev) {
+ target = enc;
+ break;
+ }
+ } else {
+ struct radeon_encoder_lvds *dig = enc->enc_priv;
+ if (dig->bl_dev) {
+ target = enc;
+ break;
+ }
+ }
+ }
+ }
+
+ atif->encoder_for_bl = target;
+ if (!target) {
+ /* Brightness change notification is enabled, but we
+ * didn't find a backlight controller, this should
+			 * didn't find a backlight controller; this should
+ */
+ DRM_ERROR("Cannot find a backlight controller\n");
+ }
+ }
- return 0;
+ if (atif->functions.sbios_requests && !atif->functions.system_params) {
+		/* XXX check this workaround, if sbios request function is
+ * present we have to see how it's configured in the system
+ * params
+ */
+ atif->functions.system_params = true;
+ }
+
+ if (atif->functions.system_params) {
+ ret = radeon_atif_get_notification_params(handle,
+ &atif->notification_cfg);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
+ ret);
+ /* Disable notification */
+ atif->notification_cfg.enabled = false;
+ }
+ }
+
+out:
+ rdev->acpi_nb.notifier_call = radeon_acpi_event;
+ register_acpi_notifier(&rdev->acpi_nb);
+
+ return ret;
}
+/**
+ * radeon_acpi_fini - tear down driver acpi support
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Unregisters with the acpi notifier chain (all asics).
+ */
+void radeon_acpi_fini(struct radeon_device *rdev)
+{
+ unregister_acpi_notifier(&rdev->acpi_nb);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.h b/drivers/gpu/drm/radeon/radeon_acpi.h
new file mode 100644
index 000000000000..be4af76f213d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_acpi.h
@@ -0,0 +1,445 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef RADEON_ACPI_H
+#define RADEON_ACPI_H
+
+struct radeon_device;
+struct acpi_bus_event;
+
+int radeon_atif_handler(struct radeon_device *rdev,
+ struct acpi_bus_event *event);
+
+/* AMD hw uses four ACPI control methods:
+ * 1. ATIF
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATIF provides an entry point for the gfx driver to interact with the sbios.
+ * The AMD ACPI notification mechanism uses Notify (VGA, 0x81) or a custom
+ * notification. Which notification is used is indicated by the ATIF Control
+ * Method GET_SYSTEM_PARAMETERS. When the driver receives Notify (VGA, 0x81) or
+ * a custom notification it invokes ATIF Control Method GET_SYSTEM_BIOS_REQUESTS
+ * to identify pending System BIOS requests and associated parameters. For
+ * example, if one of the pending requests is DISPLAY_SWITCH_REQUEST, the driver
+ * will perform display device detection and invoke ATIF Control Method
+ * SELECT_ACTIVE_DISPLAYS.
+ *
+ * 2. ATPX
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATPX methods are used on PowerXpress systems to handle mux switching and
+ * discrete GPU power control.
+ *
+ * 3. ATRM
+ * ARG0: (ACPI_INTEGER) offset of vbios rom data
+ * ARG1: (ACPI_BUFFER) size of the buffer to fill (up to 4K).
+ * OUTPUT: (ACPI_BUFFER) output buffer
+ * ATRM provides an interface to access the discrete GPU vbios image on
+ * PowerXpress systems with multiple GPUs.
+ *
+ * 4. ATCS
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATCS provides an interface to AMD chipset specific functionality.
+ *
+ */
+/* ATIF */
+#define ATIF_FUNCTION_VERIFY_INTERFACE 0x0
+/* ARG0: ATIF_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - version
+ * DWORD - supported notifications mask
+ * DWORD - supported functions bit vector
+ */
+/* Notifications mask */
+# define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED (1 << 0)
+# define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED (1 << 1)
+# define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED (1 << 2)
+# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED (1 << 3)
+# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED (1 << 4)
+# define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED (1 << 5)
+# define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED (1 << 6)
+# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED (1 << 7)
+# define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED (1 << 8)
+/* supported functions vector */
+# define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED (1 << 0)
+# define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED (1 << 1)
+# define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED (1 << 2)
+# define ATIF_GET_LID_STATE_SUPPORTED (1 << 3)
+# define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED (1 << 4)
+# define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED (1 << 5)
+# define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED (1 << 6)
+# define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED (1 << 7)
+# define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED (1 << 12)
+# define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED (1 << 14)
+#define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS 0x1
+/* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ *
+ * OR
+ *
+ * WORD - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ * BYTE - notify command code
+ *
+ * flags
+ * bits 1:0:
+ * 0 - Notify(VGA, 0x81) is not used for notification
+ * 1 - Notify(VGA, 0x81) is used for notification
+ * 2 - Notify(VGA, n) is used for notification where
+ * n (0xd0-0xd9) is specified in notify command code.
+ * bit 2:
+ *      1 - lid changes not reported through int10
+ */
+#define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS 0x2
+/* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * DWORD - pending sbios requests
+ * BYTE - panel expansion mode
+ * BYTE - thermal state: target gfx controller
+ * BYTE - thermal state: state id (0: exit state, non-0: state)
+ * BYTE - forced power state: target gfx controller
+ * BYTE - forced power state: state id
+ * BYTE - system power source
+ * BYTE - panel backlight level (0-255)
+ */
+/* pending sbios requests */
+# define ATIF_DISPLAY_SWITCH_REQUEST (1 << 0)
+# define ATIF_EXPANSION_MODE_CHANGE_REQUEST (1 << 1)
+# define ATIF_THERMAL_STATE_CHANGE_REQUEST (1 << 2)
+# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST (1 << 3)
+# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST (1 << 4)
+# define ATIF_DISPLAY_CONF_CHANGE_REQUEST (1 << 5)
+# define ATIF_PX_GFX_SWITCH_REQUEST (1 << 6)
+# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST (1 << 7)
+# define ATIF_DGPU_DISPLAY_EVENT (1 << 8)
+/* panel expansion mode */
+# define ATIF_PANEL_EXPANSION_DISABLE 0
+# define ATIF_PANEL_EXPANSION_FULL 1
+# define ATIF_PANEL_EXPANSION_ASPECT 2
+/* target gfx controller */
+# define ATIF_TARGET_GFX_SINGLE 0
+# define ATIF_TARGET_GFX_PX_IGPU 1
+# define ATIF_TARGET_GFX_PX_DGPU 2
+/* system power source */
+# define ATIF_POWER_SOURCE_AC 1
+# define ATIF_POWER_SOURCE_DC 2
+# define ATIF_POWER_SOURCE_RESTRICTED_AC_1 3
+# define ATIF_POWER_SOURCE_RESTRICTED_AC_2 4
+#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS 0x3
+/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - selected displays
+ * WORD - connected displays
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - selected displays
+ */
+# define ATIF_LCD1 (1 << 0)
+# define ATIF_CRT1 (1 << 1)
+# define ATIF_TV (1 << 2)
+# define ATIF_DFP1 (1 << 3)
+# define ATIF_CRT2 (1 << 4)
+# define ATIF_LCD2 (1 << 5)
+# define ATIF_DFP2 (1 << 7)
+# define ATIF_CV (1 << 8)
+# define ATIF_DFP3 (1 << 9)
+# define ATIF_DFP4 (1 << 10)
+# define ATIF_DFP5 (1 << 11)
+# define ATIF_DFP6 (1 << 12)
+#define ATIF_FUNCTION_GET_LID_STATE 0x4
+/* ARG0: ATIF_FUNCTION_GET_LID_STATE
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - lid state (0: open, 1: closed)
+ *
+ * GET_LID_STATE only works at boot and resume, for general lid
+ * status, use the kernel provided status
+ */
+#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS 0x5
+/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - 0
+ * BYTE - TV standard
+ */
+# define ATIF_TV_STD_NTSC 0
+# define ATIF_TV_STD_PAL 1
+# define ATIF_TV_STD_PALM 2
+# define ATIF_TV_STD_PAL60 3
+# define ATIF_TV_STD_NTSCJ 4
+# define ATIF_TV_STD_PALCN 5
+# define ATIF_TV_STD_PALN 6
+# define ATIF_TV_STD_SCART_RGB 9
+#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS 0x6
+/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - 0
+ * BYTE - TV standard
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS 0x7
+/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - panel expansion mode
+ */
+#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS 0x8
+/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - panel expansion mode
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION 0xD
+/* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - gfx controller id
+ * BYTE  - current temperature (degrees Celsius)
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES 0xF
+/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
+ * ARG1: none
+ * OUTPUT:
+ * WORD - number of gfx devices
+ * WORD - device structure size in bytes (excludes device size field)
+ * DWORD - flags \
+ * WORD - bus number } repeated structure
+ * WORD - device number /
+ */
+/* flags */
+# define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE (1 << 0)
+# define ATIF_XGP_PORT (1 << 1)
+# define ATIF_VGA_ENABLED_GRAPHICS_DEVICE (1 << 2)
+# define ATIF_XGP_PORT_IN_DOCK (1 << 3)
+
+/* ATPX */
+#define ATPX_FUNCTION_VERIFY_INTERFACE 0x0
+/* ARG0: ATPX_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - version
+ * DWORD - supported functions bit vector
+ */
+/* supported functions vector */
+# define ATPX_GET_PX_PARAMETERS_SUPPORTED (1 << 0)
+# define ATPX_POWER_CONTROL_SUPPORTED (1 << 1)
+# define ATPX_DISPLAY_MUX_CONTROL_SUPPORTED (1 << 2)
+# define ATPX_I2C_MUX_CONTROL_SUPPORTED (1 << 3)
+# define ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED (1 << 4)
+# define ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED (1 << 5)
+# define ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED (1 << 7)
+# define ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED (1 << 8)
+#define ATPX_FUNCTION_GET_PX_PARAMETERS 0x1
+/* ARG0: ATPX_FUNCTION_GET_PX_PARAMETERS
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ */
+/* flags */
+# define ATPX_LVDS_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 0)
+# define ATPX_CRT1_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 1)
+# define ATPX_DVI1_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 2)
+# define ATPX_CRT1_RGB_SIGNAL_MUXED (1 << 3)
+# define ATPX_TV_SIGNAL_MUXED (1 << 4)
+# define ATPX_DFP_SIGNAL_MUXED (1 << 5)
+# define ATPX_SEPARATE_MUX_FOR_I2C (1 << 6)
+# define ATPX_DYNAMIC_PX_SUPPORTED (1 << 7)
+# define ATPX_ACF_NOT_SUPPORTED (1 << 8)
+# define ATPX_FIXED_NOT_SUPPORTED (1 << 9)
+# define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED (1 << 10)
+# define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS (1 << 11)
+#define ATPX_FUNCTION_POWER_CONTROL 0x2
+/* ARG0: ATPX_FUNCTION_POWER_CONTROL
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - dGPU power state (0: power off, 1: power on)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_DISPLAY_MUX_CONTROL 0x3
+/* ARG0: ATPX_FUNCTION_DISPLAY_MUX_CONTROL
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - display mux control (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+# define ATPX_INTEGRATED_GPU 0
+# define ATPX_DISCRETE_GPU 1
+#define ATPX_FUNCTION_I2C_MUX_CONTROL 0x4
+/* ARG0: ATPX_FUNCTION_I2C_MUX_CONTROL
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - i2c/aux/hpd mux control (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION 0x5
+/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - target gpu (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION 0x6
+/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - target gpu (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING 0x8
+/* ARG0: ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING
+ * ARG1: none
+ * OUTPUT:
+ * WORD - number of display connectors
+ * WORD - connector structure size in bytes (excludes connector size field)
+ * BYTE - flags \
+ * BYTE - ATIF display vector bit position } repeated
+ * BYTE - adapter id (0: iGPU, 1-n: dGPU ordered by pcie bus number) } structure
+ * WORD - connector ACPI id /
+ */
+/* flags */
+# define ATPX_DISPLAY_OUTPUT_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 0)
+# define ATPX_DISPLAY_HPD_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 1)
+# define ATPX_DISPLAY_I2C_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 2)
+#define ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS 0x9
+/* ARG0: ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS
+ * ARG1: none
+ * OUTPUT:
+ * WORD - number of HPD/DDC ports
+ * WORD - port structure size in bytes (excludes port size field)
+ * BYTE - ATIF display vector bit position \
+ * BYTE - hpd id } repeated structure
+ * BYTE - ddc id /
+ *
+ * available on A+A systems only
+ */
+/* hpd id */
+# define ATPX_HPD_NONE 0
+# define ATPX_HPD1 1
+# define ATPX_HPD2 2
+# define ATPX_HPD3 3
+# define ATPX_HPD4 4
+# define ATPX_HPD5 5
+# define ATPX_HPD6 6
+/* ddc id */
+# define ATPX_DDC_NONE 0
+# define ATPX_DDC1 1
+# define ATPX_DDC2 2
+# define ATPX_DDC3 3
+# define ATPX_DDC4 4
+# define ATPX_DDC5 5
+# define ATPX_DDC6 6
+# define ATPX_DDC7 7
+# define ATPX_DDC8 8
+
+/* ATCS */
+#define ATCS_FUNCTION_VERIFY_INTERFACE 0x0
+/* ARG0: ATCS_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - version
+ * DWORD - supported functions bit vector
+ */
+/* supported functions vector */
+# define ATCS_GET_EXTERNAL_STATE_SUPPORTED (1 << 0)
+# define ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED (1 << 1)
+# define ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED (1 << 2)
+# define ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED (1 << 3)
+#define ATCS_FUNCTION_GET_EXTERNAL_STATE 0x1
+/* ARG0: ATCS_FUNCTION_GET_EXTERNAL_STATE
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags (0: undocked, 1: docked)
+ */
+/* flags */
+# define ATCS_DOCKED (1 << 0)
+#define ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST 0x2
+/* ARG0: ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
+ * WORD - valid flags mask
+ * WORD - flags
+ * BYTE - request type
+ * BYTE - performance request
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - return value
+ */
+/* flags */
+# define ATCS_ADVERTISE_CAPS (1 << 0)
+# define ATCS_WAIT_FOR_COMPLETION (1 << 1)
+/* request type */
+# define ATCS_PCIE_LINK_SPEED 1
+/* performance request */
+# define ATCS_REMOVE 0
+# define ATCS_FORCE_LOW_POWER 1
+# define ATCS_PERF_LEVEL_1 2 /* PCIE Gen 1 */
+# define ATCS_PERF_LEVEL_2 3 /* PCIE Gen 2 */
+# define ATCS_PERF_LEVEL_3 4 /* PCIE Gen 3 */
+/* return value */
+# define ATCS_REQUEST_REFUSED 1
+# define ATCS_REQUEST_COMPLETE 2
+# define ATCS_REQUEST_IN_PROGRESS 3
+#define ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION 0x3
+/* ARG0: ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION
+ * ARG1: none
+ * OUTPUT: none
+ */
+#define ATCS_FUNCTION_SET_PCIE_BUS_WIDTH 0x4
+/* ARG0: ATCS_FUNCTION_SET_PCIE_BUS_WIDTH
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
+ * BYTE - number of active lanes
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - number of active lanes
+ */
+
+#endif
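The ATPX verify-interface layout documented above (WORD size, WORD version,
DWORD supported-functions vector) is a packed little-endian buffer. A minimal
standalone sketch of decoding it, assuming a little-endian host the way the
driver's memcpy does; the struct name and sample bytes are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct verify_out {
	uint16_t size;          /* structure size in bytes (includes size field) */
	uint16_t version;       /* interface version */
	uint32_t function_bits; /* supported functions bit vector */
} __attribute__((packed));

int main(void)
{
	/* hypothetical raw ATPX output: size 8, version 1, with
	 * POWER_CONTROL (bit 1) and DISPLAY_MUX_CONTROL (bit 2) set */
	uint8_t raw[8] = { 0x08, 0x00, 0x01, 0x00, 0x06, 0x00, 0x00, 0x00 };
	struct verify_out out;

	memcpy(&out, raw, sizeof(out));
	printf("version %u, functions 0x%08x\n",
	       out.version, (unsigned)out.function_bits);
	return 0;
}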
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 973417c4b014..654520b95ab7 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -198,6 +198,8 @@ static struct radeon_asic r100_asic = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
+ .set_backlight_level = &radeon_legacy_set_backlight_level,
+ .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
@@ -272,6 +274,8 @@ static struct radeon_asic r200_asic = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
+ .set_backlight_level = &radeon_legacy_set_backlight_level,
+ .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
@@ -346,6 +350,8 @@ static struct radeon_asic r300_asic = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
+ .set_backlight_level = &radeon_legacy_set_backlight_level,
+ .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
@@ -420,6 +426,8 @@ static struct radeon_asic r300_asic_pcie = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
+ .set_backlight_level = &radeon_legacy_set_backlight_level,
+ .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
@@ -494,6 +502,8 @@ static struct radeon_asic r420_asic = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
@@ -568,6 +578,8 @@ static struct radeon_asic rs400_asic = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
+ .set_backlight_level = &radeon_legacy_set_backlight_level,
+ .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
@@ -642,6 +654,8 @@ static struct radeon_asic rs600_asic = {
.bandwidth_update = &rs600_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
@@ -716,6 +730,8 @@ static struct radeon_asic rs690_asic = {
.get_vblank_counter = &rs600_get_vblank_counter,
.bandwidth_update = &rs690_bandwidth_update,
.wait_for_vblank = &avivo_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
@@ -790,6 +806,8 @@ static struct radeon_asic rv515_asic = {
.get_vblank_counter = &rs600_get_vblank_counter,
.bandwidth_update = &rv515_bandwidth_update,
.wait_for_vblank = &avivo_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
@@ -864,6 +882,8 @@ static struct radeon_asic r520_asic = {
.bandwidth_update = &rv515_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
@@ -937,6 +957,8 @@ static struct radeon_asic r600_asic = {
.bandwidth_update = &rv515_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1010,6 +1032,8 @@ static struct radeon_asic rs780_asic = {
.bandwidth_update = &rs690_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1083,6 +1107,8 @@ static struct radeon_asic rv770_asic = {
.bandwidth_update = &rv515_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1156,6 +1182,8 @@ static struct radeon_asic evergreen_asic = {
.bandwidth_update = &evergreen_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1229,6 +1257,8 @@ static struct radeon_asic sumo_asic = {
.bandwidth_update = &evergreen_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1302,6 +1332,8 @@ static struct radeon_asic btc_asic = {
.bandwidth_update = &evergreen_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1325,7 +1357,7 @@ static struct radeon_asic btc_asic = {
.misc = &evergreen_pm_misc,
.prepare = &evergreen_pm_prepare,
.finish = &evergreen_pm_finish,
- .init_profile = &r600_pm_init_profile,
+ .init_profile = &btc_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -1342,16 +1374,6 @@ static struct radeon_asic btc_asic = {
},
};
-static const struct radeon_vm_funcs cayman_vm_funcs = {
- .init = &cayman_vm_init,
- .fini = &cayman_vm_fini,
- .bind = &cayman_vm_bind,
- .unbind = &cayman_vm_unbind,
- .tlb_flush = &cayman_vm_tlb_flush,
- .page_flags = &cayman_vm_page_flags,
- .set_page = &cayman_vm_set_page,
-};
-
static struct radeon_asic cayman_asic = {
.init = &cayman_init,
.fini = &cayman_fini,
@@ -1366,6 +1388,12 @@ static struct radeon_asic cayman_asic = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
+ .vm = {
+ .init = &cayman_vm_init,
+ .fini = &cayman_vm_fini,
+ .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .set_page = &cayman_vm_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1376,6 +1404,7 @@ static struct radeon_asic cayman_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
+ .vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1386,6 +1415,7 @@ static struct radeon_asic cayman_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
+ .vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1396,6 +1426,7 @@ static struct radeon_asic cayman_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
+ .vm_flush = &cayman_vm_flush,
}
},
.irq = {
@@ -1406,6 +1437,8 @@ static struct radeon_asic cayman_asic = {
.bandwidth_update = &evergreen_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1429,7 +1462,7 @@ static struct radeon_asic cayman_asic = {
.misc = &evergreen_pm_misc,
.prepare = &evergreen_pm_prepare,
.finish = &evergreen_pm_finish,
- .init_profile = &r600_pm_init_profile,
+ .init_profile = &btc_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -1460,6 +1493,12 @@ static struct radeon_asic trinity_asic = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
+ .vm = {
+ .init = &cayman_vm_init,
+ .fini = &cayman_vm_fini,
+ .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .set_page = &cayman_vm_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1470,6 +1509,7 @@ static struct radeon_asic trinity_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
+ .vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1480,6 +1520,7 @@ static struct radeon_asic trinity_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
+ .vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1490,6 +1531,7 @@ static struct radeon_asic trinity_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
+ .vm_flush = &cayman_vm_flush,
}
},
.irq = {
@@ -1500,6 +1542,8 @@ static struct radeon_asic trinity_asic = {
.bandwidth_update = &dce6_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1540,16 +1584,6 @@ static struct radeon_asic trinity_asic = {
},
};
-static const struct radeon_vm_funcs si_vm_funcs = {
- .init = &si_vm_init,
- .fini = &si_vm_fini,
- .bind = &si_vm_bind,
- .unbind = &si_vm_unbind,
- .tlb_flush = &si_vm_tlb_flush,
- .page_flags = &cayman_vm_page_flags,
- .set_page = &cayman_vm_set_page,
-};
-
static struct radeon_asic si_asic = {
.init = &si_init,
.fini = &si_fini,
@@ -1564,6 +1598,12 @@ static struct radeon_asic si_asic = {
.tlb_flush = &si_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
+ .vm = {
+ .init = &si_vm_init,
+ .fini = &si_vm_fini,
+ .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .set_page = &si_vm_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &si_ring_ib_execute,
@@ -1574,6 +1614,7 @@ static struct radeon_asic si_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &si_gpu_is_lockup,
+ .vm_flush = &si_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &si_ring_ib_execute,
@@ -1584,6 +1625,7 @@ static struct radeon_asic si_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &si_gpu_is_lockup,
+ .vm_flush = &si_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &si_ring_ib_execute,
@@ -1594,6 +1636,7 @@ static struct radeon_asic si_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &si_gpu_is_lockup,
+ .vm_flush = &si_vm_flush,
}
},
.irq = {
@@ -1604,6 +1647,8 @@ static struct radeon_asic si_asic = {
.bandwidth_update = &dce6_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = NULL,
@@ -1697,6 +1742,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock;
rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock;
rdev->asic->pm.set_memory_clock = NULL;
+ rdev->asic->display.set_backlight_level = &radeon_legacy_set_backlight_level;
}
break;
case CHIP_RS400:
@@ -1769,13 +1815,11 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->asic = &cayman_asic;
/* set num crtcs */
rdev->num_crtc = 6;
- rdev->vm_manager.funcs = &cayman_vm_funcs;
break;
case CHIP_ARUBA:
rdev->asic = &trinity_asic;
/* set num crtcs */
rdev->num_crtc = 4;
- rdev->vm_manager.funcs = &cayman_vm_funcs;
break;
case CHIP_TAHITI:
case CHIP_PITCAIRN:
@@ -1783,7 +1827,6 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->asic = &si_asic;
/* set num crtcs */
rdev->num_crtc = 6;
- rdev->vm_manager.funcs = &si_vm_funcs;
break;
default:
/* FIXME: not supported yet */
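The hunks above extend the kernel's ops-table pattern: each radeon_asic struct
carries function pointers filled in with designated initializers, and callers
dispatch through them instead of branching on the chip family. A minimal
standalone sketch of the pattern, with illustrative names:

#include <stdio.h>

struct display_funcs {
	void (*set_backlight_level)(unsigned char level);
	unsigned char (*get_backlight_level)(void);
};

static void legacy_set(unsigned char level) { printf("legacy: %u\n", level); }
static unsigned char legacy_get(void) { return 128; }

static const struct display_funcs legacy_display = {
	.set_backlight_level = legacy_set,
	.get_backlight_level = legacy_get,
};

int main(void)
{
	const struct display_funcs *f = &legacy_display; /* chosen per chip */

	f->set_backlight_level(f->get_backlight_level());
	return 0;
}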
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 18c38d14c8cd..5e3a0e5c6be1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -42,6 +42,12 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev);
void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+void atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
+u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder);
+void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
+u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
+
+
/*
* r100,rv100,rs100,rv200,rs200
*/
@@ -389,6 +395,7 @@ void r700_cp_fini(struct radeon_device *rdev);
struct evergreen_mc_save {
u32 vga_render_control;
u32 vga_hdp_control;
+ bool crtc_enabled[RADEON_MAX_CRTCS];
};
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
@@ -413,6 +420,7 @@ extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
extern void sumo_pm_init_profile(struct radeon_device *rdev);
+extern void btc_pm_init_profile(struct radeon_device *rdev);
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
@@ -435,14 +443,11 @@ int cayman_asic_reset(struct radeon_device *rdev);
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int cayman_vm_init(struct radeon_device *rdev);
void cayman_vm_fini(struct radeon_device *rdev);
-int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
-void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
-void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
- struct radeon_vm *vm,
- uint32_t flags);
-void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
- unsigned pfn, uint64_t addr, uint32_t flags);
+void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
+void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
/* DCE6 - SI */
@@ -465,9 +470,10 @@ int si_irq_set(struct radeon_device *rdev);
int si_irq_process(struct radeon_device *rdev);
int si_vm_init(struct radeon_device *rdev);
void si_vm_fini(struct radeon_device *rdev);
-int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
-void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
-void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
+void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
uint64_t si_get_gpu_clock(struct radeon_device *rdev);
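The new cayman_vm_set_page/si_vm_set_page prototypes above replace per-pfn
updates with a run description: "count" 8-byte page table entries starting at
GPU address "pe", mapping pages that start at "addr" and advance by "incr"
bytes per entry. A standalone sketch of that arithmetic (illustrative only;
the real callbacks emit writes to GPU memory):

#include <stdint.h>
#include <stdio.h>

static void sketch_vm_set_page(uint64_t pe, uint64_t addr,
			       unsigned count, uint32_t incr, uint32_t flags)
{
	for (unsigned i = 0; i < count; i++)
		printf("PTE @0x%llx -> 0x%llx flags 0x%x\n",
		       (unsigned long long)(pe + 8ull * i),
		       (unsigned long long)(addr + (uint64_t)incr * i),
		       flags);
}

int main(void)
{
	sketch_vm_set_page(0x1000, 0x80000000ull, 4, 4096, 0x1);
	return 0;
}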
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index d67d4f3eb6f4..01b90b4f5e22 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1254,6 +1254,10 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
if (rdev->clock.max_pixel_clock == 0)
rdev->clock.max_pixel_clock = 40000;
+ /* not technically a clock, but... */
+ rdev->mode_info.firmware_flags =
+ le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
+
return true;
}
@@ -2005,7 +2009,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
/* add the i2c bus for thermal/fan chip */
- if (power_info->info.ucOverdriveThermalController > 0) {
+ if ((power_info->info.ucOverdriveThermalController > 0) &&
+ (power_info->info.ucOverdriveThermalController < ARRAY_SIZE(thermal_controller_names))) {
DRM_INFO("Possible %s thermal controller at 0x%02x\n",
thermal_controller_names[power_info->info.ucOverdriveThermalController],
power_info->info.ucOverdriveControllerAddress >> 1);
@@ -2209,7 +2214,7 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
(controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
DRM_INFO("Special thermal controller config\n");
- } else {
+ } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
pp_lib_thermal_controller_names[controller->ucType],
controller->ucI2cAddress >> 1,
@@ -2224,6 +2229,12 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
strlcpy(info.type, name, sizeof(info.type));
i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
}
+ } else {
+ DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+ controller->ucType,
+ controller->ucI2cAddress >> 1,
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
}
}
}
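Both radeon_atombios.c fixes above guard a name-table lookup with a bounds
check before indexing with a value read from the BIOS. A standalone sketch of
the pattern, with the kernel's ARRAY_SIZE macro reproduced so it builds on its
own:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *names[] = { "none", "rv6xx", "rv770", "evergreen" };

static const char *lookup(unsigned type)
{
	if (type < ARRAY_SIZE(names))
		return names[type];
	return "unknown";	/* out-of-range index from the BIOS */
}

int main(void)
{
	printf("%s %s\n", lookup(2), lookup(42));
	return 0;
}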
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 2a2cf0b88a28..582e99449c12 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -12,30 +12,62 @@
#include <acpi/acpi_bus.h>
#include <linux/pci.h>
-#define ATPX_VERSION 0
-#define ATPX_GPU_PWR 2
-#define ATPX_MUX_SELECT 3
-#define ATPX_I2C_MUX_SELECT 4
-#define ATPX_SWITCH_START 5
-#define ATPX_SWITCH_END 6
-
-#define ATPX_INTEGRATED 0
-#define ATPX_DISCRETE 1
+#include "radeon_acpi.h"
+
+struct radeon_atpx_functions {
+ bool px_params;
+ bool power_cntl;
+ bool disp_mux_cntl;
+ bool i2c_mux_cntl;
+ bool switch_start;
+ bool switch_end;
+ bool disp_connectors_mapping;
+ bool disp_detection_ports;
+};
-#define ATPX_MUX_IGD 0
-#define ATPX_MUX_DISCRETE 1
+struct radeon_atpx {
+ acpi_handle handle;
+ struct radeon_atpx_functions functions;
+};
static struct radeon_atpx_priv {
bool atpx_detected;
/* handle for device - and atpx */
acpi_handle dhandle;
- acpi_handle atpx_handle;
+ struct radeon_atpx atpx;
} radeon_atpx_priv;
-static int radeon_atpx_get_version(acpi_handle handle)
+struct atpx_verify_interface {
+ u16 size; /* structure size in bytes (includes size field) */
+ u16 version; /* version */
+ u32 function_bits; /* supported functions bit vector */
+} __packed;
+
+struct atpx_power_control {
+ u16 size;
+ u8 dgpu_state;
+} __packed;
+
+struct atpx_mux {
+ u16 size;
+ u16 mux;
+} __packed;
+
+/**
+ * radeon_atpx_call - call an ATPX method
+ *
+ * @handle: acpi handle
+ * @function: the ATPX function to execute
+ * @params: ATPX function params
+ *
+ * Executes the requested ATPX function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
+ struct acpi_buffer *params)
{
acpi_status status;
- union acpi_object atpx_arg_elements[2], *obj;
+ union acpi_object atpx_arg_elements[2];
struct acpi_object_list atpx_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -43,99 +75,292 @@ static int radeon_atpx_get_version(acpi_handle handle)
atpx_arg.pointer = &atpx_arg_elements[0];
atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
- atpx_arg_elements[0].integer.value = ATPX_VERSION;
+ atpx_arg_elements[0].integer.value = function;
+
+ if (params) {
+ atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
+ atpx_arg_elements[1].buffer.length = params->length;
+ atpx_arg_elements[1].buffer.pointer = params->pointer;
+ } else {
+ /* We need a second fake parameter */
+ atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ atpx_arg_elements[1].integer.value = 0;
+ }
- atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
- atpx_arg_elements[1].integer.value = ATPX_VERSION;
+ status = acpi_evaluate_object(handle, "ATPX", &atpx_arg, &buffer);
- status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
- if (ACPI_FAILURE(status)) {
- printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
- return -ENOSYS;
+ /* Fail only if calling the method fails and ATPX is supported */
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ printk("failed to evaluate ATPX got %s\n",
+ acpi_format_exception(status));
+ kfree(buffer.pointer);
+ return NULL;
}
- obj = (union acpi_object *)buffer.pointer;
- if (obj && (obj->type == ACPI_TYPE_BUFFER))
- printk(KERN_INFO "radeon atpx: version is %d\n", *((u8 *)(obj->buffer.pointer) + 2));
- kfree(buffer.pointer);
- return 0;
+
+ return buffer.pointer;
}
-static int radeon_atpx_execute(acpi_handle handle, int cmd_id, u16 value)
+/**
+ * radeon_atpx_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATPX
+ *
+ * Use the supported functions mask from ATPX function
+ * ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mask)
{
- acpi_status status;
- union acpi_object atpx_arg_elements[2];
- struct acpi_object_list atpx_arg;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- uint8_t buf[4] = {0};
-
- if (!handle)
- return -EINVAL;
-
- atpx_arg.count = 2;
- atpx_arg.pointer = &atpx_arg_elements[0];
+ f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED;
+ f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED;
+ f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED;
+ f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED;
+ f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
+ f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
+ f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
+ f->disp_detection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
+}
- atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
- atpx_arg_elements[0].integer.value = cmd_id;
+/**
+ * radeon_atpx_verify_interface - verify ATPX
+ *
+ * @handle: acpi handle
+ * @atpx: radeon atpx struct
+ *
+ * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
+ * to initialize ATPX and determine what features are supported
+ * (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
+{
+ union acpi_object *info;
+ struct atpx_verify_interface output;
+ size_t size;
+ int err = 0;
+
+ info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
+ if (!info)
+ return -EIO;
+
+ memset(&output, 0, sizeof(output));
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 8) {
+ printk("ATPX buffer is too small: %lu\n", size);
+ err = -EINVAL;
+ goto out;
+ }
+ size = min(sizeof(output), size);
- buf[2] = value & 0xff;
- buf[3] = (value >> 8) & 0xff;
+ memcpy(&output, info->buffer.pointer, size);
- atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
- atpx_arg_elements[1].buffer.length = 4;
- atpx_arg_elements[1].buffer.pointer = buf;
+ /* TODO: check version? */
+ printk("ATPX version %u\n", output.version);
- status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
- if (ACPI_FAILURE(status)) {
- printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
- return -ENOSYS;
- }
- kfree(buffer.pointer);
+ radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
- return 0;
+out:
+ kfree(info);
+ return err;
}
-static int radeon_atpx_set_discrete_state(acpi_handle handle, int state)
+/**
+ * radeon_atpx_set_discrete_state - power up/down discrete GPU
+ *
+ * @atpx: atpx info struct
+ * @state: discrete GPU state (0 = power down, 1 = power up)
+ *
+ * Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to
+ * power down/up the discrete GPU (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
{
- return radeon_atpx_execute(handle, ATPX_GPU_PWR, state);
+ struct acpi_buffer params;
+ union acpi_object *info;
+ struct atpx_power_control input;
+
+ if (atpx->functions.power_cntl) {
+ input.size = 3;
+ input.dgpu_state = state;
+ params.length = input.size;
+ params.pointer = &input;
+ info = radeon_atpx_call(atpx->handle,
+ ATPX_FUNCTION_POWER_CONTROL,
+ &params);
+ if (!info)
+ return -EIO;
+ kfree(info);
+ }
+ return 0;
}
-static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id)
+/**
+ * radeon_atpx_switch_disp_mux - switch display mux
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to
+ * switch the display mux between the discrete GPU and integrated GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_disp_mux(struct radeon_atpx *atpx, u16 mux_id)
{
- return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id);
+ struct acpi_buffer params;
+ union acpi_object *info;
+ struct atpx_mux input;
+
+ if (atpx->functions.disp_mux_cntl) {
+ input.size = 4;
+ input.mux = mux_id;
+ params.length = input.size;
+ params.pointer = &input;
+ info = radeon_atpx_call(atpx->handle,
+ ATPX_FUNCTION_DISPLAY_MUX_CONTROL,
+ &params);
+ if (!info)
+ return -EIO;
+ kfree(info);
+ }
+ return 0;
}
-static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id)
+/**
+ * radeon_atpx_switch_i2c_mux - switch i2c/hpd mux
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to
+ * switch the i2c/hpd mux between the discrete GPU and integrated GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_i2c_mux(struct radeon_atpx *atpx, u16 mux_id)
{
- return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id);
+ struct acpi_buffer params;
+ union acpi_object *info;
+ struct atpx_mux input;
+
+ if (atpx->functions.i2c_mux_cntl) {
+ input.size = 4;
+ input.mux = mux_id;
+ params.length = input.size;
+ params.pointer = &input;
+ info = radeon_atpx_call(atpx->handle,
+ ATPX_FUNCTION_I2C_MUX_CONTROL,
+ &params);
+ if (!info)
+ return -EIO;
+ kfree(info);
+ }
+ return 0;
}
-static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id)
+/**
+ * radeon_atpx_switch_start - notify the sbios of a GPU switch
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX
+ * function to notify the sbios that a switch between the discrete GPU and
+ * integrated GPU has begun (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_start(struct radeon_atpx *atpx, u16 mux_id)
{
- return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id);
+ struct acpi_buffer params;
+ union acpi_object *info;
+ struct atpx_mux input;
+
+ if (atpx->functions.switch_start) {
+ input.size = 4;
+ input.mux = mux_id;
+ params.length = input.size;
+ params.pointer = &input;
+ info = radeon_atpx_call(atpx->handle,
+ ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION,
+ &params);
+ if (!info)
+ return -EIO;
+ kfree(info);
+ }
+ return 0;
}
-static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id)
+/**
+ * radeon_atpx_switch_end - notify the sbios of a GPU switch
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX
+ * function to notify the sbios that a switch between the discrete GPU and
+ * integrated GPU has ended (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_end(struct radeon_atpx *atpx, u16 mux_id)
{
- return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id);
+ struct acpi_buffer params;
+ union acpi_object *info;
+ struct atpx_mux input;
+
+ if (atpx->functions.switch_end) {
+ input.size = 4;
+ input.mux = mux_id;
+ params.length = input.size;
+ params.pointer = &input;
+ info = radeon_atpx_call(atpx->handle,
+ ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION,
+ &params);
+ if (!info)
+ return -EIO;
+ kfree(info);
+ }
+ return 0;
}
+/**
+ * radeon_atpx_switchto - switch to the requested GPU
+ *
+ * @id: GPU to switch to
+ *
+ * Execute the necessary ATPX functions to switch between the discrete GPU and
+ * integrated GPU (all asics).
+ * Returns 0 on success, error on failure.
+ */
static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
{
- int gpu_id;
+ u16 gpu_id;
if (id == VGA_SWITCHEROO_IGD)
- gpu_id = ATPX_INTEGRATED;
+ gpu_id = ATPX_INTEGRATED_GPU;
else
- gpu_id = ATPX_DISCRETE;
+ gpu_id = ATPX_DISCRETE_GPU;
- radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id);
- radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id);
- radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id);
- radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id);
+ radeon_atpx_switch_start(&radeon_atpx_priv.atpx, gpu_id);
+ radeon_atpx_switch_disp_mux(&radeon_atpx_priv.atpx, gpu_id);
+ radeon_atpx_switch_i2c_mux(&radeon_atpx_priv.atpx, gpu_id);
+ radeon_atpx_switch_end(&radeon_atpx_priv.atpx, gpu_id);
return 0;
}
+/**
+ * radeon_atpx_power_state - power down/up the requested GPU
+ *
+ * @id: vga_switcheroo client id
+ * @state: requested power state (0 = off, 1 = on)
+ *
+ * Execute the necessary ATPX function to power down/up the discrete GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
enum vga_switcheroo_state state)
{
@@ -143,10 +368,18 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
if (id == VGA_SWITCHEROO_IGD)
return 0;
- radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, state);
+ radeon_atpx_set_discrete_state(&radeon_atpx_priv.atpx, state);
return 0;
}
+/**
+ * radeon_atpx_pci_probe_handle - look up the device and ATPX handles
+ *
+ * @pdev: pci device
+ *
+ * Look up the device ACPI handle and the ATPX handle (all asics).
+ * Returns true if the handles are found, false if not.
+ */
static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
{
acpi_handle dhandle, atpx_handle;
@@ -161,18 +394,30 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
return false;
radeon_atpx_priv.dhandle = dhandle;
- radeon_atpx_priv.atpx_handle = atpx_handle;
+ radeon_atpx_priv.atpx.handle = atpx_handle;
return true;
}
+/**
+ * radeon_atpx_init - verify the ATPX interface
+ *
+ * Verify the ATPX interface (all asics).
+ * Returns 0 on success, error on failure.
+ */
static int radeon_atpx_init(void)
{
/* set up the ATPX handle */
-
- radeon_atpx_get_version(radeon_atpx_priv.atpx_handle);
- return 0;
+ return radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
}
+/**
+ * radeon_atpx_get_client_id - get the client id
+ *
+ * @pdev: pci device
+ *
+ * Look up whether we are the integrated or discrete GPU (all asics).
+ * Returns the client id.
+ */
static int radeon_atpx_get_client_id(struct pci_dev *pdev)
{
if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
@@ -188,6 +433,12 @@ static struct vga_switcheroo_handler radeon_atpx_handler = {
.get_client_id = radeon_atpx_get_client_id,
};
+/**
+ * radeon_atpx_detect - detect whether we have PX
+ *
+ * Check if we have a PX system (all asics).
+ * Returns true if we have a PX system, false if not.
+ */
static bool radeon_atpx_detect(void)
{
char acpi_method_name[255] = { 0 };
@@ -203,7 +454,7 @@ static bool radeon_atpx_detect(void)
}
if (has_atpx && vga_count == 2) {
- acpi_get_name(radeon_atpx_priv.atpx_handle, ACPI_FULL_PATHNAME, &buffer);
+ acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
acpi_method_name);
radeon_atpx_priv.atpx_detected = true;
@@ -212,6 +463,11 @@ static bool radeon_atpx_detect(void)
return false;
}
+/**
+ * radeon_register_atpx_handler - register with vga_switcheroo
+ *
+ * Register the PX callbacks with vga_switcheroo (all asics).
+ */
void radeon_register_atpx_handler(void)
{
bool r;
@@ -224,6 +480,11 @@ void radeon_register_atpx_handler(void)
vga_switcheroo_register_handler(&radeon_atpx_handler);
}
+/**
+ * radeon_unregister_atpx_handler - unregister with vga_switcheroo
+ *
+ * Unregister the PX callbacks from vga_switcheroo (all asics).
+ */
void radeon_unregister_atpx_handler(void)
{
vga_switcheroo_unregister_handler();
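The mux and notification helpers above all pack the same two-field input
documented in radeon_acpi.h: a WORD size (4, counting itself) followed by a
WORD selector. A standalone sketch of that packing, illustrative only:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct atpx_mux_in {
	uint16_t size;
	uint16_t mux;
} __attribute__((packed));

int main(void)
{
	struct atpx_mux_in in = { .size = sizeof(in), .mux = 1 /* dGPU */ };
	const uint8_t *p = (const uint8_t *)&in;

	for (size_t i = 0; i < sizeof(in); i++)
		printf("%02x ", p[i]);	/* 04 00 01 00 on little-endian */
	printf("\n");
	return 0;
}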
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index f75247d42ffd..8a73f0758903 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3319,15 +3319,6 @@ static void combios_write_ram_size(struct drm_device *dev)
WREG32(RADEON_CONFIG_MEMSIZE, mem_size);
}
-void radeon_combios_dyn_clk_setup(struct drm_device *dev, int enable)
-{
- uint16_t dyn_clk_info =
- combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
-
- if (dyn_clk_info)
- combios_parse_pll_table(dev, dyn_clk_info);
-}
-
void radeon_combios_asic_init(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 895e628b60f8..69a142fc3d1d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -40,10 +40,6 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder,
bool connected);
-extern void
-radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
- struct drm_connector *drm_connector);
-
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -198,7 +194,7 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
}
}
-struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
+static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
{
struct drm_mode_object *obj;
struct drm_encoder *encoder;
@@ -219,7 +215,7 @@ struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int enc
return NULL;
}
-struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
+static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct drm_mode_object *obj;
@@ -370,7 +366,7 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn
}
}
-int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
+static int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = connector->dev;
@@ -691,13 +687,13 @@ static int radeon_lvds_set_property(struct drm_connector *connector,
}
-struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
.get_modes = radeon_lvds_get_modes,
.mode_valid = radeon_lvds_mode_valid,
.best_encoder = radeon_best_single_encoder,
};
-struct drm_connector_funcs radeon_lvds_connector_funcs = {
+static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -809,13 +805,13 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
return ret;
}
-struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
.get_modes = radeon_vga_get_modes,
.mode_valid = radeon_vga_mode_valid,
.best_encoder = radeon_best_single_encoder,
};
-struct drm_connector_funcs radeon_vga_connector_funcs = {
+static const struct drm_connector_funcs radeon_vga_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -879,13 +875,13 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
return ret;
}
-struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
.get_modes = radeon_tv_get_modes,
.mode_valid = radeon_tv_mode_valid,
.best_encoder = radeon_best_single_encoder,
};
-struct drm_connector_funcs radeon_tv_connector_funcs = {
+static const struct drm_connector_funcs radeon_tv_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_tv_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1089,7 +1085,7 @@ out:
}
/* okay need to be smart in here about which encoder to pick */
-struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
+static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1179,13 +1175,13 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
.get_modes = radeon_dvi_get_modes,
.mode_valid = radeon_dvi_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
-struct drm_connector_funcs radeon_dvi_connector_funcs = {
+static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_dvi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1462,13 +1458,13 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
}
}
-struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
.get_modes = radeon_dp_get_modes,
.mode_valid = radeon_dp_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
-struct drm_connector_funcs radeon_dp_connector_funcs = {
+static const struct drm_connector_funcs radeon_dp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -2008,15 +2004,4 @@ radeon_add_legacy_connector(struct drm_device *dev,
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
- if (connector_type == DRM_MODE_CONNECTOR_LVDS) {
- struct drm_encoder *drm_encoder;
-
- list_for_each_entry(drm_encoder, &dev->mode_config.encoder_list, head) {
- struct radeon_encoder *radeon_encoder;
-
- radeon_encoder = to_radeon_encoder(drm_encoder);
- if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_LVDS)
- radeon_legacy_backlight_init(radeon_encoder, connector);
- }
- }
}
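The radeon_connectors.c hunks above make the helper functions static and the
funcs tables static const. Marking an ops table static const keeps the symbol
file-local and lets the compiler place it in read-only data; a minimal
illustration of the idiom:

#include <stdio.h>

struct ops {
	int (*probe)(void);
};

static int do_probe(void) { return 1; }

/* file-local and immutable: lands in .rodata rather than writable .data */
static const struct ops my_ops = {
	.probe = do_probe,
};

int main(void)
{
	printf("probe: %d\n", my_ops.probe());
	return 0;
}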
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index b4a0db24f4dd..d59eb59cdb81 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -32,7 +32,7 @@
void r100_cs_dump_packet(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt);
-int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
+static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
struct drm_device *ddev = p->rdev->ddev;
struct radeon_cs_chunk *chunk;
@@ -115,19 +115,27 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
return 0;
}
+static void radeon_cs_sync_to(struct radeon_cs_parser *p,
+ struct radeon_fence *fence)
+{
+ struct radeon_fence *other;
+
+ if (!fence)
+ return;
+
+ other = p->ib.sync_to[fence->ring];
+ p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
+}
+
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
int i;
for (i = 0; i < p->nrelocs; i++) {
- struct radeon_fence *a, *b;
-
- if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
+ if (!p->relocs[i].robj)
continue;
- a = p->relocs[i].robj->tbo.sync_obj;
- b = p->ib.sync_to[a->ring];
- p->ib.sync_to[a->ring] = radeon_fence_later(a, b);
+ radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
}
}
@@ -278,30 +286,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return 0;
}
-static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser,
- struct radeon_fence *fence)
-{
- struct radeon_fpriv *fpriv = parser->filp->driver_priv;
- struct radeon_vm *vm = &fpriv->vm;
- struct radeon_bo_list *lobj;
-
- if (parser->chunk_ib_idx == -1) {
- return;
- }
- if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) {
- return;
- }
-
- list_for_each_entry(lobj, &parser->validated, tv.head) {
- struct radeon_bo_va *bo_va;
- struct radeon_bo *rbo = lobj->bo;
-
- bo_va = radeon_bo_va(rbo, vm);
- radeon_fence_unref(&bo_va->fence);
- bo_va->fence = radeon_fence_ref(fence);
- }
-}
-
/**
* cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
@@ -315,8 +299,6 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
unsigned i;
if (!error) {
- /* fence all bo va before ttm_eu_fence_buffer_objects so bo are still reserved */
- radeon_bo_vm_fence_va(parser, parser->ib.fence);
ttm_eu_fence_buffer_objects(&parser->validated,
parser->ib.fence);
} else {
@@ -363,7 +345,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
* uncached).
*/
r = radeon_ib_get(rdev, parser->ring, &parser->ib,
- ib_chunk->length_dw * 4);
+ NULL, ib_chunk->length_dw * 4);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
@@ -380,7 +362,6 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
return r;
}
radeon_cs_sync_rings(parser);
- parser->ib.vm_id = 0;
r = radeon_ib_schedule(rdev, &parser->ib, NULL);
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
@@ -391,10 +372,15 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
struct radeon_vm *vm)
{
+ struct radeon_device *rdev = parser->rdev;
struct radeon_bo_list *lobj;
struct radeon_bo *bo;
int r;
+ r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
+ if (r) {
+ return r;
+ }
list_for_each_entry(lobj, &parser->validated, tv.head) {
bo = lobj->bo;
r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
@@ -426,7 +412,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
return -EINVAL;
}
r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
- ib_chunk->length_dw * 4);
+ vm, ib_chunk->length_dw * 4);
if (r) {
DRM_ERROR("Failed to get const ib !\n");
return r;
@@ -450,7 +436,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
return -EINVAL;
}
r = radeon_ib_get(rdev, parser->ring, &parser->ib,
- ib_chunk->length_dw * 4);
+ vm, ib_chunk->length_dw * 4);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
@@ -468,7 +454,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
- r = radeon_vm_bind(rdev, vm);
+ r = radeon_vm_alloc_pt(rdev, vm);
if (r) {
goto out;
}
@@ -477,32 +463,21 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
goto out;
}
radeon_cs_sync_rings(parser);
-
- parser->ib.vm_id = vm->id;
- /* ib pool is bind at 0 in virtual address space,
- * so gpu_addr is the offset inside the pool bo
- */
- parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
+ radeon_cs_sync_to(parser, vm->fence);
+ radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
- parser->const_ib.vm_id = vm->id;
- /* ib pool is bind at 0 in virtual address space,
- * so gpu_addr is the offset inside the pool bo
- */
- parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
} else {
r = radeon_ib_schedule(rdev, &parser->ib, NULL);
}
-out:
if (!r) {
- if (vm->fence) {
- radeon_fence_unref(&vm->fence);
- }
- vm->fence = radeon_fence_ref(parser->ib.fence);
+ radeon_vm_fence(rdev, vm, parser->ib.fence);
}
+
+out:
mutex_unlock(&vm->mutex);
mutex_unlock(&rdev->vm_manager.lock);
return r;
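radeon_cs_sync_to above keeps, per ring, only the later of the fence already
tracked and the newly supplied one (radeon_fence_later picks the fence with
the higher sequence number). A standalone sketch with plain sequence numbers
standing in for fences:

#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS 3

static uint64_t sync_to[NUM_RINGS];

static void cs_sync_to(unsigned ring, uint64_t seq)
{
	if (seq > sync_to[ring])	/* the "later" fence wins */
		sync_to[ring] = seq;
}

int main(void)
{
	cs_sync_to(0, 10);
	cs_sync_to(0, 7);	/* older fence is ignored */
	cs_sync_to(1, 3);
	for (unsigned i = 0; i < NUM_RINGS; i++)
		printf("ring %u waits for seq %llu\n", i,
		       (unsigned long long)sync_to[i]);
	return 0;
}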
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7a3daebd732d..64a42647f08a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -842,7 +842,7 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
* Validates certain module parameters and updates
* the associated values used by the driver (all asics).
*/
-void radeon_check_arguments(struct radeon_device *rdev)
+static void radeon_check_arguments(struct radeon_device *rdev)
{
/* vramlimit must be a power of two */
switch (radeon_vram_limit) {
@@ -1013,13 +1013,11 @@ int radeon_device_init(struct radeon_device *rdev,
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
- init_waitqueue_head(&rdev->irq.idle_queue);
r = radeon_gem_init(rdev);
if (r)
return r;
/* initialize vm here */
mutex_init(&rdev->vm_manager.lock);
- rdev->vm_manager.use_bitmap = 1;
rdev->vm_manager.max_pfn = 1 << 20;
INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
@@ -1284,6 +1282,13 @@ int radeon_resume_kms(struct drm_device *dev)
if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
radeon_atom_disp_eng_pll_init(rdev);
+ /* turn on the BL */
+ if (rdev->mode_info.bl_encoder) {
+ u8 bl_level = radeon_get_backlight_level(rdev,
+ rdev->mode_info.bl_encoder);
+ radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
+ bl_level);
+ }
}
/* reset hpd state */
radeon_hpd_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 8c593ea82c41..27ece75b4789 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -64,9 +64,11 @@
* 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
* 2.21.0 - r600-r700: FMASK and CMASK
* 2.22.0 - r600 only: RESOLVE_BOX allowed
+ * 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
+ * 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 22
+#define KMS_DRIVER_MINOR 24
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 74670696277d..e66c807df9e6 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -29,6 +29,14 @@
#include "radeon.h"
#include "atom.h"
+extern void
+radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+ struct drm_connector *drm_connector);
+extern void
+radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+ struct drm_connector *drm_connector);
+
+
static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -153,6 +161,7 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
void
radeon_link_encoder_connector(struct drm_device *dev)
{
+ struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct drm_encoder *encoder;
@@ -163,8 +172,16 @@ radeon_link_encoder_connector(struct drm_device *dev)
radeon_connector = to_radeon_connector(connector);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
- if (radeon_encoder->devices & radeon_connector->devices)
+ if (radeon_encoder->devices & radeon_connector->devices) {
drm_mode_connector_attach_encoder(connector, encoder);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (rdev->is_atom_bios)
+ radeon_atom_backlight_init(radeon_encoder, connector);
+ else
+ radeon_legacy_backlight_init(radeon_encoder, connector);
+ rdev->mode_info.bl_encoder = radeon_encoder;
+ }
+ }
}
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 5906914a78bc..6ada72c6d7a1 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -316,22 +316,6 @@ static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
return new_fb;
}
-static char *mode_option;
-int radeon_parse_options(char *options)
-{
- char *this_opt;
-
- if (!options || !*options)
- return 0;
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
- if (!*this_opt)
- continue;
- mode_option = this_opt;
- }
- return 0;
-}
-
void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 2a59375dbe52..1eb3db52a36e 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -399,7 +399,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
return 0;
}
-bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
unsigned i;
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index bb3b7fe05ccd..753b7ca3c807 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -423,6 +423,18 @@ void radeon_gart_fini(struct radeon_device *rdev)
*/
/**
+ * radeon_vm_directory_size - returns the size of the page directory in bytes
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the size of the page directory in bytes (cayman+).
+ */
+static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
+{
+ return (rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE) * 8;
+}
+
+/**
* radeon_vm_manager_init - init the vm manager
*
* @rdev: radeon_device pointer
@@ -435,12 +447,15 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
struct radeon_vm *vm;
struct radeon_bo_va *bo_va;
int r;
+ unsigned size;
if (!rdev->vm_manager.enabled) {
- /* mark first vm as always in use, it's the system one */
/* allocate enough for 2 full VM pts */
+ size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+ size += RADEON_GPU_PAGE_ALIGN(rdev->vm_manager.max_pfn * 8);
+ size *= 2;
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
- rdev->vm_manager.max_pfn * 8 * 2,
+ size,
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -448,10 +463,10 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
return r;
}
- r = rdev->vm_manager.funcs->init(rdev);
+ r = radeon_asic_vm_init(rdev);
if (r)
return r;
-
+
rdev->vm_manager.enabled = true;
r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
@@ -461,73 +476,36 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
/* restore page table */
list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
- if (vm->id == -1)
+ if (vm->sa_bo == NULL)
continue;
list_for_each_entry(bo_va, &vm->va, vm_list) {
- struct ttm_mem_reg *mem = NULL;
- if (bo_va->valid)
- mem = &bo_va->bo->tbo.mem;
-
bo_va->valid = false;
- r = radeon_vm_bo_update_pte(rdev, vm, bo_va->bo, mem);
- if (r) {
- DRM_ERROR("Failed to update pte for vm %d!\n", vm->id);
- }
- }
-
- r = rdev->vm_manager.funcs->bind(rdev, vm, vm->id);
- if (r) {
- DRM_ERROR("Failed to bind vm %d!\n", vm->id);
}
}
return 0;
}
-/* global mutex must be lock */
/**
- * radeon_vm_unbind_locked - unbind a specific vm
+ * radeon_vm_free_pt - free the page table for a specific vm
*
* @rdev: radeon_device pointer
* @vm: vm to unbind
*
- * Unbind the requested vm (cayman+).
- * Wait for use of the VM to finish, then unbind the page table,
- * and free the page table memory.
+ * Free the page table of a specific vm (cayman+).
+ *
+ * Global and local mutex must be locked!
*/
-static void radeon_vm_unbind_locked(struct radeon_device *rdev,
+static void radeon_vm_free_pt(struct radeon_device *rdev,
struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va;
- if (vm->id == -1) {
+ if (!vm->sa_bo)
return;
- }
- /* wait for vm use to end */
- while (vm->fence) {
- int r;
- r = radeon_fence_wait(vm->fence, false);
- if (r)
- DRM_ERROR("error while waiting for fence: %d\n", r);
- if (r == -EDEADLK) {
- mutex_unlock(&rdev->vm_manager.lock);
- r = radeon_gpu_reset(rdev);
- mutex_lock(&rdev->vm_manager.lock);
- if (!r)
- continue;
- }
- break;
- }
- radeon_fence_unref(&vm->fence);
-
- /* hw unbind */
- rdev->vm_manager.funcs->unbind(rdev, vm);
- rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
list_del_init(&vm->list);
- vm->id = -1;
- radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
- vm->pt = NULL;
+ radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence);
list_for_each_entry(bo_va, &vm->va, vm_list) {
bo_va->valid = false;
@@ -544,16 +522,22 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
struct radeon_vm *vm, *tmp;
+ int i;
if (!rdev->vm_manager.enabled)
return;
mutex_lock(&rdev->vm_manager.lock);
- /* unbind all active vm */
+ /* free all allocated page tables */
list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
- radeon_vm_unbind_locked(rdev, vm);
+ mutex_lock(&vm->mutex);
+ radeon_vm_free_pt(rdev, vm);
+ mutex_unlock(&vm->mutex);
}
- rdev->vm_manager.funcs->fini(rdev);
+ for (i = 0; i < RADEON_NUM_VM; ++i) {
+ radeon_fence_unref(&rdev->vm_manager.active[i]);
+ }
+ radeon_asic_vm_fini(rdev);
mutex_unlock(&rdev->vm_manager.lock);
radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
@@ -561,46 +545,34 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
rdev->vm_manager.enabled = false;
}
-/* global mutex must be locked */
/**
- * radeon_vm_unbind - locked version of unbind
- *
- * @rdev: radeon_device pointer
- * @vm: vm to unbind
- *
- * Locked version that wraps radeon_vm_unbind_locked (cayman+).
- */
-void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
-{
- mutex_lock(&vm->mutex);
- radeon_vm_unbind_locked(rdev, vm);
- mutex_unlock(&vm->mutex);
-}
-
-/* global and local mutex must be locked */
-/**
- * radeon_vm_bind - bind a page table to a VMID
+ * radeon_vm_alloc_pt - allocates a page table for a VM
*
* @rdev: radeon_device pointer
* @vm: vm to bind
*
- * Bind the requested vm (cayman+).
- * Suballocate memory for the page table, allocate a VMID
- * and bind the page table to it, and finally start to populate
- * the page table.
+ * Allocate a page table for the requested vm (cayman+).
+ * Also starts to populate the page table.
* Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
*/
-int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
+int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
struct radeon_vm *vm_evict;
- unsigned i;
- int id = -1, r;
+ int r;
+ u64 *pd_addr;
+ int tables_size;
if (vm == NULL) {
return -EINVAL;
}
- if (vm->id != -1) {
+ /* allocate enough to cover the current VM size */
+ tables_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+ tables_size += RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8);
+
+ if (vm->sa_bo != NULL) {
/* update lru */
list_del_init(&vm->list);
list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
@@ -609,98 +581,215 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
retry:
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
- RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
- RADEON_GPU_PAGE_SIZE, false);
- if (r) {
+ tables_size, RADEON_GPU_PAGE_SIZE, false);
+ if (r == -ENOMEM) {
if (list_empty(&rdev->vm_manager.lru_vm)) {
return r;
}
vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
- radeon_vm_unbind(rdev, vm_evict);
+ mutex_lock(&vm_evict->mutex);
+ radeon_vm_free_pt(rdev, vm_evict);
+ mutex_unlock(&vm_evict->mutex);
goto retry;
+
+ } else if (r) {
+ return r;
}
- vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
- vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
- memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
-retry_id:
- /* search for free vm */
- for (i = 0; i < rdev->vm_manager.nvm; i++) {
- if (!(rdev->vm_manager.use_bitmap & (1 << i))) {
- id = i;
- break;
+ pd_addr = radeon_sa_bo_cpu_addr(vm->sa_bo);
+ vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
+ memset(pd_addr, 0, tables_size);
+
+ list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+ return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
+ &rdev->ring_tmp_bo.bo->tbo.mem);
+}
+
+/**
+ * radeon_vm_grab_id - allocate the next free VMID
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ *
+ * Allocate an id for the vm (cayman+).
+ * Returns the fence we need to sync to (if any).
+ *
+ * Global and local mutex must be locked!
+ */
+struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
+ struct radeon_vm *vm, int ring)
+{
+ struct radeon_fence *best[RADEON_NUM_RINGS] = {};
+ unsigned choices[2] = {};
+ unsigned i;
+
+ /* check if the id is still valid */
+ if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
+ return NULL;
+
+ /* we definitely need to flush */
+ radeon_fence_unref(&vm->last_flush);
+
+ /* skip over VMID 0, since it is the system VM */
+ for (i = 1; i < rdev->vm_manager.nvm; ++i) {
+ struct radeon_fence *fence = rdev->vm_manager.active[i];
+
+ if (fence == NULL) {
+ /* found a free one */
+ vm->id = i;
+ return NULL;
+ }
+
+ if (radeon_fence_is_earlier(fence, best[fence->ring])) {
+ best[fence->ring] = fence;
+ choices[fence->ring == ring ? 0 : 1] = i;
}
}
- /* evict vm if necessary */
- if (id == -1) {
- vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
- radeon_vm_unbind(rdev, vm_evict);
- goto retry_id;
+
+ for (i = 0; i < 2; ++i) {
+ if (choices[i]) {
+ vm->id = choices[i];
+ return rdev->vm_manager.active[choices[i]];
+ }
}
- /* do hw bind */
- r = rdev->vm_manager.funcs->bind(rdev, vm, id);
- if (r) {
- radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
- return r;
+ /* should never happen */
+ BUG();
+ return NULL;
+}
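
A rough sketch of how radeon_vm_grab_id() above pairs with radeon_vm_fence() below (hypothetical call site; global and per-vm mutexes assumed held):

    struct radeon_fence *sync = radeon_vm_grab_id(rdev, vm, ring);
    if (sync) {
        /* the VMID is recycled: sync to its previous owner's fence
         * before emitting anything that runs under this VM */
    }
    /* ... emit the command buffer, yielding 'fence' ... */
    radeon_vm_fence(rdev, vm, fence);  /* pins id + page table to fence */
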
+
+/**
+ * radeon_vm_fence - remember fence for vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm we want to fence
+ * @fence: fence to remember
+ *
+ * Fence the vm (cayman+).
+ * Set the fence used to protect page table and id.
+ *
+ * Global and local mutex must be locked!
+ */
+void radeon_vm_fence(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_fence *fence)
+{
+ radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
+ rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
+
+ radeon_fence_unref(&vm->fence);
+ vm->fence = radeon_fence_ref(fence);
+}
+
+/**
+ * radeon_vm_bo_find - find the bo_va for a specific vm & bo
+ *
+ * @vm: requested vm
+ * @bo: requested buffer object
+ *
+ * Find @bo inside the requested vm (cayman+).
+ * Search inside the @bo's vm list for the requested vm
+ * Returns the found bo_va or NULL if none is found
+ *
+ * Object has to be reserved!
+ */
+struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va;
+
+ list_for_each_entry(bo_va, &bo->va, bo_list) {
+ if (bo_va->vm == vm) {
+ return bo_va;
+ }
}
- rdev->vm_manager.use_bitmap |= 1 << id;
- vm->id = id;
- list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
- return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
- &rdev->ring_tmp_bo.bo->tbo.mem);
+ return NULL;
}
-/* object have to be reserved */
/**
* radeon_vm_bo_add - add a bo to a specific vm
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @bo: radeon buffer object
- * @offset: requested offset of the buffer in the VM address space
- * @flags: attributes of pages (read/write/valid/etc.)
*
* Add @bo into the requested vm (cayman+).
- * Add @bo to the list of bos associated with the vm and validate
- * the offset requested within the vm address space.
- * Returns 0 for success, error for failure.
+ * Add @bo to the list of bos associated with the vm
+ * Returns newly added bo_va or NULL for failure
+ *
+ * Object has to be reserved!
*/
-int radeon_vm_bo_add(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
- uint64_t offset,
- uint32_t flags)
+struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo)
{
- struct radeon_bo_va *bo_va, *tmp;
- struct list_head *head;
- uint64_t size = radeon_bo_size(bo), last_offset = 0;
- unsigned last_pfn;
+ struct radeon_bo_va *bo_va;
bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
if (bo_va == NULL) {
- return -ENOMEM;
+ return NULL;
}
bo_va->vm = vm;
bo_va->bo = bo;
- bo_va->soffset = offset;
- bo_va->eoffset = offset + size;
- bo_va->flags = flags;
+ bo_va->soffset = 0;
+ bo_va->eoffset = 0;
+ bo_va->flags = 0;
bo_va->valid = false;
+ bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
INIT_LIST_HEAD(&bo_va->vm_list);
- /* make sure object fit at this offset */
- if (bo_va->soffset >= bo_va->eoffset) {
- kfree(bo_va);
- return -EINVAL;
- }
- last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE;
- if (last_pfn > rdev->vm_manager.max_pfn) {
- kfree(bo_va);
- dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
- last_pfn, rdev->vm_manager.max_pfn);
- return -EINVAL;
+ mutex_lock(&vm->mutex);
+ list_add(&bo_va->vm_list, &vm->va);
+ list_add_tail(&bo_va->bo_list, &bo->va);
+ mutex_unlock(&vm->mutex);
+
+ return bo_va;
+}
+
+/**
+ * radeon_vm_bo_set_addr - set bos virtual address inside a vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: bo_va to store the address
+ * @soffset: requested offset of the buffer in the VM address space
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Set offset of @bo_va (cayman+).
+ * Validate and set the offset requested within the vm address space.
+ * Returns 0 for success, error for failure.
+ *
+ * Object has to be reserved!
+ */
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va,
+ uint64_t soffset,
+ uint32_t flags)
+{
+ uint64_t size = radeon_bo_size(bo_va->bo);
+ uint64_t eoffset, last_offset = 0;
+ struct radeon_vm *vm = bo_va->vm;
+ struct radeon_bo_va *tmp;
+ struct list_head *head;
+ unsigned last_pfn;
+
+ if (soffset) {
+ /* make sure the object fits at this offset */
+ eoffset = soffset + size;
+ if (soffset >= eoffset) {
+ return -EINVAL;
+ }
+
+ last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+ if (last_pfn > rdev->vm_manager.max_pfn) {
+ dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+ last_pfn, rdev->vm_manager.max_pfn);
+ return -EINVAL;
+ }
+
+ } else {
+ eoffset = last_pfn = 0;
}
mutex_lock(&vm->mutex);
@@ -713,7 +802,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
if (last_pfn > vm->last_pfn) {
/* grow va space 32M by 32M */
unsigned align = ((32 << 20) >> 12) - 1;
- radeon_vm_unbind_locked(rdev, vm);
+ radeon_vm_free_pt(rdev, vm);
vm->last_pfn = (last_pfn + align) & ~align;
}
mutex_unlock(&rdev->vm_manager.lock);
@@ -721,68 +810,60 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
head = &vm->va;
last_offset = 0;
list_for_each_entry(tmp, &vm->va, vm_list) {
- if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) {
+ if (bo_va == tmp) {
+ /* skip over currently modified bo */
+ continue;
+ }
+
+ if (soffset >= last_offset && eoffset <= tmp->soffset) {
/* bo can be added before this one */
break;
}
- if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) {
+ if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
/* bo and tmp overlap, invalid offset */
dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
- bo, (unsigned)bo_va->soffset, tmp->bo,
+ bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
- kfree(bo_va);
mutex_unlock(&vm->mutex);
return -EINVAL;
}
last_offset = tmp->eoffset;
head = &tmp->vm_list;
}
- list_add(&bo_va->vm_list, head);
- list_add_tail(&bo_va->bo_list, &bo->va);
+
+ bo_va->soffset = soffset;
+ bo_va->eoffset = eoffset;
+ bo_va->flags = flags;
+ bo_va->valid = false;
+ list_move(&bo_va->vm_list, head);
+
mutex_unlock(&vm->mutex);
return 0;
}
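
Note the dual role the reworked helper takes on: a non-zero soffset validates and installs a mapping, while soffset = 0 clears it again, which is exactly how the RADEON_VA_UNMAP ioctl path further down uses it. A hedged sketch (the VA and flags are made up):

    r = radeon_vm_bo_set_addr(rdev, bo_va, 0x100000,
                              RADEON_VM_PAGE_READABLE);   /* map */
    r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);         /* unmap */
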
/**
- * radeon_vm_get_addr - get the physical address of the page
+ * radeon_vm_map_gart - get the physical address of a gart page
*
* @rdev: radeon_device pointer
- * @mem: ttm mem
- * @pfn: pfn
+ * @addr: the unmapped addr
*
* Look up the physical address of the page that the pte resolves
* to (cayman+).
* Returns the physical address of the page.
*/
-static u64 radeon_vm_get_addr(struct radeon_device *rdev,
- struct ttm_mem_reg *mem,
- unsigned pfn)
+uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
- u64 addr = 0;
-
- switch (mem->mem_type) {
- case TTM_PL_VRAM:
- addr = (mem->start << PAGE_SHIFT);
- addr += pfn * RADEON_GPU_PAGE_SIZE;
- addr += rdev->vm_manager.vram_base_offset;
- break;
- case TTM_PL_TT:
- /* offset inside page table */
- addr = mem->start << PAGE_SHIFT;
- addr += pfn * RADEON_GPU_PAGE_SIZE;
- addr = addr >> PAGE_SHIFT;
- /* page table offset */
- addr = rdev->gart.pages_addr[addr];
- /* in case cpu page size != gpu page size*/
- addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
- break;
- default:
- break;
- }
- return addr;
+ uint64_t result;
+
+ /* page table offset */
+ result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
+
+ /* in case cpu page size != gpu page size */
+ result |= addr & (~PAGE_MASK);
+
+ return result;
}
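
A worked example of the translation above, with made-up values and PAGE_SHIFT = 12 assumed:

    uint64_t addr = 0x12345;        /* unmapped GART address */
    uint64_t idx  = addr >> 12;     /* 0x12: system page index */
    uint64_t low  = addr & 0xfff;   /* 0x345: offset kept in case the
                                     * CPU and GPU page sizes differ */
    /* result = rdev->gart.pages_addr[idx] | low; */
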
-/* object have to be reserved & global and local mutex must be locked */
/**
* radeon_vm_bo_update_pte - map a bo into the vm page table
*
@@ -793,103 +874,160 @@ static u64 radeon_vm_get_addr(struct radeon_device *rdev,
*
* Fill in the page table entries for @bo (cayman+).
* Returns 0 for success, -EINVAL for failure.
+ *
+ * Object has to be reserved & global and local mutex must be locked!
*/
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
struct ttm_mem_reg *mem)
{
+ unsigned ridx = rdev->asic->vm.pt_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ridx];
+ struct radeon_semaphore *sem = NULL;
struct radeon_bo_va *bo_va;
- unsigned ngpu_pages, i;
- uint64_t addr = 0, pfn;
- uint32_t flags;
+ unsigned nptes, npdes, ndw;
+ uint64_t pe, addr;
+ uint64_t pfn;
+ int r;
/* nothing to do if vm isn't bound */
- if (vm->id == -1)
+ if (vm->sa_bo == NULL)
return 0;
- bo_va = radeon_bo_va(bo, vm);
+ bo_va = radeon_vm_bo_find(vm, bo);
if (bo_va == NULL) {
dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
return -EINVAL;
}
- if (bo_va->valid && mem)
+ if (!bo_va->soffset) {
+ dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
+ bo, vm);
+ return -EINVAL;
+ }
+
+ if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
return 0;
- ngpu_pages = radeon_bo_ngpu_pages(bo);
bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
if (mem) {
+ addr = mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_SYSTEM) {
bo_va->flags |= RADEON_VM_PAGE_VALID;
bo_va->valid = true;
}
if (mem->mem_type == TTM_PL_TT) {
bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+ } else {
+ addr += rdev->vm_manager.vram_base_offset;
}
+ } else {
+ addr = 0;
+ bo_va->valid = false;
}
- pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
- flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags);
- for (i = 0, addr = 0; i < ngpu_pages; i++) {
- if (mem && bo_va->valid) {
- addr = radeon_vm_get_addr(rdev, mem, i);
+
+ if (vm->fence && radeon_fence_signaled(vm->fence)) {
+ radeon_fence_unref(&vm->fence);
+ }
+
+ if (vm->fence && vm->fence->ring != ridx) {
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ return r;
}
- rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
}
- rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm);
+
+ /* estimate number of dw needed */
+ /* reserve space for 32-bit padding */
+ ndw = 32;
+
+ nptes = radeon_bo_ngpu_pages(bo);
+
+ pfn = (bo_va->soffset / RADEON_GPU_PAGE_SIZE);
+
+ /* handle cases where a bo spans several pdes */
+ npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) -
+ (pfn & ~(RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE;
+
+ /* reserve space for one header for every 2k dwords */
+ ndw += (nptes >> 11) * 3;
+ /* reserve space for pte addresses */
+ ndw += nptes * 2;
+
+ /* reserve space for one header for every 2k dwords */
+ ndw += (npdes >> 11) * 3;
+ /* reserve space for pde addresses */
+ ndw += npdes * 2;
+
+ r = radeon_ring_lock(rdev, ring, ndw);
+ if (r) {
+ return r;
+ }
+
+ if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
+ radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
+ radeon_fence_note_sync(vm->fence, ridx);
+ }
+
+ /* update page table entries */
+ pe = vm->pd_gpu_addr;
+ pe += radeon_vm_directory_size(rdev);
+ pe += (bo_va->soffset / RADEON_GPU_PAGE_SIZE) * 8;
+
+ radeon_asic_vm_set_page(rdev, pe, addr, nptes,
+ RADEON_GPU_PAGE_SIZE, bo_va->flags);
+
+ /* update page directory entries */
+ addr = pe;
+
+ pe = vm->pd_gpu_addr;
+ pe += ((bo_va->soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE) * 8;
+
+ radeon_asic_vm_set_page(rdev, pe, addr, npdes,
+ RADEON_VM_PTE_COUNT * 8, RADEON_VM_PAGE_VALID);
+
+ radeon_fence_unref(&vm->fence);
+ r = radeon_fence_emit(rdev, &vm->fence, ridx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, vm->fence);
+ radeon_fence_unref(&vm->last_flush);
return 0;
}
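
To make the dword-budget estimate in this function concrete, a hedged example with assumed constants (4 KiB GPU pages, RADEON_VM_PTE_COUNT = 512, RADEON_VM_BLOCK_SIZE = 9) for a 1 MiB bo mapped at soffset 0:

    unsigned nptes = (1 << 20) / 4096;                /* 256 PTEs */
    unsigned npdes = 1;                               /* all fit one PDE */
    unsigned ndw   = 32                               /* padding reserve */
                   + (nptes >> 11) * 3 + nptes * 2    /* 0 + 512 */
                   + (npdes >> 11) * 3 + npdes * 2;   /* 0 + 2 => 546 */
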
-/* object have to be reserved */
/**
* radeon_vm_bo_rmv - remove a bo from a specific vm
*
* @rdev: radeon_device pointer
- * @vm: requested vm
- * @bo: radeon buffer object
+ * @bo_va: requested bo_va
*
- * Remove @bo from the requested vm (cayman+).
- * Remove @bo from the list of bos associated with the vm and
- * remove the ptes for @bo in the page table.
+ * Remove @bo_va->bo from the requested vm (cayman+).
+ * Remove @bo_va->bo from the list of bos associated with @bo_va->vm and
+ * remove the ptes for @bo_va in the page table.
* Returns 0 for success.
+ *
+ * Object has to be reserved!
*/
int radeon_vm_bo_rmv(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo)
+ struct radeon_bo_va *bo_va)
{
- struct radeon_bo_va *bo_va;
int r;
- bo_va = radeon_bo_va(bo, vm);
- if (bo_va == NULL)
- return 0;
-
- /* wait for va use to end */
- while (bo_va->fence) {
- r = radeon_fence_wait(bo_va->fence, false);
- if (r) {
- DRM_ERROR("error while waiting for fence: %d\n", r);
- }
- if (r == -EDEADLK) {
- r = radeon_gpu_reset(rdev);
- if (!r)
- continue;
- }
- break;
- }
- radeon_fence_unref(&bo_va->fence);
-
mutex_lock(&rdev->vm_manager.lock);
- mutex_lock(&vm->mutex);
- radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
+ mutex_lock(&bo_va->vm->mutex);
+ r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
mutex_unlock(&rdev->vm_manager.lock);
list_del(&bo_va->vm_list);
- mutex_unlock(&vm->mutex);
+ mutex_unlock(&bo_va->vm->mutex);
list_del(&bo_va->bo_list);
kfree(bo_va);
- return 0;
+ return r;
}
/**
@@ -925,27 +1063,23 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
*/
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
+ struct radeon_bo_va *bo_va;
int r;
- vm->id = -1;
+ vm->id = 0;
vm->fence = NULL;
+ vm->last_pfn = 0;
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->list);
INIT_LIST_HEAD(&vm->va);
- /* SI requires equal sized PTs for all VMs, so always set
- * last_pfn to max_pfn. cayman allows variable sized
- * pts so we can grow then as needed. Once we switch
- * to two level pts we can unify this again.
- */
- if (rdev->family >= CHIP_TAHITI)
- vm->last_pfn = rdev->vm_manager.max_pfn;
- else
- vm->last_pfn = 0;
+
/* map the ib pool buffer at 0 in virtual address space, set
* read only
*/
- r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0,
- RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
+ bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo);
+ r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+ RADEON_VM_PAGE_READABLE |
+ RADEON_VM_PAGE_SNOOPED);
return r;
}
@@ -965,7 +1099,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
- radeon_vm_unbind_locked(rdev, vm);
+ radeon_vm_free_pt(rdev, vm);
mutex_unlock(&rdev->vm_manager.lock);
/* remove all bo at this point, none are busy anymore because unbind
@@ -973,10 +1107,9 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
*/
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
- bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
+ bo_va = radeon_vm_bo_find(vm, rdev->ring_tmp_bo.bo);
list_del_init(&bo_va->bo_list);
list_del_init(&bo_va->vm_list);
- radeon_fence_unref(&bo_va->fence);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
kfree(bo_va);
}
@@ -988,10 +1121,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
r = radeon_bo_reserve(bo_va->bo, false);
if (!r) {
list_del_init(&bo_va->bo_list);
- radeon_fence_unref(&bo_va->fence);
radeon_bo_unreserve(bo_va->bo);
kfree(bo_va);
}
}
+ radeon_fence_unref(&vm->fence);
+ radeon_fence_unref(&vm->last_flush);
mutex_unlock(&vm->mutex);
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 1b57b0058ad6..6579befa5101 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -124,6 +124,30 @@ void radeon_gem_fini(struct radeon_device *rdev)
*/
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
+ struct radeon_bo *rbo = gem_to_radeon_bo(obj);
+ struct radeon_device *rdev = rbo->rdev;
+ struct radeon_fpriv *fpriv = file_priv->driver_priv;
+ struct radeon_vm *vm = &fpriv->vm;
+ struct radeon_bo_va *bo_va;
+ int r;
+
+ if (rdev->family < CHIP_CAYMAN) {
+ return 0;
+ }
+
+ r = radeon_bo_reserve(rbo, false);
+ if (r) {
+ return r;
+ }
+
+ bo_va = radeon_vm_bo_find(vm, rbo);
+ if (!bo_va) {
+ bo_va = radeon_vm_bo_add(rdev, vm, rbo);
+ } else {
+ ++bo_va->ref_count;
+ }
+ radeon_bo_unreserve(rbo);
+
return 0;
}
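
With this hunk, all opens of a GEM object through the same file share a single bo_va per (vm, bo) pair; condensed, the lifecycle the open/close pair now implements is:

    /* open:  bo_va = radeon_vm_bo_find(vm, rbo);
     *        bo_va ? ++bo_va->ref_count : radeon_vm_bo_add(rdev, vm, rbo);
     * close: if (--bo_va->ref_count == 0)
     *            radeon_vm_bo_rmv(rdev, bo_va);
     */
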
@@ -134,16 +158,25 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
struct radeon_device *rdev = rbo->rdev;
struct radeon_fpriv *fpriv = file_priv->driver_priv;
struct radeon_vm *vm = &fpriv->vm;
+ struct radeon_bo_va *bo_va;
+ int r;
if (rdev->family < CHIP_CAYMAN) {
return;
}
- if (radeon_bo_reserve(rbo, false)) {
- dev_err(rdev->dev, "leaking bo va because we fail to reserve bo\n");
+ r = radeon_bo_reserve(rbo, true);
+ if (r) {
+ dev_err(rdev->dev, "leaking bo va because "
+ "we fail to reserve bo (%d)\n", r);
return;
}
- radeon_vm_bo_rmv(rdev, vm, rbo);
+ bo_va = radeon_vm_bo_find(vm, rbo);
+ if (bo_va) {
+ if (--bo_va->ref_count == 0) {
+ radeon_vm_bo_rmv(rdev, bo_va);
+ }
+ }
radeon_bo_unreserve(rbo);
}
@@ -459,19 +492,24 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
drm_gem_object_unreference_unlocked(gobj);
return r;
}
+ bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
+ if (!bo_va) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ drm_gem_object_unreference_unlocked(gobj);
+ return -ENOENT;
+ }
+
switch (args->operation) {
case RADEON_VA_MAP:
- bo_va = radeon_bo_va(rbo, &fpriv->vm);
- if (bo_va) {
+ if (bo_va->soffset) {
args->operation = RADEON_VA_RESULT_VA_EXIST;
args->offset = bo_va->soffset;
goto out;
}
- r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
- args->offset, args->flags);
+ r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
break;
case RADEON_VA_UNMAP:
- r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
+ r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
break;
default:
break;
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index 48b7cea31e08..c4bb2269be10 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -369,7 +369,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
#define compat_radeon_cp_setparam NULL
#endif /* X86_64 || IA64 */
-drm_ioctl_compat_t *radeon_compat_ioctls[] = {
+static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
[DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
[DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
[DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index afaa1727abd2..b06fa5936100 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -99,7 +99,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
atomic_set(&rdev->irq.ring_int[i], 0);
- rdev->irq.gui_idle = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
@@ -147,7 +146,6 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
atomic_set(&rdev->irq.ring_int[i], 0);
- rdev->irq.gui_idle = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
@@ -204,6 +202,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
(rdev->pdev->subsystem_device == 0x01fd))
return true;
+ /* Gateway RS690 only seems to work with MSIs. */
+ if ((rdev->pdev->device == 0x791f) &&
+ (rdev->pdev->subsystem_vendor == 0x107b) &&
+ (rdev->pdev->subsystem_device == 0x0185))
+ return true;
+
+ /* try and enable MSIs by default on all RS690s */
+ if (rdev->family == CHIP_RS690)
+ return true;
+
/* RV515 seems to have MSI issues where it loses
* MSI rearms occasionally. This leads to lockups and freezes.
* disable it by default.
@@ -457,34 +465,3 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
-/**
- * radeon_irq_kms_wait_gui_idle - waits for drawing engine to be idle
- *
- * @rdev: radeon device pointer
- *
- * Enabled the GUI idle interrupt and waits for it to fire (r6xx+).
- * This is currently used to make sure the 3D engine is idle for power
- * management, but should be replaces with proper fence waits.
- * GUI idle interrupts don't work very well on pre-r6xx hw and it also
- * does not take into account other aspects of the chip that may be busy.
- * DO NOT USE GOING FORWARD.
- */
-int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev)
-{
- unsigned long irqflags;
- int r;
-
- spin_lock_irqsave(&rdev->irq.lock, irqflags);
- rdev->irq.gui_idle = true;
- radeon_irq_set(rdev);
- spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
-
- r = wait_event_timeout(rdev->irq.idle_queue, radeon_gui_idle(rdev),
- msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
-
- spin_lock_irqsave(&rdev->irq.lock, irqflags);
- rdev->irq.gui_idle = false;
- radeon_irq_set(rdev);
- spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
- return r;
-}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 414b4acf6947..cb8e94d1a2b2 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -51,6 +51,7 @@ int radeon_driver_unload_kms(struct drm_device *dev)
if (rdev == NULL)
return 0;
+ radeon_acpi_fini(rdev);
radeon_modeset_fini(rdev);
radeon_device_fini(rdev);
kfree(rdev);
@@ -103,11 +104,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
goto out;
}
- /* Call ACPI methods */
- acpi_status = radeon_acpi_init(rdev);
- if (acpi_status)
- dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
-
/* Again modeset_init should fail only on fatal error
* otherwise it should provide enough functionalities
* for shadowfb to run
@@ -115,6 +111,17 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
r = radeon_modeset_init(rdev);
if (r)
dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+
+ /* Call ACPI methods: requires modeset init
+ * but failure is not fatal
+ */
+ if (!r) {
+ acpi_status = radeon_acpi_init(rdev);
+ if (acpi_status)
+ dev_dbg(&dev->pdev->dev,
+ "Error during ACPI methods call\n");
+ }
+
out:
if (r)
radeon_driver_unload_kms(dev);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 94b4a1c12893..5677a424b585 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -206,11 +206,6 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
}
-void radeon_restore_common_regs(struct drm_device *dev)
-{
- /* don't need this yet */
-}
-
static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -295,7 +290,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
return 1;
}
-void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 670e9910f869..8ad9c5f16014 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -271,13 +271,6 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-#define MAX_RADEON_LEVEL 0xFF
-
-struct radeon_backlight_privdata {
- struct radeon_encoder *encoder;
- uint8_t negative;
-};
-
static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
{
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
@@ -286,21 +279,33 @@ static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
/* Convert brightness to hardware level */
if (bd->props.brightness < 0)
level = 0;
- else if (bd->props.brightness > MAX_RADEON_LEVEL)
- level = MAX_RADEON_LEVEL;
+ else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
+ level = RADEON_MAX_BL_LEVEL;
else
level = bd->props.brightness;
if (pdata->negative)
- level = MAX_RADEON_LEVEL - level;
+ level = RADEON_MAX_BL_LEVEL - level;
return level;
}
-static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
+u8
+radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ u8 backlight_level;
+
+ backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+ RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+ return backlight_level;
+}
+
+void
+radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
{
- struct radeon_backlight_privdata *pdata = bl_get_data(bd);
- struct radeon_encoder *radeon_encoder = pdata->encoder;
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
int dpms_mode = DRM_MODE_DPMS_ON;
@@ -308,19 +313,31 @@ static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
if (radeon_encoder->enc_priv) {
if (rdev->is_atom_bios) {
struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
- dpms_mode = lvds->dpms_mode;
- lvds->backlight_level = radeon_legacy_lvds_level(bd);
+ if (lvds->backlight_level > 0)
+ dpms_mode = lvds->dpms_mode;
+ else
+ dpms_mode = DRM_MODE_DPMS_OFF;
+ lvds->backlight_level = level;
} else {
struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
- dpms_mode = lvds->dpms_mode;
- lvds->backlight_level = radeon_legacy_lvds_level(bd);
+ if (lvds->backlight_level > 0)
+ dpms_mode = lvds->dpms_mode;
+ else
+ dpms_mode = DRM_MODE_DPMS_OFF;
+ lvds->backlight_level = level;
}
}
- if (bd->props.brightness > 0)
- radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
- else
- radeon_legacy_lvds_update(&radeon_encoder->base, DRM_MODE_DPMS_OFF);
+ radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
+}
+
+static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
+{
+ struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+ struct radeon_encoder *radeon_encoder = pdata->encoder;
+
+ radeon_legacy_set_backlight_level(radeon_encoder,
+ radeon_legacy_lvds_level(bd));
return 0;
}
@@ -336,7 +353,7 @@ static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd)
backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
- return pdata->negative ? MAX_RADEON_LEVEL - backlight_level : backlight_level;
+ return pdata->negative ? RADEON_MAX_BL_LEVEL - backlight_level : backlight_level;
}
static const struct backlight_ops radeon_backlight_ops = {
@@ -370,7 +387,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
}
memset(&props, 0, sizeof(props));
- props.max_brightness = MAX_RADEON_LEVEL;
+ props.max_brightness = RADEON_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
pdata, &radeon_backlight_ops, &props);
@@ -449,7 +466,7 @@ static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder)
}
if (bd) {
- struct radeon_legacy_backlight_privdata *pdata;
+ struct radeon_backlight_privdata *pdata;
pdata = bl_get_data(bd);
backlight_device_unregister(bd);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index d56978949f34..527761801590 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -252,8 +252,23 @@ struct radeon_mode_info {
/* pointer to fbdev info structure */
struct radeon_fbdev *rfbdev;
+ /* firmware flags */
+ u16 firmware_flags;
+ /* pointer to backlight encoder */
+ struct radeon_encoder *bl_encoder;
};
+#define RADEON_MAX_BL_LEVEL 0xFF
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+struct radeon_backlight_privdata {
+ struct radeon_encoder *encoder;
+ uint8_t negative;
+};
+
+#endif
+
#define MAX_H_CODE_TIMING_LEN 32
#define MAX_V_CODE_TIMING_LEN 32
@@ -269,6 +284,18 @@ struct radeon_tv_regs {
uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN];
};
+struct radeon_atom_ss {
+ uint16_t percentage;
+ uint8_t type;
+ uint16_t step;
+ uint8_t delay;
+ uint8_t range;
+ uint8_t refdiv;
+ /* asic_ss */
+ uint16_t rate;
+ uint16_t amount;
+};
+
struct radeon_crtc {
struct drm_crtc base;
int crtc_id;
@@ -293,6 +320,16 @@ struct radeon_crtc {
/* page flipping */
struct radeon_unpin_work *unpin_work;
int deferred_flip_completion;
+ /* pll sharing */
+ struct radeon_atom_ss ss;
+ bool ss_enabled;
+ u32 adjusted_clock;
+ int bpc;
+ u32 pll_reference_div;
+ u32 pll_post_div;
+ u32 pll_flags;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
};
struct radeon_encoder_primary_dac {
@@ -346,18 +383,6 @@ struct radeon_encoder_ext_tmds {
};
/* spread spectrum */
-struct radeon_atom_ss {
- uint16_t percentage;
- uint8_t type;
- uint16_t step;
- uint8_t delay;
- uint8_t range;
- uint8_t refdiv;
- /* asic_ss */
- uint16_t rate;
- uint16_t amount;
-};
-
struct radeon_encoder_atom_dig {
bool linkb;
/* atom dig */
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 9024e7222839..a236795dc69a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -52,7 +52,7 @@ void radeon_bo_clear_va(struct radeon_bo *bo)
list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
/* remove from all vm address space */
- radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo);
+ radeon_vm_bo_rmv(bo->rdev, bo_va);
}
}
@@ -627,18 +627,17 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
/**
* radeon_bo_reserve - reserve bo
* @bo: bo structure
- * @no_wait: don't sleep while trying to reserve (return -EBUSY)
+ * @no_intr: don't return -ERESTARTSYS on pending signal
*
* Returns:
- * -EBUSY: buffer is busy and @no_wait is true
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
*/
-int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
+int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
{
int r;
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
@@ -646,16 +645,3 @@ int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
}
return 0;
}
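
Under the new semantics the caller chooses whether a pending signal may abort the reserve; two hedged example calls:

    r = radeon_bo_reserve(bo, false);  /* interruptible: may return
                                        * -ERESTARTSYS on a signal */
    r = radeon_bo_reserve(bo, true);   /* uninterruptible: e.g. teardown
                                        * paths such as the object_close
                                        * hunk above, which passes true */
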
-
-/* object have to be reserved */
-struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
-{
- struct radeon_bo_va *bo_va;
-
- list_for_each_entry(bo_va, &rbo->va, bo_list) {
- if (bo_va->vm == vm) {
- return bo_va;
- }
- }
- return NULL;
-}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 17fb99f177cf..93cd491fff2e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -52,7 +52,7 @@ static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
return 0;
}
-int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait);
+int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr);
static inline void radeon_bo_unreserve(struct radeon_bo *bo)
{
@@ -141,8 +141,6 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
-extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
- struct radeon_vm *vm);
/*
* sub allocation
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 7ae606600107..bc2e7050a9d8 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -24,9 +24,6 @@
#include "radeon.h"
#include "avivod.h"
#include "atom.h"
-#ifdef CONFIG_ACPI
-#include <linux/acpi.h>
-#endif
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
@@ -36,7 +33,7 @@
#define RADEON_WAIT_VBLANK_TIMEOUT 200
static const char *radeon_pm_state_type_name[5] = {
- "Default",
+ "",
"Powersave",
"Battery",
"Balanced",
@@ -50,8 +47,6 @@ static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
-#define ACPI_AC_CLASS "ac_adapter"
-
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
int instance)
@@ -70,33 +65,17 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
return rdev->pm.default_power_state_index;
}
-#ifdef CONFIG_ACPI
-static int radeon_acpi_event(struct notifier_block *nb,
- unsigned long val,
- void *data)
+void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
- struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
- struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
-
- if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
- if (power_supply_is_system_supplied() > 0)
- DRM_DEBUG_DRIVER("pm: AC\n");
- else
- DRM_DEBUG_DRIVER("pm: DC\n");
-
- if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
- if (rdev->pm.profile == PM_PROFILE_AUTO) {
- mutex_lock(&rdev->pm.mutex);
- radeon_pm_update_profile(rdev);
- radeon_pm_set_clocks(rdev);
- mutex_unlock(&rdev->pm.mutex);
- }
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ if (rdev->pm.profile == PM_PROFILE_AUTO) {
+ mutex_lock(&rdev->pm.mutex);
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ mutex_unlock(&rdev->pm.mutex);
}
}
-
- return NOTIFY_OK;
}
-#endif
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
@@ -188,8 +167,21 @@ static void radeon_set_power_state(struct radeon_device *rdev)
if (sclk > rdev->pm.default_sclk)
sclk = rdev->pm.default_sclk;
- mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
- clock_info[rdev->pm.requested_clock_mode_index].mclk;
+ /* starting with BTC, there is one state that is used for both
+ * MH and SH. The difference is that we always use the high clock index for
+ * mclk.
+ */
+ if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
+ (rdev->family >= CHIP_BARTS) &&
+ rdev->pm.active_crtc_count &&
+ ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
+ (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
+ mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
+ else
+ mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].mclk;
+
if (mclk > rdev->pm.default_mclk)
mclk = rdev->pm.default_mclk;
@@ -253,18 +245,13 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
down_write(&rdev->pm.mclk_lock);
mutex_lock(&rdev->ring_lock);
- /* gui idle int has issues on older chips it seems */
- if (rdev->family >= CHIP_R600) {
- if (rdev->irq.installed) {
- /* wait for GPU to become idle */
- radeon_irq_kms_wait_gui_idle(rdev);
- }
- } else {
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- if (ring->ready) {
- radeon_fence_wait_empty_locked(rdev, RADEON_RING_TYPE_GFX_INDEX);
- }
+ /* wait for the rings to drain */
+ for (i = 0; i < RADEON_NUM_RINGS; i++) {
+ struct radeon_ring *ring = &rdev->ring[i];
+ if (ring->ready)
+ radeon_fence_wait_empty_locked(rdev, i);
}
+
radeon_unmap_vram_bos(rdev);
if (rdev->irq.installed) {
@@ -320,17 +307,15 @@ static void radeon_pm_print_states(struct radeon_device *rdev)
for (j = 0; j < power_state->num_clock_modes; j++) {
clock_info = &(power_state->clock_info[j]);
if (rdev->flags & RADEON_IS_IGP)
- DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n",
- j,
- clock_info->sclk * 10,
- clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
+ DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
+ j,
+ clock_info->sclk * 10);
else
- DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n",
- j,
- clock_info->sclk * 10,
- clock_info->mclk * 10,
- clock_info->voltage.voltage,
- clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
+ DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
+ j,
+ clock_info->sclk * 10,
+ clock_info->mclk * 10,
+ clock_info->voltage.voltage);
}
}
}
@@ -547,7 +532,9 @@ void radeon_pm_suspend(struct radeon_device *rdev)
void radeon_pm_resume(struct radeon_device *rdev)
{
/* set up the default clocks if the MC ucode is loaded */
- if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+ if ((rdev->family >= CHIP_BARTS) &&
+ (rdev->family <= CHIP_CAYMAN) &&
+ rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
SET_VOLTAGE_TYPE_ASIC_VDDC);
@@ -602,7 +589,9 @@ int radeon_pm_init(struct radeon_device *rdev)
radeon_pm_print_states(rdev);
radeon_pm_init_profile(rdev);
/* set up the default clocks if the MC ucode is loaded */
- if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+ if ((rdev->family >= CHIP_BARTS) &&
+ (rdev->family <= CHIP_CAYMAN) &&
+ rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
SET_VOLTAGE_TYPE_ASIC_VDDC);
@@ -632,10 +621,6 @@ int radeon_pm_init(struct radeon_device *rdev)
if (ret)
DRM_ERROR("failed to create device file for power method\n");
-#ifdef CONFIG_ACPI
- rdev->acpi_nb.notifier_call = radeon_acpi_event;
- register_acpi_notifier(&rdev->acpi_nb);
-#endif
if (radeon_debugfs_pm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for PM!\n");
}
@@ -666,9 +651,6 @@ void radeon_pm_fini(struct radeon_device *rdev)
device_remove_file(rdev->dev, &dev_attr_power_profile);
device_remove_file(rdev->dev, &dev_attr_power_method);
-#ifdef CONFIG_ACPI
- unregister_acpi_notifier(&rdev->acpi_nb);
-#endif
}
if (rdev->pm.power_state)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 43c431a2686d..028508859a3b 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -43,7 +43,7 @@
* produce command buffers which are send to the kernel and
* put in IBs for execution by the requested ring.
*/
-int radeon_debugfs_sa_init(struct radeon_device *rdev);
+static int radeon_debugfs_sa_init(struct radeon_device *rdev);
/**
* radeon_ib_get - request an IB (Indirect Buffer)
@@ -58,7 +58,8 @@ int radeon_debugfs_sa_init(struct radeon_device *rdev);
* Returns 0 on success, error on failure.
*/
int radeon_ib_get(struct radeon_device *rdev, int ring,
- struct radeon_ib *ib, unsigned size)
+ struct radeon_ib *ib, struct radeon_vm *vm,
+ unsigned size)
{
int i, r;
@@ -76,8 +77,15 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
ib->ring = ring;
ib->fence = NULL;
ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
- ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
- ib->vm_id = 0;
+ ib->vm = vm;
+ if (vm) {
+ /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
+ * space and soffset is the offset inside the pool bo
+ */
+ ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
+ } else {
+ ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+ }
ib->is_const_ib = false;
for (i = 0; i < RADEON_NUM_RINGS; ++i)
ib->sync_to[i] = NULL;
@@ -152,6 +160,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
if (!need_sync) {
radeon_semaphore_free(rdev, &ib->semaphore, NULL);
}
+ /* if we can't remember our last VM flush then flush now! */
+ if (ib->vm && !ib->vm->last_flush) {
+ radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
+ }
if (const_ib) {
radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
@@ -166,6 +178,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
if (const_ib) {
const_ib->fence = radeon_fence_ref(ib->fence);
}
+ /* we just flushed the VM, remember that */
+ if (ib->vm && !ib->vm->last_flush) {
+ ib->vm->last_flush = radeon_fence_ref(ib->fence);
+ }
radeon_ring_unlock_commit(rdev, ring);
return 0;
}
@@ -275,7 +291,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
* wptr. The GPU then starts fetching commands and executes
* them until the pointers are equal again.
*/
-int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
/**
* radeon_ring_write - write a value to the ring
@@ -803,7 +819,7 @@ static struct drm_info_list radeon_debugfs_sa_list[] = {
#endif
-int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
+static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
unsigned i;
@@ -823,7 +839,7 @@ int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *rin
return 0;
}
-int radeon_debugfs_sa_init(struct radeon_device *rdev)
+static int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 4e771240fdd0..105fde69d045 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -316,7 +316,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
{
struct radeon_fence *fences[RADEON_NUM_RINGS];
unsigned tries[RADEON_NUM_RINGS];
- int i, r = -ENOMEM;
+ int i, r;
BUG_ON(align > RADEON_GPU_PAGE_SIZE);
BUG_ON(size > sa_manager->size);
@@ -331,7 +331,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
INIT_LIST_HEAD(&(*sa_bo)->flist);
spin_lock(&sa_manager->wq.lock);
- while(1) {
+ do {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
fences[i] = NULL;
tries[i] = 0;
@@ -349,26 +349,22 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
/* see if we can skip over some allocations */
} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
- if (!block) {
- break;
- }
-
spin_unlock(&sa_manager->wq.lock);
r = radeon_fence_wait_any(rdev, fences, false);
spin_lock(&sa_manager->wq.lock);
/* if we have nothing to wait for block */
- if (r == -ENOENT) {
+ if (r == -ENOENT && block) {
r = wait_event_interruptible_locked(
sa_manager->wq,
radeon_sa_event(sa_manager, size, align)
);
+
+ } else if (r == -ENOENT) {
+ r = -ENOMEM;
}
- if (r) {
- goto out_err;
- }
- };
-out_err:
+ } while (!r);
+
spin_unlock(&sa_manager->wq.lock);
kfree(*sa_bo);
*sa_bo = NULL;
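
Condensed, the control flow of the rewritten allocation loop is (a sketch of the code above, not new behaviour):

    /* do {
     *     scan the holes; on success, return 0;
     *     wait for any fence protecting reclaimable space;
     *     if there was nothing to wait for (-ENOENT):
     *         block ? sleep until radeon_sa_event() : r = -ENOMEM;
     * } while (!r);
     */
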
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 7c16540c10ff..587c09a00ba2 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -313,7 +313,7 @@ out_cleanup:
printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}
-void radeon_test_ring_sync2(struct radeon_device *rdev,
+static void radeon_test_ring_sync2(struct radeon_device *rdev,
struct radeon_ring *ringA,
struct radeon_ring *ringB,
struct radeon_ring *ringC)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5b71c716d83f..5ebe1b3e5db2 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -549,7 +549,7 @@ static struct ttm_backend_func radeon_backend_func = {
.destroy = &radeon_ttm_backend_destroy,
};
-struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 2752f7f78237..73051ce3121e 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -242,7 +242,7 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void rs400_gpu_init(struct radeon_device *rdev)
+static void rs400_gpu_init(struct radeon_device *rdev)
{
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
@@ -252,7 +252,7 @@ void rs400_gpu_init(struct radeon_device *rdev)
}
}
-void rs400_mc_init(struct radeon_device *rdev)
+static void rs400_mc_init(struct radeon_device *rdev)
{
u64 base;
@@ -370,7 +370,7 @@ static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
#endif
}
-void rs400_mc_program(struct radeon_device *rdev)
+static void rs400_mc_program(struct radeon_device *rdev)
{
struct r100_mc_save save;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5301b3df8466..dc8d021a999b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -43,22 +43,30 @@
#include "rs600_reg_safe.h"
-void rs600_gpu_init(struct radeon_device *rdev);
+static void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+static const u32 crtc_offsets[2] =
+{
+ 0,
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
int i;
- if (RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset) & AVIVO_CRTC_EN) {
+ if (crtc >= rdev->num_crtc)
+ return;
+
+ if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) {
for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK))
+ if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK))
break;
udelay(1);
}
for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK)
+ if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
break;
udelay(1);
}
@@ -424,7 +432,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}
-int rs600_gart_init(struct radeon_device *rdev)
+static int rs600_gart_init(struct radeon_device *rdev)
{
int r;
@@ -506,7 +514,7 @@ static int rs600_gart_enable(struct radeon_device *rdev)
return 0;
}
-void rs600_gart_disable(struct radeon_device *rdev)
+static void rs600_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
@@ -517,7 +525,7 @@ void rs600_gart_disable(struct radeon_device *rdev)
radeon_gart_table_vram_unpin(rdev);
}
-void rs600_gart_fini(struct radeon_device *rdev)
+static void rs600_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
rs600_gart_disable(rdev);
@@ -567,9 +575,6 @@ int rs600_irq_set(struct radeon_device *rdev)
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
tmp |= S_000040_SW_INT_EN(1);
}
- if (rdev->irq.gui_idle) {
- tmp |= S_000040_GUI_IDLE(1);
- }
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
@@ -602,12 +607,6 @@ static inline u32 rs600_irq_ack(struct radeon_device *rdev)
uint32_t irq_mask = S_000044_SW_INT(1);
u32 tmp;
- /* the interrupt works, but the status bit is permanently asserted */
- if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
- if (!rdev->irq.gui_idle_acked)
- irq_mask |= S_000044_GUI_IDLE_STAT(1);
- }
-
if (G_000044_DISPLAY_INT_STAT(irqs)) {
rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
@@ -667,9 +666,6 @@ int rs600_irq_process(struct radeon_device *rdev)
bool queue_hotplug = false;
bool queue_hdmi = false;
- /* reset gui idle ack. the status bit is broken */
- rdev->irq.gui_idle_acked = false;
-
status = rs600_irq_ack(rdev);
if (!status &&
!rdev->irq.stat_regs.r500.disp_int &&
@@ -683,11 +679,6 @@ int rs600_irq_process(struct radeon_device *rdev)
if (G_000044_SW_INT(status)) {
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
- /* GUI idle */
- if (G_000040_GUI_IDLE(status)) {
- rdev->irq.gui_idle_acked = true;
- wake_up(&rdev->irq.idle_queue);
- }
/* Vertical blank interrupts */
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
if (rdev->irq.crtc_vblank_int[0]) {
@@ -721,8 +712,6 @@ int rs600_irq_process(struct radeon_device *rdev)
}
status = rs600_irq_ack(rdev);
}
- /* reset gui idle ack. the status bit is broken */
- rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (queue_hdmi)
@@ -764,7 +753,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void rs600_gpu_init(struct radeon_device *rdev)
+static void rs600_gpu_init(struct radeon_device *rdev)
{
r420_pipes_init(rdev);
/* Wait for mc idle */
@@ -772,7 +761,7 @@ void rs600_gpu_init(struct radeon_device *rdev)
dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}
-void rs600_mc_init(struct radeon_device *rdev)
+static void rs600_mc_init(struct radeon_device *rdev)
{
u64 base;
@@ -834,7 +823,7 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
WREG32(R_000074_MC_IND_DATA, v);
}
-void rs600_debugfs(struct radeon_device *rdev)
+static void rs600_debugfs(struct radeon_device *rdev)
{
if (r100_debugfs_rbbm_init(rdev))
DRM_ERROR("Failed to register debugfs file for RBBM !\n");
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3b663fcfe061..5cd5aceb69fa 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -145,7 +145,7 @@ void rs690_pm_info(struct radeon_device *rdev)
rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
}
-void rs690_mc_init(struct radeon_device *rdev)
+static void rs690_mc_init(struct radeon_device *rdev)
{
u64 base;
@@ -224,7 +224,7 @@ struct rs690_watermark {
fixed20_12 sclk;
};
-void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
+static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
struct radeon_crtc *crtc,
struct rs690_watermark *wm)
{
@@ -581,7 +581,7 @@ void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
WREG32(R_000078_MC_INDEX, 0x7F);
}
-void rs690_mc_program(struct radeon_device *rdev)
+static void rs690_mc_program(struct radeon_device *rdev)
{
struct rv515_mc_save save;
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index aa8ef491ef3c..2d75d30be5b4 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -35,9 +35,9 @@
#include "rv515_reg_safe.h"
/* This file gathers functions specific to rv515 */
-int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
-int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
-void rv515_gpu_init(struct radeon_device *rdev);
+static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
+static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
+static void rv515_gpu_init(struct radeon_device *rdev);
int rv515_mc_wait_for_idle(struct radeon_device *rdev);
void rv515_debugfs(struct radeon_device *rdev)
@@ -143,7 +143,7 @@ void rv515_vga_render_disable(struct radeon_device *rdev)
RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
}
-void rv515_gpu_init(struct radeon_device *rdev)
+static void rv515_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
@@ -189,7 +189,7 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
}
}
-void rv515_mc_init(struct radeon_device *rdev)
+static void rv515_mc_init(struct radeon_device *rdev)
{
rv515_vram_get_type(rdev);
@@ -261,7 +261,7 @@ static struct drm_info_list rv515_ga_info_list[] = {
};
#endif
-int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
+static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
@@ -270,7 +270,7 @@ int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
#endif
}
-int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
+static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
@@ -310,7 +310,7 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
}
-void rv515_mc_program(struct radeon_device *rdev)
+static void rv515_mc_program(struct radeon_device *rdev)
{
struct rv515_mc_save save;
@@ -787,7 +787,7 @@ struct rv515_watermark {
fixed20_12 sclk;
};
-void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
+static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
struct radeon_crtc *crtc,
struct rv515_watermark *wm)
{
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index ca8ffec10ff6..2469afe11b85 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -124,7 +124,7 @@ void rv770_pm_misc(struct radeon_device *rdev)
/*
* GART
*/
-int rv770_pcie_gart_enable(struct radeon_device *rdev)
+static int rv770_pcie_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r, i;
@@ -175,7 +175,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
return 0;
}
-void rv770_pcie_gart_disable(struct radeon_device *rdev)
+static void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
int i;
@@ -201,7 +201,7 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
radeon_gart_table_vram_unpin(rdev);
}
-void rv770_pcie_gart_fini(struct radeon_device *rdev)
+static void rv770_pcie_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
rv770_pcie_gart_disable(rdev);
@@ -209,7 +209,7 @@ void rv770_pcie_gart_fini(struct radeon_device *rdev)
}
-void rv770_agp_enable(struct radeon_device *rdev)
+static void rv770_agp_enable(struct radeon_device *rdev)
{
u32 tmp;
int i;
@@ -839,7 +839,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
}
}
-int rv770_mc_init(struct radeon_device *rdev)
+static int rv770_mc_init(struct radeon_device *rdev)
{
u32 tmp;
int chansize, numchan;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 0139e227e3c7..c76825ffa37f 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1806,13 +1806,14 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
- radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
+ radeon_ring_write(ring, ib->length_dw |
+ (ib->vm ? (ib->vm->id << 24) : 0));
if (!ib->is_const_ib) {
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, ib->vm_id);
+ radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
PACKET3_TC_ACTION_ENA |
@@ -2363,7 +2364,7 @@ void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
WREG32(VM_INVALIDATE_REQUEST, 1);
}
-int si_pcie_gart_enable(struct radeon_device *rdev)
+static int si_pcie_gart_enable(struct radeon_device *rdev)
{
int r, i;
@@ -2425,7 +2426,7 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 0);
- WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
si_pcie_gart_tlb_flush(rdev);
@@ -2436,7 +2437,7 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
return 0;
}
-void si_pcie_gart_disable(struct radeon_device *rdev)
+static void si_pcie_gart_disable(struct radeon_device *rdev)
{
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
@@ -2455,7 +2456,7 @@ void si_pcie_gart_disable(struct radeon_device *rdev)
radeon_gart_table_vram_unpin(rdev);
}
-void si_pcie_gart_fini(struct radeon_device *rdev)
+static void si_pcie_gart_fini(struct radeon_device *rdev)
{
si_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
@@ -2788,41 +2789,84 @@ void si_vm_fini(struct radeon_device *rdev)
{
}
-int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
+/**
+ * si_vm_set_page - update the page tables using the CP
+ *
+ * @rdev: radeon_device pointer
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the CP (cayman-si).
+ */
+void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
{
- if (id < 8)
- WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
- else
- WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((id - 8) << 2),
- vm->pt_gpu_addr >> 12);
- /* flush hdp cache */
- WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
- /* bits 0-15 are the VM contexts0-15 */
- WREG32(VM_INVALIDATE_REQUEST, 1 << id);
- return 0;
+ struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
+ uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+ int i;
+ uint64_t value;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 2 + count * 2));
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(1)));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe));
+ for (i = 0; i < count; ++i) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
}
-void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
+void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
- if (vm->id < 8)
- WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
- else
- WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2), 0);
- /* flush hdp cache */
- WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
- /* bits 0-15 are the VM contexts0-15 */
- WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
-}
+ struct radeon_ring *ring = &rdev->ring[ridx];
-void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
-{
- if (vm->id == -1)
+ if (vm == NULL)
return;
+ /* write new base address */
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0)));
+
+ if (vm->id < 8) {
+ radeon_ring_write(ring,
+ (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+ } else {
+ radeon_ring_write(ring,
+ (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+ }
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
/* flush hdp cache */
- WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0)));
+ radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0x1);
+
/* bits 0-15 are the VM contexts0-15 */
- WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0)));
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 1 << vm->id);
}
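
si_vm_flush now programs the VM registers through the ring rather than by direct MMIO, and each register write repeats the same five-dword WRITE_DATA sequence. A minimal sketch of that pattern as a helper, assuming the packet layout documented in the sid.h hunk further below; the helper name is hypothetical:

static void si_ring_wreg(struct radeon_ring *ring, u32 reg, u32 val)
{
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, WRITE_DATA_ENGINE_SEL(0) |	/* me */
				WRITE_DATA_DST_SEL(0));		/* register */
	radeon_ring_write(ring, reg >> 2);	/* register offset in dwords */
	radeon_ring_write(ring, 0);		/* upper address bits, unused */
	radeon_ring_write(ring, val);
}
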
/*
@@ -3199,10 +3243,6 @@ int si_irq_set(struct radeon_device *rdev)
DRM_DEBUG("si_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
- if (rdev->irq.gui_idle) {
- DRM_DEBUG("gui idle\n");
- grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
- }
WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
@@ -3658,7 +3698,6 @@ restart_ih:
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
- wake_up(&rdev->irq.idle_queue);
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index ef4815c27b1c..7d2a20e56577 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -812,6 +812,21 @@
#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
#define PACKET3_WRITE_DATA 0x37
+#define WRITE_DATA_DST_SEL(x) ((x) << 8)
+ /* 0 - register
+ * 1 - memory (sync - via GRBM)
+ * 2 - tc/l2
+ * 3 - gds
+ * 4 - reserved
+ * 5 - memory (async - direct)
+ */
+#define WR_ONE_ADDR (1 << 16)
+#define WR_CONFIRM (1 << 20)
+#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
+ /* 0 - me
+ * 1 - pfp
+ * 2 - ce
+ */
#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
#define PACKET3_MEM_SEMAPHORE 0x39
#define PACKET3_MPEG_INDEX 0x3A
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index 1efbb9075837..c89aef420971 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -547,6 +547,8 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->chipset = (enum savage_family)chipset;
+ pci_set_master(dev->pdev);
+
return 0;
}
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
new file mode 100644
index 000000000000..7e7d52b2a2fc
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -0,0 +1,10 @@
+config DRM_SHMOBILE
+ tristate "DRM Support for SH Mobile"
+ depends on DRM && (SUPERH || ARCH_SHMOBILE)
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ help
+ Choose this option if you have an SH Mobile chipset.
+ If M is selected the module will be called shmob-drm.
+
diff --git a/drivers/gpu/drm/shmobile/Makefile b/drivers/gpu/drm/shmobile/Makefile
new file mode 100644
index 000000000000..4c3eeb355630
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/Makefile
@@ -0,0 +1,7 @@
+shmob-drm-y := shmob_drm_backlight.o \
+ shmob_drm_crtc.o \
+ shmob_drm_drv.o \
+ shmob_drm_kms.o \
+ shmob_drm_plane.o
+
+obj-$(CONFIG_DRM_SHMOBILE) += shmob-drm.o
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_backlight.c b/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
new file mode 100644
index 000000000000..463aee18f774
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
@@ -0,0 +1,90 @@
+/*
+ * shmob_drm_backlight.c -- SH Mobile DRM Backlight
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/backlight.h>
+
+#include "shmob_drm_backlight.h"
+#include "shmob_drm_crtc.h"
+#include "shmob_drm_drv.h"
+
+static int shmob_drm_backlight_update(struct backlight_device *bdev)
+{
+ struct shmob_drm_connector *scon = bl_get_data(bdev);
+ struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
+ const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
+ int brightness = bdev->props.brightness;
+
+ if (bdev->props.power != FB_BLANK_UNBLANK ||
+ bdev->props.state & BL_CORE_SUSPENDED)
+ brightness = 0;
+
+ return bdata->set_brightness(brightness);
+}
+
+static int shmob_drm_backlight_get_brightness(struct backlight_device *bdev)
+{
+ struct shmob_drm_connector *scon = bl_get_data(bdev);
+ struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
+ const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
+
+ return bdata->get_brightness();
+}
+
+static const struct backlight_ops shmob_drm_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = shmob_drm_backlight_update,
+ .get_brightness = shmob_drm_backlight_get_brightness,
+};
+
+void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode)
+{
+ if (scon->backlight == NULL)
+ return;
+
+ scon->backlight->props.power = mode == DRM_MODE_DPMS_ON
+ ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+ backlight_update_status(scon->backlight);
+}
+
+int shmob_drm_backlight_init(struct shmob_drm_connector *scon)
+{
+ struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
+ const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
+ struct drm_connector *connector = &scon->connector;
+ struct drm_device *dev = connector->dev;
+ struct backlight_device *backlight;
+
+ if (!bdata->max_brightness)
+ return 0;
+
+ backlight = backlight_device_register(bdata->name, dev->dev, scon,
+ &shmob_drm_backlight_ops, NULL);
+ if (IS_ERR(backlight)) {
+ dev_err(dev->dev, "unable to register backlight device: %ld\n",
+ PTR_ERR(backlight));
+ return PTR_ERR(backlight);
+ }
+
+ backlight->props.max_brightness = bdata->max_brightness;
+ backlight->props.brightness = bdata->max_brightness;
+ backlight->props.power = FB_BLANK_POWERDOWN;
+ backlight_update_status(backlight);
+
+ scon->backlight = backlight;
+ return 0;
+}
+
+void shmob_drm_backlight_exit(struct shmob_drm_connector *scon)
+{
+ backlight_device_unregister(scon->backlight);
+}
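
The backlight glue above drives the panel exclusively through callbacks supplied in platform data. A hedged sketch of the board-side wiring it assumes; the struct layout is inferred from the calls above, and the field names may differ from the actual platform data header:

/* Hypothetical board file fragment. */
static int board_bl_level;

static int board_set_brightness(int brightness)
{
	board_bl_level = brightness;
	/* drive the PWM or GPIO feeding the panel here */
	return 0;
}

static int board_get_brightness(void)
{
	return board_bl_level;
}

static const struct shmob_drm_backlight_data board_backlight = {
	.name		= "board-panel",
	.max_brightness	= 255,
	.set_brightness	= board_set_brightness,
	.get_brightness	= board_get_brightness,
};
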
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_backlight.h b/drivers/gpu/drm/shmobile/shmob_drm_backlight.h
new file mode 100644
index 000000000000..9477595d2ff3
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_backlight.h
@@ -0,0 +1,23 @@
+/*
+ * shmob_drm_backlight.h -- SH Mobile DRM Backlight
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_BACKLIGHT_H__
+#define __SHMOB_DRM_BACKLIGHT_H__
+
+struct shmob_drm_connector;
+
+void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode);
+int shmob_drm_backlight_init(struct shmob_drm_connector *scon);
+void shmob_drm_backlight_exit(struct shmob_drm_connector *scon);
+
+#endif /* __SHMOB_DRM_BACKLIGHT_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
new file mode 100644
index 000000000000..0e7a9306bd0c
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -0,0 +1,763 @@
+/*
+ * shmob_drm_crtc.c -- SH Mobile DRM CRTCs
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/backlight.h>
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <video/sh_mobile_meram.h>
+
+#include "shmob_drm_backlight.h"
+#include "shmob_drm_crtc.h"
+#include "shmob_drm_drv.h"
+#include "shmob_drm_kms.h"
+#include "shmob_drm_plane.h"
+#include "shmob_drm_regs.h"
+
+/*
+ * TODO: panel support
+ */
+
+/* -----------------------------------------------------------------------------
+ * Clock management
+ */
+
+static void shmob_drm_clk_on(struct shmob_drm_device *sdev)
+{
+ if (sdev->clock)
+ clk_enable(sdev->clock);
+#if 0
+ if (sdev->meram_dev && sdev->meram_dev->pdev)
+ pm_runtime_get_sync(&sdev->meram_dev->pdev->dev);
+#endif
+}
+
+static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
+{
+#if 0
+ if (sdev->meram_dev && sdev->meram_dev->pdev)
+ pm_runtime_put_sync(&sdev->meram_dev->pdev->dev);
+#endif
+ if (sdev->clock)
+ clk_disable(sdev->clock);
+}
+
+/* -----------------------------------------------------------------------------
+ * CRTC
+ */
+
+static void shmob_drm_crtc_setup_geometry(struct shmob_drm_crtc *scrtc)
+{
+ struct drm_crtc *crtc = &scrtc->crtc;
+ struct shmob_drm_device *sdev = crtc->dev->dev_private;
+ const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
+ const struct drm_display_mode *mode = &crtc->mode;
+ u32 value;
+
+ value = sdev->ldmt1r
+ | ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : LDMT1R_VPOL)
+ | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : LDMT1R_HPOL)
+ | ((idata->flags & SHMOB_DRM_IFACE_FL_DWPOL) ? LDMT1R_DWPOL : 0)
+ | ((idata->flags & SHMOB_DRM_IFACE_FL_DIPOL) ? LDMT1R_DIPOL : 0)
+ | ((idata->flags & SHMOB_DRM_IFACE_FL_DAPOL) ? LDMT1R_DAPOL : 0)
+ | ((idata->flags & SHMOB_DRM_IFACE_FL_HSCNT) ? LDMT1R_HSCNT : 0)
+ | ((idata->flags & SHMOB_DRM_IFACE_FL_DWCNT) ? LDMT1R_DWCNT : 0);
+ lcdc_write(sdev, LDMT1R, value);
+
+ if (idata->interface >= SHMOB_DRM_IFACE_SYS8A &&
+ idata->interface <= SHMOB_DRM_IFACE_SYS24) {
+ /* Setup SYS bus. */
+ value = (idata->sys.cs_setup << LDMT2R_CSUP_SHIFT)
+ | (idata->sys.vsync_active_high ? LDMT2R_RSV : 0)
+ | (idata->sys.vsync_dir_input ? LDMT2R_VSEL : 0)
+ | (idata->sys.write_setup << LDMT2R_WCSC_SHIFT)
+ | (idata->sys.write_cycle << LDMT2R_WCEC_SHIFT)
+ | (idata->sys.write_strobe << LDMT2R_WCLW_SHIFT);
+ lcdc_write(sdev, LDMT2R, value);
+
+ value = (idata->sys.read_latch << LDMT3R_RDLC_SHIFT)
+ | (idata->sys.read_setup << LDMT3R_RCSC_SHIFT)
+ | (idata->sys.read_cycle << LDMT3R_RCEC_SHIFT)
+ | (idata->sys.read_strobe << LDMT3R_RCLW_SHIFT);
+ lcdc_write(sdev, LDMT3R, value);
+ }
+
+ value = ((mode->hdisplay / 8) << 16) /* HDCN */
+ | (mode->htotal / 8); /* HTCN */
+ lcdc_write(sdev, LDHCNR, value);
+
+ value = (((mode->hsync_end - mode->hsync_start) / 8) << 16) /* HSYNW */
+ | (mode->hsync_start / 8); /* HSYNP */
+ lcdc_write(sdev, LDHSYNR, value);
+
+ value = ((mode->hdisplay & 7) << 24) | ((mode->htotal & 7) << 16)
+ | (((mode->hsync_end - mode->hsync_start) & 7) << 8)
+ | (mode->hsync_start & 7);
+ lcdc_write(sdev, LDHAJR, value);
+
+ value = ((mode->vdisplay) << 16) /* VDLN */
+ | mode->vtotal; /* VTLN */
+ lcdc_write(sdev, LDVLNR, value);
+
+ value = ((mode->vsync_end - mode->vsync_start) << 16) /* VSYNW */
+ | mode->vsync_start; /* VSYNP */
+ lcdc_write(sdev, LDVSYNR, value);
+}
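
The horizontal timing registers above are programmed in character units of 8 pixels, with the sub-character remainders collected separately in LDHAJR. A worked example for a hypothetical mode (hdisplay 800, hsync_start 840, hsync_end 968, htotal 1056):

/*
 * LDHCNR  = (800 / 8) << 16 | (1056 / 8)        = (100 << 16) | 132
 * LDHSYNR = ((968 - 840) / 8) << 16 | (840 / 8) = (16 << 16)  | 105
 * LDHAJR  = 0, since every horizontal value here is already a
 *           multiple of 8; the vertical registers take the raw
 *           line counts with no /8 scaling.
 */
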
+
+static void shmob_drm_crtc_start_stop(struct shmob_drm_crtc *scrtc, bool start)
+{
+ struct shmob_drm_device *sdev = scrtc->crtc.dev->dev_private;
+ u32 value;
+
+ value = lcdc_read(sdev, LDCNT2R);
+ if (start)
+ lcdc_write(sdev, LDCNT2R, value | LDCNT2R_DO);
+ else
+ lcdc_write(sdev, LDCNT2R, value & ~LDCNT2R_DO);
+
+ /* Wait until power is applied/stopped. */
+ while (1) {
+ value = lcdc_read(sdev, LDPMR) & LDPMR_LPS;
+ if ((start && value) || (!start && !value))
+ break;
+
+ cpu_relax();
+ }
+
+ if (!start) {
+ /* Stop the dot clock. */
+ lcdc_write(sdev, LDDCKSTPR, LDDCKSTPR_DCKSTP);
+ }
+}
+
+/*
+ * shmob_drm_crtc_start - Configure and start the LCDC
+ * @scrtc: the SH Mobile CRTC
+ *
+ * Configure and start the LCDC device. External devices (clocks, MERAM, panels,
+ * ...) are not touched by this function.
+ */
+static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
+{
+ struct drm_crtc *crtc = &scrtc->crtc;
+ struct shmob_drm_device *sdev = crtc->dev->dev_private;
+ const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
+ const struct shmob_drm_format_info *format;
+ struct drm_device *dev = sdev->ddev;
+ struct drm_plane *plane;
+ u32 value;
+
+ if (scrtc->started)
+ return;
+
+ format = shmob_drm_format_info(crtc->fb->pixel_format);
+ if (WARN_ON(format == NULL))
+ return;
+
+ /* Enable clocks before accessing the hardware. */
+ shmob_drm_clk_on(sdev);
+
+ /* Reset and enable the LCDC. */
+ lcdc_write(sdev, LDCNT2R, lcdc_read(sdev, LDCNT2R) | LDCNT2R_BR);
+ lcdc_wait_bit(sdev, LDCNT2R, LDCNT2R_BR, 0);
+ lcdc_write(sdev, LDCNT2R, LDCNT2R_ME);
+
+ /* Stop the LCDC first and disable all interrupts. */
+ shmob_drm_crtc_start_stop(scrtc, false);
+ lcdc_write(sdev, LDINTR, 0);
+
+ /* Configure power supply, dot clocks and start them. */
+ lcdc_write(sdev, LDPMR, 0);
+
+ value = sdev->lddckr;
+ if (idata->clk_div) {
+ /* FIXME: sh7724 can only use 42, 48, 54 and 60 for the divider
+ * denominator.
+ */
+ lcdc_write(sdev, LDDCKPAT1R, 0);
+ lcdc_write(sdev, LDDCKPAT2R, (1 << (idata->clk_div / 2)) - 1);
+
+ if (idata->clk_div == 1)
+ value |= LDDCKR_MOSEL;
+ else
+ value |= idata->clk_div;
+ }
+
+ lcdc_write(sdev, LDDCKR, value);
+ lcdc_write(sdev, LDDCKSTPR, 0);
+ lcdc_wait_bit(sdev, LDDCKSTPR, ~0, 0);
+
+ /* TODO: Setup SYS panel */
+
+ /* Setup geometry, format, frame buffer memory and operation mode. */
+ shmob_drm_crtc_setup_geometry(scrtc);
+
+ /* TODO: Handle YUV colorspaces. Hardcode REC709 for now. */
+ lcdc_write(sdev, LDDFR, format->lddfr | LDDFR_CF1);
+ lcdc_write(sdev, LDMLSR, scrtc->line_size);
+ lcdc_write(sdev, LDSA1R, scrtc->dma[0]);
+ if (format->yuv)
+ lcdc_write(sdev, LDSA2R, scrtc->dma[1]);
+ lcdc_write(sdev, LDSM1R, 0);
+
+ /* Word and long word swap. */
+ switch (format->fourcc) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV42:
+ value = LDDDSR_LS | LDDDSR_WS;
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV24:
+ value = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ default:
+ value = LDDDSR_LS;
+ break;
+ }
+ lcdc_write(sdev, LDDDSR, value);
+
+ /* Setup planes. */
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ if (plane->crtc == crtc)
+ shmob_drm_plane_setup(plane);
+ }
+
+ /* Enable the display output. */
+ lcdc_write(sdev, LDCNT1R, LDCNT1R_DE);
+
+ shmob_drm_crtc_start_stop(scrtc, true);
+
+ scrtc->started = true;
+}
+
+static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc)
+{
+ struct drm_crtc *crtc = &scrtc->crtc;
+ struct shmob_drm_device *sdev = crtc->dev->dev_private;
+
+ if (!scrtc->started)
+ return;
+
+ /* Disable the MERAM cache. */
+ if (scrtc->cache) {
+ sh_mobile_meram_cache_free(sdev->meram, scrtc->cache);
+ scrtc->cache = NULL;
+ }
+
+ /* Stop the LCDC. */
+ shmob_drm_crtc_start_stop(scrtc, false);
+
+ /* Disable the display output. */
+ lcdc_write(sdev, LDCNT1R, 0);
+
+ /* Stop clocks. */
+ shmob_drm_clk_off(sdev);
+
+ scrtc->started = false;
+}
+
+void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc)
+{
+ shmob_drm_crtc_stop(scrtc);
+}
+
+void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc)
+{
+ if (scrtc->dpms != DRM_MODE_DPMS_ON)
+ return;
+
+ shmob_drm_crtc_start(scrtc);
+}
+
+static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
+ int x, int y)
+{
+ struct drm_crtc *crtc = &scrtc->crtc;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct shmob_drm_device *sdev = crtc->dev->dev_private;
+ struct drm_gem_cma_object *gem;
+ unsigned int bpp;
+
+ bpp = scrtc->format->yuv ? 8 : scrtc->format->bpp;
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+ scrtc->dma[0] = gem->paddr + fb->offsets[0]
+ + y * fb->pitches[0] + x * bpp / 8;
+
+ if (scrtc->format->yuv) {
+ bpp = scrtc->format->bpp - 8;
+ gem = drm_fb_cma_get_gem_obj(fb, 1);
+ scrtc->dma[1] = gem->paddr + fb->offsets[1]
+ + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+ + x * (bpp == 16 ? 2 : 1);
+ }
+
+ if (scrtc->cache)
+ sh_mobile_meram_cache_update(sdev->meram, scrtc->cache,
+ scrtc->dma[0], scrtc->dma[1],
+ &scrtc->dma[0], &scrtc->dma[1]);
+}
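
The chroma plane address uses format->bpp - 8 as a stand-in for the chroma bits per pixel: 4 marks the vertically subsampled formats (the chroma row advances every other line) and 16 marks two bytes per pixel. A worked example for NV12 (bpp = 12) at hypothetical coordinates x = 16, y = 32:

/*
 * luma:   dma[0] = paddr0 + offsets[0] + 32 * pitches[0] + 16 * 8 / 8
 * chroma: bpp - 8 = 4, so y is halved and x advances one byte per
 *         pixel of interleaved Cb/Cr:
 *         dma[1] = paddr1 + offsets[1] + (32 / 2) * pitches[1] + 16
 */
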
+
+static void shmob_drm_crtc_update_base(struct shmob_drm_crtc *scrtc)
+{
+ struct drm_crtc *crtc = &scrtc->crtc;
+ struct shmob_drm_device *sdev = crtc->dev->dev_private;
+
+ shmob_drm_crtc_compute_base(scrtc, crtc->x, crtc->y);
+
+ lcdc_write_mirror(sdev, LDSA1R, scrtc->dma[0]);
+ if (scrtc->format->yuv)
+ lcdc_write_mirror(sdev, LDSA2R, scrtc->dma[1]);
+
+ lcdc_write(sdev, LDRCNTR, lcdc_read(sdev, LDRCNTR) ^ LDRCNTR_MRS);
+}
+
+#define to_shmob_crtc(c) container_of(c, struct shmob_drm_crtc, crtc)
+
+static void shmob_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
+
+ if (scrtc->dpms == mode)
+ return;
+
+ if (mode == DRM_MODE_DPMS_ON)
+ shmob_drm_crtc_start(scrtc);
+ else
+ shmob_drm_crtc_stop(scrtc);
+
+ scrtc->dpms = mode;
+}
+
+static bool shmob_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void shmob_drm_crtc_mode_prepare(struct drm_crtc *crtc)
+{
+ shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
+ struct shmob_drm_device *sdev = crtc->dev->dev_private;
+ const struct sh_mobile_meram_cfg *mdata = sdev->pdata->meram;
+ const struct shmob_drm_format_info *format;
+ void *cache;
+
+ format = shmob_drm_format_info(crtc->fb->pixel_format);
+ if (format == NULL) {
+ dev_dbg(sdev->dev, "mode_set: unsupported format %08x\n",
+ crtc->fb->pixel_format);
+ return -EINVAL;
+ }
+
+ scrtc->format = format;
+ scrtc->line_size = crtc->fb->pitches[0];
+
+ if (sdev->meram) {
+ /* Enable MERAM cache if configured. We need to de-init
+ * configured ICBs before we can re-initialize them.
+ */
+ if (scrtc->cache) {
+ sh_mobile_meram_cache_free(sdev->meram, scrtc->cache);
+ scrtc->cache = NULL;
+ }
+
+ cache = sh_mobile_meram_cache_alloc(sdev->meram, mdata,
+ crtc->fb->pitches[0],
+ adjusted_mode->vdisplay,
+ format->meram,
+ &scrtc->line_size);
+ if (!IS_ERR(cache))
+ scrtc->cache = cache;
+ }
+
+ shmob_drm_crtc_compute_base(scrtc, x, y);
+
+ return 0;
+}
+
+static void shmob_drm_crtc_mode_commit(struct drm_crtc *crtc)
+{
+ shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static int shmob_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ shmob_drm_crtc_update_base(to_shmob_crtc(crtc));
+
+ return 0;
+}
+
+static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
+ .dpms = shmob_drm_crtc_dpms,
+ .mode_fixup = shmob_drm_crtc_mode_fixup,
+ .prepare = shmob_drm_crtc_mode_prepare,
+ .commit = shmob_drm_crtc_mode_commit,
+ .mode_set = shmob_drm_crtc_mode_set,
+ .mode_set_base = shmob_drm_crtc_mode_set_base,
+};
+
+void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc,
+ struct drm_file *file)
+{
+ struct drm_pending_vblank_event *event;
+ struct drm_device *dev = scrtc->crtc.dev;
+ unsigned long flags;
+
+ /* Destroy the pending vertical blanking event associated with the
+ * pending page flip, if any, and disable vertical blanking interrupts.
+ */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = scrtc->event;
+ if (event && event->base.file_priv == file) {
+ scrtc->event = NULL;
+ event->base.destroy(&event->base);
+ drm_vblank_put(dev, 0);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
+{
+ struct drm_pending_vblank_event *event;
+ struct drm_device *dev = scrtc->crtc.dev;
+ struct timeval vblanktime;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = scrtc->event;
+ scrtc->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (event == NULL)
+ return;
+
+ event->event.sequence = drm_vblank_count_and_time(dev, 0, &vblanktime);
+ event->event.tv_sec = vblanktime.tv_sec;
+ event->event.tv_usec = vblanktime.tv_usec;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_add_tail(&event->base.link, &event->base.file_priv->event_list);
+ wake_up_interruptible(&event->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ drm_vblank_put(dev, 0);
+}
+
+static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
+ struct drm_device *dev = scrtc->crtc.dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (scrtc->event != NULL) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ crtc->fb = fb;
+ shmob_drm_crtc_update_base(scrtc);
+
+ if (event) {
+ event->pipe = 0;
+ spin_lock_irqsave(&dev->event_lock, flags);
+ scrtc->event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ drm_vblank_get(dev, 0);
+ }
+
+ return 0;
+}
+
+static const struct drm_crtc_funcs crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = shmob_drm_crtc_page_flip,
+};
+
+int shmob_drm_crtc_create(struct shmob_drm_device *sdev)
+{
+ struct drm_crtc *crtc = &sdev->crtc.crtc;
+ int ret;
+
+ sdev->crtc.dpms = DRM_MODE_DPMS_OFF;
+
+ ret = drm_crtc_init(sdev->ddev, crtc, &crtc_funcs);
+ if (ret < 0)
+ return ret;
+
+ drm_crtc_helper_add(crtc, &crtc_helper_funcs);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Encoder
+ */
+
+#define to_shmob_encoder(e) \
+ container_of(e, struct shmob_drm_encoder, encoder)
+
+static void shmob_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct shmob_drm_encoder *senc = to_shmob_encoder(encoder);
+ struct shmob_drm_device *sdev = encoder->dev->dev_private;
+ struct shmob_drm_connector *scon = &sdev->connector;
+
+ if (senc->dpms == mode)
+ return;
+
+ shmob_drm_backlight_dpms(scon, mode);
+
+ senc->dpms = mode;
+}
+
+static bool shmob_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct shmob_drm_device *sdev = dev->dev_private;
+ struct drm_connector *connector = &sdev->connector.connector;
+ const struct drm_display_mode *panel_mode;
+
+ if (list_empty(&connector->modes)) {
+ dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
+ return false;
+ }
+
+ /* The flat panel mode is fixed, just copy it to the adjusted mode. */
+ panel_mode = list_first_entry(&connector->modes,
+ struct drm_display_mode, head);
+ drm_mode_copy(adjusted_mode, panel_mode);
+
+ return true;
+}
+
+static void shmob_drm_encoder_mode_prepare(struct drm_encoder *encoder)
+{
+ /* No-op, everything is handled in the CRTC code. */
+}
+
+static void shmob_drm_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* No-op, everything is handled in the CRTC code. */
+}
+
+static void shmob_drm_encoder_mode_commit(struct drm_encoder *encoder)
+{
+ /* No-op, everything is handled in the CRTC code. */
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+ .dpms = shmob_drm_encoder_dpms,
+ .mode_fixup = shmob_drm_encoder_mode_fixup,
+ .prepare = shmob_drm_encoder_mode_prepare,
+ .commit = shmob_drm_encoder_mode_commit,
+ .mode_set = shmob_drm_encoder_mode_set,
+};
+
+static void shmob_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+ .destroy = shmob_drm_encoder_destroy,
+};
+
+int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
+{
+ struct drm_encoder *encoder = &sdev->encoder.encoder;
+ int ret;
+
+ sdev->encoder.dpms = DRM_MODE_DPMS_OFF;
+
+ encoder->possible_crtcs = 1;
+
+ ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+ if (ret < 0)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+
+ return 0;
+}
+
+void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev, bool enable)
+{
+ unsigned long flags;
+ u32 ldintr;
+
+ /* Be careful not to acknowledge any pending interrupt. */
+ spin_lock_irqsave(&sdev->irq_lock, flags);
+ ldintr = lcdc_read(sdev, LDINTR) | LDINTR_STATUS_MASK;
+ if (enable)
+ ldintr |= LDINTR_VEE;
+ else
+ ldintr &= ~LDINTR_VEE;
+ lcdc_write(sdev, LDINTR, ldintr);
+ spin_unlock_irqrestore(&sdev->irq_lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * Connector
+ */
+
+#define to_shmob_connector(c) \
+ container_of(c, struct shmob_drm_connector, connector)
+
+static int shmob_drm_connector_get_modes(struct drm_connector *connector)
+{
+ struct shmob_drm_device *sdev = connector->dev->dev_private;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_create(connector->dev);
+ if (mode == NULL)
+ return 0;
+
+ mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+ mode->clock = sdev->pdata->panel.mode.clock;
+ mode->hdisplay = sdev->pdata->panel.mode.hdisplay;
+ mode->hsync_start = sdev->pdata->panel.mode.hsync_start;
+ mode->hsync_end = sdev->pdata->panel.mode.hsync_end;
+ mode->htotal = sdev->pdata->panel.mode.htotal;
+ mode->vdisplay = sdev->pdata->panel.mode.vdisplay;
+ mode->vsync_start = sdev->pdata->panel.mode.vsync_start;
+ mode->vsync_end = sdev->pdata->panel.mode.vsync_end;
+ mode->vtotal = sdev->pdata->panel.mode.vtotal;
+ mode->flags = sdev->pdata->panel.mode.flags;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = sdev->pdata->panel.width_mm;
+ connector->display_info.height_mm = sdev->pdata->panel.height_mm;
+
+ return 1;
+}
+
+static int shmob_drm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+shmob_drm_connector_best_encoder(struct drm_connector *connector)
+{
+ struct shmob_drm_connector *scon = to_shmob_connector(connector);
+
+ return scon->encoder;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+ .get_modes = shmob_drm_connector_get_modes,
+ .mode_valid = shmob_drm_connector_mode_valid,
+ .best_encoder = shmob_drm_connector_best_encoder,
+};
+
+static void shmob_drm_connector_destroy(struct drm_connector *connector)
+{
+ struct shmob_drm_connector *scon = to_shmob_connector(connector);
+
+ shmob_drm_backlight_exit(scon);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+}
+
+static enum drm_connector_status
+shmob_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static const struct drm_connector_funcs connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = shmob_drm_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = shmob_drm_connector_destroy,
+};
+
+int shmob_drm_connector_create(struct shmob_drm_device *sdev,
+ struct drm_encoder *encoder)
+{
+ struct drm_connector *connector = &sdev->connector.connector;
+ int ret;
+
+ sdev->connector.encoder = encoder;
+
+ connector->display_info.width_mm = sdev->pdata->panel.width_mm;
+ connector->display_info.height_mm = sdev->pdata->panel.height_mm;
+
+ ret = drm_connector_init(sdev->ddev, connector, &connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (ret < 0)
+ return ret;
+
+ drm_connector_helper_add(connector, &connector_helper_funcs);
+ ret = drm_sysfs_connector_add(connector);
+ if (ret < 0)
+ goto err_cleanup;
+
+ ret = shmob_drm_backlight_init(&sdev->connector);
+ if (ret < 0)
+ goto err_sysfs;
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret < 0)
+ goto err_backlight;
+
+ connector->encoder = encoder;
+
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_connector_property_set_value(connector,
+ sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
+
+ return 0;
+
+err_backlight:
+ shmob_drm_backlight_exit(&sdev->connector);
+err_sysfs:
+ drm_sysfs_connector_remove(connector);
+err_cleanup:
+ drm_connector_cleanup(connector);
+ return ret;
+}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
new file mode 100644
index 000000000000..e5bd109c4c38
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
@@ -0,0 +1,60 @@
+/*
+ * shmob_drm_crtc.h -- SH Mobile DRM CRTCs
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_CRTC_H__
+#define __SHMOB_DRM_CRTC_H__
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
+struct backlight_device;
+struct shmob_drm_device;
+
+struct shmob_drm_crtc {
+ struct drm_crtc crtc;
+
+ struct drm_pending_vblank_event *event;
+ int dpms;
+
+ const struct shmob_drm_format_info *format;
+ void *cache;
+ unsigned long dma[2];
+ unsigned int line_size;
+ bool started;
+};
+
+struct shmob_drm_encoder {
+ struct drm_encoder encoder;
+ int dpms;
+};
+
+struct shmob_drm_connector {
+ struct drm_connector connector;
+ struct drm_encoder *encoder;
+
+ struct backlight_device *backlight;
+};
+
+int shmob_drm_crtc_create(struct shmob_drm_device *sdev);
+void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev, bool enable);
+void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc,
+ struct drm_file *file);
+void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc);
+void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc);
+void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc);
+
+int shmob_drm_encoder_create(struct shmob_drm_device *sdev);
+int shmob_drm_connector_create(struct shmob_drm_device *sdev,
+ struct drm_encoder *encoder);
+
+#endif /* __SHMOB_DRM_CRTC_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
new file mode 100644
index 000000000000..c71d493fd0c5
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -0,0 +1,361 @@
+/*
+ * shmob_drm_drv.c -- SH Mobile DRM driver
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "shmob_drm_crtc.h"
+#include "shmob_drm_drv.h"
+#include "shmob_drm_kms.h"
+#include "shmob_drm_plane.h"
+#include "shmob_drm_regs.h"
+
+/* -----------------------------------------------------------------------------
+ * Hardware initialization
+ */
+
+static int __devinit shmob_drm_init_interface(struct shmob_drm_device *sdev)
+{
+ static const u32 ldmt1r[] = {
+ [SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8,
+ [SHMOB_DRM_IFACE_RGB9] = LDMT1R_MIFTYP_RGB9,
+ [SHMOB_DRM_IFACE_RGB12A] = LDMT1R_MIFTYP_RGB12A,
+ [SHMOB_DRM_IFACE_RGB12B] = LDMT1R_MIFTYP_RGB12B,
+ [SHMOB_DRM_IFACE_RGB16] = LDMT1R_MIFTYP_RGB16,
+ [SHMOB_DRM_IFACE_RGB18] = LDMT1R_MIFTYP_RGB18,
+ [SHMOB_DRM_IFACE_RGB24] = LDMT1R_MIFTYP_RGB24,
+ [SHMOB_DRM_IFACE_YUV422] = LDMT1R_MIFTYP_YCBCR,
+ [SHMOB_DRM_IFACE_SYS8A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8A,
+ [SHMOB_DRM_IFACE_SYS8B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8B,
+ [SHMOB_DRM_IFACE_SYS8C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8C,
+ [SHMOB_DRM_IFACE_SYS8D] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8D,
+ [SHMOB_DRM_IFACE_SYS9] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS9,
+ [SHMOB_DRM_IFACE_SYS12] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS12,
+ [SHMOB_DRM_IFACE_SYS16A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16A,
+ [SHMOB_DRM_IFACE_SYS16B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16B,
+ [SHMOB_DRM_IFACE_SYS16C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16C,
+ [SHMOB_DRM_IFACE_SYS18] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS18,
+ [SHMOB_DRM_IFACE_SYS24] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS24,
+ };
+
+ if (sdev->pdata->iface.interface >= ARRAY_SIZE(ldmt1r)) {
+ dev_err(sdev->dev, "invalid interface type %u\n",
+ sdev->pdata->iface.interface);
+ return -EINVAL;
+ }
+
+ sdev->ldmt1r = ldmt1r[sdev->pdata->iface.interface];
+ return 0;
+}
+
+static int __devinit shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
+ enum shmob_drm_clk_source clksrc)
+{
+ struct clk *clk;
+ char *clkname;
+
+ switch (clksrc) {
+ case SHMOB_DRM_CLK_BUS:
+ clkname = "bus_clk";
+ sdev->lddckr = LDDCKR_ICKSEL_BUS;
+ break;
+ case SHMOB_DRM_CLK_PERIPHERAL:
+ clkname = "peripheral_clk";
+ sdev->lddckr = LDDCKR_ICKSEL_MIPI;
+ break;
+ case SHMOB_DRM_CLK_EXTERNAL:
+ clkname = NULL;
+ sdev->lddckr = LDDCKR_ICKSEL_HDMI;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ clk = clk_get(sdev->dev, clkname);
+ if (IS_ERR(clk)) {
+ dev_err(sdev->dev, "cannot get dot clock %s\n", clkname);
+ return PTR_ERR(clk);
+ }
+
+ sdev->clock = clk;
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * DRM operations
+ */
+
+static int shmob_drm_unload(struct drm_device *dev)
+{
+ struct shmob_drm_device *sdev = dev->dev_private;
+
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+ drm_vblank_cleanup(dev);
+ drm_irq_uninstall(dev);
+
+ if (sdev->clock)
+ clk_put(sdev->clock);
+
+ if (sdev->mmio)
+ iounmap(sdev->mmio);
+
+ dev->dev_private = NULL;
+ kfree(sdev);
+
+ return 0;
+}
+
+static int shmob_drm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct shmob_drm_platform_data *pdata = dev->dev->platform_data;
+ struct platform_device *pdev = dev->platformdev;
+ struct shmob_drm_device *sdev;
+ struct resource *res;
+ unsigned int i;
+ int ret;
+
+ if (pdata == NULL) {
+ dev_err(dev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ if (sdev == NULL) {
+ dev_err(dev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ sdev->dev = &pdev->dev;
+ sdev->pdata = pdata;
+ spin_lock_init(&sdev->irq_lock);
+
+ sdev->ddev = dev;
+ dev->dev_private = sdev;
+
+ /* I/O resources and clocks */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to get memory resource\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ sdev->mmio = ioremap_nocache(res->start, resource_size(res));
+ if (sdev->mmio == NULL) {
+ dev_err(&pdev->dev, "failed to remap memory resource\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ ret = shmob_drm_setup_clocks(sdev, pdata->clk_source);
+ if (ret < 0)
+ goto done;
+
+ ret = shmob_drm_init_interface(sdev);
+ if (ret < 0)
+ goto done;
+
+ ret = shmob_drm_modeset_init(sdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to initialize mode setting\n");
+ goto done;
+ }
+
+ for (i = 0; i < 4; ++i) {
+ ret = shmob_drm_plane_create(sdev, i);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to create plane %u\n", i);
+ goto done;
+ }
+ }
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to initialize vblank\n");
+ goto done;
+ }
+
+ ret = drm_irq_install(dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to install IRQ handler\n");
+ goto done;
+ }
+
+done:
+ if (ret)
+ shmob_drm_unload(dev);
+
+ return ret;
+}
+
+static void shmob_drm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct shmob_drm_device *sdev = dev->dev_private;
+
+ shmob_drm_crtc_cancel_page_flip(&sdev->crtc, file);
+}
+
+static irqreturn_t shmob_drm_irq(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct shmob_drm_device *sdev = dev->dev_private;
+ unsigned long flags;
+ u32 status;
+
+ /* Acknowledge interrupts. Putting interrupt enable and interrupt flag
+ * bits in the same register is really brain-dead design and requires
+ * taking a spinlock.
+ */
+ spin_lock_irqsave(&sdev->irq_lock, flags);
+ status = lcdc_read(sdev, LDINTR);
+ lcdc_write(sdev, LDINTR, status ^ LDINTR_STATUS_MASK);
+ spin_unlock_irqrestore(&sdev->irq_lock, flags);
+
+ if (status & LDINTR_VES) {
+ drm_handle_vblank(dev, 0);
+ shmob_drm_crtc_finish_page_flip(&sdev->crtc);
+ }
+
+ return IRQ_HANDLED;
+}
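
The XOR in the acknowledge relies on the LCDC clearing a status flag only when 0 is written to it, while writing 1 leaves the flag untouched (an assumption about the hardware; the enable bits above the status field pass through unchanged either way). A bit-level sketch:

/*
 *   status               = EN.. | 0 0 1 0   one flag asserted
 *   LDINTR_STATUS_MASK   =        1 1 1 1
 *   status ^ STATUS_MASK = EN.. | 1 1 0 1   writes 0 only to the
 *                                           asserted flag, so no
 *                                           new event is lost
 */
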
+
+static int shmob_drm_enable_vblank(struct drm_device *dev, int crtc)
+{
+ struct shmob_drm_device *sdev = dev->dev_private;
+
+ shmob_drm_crtc_enable_vblank(sdev, true);
+
+ return 0;
+}
+
+static void shmob_drm_disable_vblank(struct drm_device *dev, int crtc)
+{
+ struct shmob_drm_device *sdev = dev->dev_private;
+
+ shmob_drm_crtc_enable_vblank(sdev, false);
+}
+
+static const struct file_operations shmob_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .fasync = drm_fasync,
+ .llseek = no_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver shmob_drm_driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+ .load = shmob_drm_load,
+ .unload = shmob_drm_unload,
+ .preclose = shmob_drm_preclose,
+ .irq_handler = shmob_drm_irq,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = shmob_drm_enable_vblank,
+ .disable_vblank = shmob_drm_disable_vblank,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_cma_dumb_destroy,
+ .fops = &shmob_drm_fops,
+ .name = "shmob-drm",
+ .desc = "Renesas SH Mobile DRM",
+ .date = "20120424",
+ .major = 1,
+ .minor = 0,
+};
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+#ifdef CONFIG_PM_SLEEP
+static int shmob_drm_pm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *ddev = platform_get_drvdata(pdev);
+ struct shmob_drm_device *sdev = ddev->dev_private;
+
+ drm_kms_helper_poll_disable(ddev);
+ shmob_drm_crtc_suspend(&sdev->crtc);
+
+ return 0;
+}
+
+static int shmob_drm_pm_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *ddev = platform_get_drvdata(pdev);
+ struct shmob_drm_device *sdev = ddev->dev_private;
+
+ mutex_lock(&sdev->ddev->mode_config.mutex);
+ shmob_drm_crtc_resume(&sdev->crtc);
+ mutex_unlock(&sdev->ddev->mode_config.mutex);
+
+ drm_kms_helper_poll_enable(sdev->ddev);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops shmob_drm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(shmob_drm_pm_suspend, shmob_drm_pm_resume)
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform driver
+ */
+
+static int __devinit shmob_drm_probe(struct platform_device *pdev)
+{
+ return drm_platform_init(&shmob_drm_driver, pdev);
+}
+
+static int __devexit shmob_drm_remove(struct platform_device *pdev)
+{
+ drm_platform_exit(&shmob_drm_driver, pdev);
+
+ return 0;
+}
+
+static struct platform_driver shmob_drm_platform_driver = {
+ .probe = shmob_drm_probe,
+ .remove = __devexit_p(shmob_drm_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "shmob-drm",
+ .pm = &shmob_drm_pm_ops,
+ },
+};
+
+module_platform_driver(shmob_drm_platform_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Renesas SH Mobile DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.h b/drivers/gpu/drm/shmobile/shmob_drm_drv.h
new file mode 100644
index 000000000000..4d46b811b5a7
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.h
@@ -0,0 +1,47 @@
+/*
+ * shmob_drm_drv.h -- SH Mobile DRM driver
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_DRV_H__
+#define __SHMOB_DRM_DRV_H__
+
+#include <linux/kernel.h>
+#include <linux/platform_data/shmob_drm.h>
+#include <linux/spinlock.h>
+
+#include "shmob_drm_crtc.h"
+
+struct clk;
+struct device;
+struct drm_device;
+struct sh_mobile_meram_info;
+
+struct shmob_drm_device {
+ struct device *dev;
+ const struct shmob_drm_platform_data *pdata;
+
+ void __iomem *mmio;
+ struct clk *clock;
+ struct sh_mobile_meram_info *meram;
+ u32 lddckr;
+ u32 ldmt1r;
+
+ spinlock_t irq_lock; /* Protects hardware LDINTR register */
+
+ struct drm_device *ddev;
+
+ struct shmob_drm_crtc crtc;
+ struct shmob_drm_encoder encoder;
+ struct shmob_drm_connector connector;
+};
+
+#endif /* __SHMOB_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
new file mode 100644
index 000000000000..c291ee385b4f
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -0,0 +1,160 @@
+/*
+ * shmob_drm_kms.c -- SH Mobile DRM Mode Setting
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <video/sh_mobile_meram.h>
+
+#include "shmob_drm_crtc.h"
+#include "shmob_drm_drv.h"
+#include "shmob_drm_kms.h"
+#include "shmob_drm_regs.h"
+
+/* -----------------------------------------------------------------------------
+ * Format helpers
+ */
+
+static const struct shmob_drm_format_info shmob_drm_format_infos[] = {
+ {
+ .fourcc = DRM_FORMAT_RGB565,
+ .bpp = 16,
+ .yuv = false,
+ .lddfr = LDDFR_PKF_RGB16,
+ .meram = SH_MOBILE_MERAM_PF_RGB,
+ }, {
+ .fourcc = DRM_FORMAT_RGB888,
+ .bpp = 24,
+ .yuv = false,
+ .lddfr = LDDFR_PKF_RGB24,
+ .meram = SH_MOBILE_MERAM_PF_RGB,
+ }, {
+ .fourcc = DRM_FORMAT_ARGB8888,
+ .bpp = 32,
+ .yuv = false,
+ .lddfr = LDDFR_PKF_ARGB32,
+ .meram = SH_MOBILE_MERAM_PF_RGB,
+ }, {
+ .fourcc = DRM_FORMAT_NV12,
+ .bpp = 12,
+ .yuv = true,
+ .lddfr = LDDFR_CC | LDDFR_YF_420,
+ .meram = SH_MOBILE_MERAM_PF_NV,
+ }, {
+ .fourcc = DRM_FORMAT_NV21,
+ .bpp = 12,
+ .yuv = true,
+ .lddfr = LDDFR_CC | LDDFR_YF_420,
+ .meram = SH_MOBILE_MERAM_PF_NV,
+ }, {
+ .fourcc = DRM_FORMAT_NV16,
+ .bpp = 16,
+ .yuv = true,
+ .lddfr = LDDFR_CC | LDDFR_YF_422,
+ .meram = SH_MOBILE_MERAM_PF_NV,
+ }, {
+ .fourcc = DRM_FORMAT_NV61,
+ .bpp = 16,
+ .yuv = true,
+ .lddfr = LDDFR_CC | LDDFR_YF_422,
+ .meram = SH_MOBILE_MERAM_PF_NV,
+ }, {
+ .fourcc = DRM_FORMAT_NV24,
+ .bpp = 24,
+ .yuv = true,
+ .lddfr = LDDFR_CC | LDDFR_YF_444,
+ .meram = SH_MOBILE_MERAM_PF_NV24,
+ }, {
+ .fourcc = DRM_FORMAT_NV42,
+ .bpp = 24,
+ .yuv = true,
+ .lddfr = LDDFR_CC | LDDFR_YF_444,
+ .meram = SH_MOBILE_MERAM_PF_NV24,
+ },
+};
+
+const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(shmob_drm_format_infos); ++i) {
+ if (shmob_drm_format_infos[i].fourcc == fourcc)
+ return &shmob_drm_format_infos[i];
+ }
+
+ return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Frame buffer
+ */
+
+static struct drm_framebuffer *
+shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ const struct shmob_drm_format_info *format;
+
+ format = shmob_drm_format_info(mode_cmd->pixel_format);
+ if (format == NULL) {
+ dev_dbg(dev->dev, "unsupported pixel format %08x\n",
+ mode_cmd->pixel_format);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (mode_cmd->pitches[0] & 7 || mode_cmd->pitches[0] >= 65536) {
+ dev_dbg(dev->dev, "valid pitch value %u\n",
+ mode_cmd->pitches[0]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (format->yuv) {
+ unsigned int chroma_cpp = format->bpp == 24 ? 2 : 1;
+
+ if (mode_cmd->pitches[1] != mode_cmd->pitches[0] * chroma_cpp) {
+ dev_dbg(dev->dev,
+ "luma and chroma pitches do not match\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ return drm_fb_cma_create(dev, file_priv, mode_cmd);
+}
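
The checks above admit only an 8-byte-aligned luma pitch below 65536 and, for the YUV formats, a chroma pitch tied to it through chroma_cpp. A few worked cases (pitch values hypothetical):

/*
 * RGB565, pitches[0] = 1600            -> accepted
 * RGB565, pitches[0] = 1604            -> -EINVAL, not a multiple of 8
 * NV12,   pitches[1] = pitches[0]      -> accepted, chroma_cpp = 1
 * NV24,   pitches[1] = 2 * pitches[0]  -> accepted, bpp 24 gives
 *                                         chroma_cpp = 2
 */
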
+
+static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
+ .fb_create = shmob_drm_fb_create,
+};
+
+int shmob_drm_modeset_init(struct shmob_drm_device *sdev)
+{
+ drm_mode_config_init(sdev->ddev);
+
+ shmob_drm_crtc_create(sdev);
+ shmob_drm_encoder_create(sdev);
+ shmob_drm_connector_create(sdev, &sdev->encoder.encoder);
+
+ drm_kms_helper_poll_init(sdev->ddev);
+
+ sdev->ddev->mode_config.min_width = 0;
+ sdev->ddev->mode_config.min_height = 0;
+ sdev->ddev->mode_config.max_width = 4095;
+ sdev->ddev->mode_config.max_height = 4095;
+ sdev->ddev->mode_config.funcs = &shmob_drm_mode_config_funcs;
+
+ drm_helper_disable_unused_functions(sdev->ddev);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.h b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
new file mode 100644
index 000000000000..9495c9111308
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
@@ -0,0 +1,34 @@
+/*
+ * shmob_drm_kms.h -- SH Mobile DRM Mode Setting
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_KMS_H__
+#define __SHMOB_DRM_KMS_H__
+
+#include <linux/types.h>
+
+struct drm_gem_cma_object;
+struct shmob_drm_device;
+
+struct shmob_drm_format_info {
+ u32 fourcc;
+ unsigned int bpp;
+ bool yuv;
+ u32 lddfr;
+ unsigned int meram;
+};
+
+const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc);
+
+int shmob_drm_modeset_init(struct shmob_drm_device *sdev);
+
+#endif /* __SHMOB_DRM_KMS_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
new file mode 100644
index 000000000000..e1eb899b0288
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -0,0 +1,268 @@
+/*
+ * shmob_drm_plane.c -- SH Mobile DRM Planes
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <video/sh_mobile_meram.h>
+
+#include "shmob_drm_drv.h"
+#include "shmob_drm_kms.h"
+#include "shmob_drm_plane.h"
+#include "shmob_drm_regs.h"
+
+struct shmob_drm_plane {
+ struct drm_plane plane;
+ unsigned int index;
+ unsigned int alpha;
+
+ const struct shmob_drm_format_info *format;
+ unsigned long dma[2];
+
+ unsigned int src_x;
+ unsigned int src_y;
+ unsigned int crtc_x;
+ unsigned int crtc_y;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
+};
+
+#define to_shmob_plane(p) container_of(p, struct shmob_drm_plane, plane)
+
+static void shmob_drm_plane_compute_base(struct shmob_drm_plane *splane,
+ struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct drm_gem_cma_object *gem;
+ unsigned int bpp;
+
+ bpp = splane->format->yuv ? 8 : splane->format->bpp;
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+ splane->dma[0] = gem->paddr + fb->offsets[0]
+ + y * fb->pitches[0] + x * bpp / 8;
+
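+ /*
+ * For YUV formats, (bpp - 8) gives the chroma plane depth: 4 for
+ * NV12/NV21, 8 for NV16/NV61 and 16 for NV24/NV42. A depth of 4 means
+ * the chroma rows are vertically subsampled by two, and a depth of 16
+ * means two chroma bytes per pixel horizontally.
+ */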
+ if (splane->format->yuv) {
+ bpp = splane->format->bpp - 8;
+ gem = drm_fb_cma_get_gem_obj(fb, 1);
+ splane->dma[1] = gem->paddr + fb->offsets[1]
+ + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+ + x * (bpp == 16 ? 2 : 1);
+ }
+}
+
+static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane,
+ struct drm_framebuffer *fb)
+{
+ struct shmob_drm_device *sdev = splane->plane.dev->dev_private;
+ u32 format;
+
+ /* TODO: Support ROP3 mode */
+ format = LDBBSIFR_EN | (splane->alpha << LDBBSIFR_LAY_SHIFT);
+
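+ /* Select the byte/word/long word data swapping required by the format. */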
+ switch (splane->format->fourcc) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV42:
+ format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW;
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV24:
+ format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW | LDBBSIFR_SWPB;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ default:
+ format |= LDBBSIFR_SWPL;
+ break;
+ }
+
+ switch (splane->format->fourcc) {
+ case DRM_FORMAT_RGB565:
+ format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16;
+ break;
+ case DRM_FORMAT_RGB888:
+ format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ format |= LDBBSIFR_AL_PK | LDBBSIFR_RY | LDBBSIFR_RPKF_ARGB32;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_420;
+ break;
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_422;
+ break;
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_444;
+ break;
+ }
+
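+/* Dump a plane register from both the side A and side B register banks. */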
+#define plane_reg_dump(sdev, splane, reg) \
+ dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x 0x%08x\n", __func__, \
+ splane->index, #reg, \
+ lcdc_read(sdev, reg(splane->index)), \
+ lcdc_read(sdev, reg(splane->index) + LCDC_SIDE_B_OFFSET))
+
+ plane_reg_dump(sdev, splane, LDBnBSIFR);
+ plane_reg_dump(sdev, splane, LDBnBSSZR);
+ plane_reg_dump(sdev, splane, LDBnBLOCR);
+ plane_reg_dump(sdev, splane, LDBnBSMWR);
+ plane_reg_dump(sdev, splane, LDBnBSAYR);
+ plane_reg_dump(sdev, splane, LDBnBSACR);
+
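+ /*
+ * Hold register updates for this plane (LDBCR_UPC) while reconfiguring
+ * it, then request the update (LDBCR_UPF | LDBCR_UPD) once all
+ * registers have been written.
+ */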
+ lcdc_write(sdev, LDBCR, LDBCR_UPC(splane->index));
+ dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
+ "LDBCR", lcdc_read(sdev, LDBCR));
+
+ lcdc_write(sdev, LDBnBSIFR(splane->index), format);
+
+ lcdc_write(sdev, LDBnBSSZR(splane->index),
+ (splane->crtc_h << LDBBSSZR_BVSS_SHIFT) |
+ (splane->crtc_w << LDBBSSZR_BHSS_SHIFT));
+ lcdc_write(sdev, LDBnBLOCR(splane->index),
+ (splane->crtc_y << LDBBLOCR_CVLC_SHIFT) |
+ (splane->crtc_x << LDBBLOCR_CHLC_SHIFT));
+ lcdc_write(sdev, LDBnBSMWR(splane->index),
+ fb->pitches[0] << LDBBSMWR_BSMW_SHIFT);
+
+ shmob_drm_plane_compute_base(splane, fb, splane->src_x, splane->src_y);
+
+ lcdc_write(sdev, LDBnBSAYR(splane->index), splane->dma[0]);
+ if (splane->format->yuv)
+ lcdc_write(sdev, LDBnBSACR(splane->index), splane->dma[1]);
+
+ lcdc_write(sdev, LDBCR,
+ LDBCR_UPF(splane->index) | LDBCR_UPD(splane->index));
+ dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
+ "LDBCR", lcdc_read(sdev, LDBCR));
+
+ plane_reg_dump(sdev, splane, LDBnBSIFR);
+ plane_reg_dump(sdev, splane, LDBnBSSZR);
+ plane_reg_dump(sdev, splane, LDBnBLOCR);
+ plane_reg_dump(sdev, splane, LDBnBSMWR);
+ plane_reg_dump(sdev, splane, LDBnBSAYR);
+ plane_reg_dump(sdev, splane, LDBnBSACR);
+}
+
+void shmob_drm_plane_setup(struct drm_plane *plane)
+{
+ struct shmob_drm_plane *splane = to_shmob_plane(plane);
+
+ if (plane->fb == NULL || !plane->enabled)
+ return;
+
+ __shmob_drm_plane_setup(splane, plane->fb);
+}
+
+static int
+shmob_drm_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct shmob_drm_plane *splane = to_shmob_plane(plane);
+ struct shmob_drm_device *sdev = plane->dev->dev_private;
+ const struct shmob_drm_format_info *format;
+
+ format = shmob_drm_format_info(fb->pixel_format);
+ if (format == NULL) {
+ dev_dbg(sdev->dev, "update_plane: unsupported format %08x\n",
+ fb->pixel_format);
+ return -EINVAL;
+ }
+
+ if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) {
+ dev_dbg(sdev->dev, "%s: scaling not supported\n", __func__);
+ return -EINVAL;
+ }
+
+ splane->format = format;
+
+ splane->src_x = src_x >> 16;
+ splane->src_y = src_y >> 16;
+ splane->crtc_x = crtc_x;
+ splane->crtc_y = crtc_y;
+ splane->crtc_w = crtc_w;
+ splane->crtc_h = crtc_h;
+
+ __shmob_drm_plane_setup(splane, fb);
+ return 0;
+}
+
+static int shmob_drm_plane_disable(struct drm_plane *plane)
+{
+ struct shmob_drm_plane *splane = to_shmob_plane(plane);
+ struct shmob_drm_device *sdev = plane->dev->dev_private;
+
+ splane->format = NULL;
+
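+ /* Clearing LDBnBSIFR (and thus LDBBSIFR_EN) turns the plane off. */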
+ lcdc_write(sdev, LDBnBSIFR(splane->index), 0);
+ return 0;
+}
+
+static void shmob_drm_plane_destroy(struct drm_plane *plane)
+{
+ struct shmob_drm_plane *splane = to_shmob_plane(plane);
+
+ shmob_drm_plane_disable(plane);
+ drm_plane_cleanup(plane);
+ kfree(splane);
+}
+
+static const struct drm_plane_funcs shmob_drm_plane_funcs = {
+ .update_plane = shmob_drm_plane_update,
+ .disable_plane = shmob_drm_plane_disable,
+ .destroy = shmob_drm_plane_destroy,
+};
+
+static const uint32_t formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_NV24,
+ DRM_FORMAT_NV42,
+};
+
+int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index)
+{
+ struct shmob_drm_plane *splane;
+ int ret;
+
+ splane = kzalloc(sizeof(*splane), GFP_KERNEL);
+ if (splane == NULL)
+ return -ENOMEM;
+
+ splane->index = index;
+ splane->alpha = 255;
+
+ ret = drm_plane_init(sdev->ddev, &splane->plane, 1,
+ &shmob_drm_plane_funcs, formats,
+ ARRAY_SIZE(formats), false);
+ if (ret < 0)
+ kfree(splane);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.h b/drivers/gpu/drm/shmobile/shmob_drm_plane.h
new file mode 100644
index 000000000000..99623d05e3b0
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.h
@@ -0,0 +1,22 @@
+/*
+ * shmob_drm_plane.h -- SH Mobile DRM Planes
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_PLANE_H__
+#define __SHMOB_DRM_PLANE_H__
+
+struct shmob_drm_device;
+
+int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index);
+void shmob_drm_plane_setup(struct drm_plane *plane);
+
+#endif /* __SHMOB_DRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_regs.h b/drivers/gpu/drm/shmobile/shmob_drm_regs.h
new file mode 100644
index 000000000000..7923cdd6368e
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_regs.h
@@ -0,0 +1,311 @@
+/*
+ * shmob_drm_regs.h -- SH Mobile DRM registers
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_REGS_H__
+#define __SHMOB_DRM_REGS_H__
+
+#include <linux/io.h>
+
+/* Register definitions */
+#define LDDCKPAT1R 0x400
+#define LDDCKPAT2R 0x404
+#define LDDCKR 0x410
+#define LDDCKR_ICKSEL_BUS (0 << 16)
+#define LDDCKR_ICKSEL_MIPI (1 << 16)
+#define LDDCKR_ICKSEL_HDMI (2 << 16)
+#define LDDCKR_ICKSEL_EXT (3 << 16)
+#define LDDCKR_ICKSEL_MASK (7 << 16)
+#define LDDCKR_MOSEL (1 << 6)
+#define LDDCKSTPR 0x414
+#define LDDCKSTPR_DCKSTS (1 << 16)
+#define LDDCKSTPR_DCKSTP (1 << 0)
+#define LDMT1R 0x418
+#define LDMT1R_VPOL (1 << 28)
+#define LDMT1R_HPOL (1 << 27)
+#define LDMT1R_DWPOL (1 << 26)
+#define LDMT1R_DIPOL (1 << 25)
+#define LDMT1R_DAPOL (1 << 24)
+#define LDMT1R_HSCNT (1 << 17)
+#define LDMT1R_DWCNT (1 << 16)
+#define LDMT1R_IFM (1 << 12)
+#define LDMT1R_MIFTYP_RGB8 (0x0 << 0)
+#define LDMT1R_MIFTYP_RGB9 (0x4 << 0)
+#define LDMT1R_MIFTYP_RGB12A (0x5 << 0)
+#define LDMT1R_MIFTYP_RGB12B (0x6 << 0)
+#define LDMT1R_MIFTYP_RGB16 (0x7 << 0)
+#define LDMT1R_MIFTYP_RGB18 (0xa << 0)
+#define LDMT1R_MIFTYP_RGB24 (0xb << 0)
+#define LDMT1R_MIFTYP_YCBCR (0xf << 0)
+#define LDMT1R_MIFTYP_SYS8A (0x0 << 0)
+#define LDMT1R_MIFTYP_SYS8B (0x1 << 0)
+#define LDMT1R_MIFTYP_SYS8C (0x2 << 0)
+#define LDMT1R_MIFTYP_SYS8D (0x3 << 0)
+#define LDMT1R_MIFTYP_SYS9 (0x4 << 0)
+#define LDMT1R_MIFTYP_SYS12 (0x5 << 0)
+#define LDMT1R_MIFTYP_SYS16A (0x7 << 0)
+#define LDMT1R_MIFTYP_SYS16B (0x8 << 0)
+#define LDMT1R_MIFTYP_SYS16C (0x9 << 0)
+#define LDMT1R_MIFTYP_SYS18 (0xa << 0)
+#define LDMT1R_MIFTYP_SYS24 (0xb << 0)
+#define LDMT1R_MIFTYP_MASK (0xf << 0)
+#define LDMT2R 0x41c
+#define LDMT2R_CSUP_MASK (7 << 26)
+#define LDMT2R_CSUP_SHIFT 26
+#define LDMT2R_RSV (1 << 25)
+#define LDMT2R_VSEL (1 << 24)
+#define LDMT2R_WCSC_MASK (0xff << 16)
+#define LDMT2R_WCSC_SHIFT 16
+#define LDMT2R_WCEC_MASK (0xff << 8)
+#define LDMT2R_WCEC_SHIFT 8
+#define LDMT2R_WCLW_MASK (0xff << 0)
+#define LDMT2R_WCLW_SHIFT 0
+#define LDMT3R 0x420
+#define LDMT3R_RDLC_MASK (0x3f << 24)
+#define LDMT3R_RDLC_SHIFT 24
+#define LDMT3R_RCSC_MASK (0xff << 16)
+#define LDMT3R_RCSC_SHIFT 16
+#define LDMT3R_RCEC_MASK (0xff << 8)
+#define LDMT3R_RCEC_SHIFT 8
+#define LDMT3R_RCLW_MASK (0xff << 0)
+#define LDMT3R_RCLW_SHIFT 0
+#define LDDFR 0x424
+#define LDDFR_CF1 (1 << 18)
+#define LDDFR_CF0 (1 << 17)
+#define LDDFR_CC (1 << 16)
+#define LDDFR_YF_420 (0 << 8)
+#define LDDFR_YF_422 (1 << 8)
+#define LDDFR_YF_444 (2 << 8)
+#define LDDFR_YF_MASK (3 << 8)
+#define LDDFR_PKF_ARGB32 (0x00 << 0)
+#define LDDFR_PKF_RGB16 (0x03 << 0)
+#define LDDFR_PKF_RGB24 (0x0b << 0)
+#define LDDFR_PKF_MASK (0x1f << 0)
+#define LDSM1R 0x428
+#define LDSM1R_OS (1 << 0)
+#define LDSM2R 0x42c
+#define LDSM2R_OSTRG (1 << 0)
+#define LDSA1R 0x430
+#define LDSA2R 0x434
+#define LDMLSR 0x438
+#define LDWBFR 0x43c
+#define LDWBCNTR 0x440
+#define LDWBAR 0x444
+#define LDHCNR 0x448
+#define LDHSYNR 0x44c
+#define LDVLNR 0x450
+#define LDVSYNR 0x454
+#define LDHPDR 0x458
+#define LDVPDR 0x45c
+#define LDPMR 0x460
+#define LDPMR_LPS (3 << 0)
+#define LDINTR 0x468
+#define LDINTR_FE (1 << 10)
+#define LDINTR_VSE (1 << 9)
+#define LDINTR_VEE (1 << 8)
+#define LDINTR_FS (1 << 2)
+#define LDINTR_VSS (1 << 1)
+#define LDINTR_VES (1 << 0)
+#define LDINTR_STATUS_MASK (0xff << 0)
+#define LDSR 0x46c
+#define LDSR_MSS (1 << 10)
+#define LDSR_MRS (1 << 8)
+#define LDSR_AS (1 << 1)
+#define LDCNT1R 0x470
+#define LDCNT1R_DE (1 << 0)
+#define LDCNT2R 0x474
+#define LDCNT2R_BR (1 << 8)
+#define LDCNT2R_MD (1 << 3)
+#define LDCNT2R_SE (1 << 2)
+#define LDCNT2R_ME (1 << 1)
+#define LDCNT2R_DO (1 << 0)
+#define LDRCNTR 0x478
+#define LDRCNTR_SRS (1 << 17)
+#define LDRCNTR_SRC (1 << 16)
+#define LDRCNTR_MRS (1 << 1)
+#define LDRCNTR_MRC (1 << 0)
+#define LDDDSR 0x47c
+#define LDDDSR_LS (1 << 2)
+#define LDDDSR_WS (1 << 1)
+#define LDDDSR_BS (1 << 0)
+#define LDHAJR 0x4a0
+
+#define LDDWD0R 0x800
+#define LDDWDxR_WDACT (1 << 28)
+#define LDDWDxR_RSW (1 << 24)
+#define LDDRDR 0x840
+#define LDDRDR_RSR (1 << 24)
+#define LDDRDR_DRD_MASK (0x3ffff << 0)
+#define LDDWAR 0x900
+#define LDDWAR_WA (1 << 0)
+#define LDDRAR 0x904
+#define LDDRAR_RA (1 << 0)
+
+#define LDBCR 0xb00
+#define LDBCR_UPC(n) (1 << ((n) + 16))
+#define LDBCR_UPF(n) (1 << ((n) + 8))
+#define LDBCR_UPD(n) (1 << ((n) + 0))
+#define LDBnBSIFR(n) (0xb20 + (n) * 0x20 + 0x00)
+#define LDBBSIFR_EN (1 << 31)
+#define LDBBSIFR_VS (1 << 29)
+#define LDBBSIFR_BRSEL (1 << 28)
+#define LDBBSIFR_MX (1 << 27)
+#define LDBBSIFR_MY (1 << 26)
+#define LDBBSIFR_CV3 (3 << 24)
+#define LDBBSIFR_CV2 (2 << 24)
+#define LDBBSIFR_CV1 (1 << 24)
+#define LDBBSIFR_CV0 (0 << 24)
+#define LDBBSIFR_CV_MASK (3 << 24)
+#define LDBBSIFR_LAY_MASK (0xff << 16)
+#define LDBBSIFR_LAY_SHIFT 16
+#define LDBBSIFR_ROP3_MASK (0xff << 16)
+#define LDBBSIFR_ROP3_SHIFT 16
+#define LDBBSIFR_AL_PL8 (3 << 14)
+#define LDBBSIFR_AL_PL1 (2 << 14)
+#define LDBBSIFR_AL_PK (1 << 14)
+#define LDBBSIFR_AL_1 (0 << 14)
+#define LDBBSIFR_AL_MASK (3 << 14)
+#define LDBBSIFR_SWPL (1 << 10)
+#define LDBBSIFR_SWPW (1 << 9)
+#define LDBBSIFR_SWPB (1 << 8)
+#define LDBBSIFR_RY (1 << 7)
+#define LDBBSIFR_CHRR_420 (2 << 0)
+#define LDBBSIFR_CHRR_422 (1 << 0)
+#define LDBBSIFR_CHRR_444 (0 << 0)
+#define LDBBSIFR_RPKF_ARGB32 (0x00 << 0)
+#define LDBBSIFR_RPKF_RGB16 (0x03 << 0)
+#define LDBBSIFR_RPKF_RGB24 (0x0b << 0)
+#define LDBBSIFR_RPKF_MASK (0x1f << 0)
+#define LDBnBSSZR(n) (0xb20 + (n) * 0x20 + 0x04)
+#define LDBBSSZR_BVSS_MASK (0xfff << 16)
+#define LDBBSSZR_BVSS_SHIFT 16
+#define LDBBSSZR_BHSS_MASK (0xfff << 0)
+#define LDBBSSZR_BHSS_SHIFT 0
+#define LDBnBLOCR(n) (0xb20 + (n) * 0x20 + 0x08)
+#define LDBBLOCR_CVLC_MASK (0xfff << 16)
+#define LDBBLOCR_CVLC_SHIFT 16
+#define LDBBLOCR_CHLC_MASK (0xfff << 0)
+#define LDBBLOCR_CHLC_SHIFT 0
+#define LDBnBSMWR(n) (0xb20 + (n) * 0x20 + 0x0c)
+#define LDBBSMWR_BSMWA_MASK (0xffff << 16)
+#define LDBBSMWR_BSMWA_SHIFT 16
+#define LDBBSMWR_BSMW_MASK (0xffff << 0)
+#define LDBBSMWR_BSMW_SHIFT 0
+#define LDBnBSAYR(n) (0xb20 + (n) * 0x20 + 0x10)
+#define LDBBSAYR_FG1A_MASK (0xff << 24)
+#define LDBBSAYR_FG1A_SHIFT 24
+#define LDBBSAYR_FG1R_MASK (0xff << 16)
+#define LDBBSAYR_FG1R_SHIFT 16
+#define LDBBSAYR_FG1G_MASK (0xff << 8)
+#define LDBBSAYR_FG1G_SHIFT 8
+#define LDBBSAYR_FG1B_MASK (0xff << 0)
+#define LDBBSAYR_FG1B_SHIFT 0
+#define LDBnBSACR(n) (0xb20 + (n) * 0x20 + 0x14)
+#define LDBBSACR_FG2A_MASK (0xff << 24)
+#define LDBBSACR_FG2A_SHIFT 24
+#define LDBBSACR_FG2R_MASK (0xff << 16)
+#define LDBBSACR_FG2R_SHIFT 16
+#define LDBBSACR_FG2G_MASK (0xff << 8)
+#define LDBBSACR_FG2G_SHIFT 8
+#define LDBBSACR_FG2B_MASK (0xff << 0)
+#define LDBBSACR_FG2B_SHIFT 0
+#define LDBnBSAAR(n) (0xb20 + (n) * 0x20 + 0x18)
+#define LDBBSAAR_AP_MASK (0xff << 24)
+#define LDBBSAAR_AP_SHIFT 24
+#define LDBBSAAR_R_MASK (0xff << 16)
+#define LDBBSAAR_R_SHIFT 16
+#define LDBBSAAR_GY_MASK (0xff << 8)
+#define LDBBSAAR_GY_SHIFT 8
+#define LDBBSAAR_B_MASK (0xff << 0)
+#define LDBBSAAR_B_SHIFT 0
+#define LDBnBPPCR(n) (0xb20 + (n) * 0x20 + 0x1c)
+#define LDBBPPCR_AP_MASK (0xff << 24)
+#define LDBBPPCR_AP_SHIFT 24
+#define LDBBPPCR_R_MASK (0xff << 16)
+#define LDBBPPCR_R_SHIFT 16
+#define LDBBPPCR_GY_MASK (0xff << 8)
+#define LDBBPPCR_GY_SHIFT 8
+#define LDBBPPCR_B_MASK (0xff << 0)
+#define LDBBPPCR_B_SHIFT 0
+#define LDBnBBGCL(n) (0xb10 + (n) * 0x04)
+#define LDBBBGCL_BGA_MASK (0xff << 24)
+#define LDBBBGCL_BGA_SHIFT 24
+#define LDBBBGCL_BGR_MASK (0xff << 16)
+#define LDBBBGCL_BGR_SHIFT 16
+#define LDBBBGCL_BGG_MASK (0xff << 8)
+#define LDBBBGCL_BGG_SHIFT 8
+#define LDBBBGCL_BGB_MASK (0xff << 0)
+#define LDBBBGCL_BGB_SHIFT 0
+
+#define LCDC_SIDE_B_OFFSET 0x1000
+#define LCDC_MIRROR_OFFSET 0x2000
+
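+/*
+ * Several LCDC registers are banked in side A and side B instances, offset
+ * by LCDC_SIDE_B_OFFSET, with the active side selected through LDRCNTR.
+ */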
+static inline bool lcdc_is_banked(u32 reg)
+{
+ switch (reg) {
+ case LDMT1R:
+ case LDMT2R:
+ case LDMT3R:
+ case LDDFR:
+ case LDSM1R:
+ case LDSA1R:
+ case LDSA2R:
+ case LDMLSR:
+ case LDWBFR:
+ case LDWBCNTR:
+ case LDWBAR:
+ case LDHCNR:
+ case LDHSYNR:
+ case LDVLNR:
+ case LDVSYNR:
+ case LDHPDR:
+ case LDVPDR:
+ case LDHAJR:
+ return true;
+ default:
+ return reg >= LDBnBBGCL(0) && reg <= LDBnBPPCR(3);
+ }
+}
+
+static inline void lcdc_write_mirror(struct shmob_drm_device *sdev, u32 reg,
+ u32 data)
+{
+ iowrite32(data, sdev->mmio + reg + LCDC_MIRROR_OFFSET);
+}
+
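+/*
+ * Write a register, mirroring the value to the side B instance when the
+ * register is banked.
+ */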
+static inline void lcdc_write(struct shmob_drm_device *sdev, u32 reg, u32 data)
+{
+ iowrite32(data, sdev->mmio + reg);
+ if (lcdc_is_banked(reg))
+ iowrite32(data, sdev->mmio + reg + LCDC_SIDE_B_OFFSET);
+}
+
+static inline u32 lcdc_read(struct shmob_drm_device *sdev, u32 reg)
+{
+ return ioread32(sdev->mmio + reg);
+}
+
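+/*
+ * Busy-wait until (reg & mask) == until, giving up after 5ms. Returns 0 on
+ * success or -ETIMEDOUT on timeout.
+ */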
+static inline int lcdc_wait_bit(struct shmob_drm_device *sdev, u32 reg,
+ u32 mask, u32 until)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(5);
+
+ while ((lcdc_read(sdev, reg) & mask) != until) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cpu_relax();
+ }
+
+ return 0;
+}
+
+#endif /* __SHMOB_DRM_REGS_H__ */
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index f8187ead7b37..0df71eacd587 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -472,7 +472,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
else
tmp = pgprot_noncached(tmp);
#endif
-#if defined(__sparc__)
+#if defined(__sparc__) || defined(__mips__)
if (!(caching_flags & TTM_PL_FLAG_CACHED))
tmp = pgprot_noncached(tmp);
#endif
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 4f9e548b2eec..969d76532088 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -1060,7 +1060,7 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
if (!_manager)
- goto err_manager;
+ goto err;
mutex_init(&_manager->lock);
INIT_LIST_HEAD(&_manager->pools);
@@ -1078,9 +1078,6 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
}
ttm_dma_pool_mm_shrink_init(_manager);
return 0;
-err_manager:
- kfree(_manager);
- _manager = NULL;
err:
return ret;
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index fa09daf9a50c..11c9369d4208 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -290,8 +290,6 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
struct file *swap_storage;
struct page *from_page;
struct page *to_page;
- void *from_virtual;
- void *to_virtual;
int i;
int ret = -ENOMEM;
@@ -311,11 +309,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
goto out_err;
preempt_disable();
- from_virtual = kmap_atomic(from_page);
- to_virtual = kmap_atomic(to_page);
- memcpy(to_virtual, from_virtual, PAGE_SIZE);
- kunmap_atomic(to_virtual);
- kunmap_atomic(from_virtual);
+ copy_highpage(to_page, from_page);
preempt_enable();
page_cache_release(from_page);
}
@@ -336,8 +330,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
struct file *swap_storage;
struct page *from_page;
struct page *to_page;
- void *from_virtual;
- void *to_virtual;
int i;
int ret = -ENOMEM;
@@ -367,11 +359,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
goto out_err;
}
preempt_disable();
- from_virtual = kmap_atomic(from_page);
- to_virtual = kmap_atomic(to_page);
- memcpy(to_virtual, from_virtual, PAGE_SIZE);
- kunmap_atomic(to_virtual);
- kunmap_atomic(from_virtual);
+ copy_highpage(to_page, from_page);
preempt_enable();
set_page_dirty(to_page);
mark_page_accessed(to_page);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 8d9dc44f1f94..d568aa4b10d1 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -57,11 +57,8 @@ static int udl_get_modes(struct drm_connector *connector)
edid = (struct edid *)udl_get_edid(udl);
- connector->display_info.raw_edid = (char *)edid;
-
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
- connector->display_info.raw_edid = NULL;
kfree(edid);
return ret;
}
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
index 0731ab2e6c06..0476bfe06d14 100644
--- a/drivers/gpu/drm/udl/udl_encoder.c
+++ b/drivers/gpu/drm/udl/udl_encoder.c
@@ -16,7 +16,7 @@
#include "udl_drv.h"
/* dummy encoder */
-void udl_enc_destroy(struct drm_encoder *encoder)
+static void udl_enc_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
kfree(encoder);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index ce9a61179925..0e47e9614b78 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
+#include <linux/dma-buf.h>
#include "drmP.h"
#include "drm.h"
@@ -356,12 +357,12 @@ static struct fb_ops udlfb_ops = {
.fb_release = udl_fb_release,
};
-void udl_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+static void udl_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
}
-void udl_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+static void udl_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno)
{
*red = 0;
@@ -377,16 +378,33 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
{
struct udl_framebuffer *ufb = to_udl_fb(fb);
int i;
+ int ret = 0;
if (!ufb->active_16)
return 0;
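+ /*
+ * If the framebuffer is backed by an imported dma-buf, bracket CPU
+ * access to the backing pages around the damage handling.
+ */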
+ if (ufb->obj->base.import_attach) {
+ ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
+ 0, ufb->obj->base.size,
+ DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < num_clips; i++) {
- udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
+ ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
clips[i].x2 - clips[i].x1,
clips[i].y2 - clips[i].y1);
+ if (ret)
+ break;
}
- return 0;
+
+ if (ufb->obj->base.import_attach) {
+ dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
+ 0, ufb->obj->base.size,
+ DMA_FROM_DEVICE);
+ }
+ return ret;
}
static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 291ecc145585..47b256375831 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -181,11 +181,6 @@ int udl_gem_vmap(struct udl_gem_object *obj)
int ret;
if (obj->base.import_attach) {
- ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
- 0, obj->base.size, DMA_BIDIRECTIONAL);
- if (ret)
- return -EINVAL;
-
obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
if (!obj->vmapping)
return -ENOMEM;
@@ -206,8 +201,6 @@ void udl_gem_vunmap(struct udl_gem_object *obj)
{
if (obj->base.import_attach) {
dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
- dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
- obj->base.size, DMA_BIDIRECTIONAL);
return;
}
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 4c2d836a0893..40bf468c46c2 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -41,11 +41,8 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
if (total_len > 5) {
- DRM_INFO("vendor descriptor length:%x data:%02x %02x %02x %02x" \
- "%02x %02x %02x %02x %02x %02x %02x\n",
- total_len, desc[0],
- desc[1], desc[2], desc[3], desc[4], desc[5], desc[6],
- desc[7], desc[8], desc[9], desc[10]);
+ DRM_INFO("vendor descriptor length:%x data:%*ph\n",
+ total_len, 11, desc);
if ((desc[0] != total_len) || /* descriptor length */
(desc[1] != 0x5f) || /* vendor descriptor type */
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 9159d48d1dfd..8f258158c1fe 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -391,7 +391,7 @@ static const struct drm_crtc_funcs udl_crtc_funcs = {
.destroy = udl_crtc_destroy,
};
-int udl_crtc_init(struct drm_device *dev)
+static int udl_crtc_init(struct drm_device *dev)
{
struct drm_crtc *crtc;
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index b9320e2608dd..f9fff657a90b 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -126,10 +126,10 @@ static void udl_compress_hline16(
while ((pixel_end > pixel) &&
(cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) {
- uint8_t *raw_pixels_count_byte = 0;
- uint8_t *cmd_pixels_count_byte = 0;
- const u8 *raw_pixel_start = 0;
- const u8 *cmd_pixel_start, *cmd_pixel_end = 0;
+ uint8_t *raw_pixels_count_byte = NULL;
+ uint8_t *cmd_pixels_count_byte = NULL;
+ const u8 *raw_pixel_start = NULL;
+ const u8 *cmd_pixel_start, *cmd_pixel_end = NULL;
prefetchw((void *) cmd); /* pull in one cache line at least */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index ba2c35dbf10e..09f44c5dd49d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -438,7 +438,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_ERROR("Failed allocating a device private struct.\n");
return -ENOMEM;
}
- memset(dev_priv, 0, sizeof(*dev_priv));
pci_set_master(dev->pdev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index c50724bd30f6..54743943d8b3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -483,7 +483,6 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
}
/* only need to do this once */
- memset(cmd, 0, fifo_size);
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
diff --git a/drivers/staging/omapdrm/omap_connector.c b/drivers/staging/omapdrm/omap_connector.c
index 55e9c8655850..38be186c249a 100644
--- a/drivers/staging/omapdrm/omap_connector.c
+++ b/drivers/staging/omapdrm/omap_connector.c
@@ -200,14 +200,11 @@ static int omap_connector_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(
connector, edid);
n = drm_add_edid_modes(connector, edid);
- kfree(connector->display_info.raw_edid);
- connector->display_info.raw_edid = edid;
} else {
drm_mode_connector_update_edid_property(
connector, NULL);
- connector->display_info.raw_edid = NULL;
- kfree(edid);
}
+ kfree(edid);
} else {
struct drm_display_mode *mode = drm_mode_create(dev);
struct omap_video_timings timings = {0};
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
index 8c6ed3b0c6f6..8a027bb77d97 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/staging/omapdrm/omap_fbdev.c
@@ -276,7 +276,7 @@ fail:
if (fbi)
framebuffer_release(fbi);
if (fb)
- fb->funcs->destroy(fb);
+ drm_framebuffer_remove(fb);
}
return ret;
@@ -401,7 +401,7 @@ void omap_fbdev_free(struct drm_device *dev)
/* this will free the backing object */
if (fbdev->fb)
- fbdev->fb->funcs->destroy(fbdev->fb);
+ drm_framebuffer_remove(fbdev->fb);
kfree(fbdev);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index d6b67bb9075f..d5f0c163eef1 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1367,6 +1367,7 @@ extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
/* Cache management (drm_cache.c) */
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+void drm_clflush_sg(struct sg_table *st);
void drm_clflush_virt_range(char *addr, unsigned long length);
/* Locking IOCTL support (drm_lock.h) */
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index bfacf0d5a225..8e405b82707d 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -215,11 +215,10 @@ struct drm_display_info {
u32 color_formats;
u8 cea_rev;
-
- char *raw_edid; /* if any */
};
struct drm_framebuffer_funcs {
+ /* note: use drm_framebuffer_remove() */
void (*destroy)(struct drm_framebuffer *framebuffer);
int (*create_handle)(struct drm_framebuffer *fb,
struct drm_file *file_priv,
@@ -244,6 +243,16 @@ struct drm_framebuffer_funcs {
struct drm_framebuffer {
struct drm_device *dev;
+ /*
+ * Note that the fb is refcounted for the benefit of driver internals,
+ * for example on some hw disabling a CRTC/plane is asynchronous, and
+ * scanout does not actually complete until the next vblank. So some
+ * cleanup (like releasing the reference(s) on the backing GEM bo(s))
+ * should be deferred. In cases like this, the driver would like to
+ * hold a ref to the fb even though it has already been removed from
+ * the userspace perspective.
+ */
+ struct kref refcount;
struct list_head head;
struct drm_mode_object base;
const struct drm_framebuffer_funcs *funcs;
@@ -359,6 +368,9 @@ struct drm_crtc_funcs {
* @enabled: is this CRTC enabled?
* @mode: current mode timings
* @hwmode: mode timings as programmed to hw regs
+ * @invert_dimensions: for purposes of error checking crtc vs fb sizes,
+ * invert the width/height of the crtc. This is used if the driver
+ * is performing 90 or 270 degree rotated scanout
* @x: x position on screen
* @y: y position on screen
* @funcs: CRTC control functions
@@ -392,6 +404,8 @@ struct drm_crtc {
*/
struct drm_display_mode hwmode;
+ bool invert_dimensions;
+
int x, y;
const struct drm_crtc_funcs *funcs;
@@ -593,6 +607,7 @@ struct drm_connector {
int video_latency[2]; /* [0]: progressive, [1]: interlaced */
int audio_latency[2];
int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
+ unsigned bad_edid_counter;
};
/**
@@ -920,6 +935,9 @@ extern void drm_framebuffer_set_object(struct drm_device *dev,
extern int drm_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
const struct drm_framebuffer_funcs *funcs);
+extern void drm_framebuffer_unreference(struct drm_framebuffer *fb);
+extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
+extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
@@ -1035,7 +1053,7 @@ extern int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
extern int drm_edid_header_is_valid(const u8 *raw_edid);
-extern bool drm_edid_block_valid(u8 *raw_edid, int block);
+extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
extern bool drm_edid_is_valid(struct edid *edid);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
new file mode 100644
index 000000000000..76c709837543
--- /dev/null
+++ b/include/drm/drm_fb_cma_helper.h
@@ -0,0 +1,27 @@
+#ifndef __DRM_FB_CMA_HELPER_H__
+#define __DRM_FB_CMA_HELPER_H__
+
+struct drm_fbdev_cma;
+struct drm_gem_cma_object;
+
+struct drm_framebuffer;
+struct drm_device;
+struct drm_file;
+struct drm_mode_fb_cmd2;
+
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+ unsigned int preferred_bpp, unsigned int num_crtc,
+ unsigned int max_conn_count);
+void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
+
+void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
+void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
+
+struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
+ struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd);
+
+struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
+ unsigned int plane);
+
+#endif
+
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index f4621184a9b4..646ae5f39f42 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -106,6 +106,8 @@
#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
/* special NV12 tiled format */
#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
new file mode 100644
index 000000000000..f0f6b1af25ad
--- /dev/null
+++ b/include/drm/drm_gem_cma_helper.h
@@ -0,0 +1,44 @@
+#ifndef __DRM_GEM_CMA_HELPER_H__
+#define __DRM_GEM_CMA_HELPER_H__
+
+struct drm_gem_cma_object {
+ struct drm_gem_object base;
+ dma_addr_t paddr;
+ void *vaddr;
+};
+
+static inline struct drm_gem_cma_object *
+to_drm_gem_cma_obj(struct drm_gem_object *gem_obj)
+{
+ return container_of(gem_obj, struct drm_gem_cma_object, base);
+}
+
+/* free gem object. */
+void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
+
+/* create memory region for drm framebuffer. */
+int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm, struct drm_mode_create_dumb *args);
+
+/* map memory region for drm framebuffer to user space. */
+int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *drm, uint32_t handle, uint64_t *offset);
+
+/* set vm_flags; other vm attributes can be changed here if needed. */
+int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/*
+ * destroy the allocated memory region.
+ * - the gem handle and the physical memory region pointed to by the gem
+ *   object are released by drm_gem_handle_delete().
+ */
+int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *drm, unsigned int handle);
+
+/* allocate physical memory. */
+struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+ unsigned int size);
+
+extern const struct vm_operations_struct drm_gem_cma_vm_ops;
+
+#endif /* __DRM_GEM_CMA_HELPER_H__ */
diff --git a/include/drm/drm_sarea.h b/include/drm/drm_sarea.h
index ee5389d22c64..1d1a858a203d 100644
--- a/include/drm/drm_sarea.h
+++ b/include/drm/drm_sarea.h
@@ -37,6 +37,8 @@
/* SAREA area needs to be at least a page */
#if defined(__alpha__)
#define SAREA_MAX 0x2000U
+#elif defined(__mips__)
+#define SAREA_MAX 0x4000U
#elif defined(__ia64__)
#define SAREA_MAX 0x10000U /* 64kB */
#else
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 8cc70837f929..e737607e055e 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -203,6 +203,9 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_WAIT 0x2c
#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
+#define DRM_I915_GEM_SET_CACHING 0x2f
+#define DRM_I915_GEM_GET_CACHING 0x30
+#define DRM_I915_REG_READ 0x31
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -227,6 +230,8 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
+#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
@@ -249,6 +254,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
+#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -305,6 +311,9 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_LLC 17
#define I915_PARAM_HAS_ALIASING_PPGTT 18
#define I915_PARAM_HAS_WAIT_TIMEOUT 19
+#define I915_PARAM_HAS_SEMAPHORES 20
+#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
+#define I915_PARAM_RSVD_FOR_FUTURE_USE 22
typedef struct drm_i915_getparam {
int param;
@@ -698,10 +707,31 @@ struct drm_i915_gem_busy {
/** Handle of the buffer to check for busy */
__u32 handle;
- /** Return busy status (1 if busy, 0 if idle) */
+ /** Return busy status (1 if busy, 0 if idle).
+ * The high word is used to indicate on which rings the object
+ * currently resides:
+ * 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
+ */
__u32 busy;
};
+#define I915_CACHING_NONE 0
+#define I915_CACHING_CACHED 1
+
+struct drm_i915_gem_caching {
+ /**
+ * Handle of the buffer to set/get the caching level of. */
+ __u32 handle;
+
+ /**
+ * Caching level to apply or returned value.
+ *
+ * bits 0-15 are for generic caching control (i.e. the above defined
+ * values). bits 16-31 are reserved for platform-specific variations
+ * (e.g. l3$ caching on gen7). */
+ __u32 caching;
+};
+
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
@@ -918,4 +948,8 @@ struct drm_i915_gem_context_destroy {
__u32 pad;
};
+struct drm_i915_reg_read {
+ __u64 offset;
+ __u64 val; /* Return value */
+};
#endif /* _I915_DRM_H_ */
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 8e29d551bb3c..2e37e9f02e71 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -30,16 +30,10 @@ void intel_gmch_remove(void);
bool intel_enable_gtt(void);
void intel_gtt_chipset_flush(void);
-void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg);
-void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
-int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
- struct scatterlist **sg_list, int *num_sg);
-void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
- unsigned int sg_len,
+void intel_gtt_insert_sg_entries(struct sg_table *st,
unsigned int pg_start,
unsigned int flags);
-void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
- struct page **pages, unsigned int flags);
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
/* Special gtt memory types */
#define AGP_DCACHE_MEMORY 1
diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h
new file mode 100644
index 000000000000..7c686d335c12
--- /dev/null
+++ b/include/linux/platform_data/shmob_drm.h
@@ -0,0 +1,99 @@
+/*
+ * shmob_drm.h -- SH Mobile DRM driver
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_H__
+#define __SHMOB_DRM_H__
+
+#include <linux/kernel.h>
+
+#include <drm/drm_mode.h>
+
+struct sh_mobile_meram_cfg;
+struct sh_mobile_meram_info;
+
+enum shmob_drm_clk_source {
+ SHMOB_DRM_CLK_BUS,
+ SHMOB_DRM_CLK_PERIPHERAL,
+ SHMOB_DRM_CLK_EXTERNAL,
+};
+
+enum shmob_drm_interface {
+ SHMOB_DRM_IFACE_RGB8, /* 24bpp, 8:8:8 */
+ SHMOB_DRM_IFACE_RGB9, /* 18bpp, 9:9 */
+ SHMOB_DRM_IFACE_RGB12A, /* 24bpp, 12:12 */
+ SHMOB_DRM_IFACE_RGB12B, /* 12bpp */
+ SHMOB_DRM_IFACE_RGB16, /* 16bpp */
+ SHMOB_DRM_IFACE_RGB18, /* 18bpp */
+ SHMOB_DRM_IFACE_RGB24, /* 24bpp */
+ SHMOB_DRM_IFACE_YUV422, /* 16bpp */
+ SHMOB_DRM_IFACE_SYS8A, /* 24bpp, 8:8:8 */
+ SHMOB_DRM_IFACE_SYS8B, /* 18bpp, 8:8:2 */
+ SHMOB_DRM_IFACE_SYS8C, /* 18bpp, 2:8:8 */
+ SHMOB_DRM_IFACE_SYS8D, /* 16bpp, 8:8 */
+ SHMOB_DRM_IFACE_SYS9, /* 18bpp, 9:9 */
+ SHMOB_DRM_IFACE_SYS12, /* 24bpp, 12:12 */
+ SHMOB_DRM_IFACE_SYS16A, /* 16bpp */
+ SHMOB_DRM_IFACE_SYS16B, /* 18bpp, 16:2 */
+ SHMOB_DRM_IFACE_SYS16C, /* 18bpp, 2:16 */
+ SHMOB_DRM_IFACE_SYS18, /* 18bpp */
+ SHMOB_DRM_IFACE_SYS24, /* 24bpp */
+};
+
+struct shmob_drm_backlight_data {
+ const char *name;
+ int max_brightness;
+ int (*get_brightness)(void);
+ int (*set_brightness)(int brightness);
+};
+
+struct shmob_drm_panel_data {
+ unsigned int width_mm; /* Panel width in mm */
+ unsigned int height_mm; /* Panel height in mm */
+ struct drm_mode_modeinfo mode;
+};
+
+struct shmob_drm_sys_interface_data {
+ unsigned int read_latch:6;
+ unsigned int read_setup:8;
+ unsigned int read_cycle:8;
+ unsigned int read_strobe:8;
+ unsigned int write_setup:8;
+ unsigned int write_cycle:8;
+ unsigned int write_strobe:8;
+ unsigned int cs_setup:3;
+ unsigned int vsync_active_high:1;
+ unsigned int vsync_dir_input:1;
+};
+
+#define SHMOB_DRM_IFACE_FL_DWPOL (1 << 0) /* Rising edge dot clock data latch */
+#define SHMOB_DRM_IFACE_FL_DIPOL (1 << 1) /* Active low display enable */
+#define SHMOB_DRM_IFACE_FL_DAPOL (1 << 2) /* Active low display data */
+#define SHMOB_DRM_IFACE_FL_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */
+#define SHMOB_DRM_IFACE_FL_DWCNT (1 << 4) /* Disable dotclock during blanking */
+
+struct shmob_drm_interface_data {
+ enum shmob_drm_interface interface;
+ struct shmob_drm_sys_interface_data sys;
+ unsigned int clk_div;
+ unsigned int flags;
+};
+
+struct shmob_drm_platform_data {
+ enum shmob_drm_clk_source clk_source;
+ struct shmob_drm_interface_data iface;
+ struct shmob_drm_panel_data panel;
+ struct shmob_drm_backlight_data backlight;
+ const struct sh_mobile_meram_cfg *meram;
+};
+
+#endif /* __SHMOB_DRM_H__ */