Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 9
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 3
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 9
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 51
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 41
-rw-r--r--  drivers/gpu/drm/drm_context.c | 8
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 40
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 10
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 123
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 26
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 226
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 16
-rw-r--r--  drivers/gpu/drm/drm_scatter.c | 13
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 9
-rw-r--r--  drivers/gpu/drm/drm_vma_manager.c | 281
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 34
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 9
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 39
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 481
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 2
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 836
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 74
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 297
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 416
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 323
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 37
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 265
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 202
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 973
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 1286
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 110
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 71
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1025
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 418
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 431
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 43
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 595
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.h | 2
-rw-r--r--  drivers/gpu/drm/mga/mga_state.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/dport.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 46
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_helpers.c | 49
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 5
-rw-r--r--  drivers/gpu/drm/qxl/qxl_dumb.c | 7
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 2
-rw-r--r--  drivers/gpu/drm/r128/r128_cce.c | 2
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.h | 2
-rw-r--r--  drivers/gpu/drm/r128/r128_state.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 14
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 2
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c | 2
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.h | 2
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.h | 2
-rw-r--r--  drivers/gpu/drm/sis/sis_mm.c | 8
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 2
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_slave.c | 27
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 102
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c | 41
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 81
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 19
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 4
-rw-r--r--  drivers/gpu/drm/via/via_dma.c | 2
-rw-r--r--  drivers/gpu/drm/via/via_drv.h | 2
-rw-r--r--  drivers/gpu/drm/via/via_mm.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4
-rw-r--r--  drivers/gpu/host1x/drm/drm.c | 4
-rw-r--r--  drivers/gpu/host1x/drm/gem.c | 11
-rw-r--r--  drivers/gpu/host1x/drm/gem.h | 2
-rw-r--r--  drivers/staging/imx-drm/imx-drm-core.c | 4
130 files changed, 5552 insertions(+), 4185 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index a7c54c843291..626bc0cb1046 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -6,7 +6,7 @@
#
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
- depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
+ depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
select HDMI
select I2C
select I2C_ALGOBIT
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 801bcafa3028..d943b94afc90 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -13,7 +13,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
- drm_rect.o
+ drm_rect.o drm_vma_manager.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index df0d0a08097a..a144fb044852 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -216,7 +216,7 @@ static struct drm_driver driver = {
.gem_free_object = ast_gem_free_object,
.dumb_create = ast_dumb_create,
.dumb_map_offset = ast_dumb_mmap_offset,
- .dumb_destroy = ast_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
};
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 622d4ae7eb9e..796dbb212a41 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -322,9 +322,6 @@ ast_bo(struct ttm_buffer_object *bo)
extern int ast_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-extern int ast_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle);
extern int ast_gem_init_object(struct drm_gem_object *obj);
extern void ast_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f60fd7bd1183..7f6152d374ca 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -449,13 +449,6 @@ int ast_dumb_create(struct drm_file *file,
return 0;
}
-int ast_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
int ast_gem_init_object(struct drm_gem_object *obj)
{
BUG();
@@ -487,7 +480,7 @@ void ast_gem_free_object(struct drm_gem_object *obj)
static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
{
- return bo->bo.addr_space_offset;
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
}
int
ast_dumb_mmap_offset(struct drm_file *file,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 8ecb601152ef..d35d99c15f84 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -102,7 +102,7 @@ static struct drm_driver driver = {
.gem_free_object = cirrus_gem_free_object,
.dumb_create = cirrus_dumb_create,
.dumb_map_offset = cirrus_dumb_mmap_offset,
- .dumb_destroy = cirrus_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
};
static struct pci_driver cirrus_pci_driver = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index bae55609e6c3..9b0bb9184afd 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -203,9 +203,6 @@ int cirrus_gem_create(struct drm_device *dev,
int cirrus_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int cirrus_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle);
int cirrus_framebuffer_init(struct drm_device *dev,
struct cirrus_framebuffer *gfb,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 35cbae827771..f130a533a512 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,13 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
return 0;
}
-int cirrus_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
int cirrus_gem_init_object(struct drm_gem_object *obj)
{
BUG();
@@ -294,7 +287,7 @@ void cirrus_gem_free_object(struct drm_gem_object *obj)
static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
{
- return bo->bo.addr_space_offset;
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
}
int
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 3d8fed179797..e301d653d97e 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -424,6 +424,57 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
}
/**
+ * drm_agp_clear - Clear AGP resource list
+ * @dev: DRM device
+ *
+ * Iterate over all AGP resources and remove them. But keep the AGP head
+ * intact so it can still be used. It is safe to call this if AGP is disabled or
+ * was already removed.
+ *
+ * If DRIVER_MODESET is active, nothing is done to protect the modesetting
+ * resources from getting destroyed. Drivers are responsible for cleaning them up
+ * during device shutdown.
+ */
+void drm_agp_clear(struct drm_device *dev)
+{
+ struct drm_agp_mem *entry, *tempe;
+
+ if (!drm_core_has_AGP(dev) || !dev->agp)
+ return;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
+ if (entry->bound)
+ drm_unbind_agp(entry->memory);
+ drm_free_agp(entry->memory, entry->pages);
+ kfree(entry);
+ }
+ INIT_LIST_HEAD(&dev->agp->memory);
+
+ if (dev->agp->acquired)
+ drm_agp_release(dev);
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+}
+
+/**
+ * drm_agp_destroy - Destroy AGP head
+ * @dev: DRM device
+ *
+ * Destroy resources that were previously allocated via drm_agp_init(). The
+ * caller must clean up all AGP resources before calling this. See
+ * drm_agp_clear().
+ *
+ * Call this to destroy AGP heads allocated via drm_agp_init().
+ */
+void drm_agp_destroy(struct drm_agp_head *agp)
+{
+ kfree(agp);
+}
+
+/**
* Binds a collection of pages into AGP memory at the given offset, returning
* the AGP memory structure containing them.
*
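For illustration, a minimal sketch of the teardown order these two helpers enable, mirroring the drm_pci_agp_destroy() hook added further down (the wrapper name is hypothetical):

static void example_agp_teardown(struct drm_device *dev)
{
	/* Drop all AGP memory bindings; dev->agp itself stays valid. */
	drm_agp_clear(dev);

	/* Only then free the head and clear the pointer. */
	drm_agp_destroy(dev->agp);
	dev->agp = NULL;
}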
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 5a4dbb410b71..bef4abff8fa3 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -243,7 +243,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
}
map->handle = vmalloc_user(map->size);
DRM_DEBUG("%lu %d %p\n",
- map->size, drm_order(map->size), map->handle);
+ map->size, order_base_2(map->size), map->handle);
if (!map->handle) {
kfree(map);
return -ENOMEM;
@@ -630,7 +630,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
return -EINVAL;
count = request->count;
- order = drm_order(request->size);
+ order = order_base_2(request->size);
size = 1 << order;
alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -800,7 +800,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
return -EPERM;
count = request->count;
- order = drm_order(request->size);
+ order = order_base_2(request->size);
size = 1 << order;
DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
@@ -1002,7 +1002,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
return -EPERM;
count = request->count;
- order = drm_order(request->size);
+ order = order_base_2(request->size);
size = 1 << order;
alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -1157,7 +1157,7 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
return -EPERM;
count = request->count;
- order = drm_order(request->size);
+ order = order_base_2(request->size);
size = 1 << order;
alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -1435,7 +1435,7 @@ int drm_markbufs(struct drm_device *dev, void *data,
DRM_DEBUG("%d, %d, %d\n",
request->size, request->low_mark, request->high_mark);
- order = drm_order(request->size);
+ order = order_base_2(request->size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
entry = &dma->bufs[order];
@@ -1600,25 +1600,16 @@ int drm_mapbufs(struct drm_device *dev, void *data,
return retcode;
}
-/**
- * Compute size order. Returns the exponent of the smaller power of two which
- * is greater or equal to given number.
- *
- * \param size size.
- * \return order.
- *
- * \todo Can be made faster.
- */
-int drm_order(unsigned long size)
+struct drm_local_map *drm_getsarea(struct drm_device *dev)
{
- int order;
- unsigned long tmp;
-
- for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
-
- if (size & (size - 1))
- ++order;
+ struct drm_map_list *entry;
- return order;
+ list_for_each_entry(entry, &dev->maplist, head) {
+ if (entry->map && entry->map->type == _DRM_SHM &&
+ (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+ return entry->map;
+ }
+ }
+ return NULL;
}
-EXPORT_SYMBOL(drm_order);
+EXPORT_SYMBOL(drm_getsarea);
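The removed drm_order() computed the exponent of the smallest power of two greater than or equal to the given size; the generic order_base_2() from <linux/log2.h> returns the same value for any size >= 1, which is why the conversions above are drop-in replacements. A minimal sketch, with illustrative values:

#include <linux/log2.h>

/* order_base_2(4096) == 12, order_base_2(4097) == 13 --
 * exactly what the removed drm_order() used to return. */
order = order_base_2(request->size);
size = 1 << order;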
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 725968d38976..224ff965bcf7 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -251,7 +251,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
struct drm_file *file_priv, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
- dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
@@ -261,7 +260,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
when the kernel holds the lock, release
that lock here. */
clear_bit(0, &dev->context_flag);
- wake_up(&dev->context_wait);
return 0;
}
@@ -342,12 +340,6 @@ int drm_addctx(struct drm_device *dev, void *data,
return 0;
}
-int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- /* This does nothing */
- return 0;
-}
-
/**
* Get context.
*
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 99fcd7c32ea2..dddd79988ffc 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -87,7 +87,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -122,7 +122,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
- DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
@@ -195,27 +195,8 @@ int drm_lastclose(struct drm_device * dev)
mutex_lock(&dev->struct_mutex);
- /* Clear AGP information */
- if (drm_core_has_AGP(dev) && dev->agp &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct drm_agp_mem *entry, *tempe;
-
- /* Remove AGP resources, but leave dev->agp
- intact until drv_cleanup is called. */
- list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
- if (entry->bound)
- drm_unbind_agp(entry->memory);
- drm_free_agp(entry->memory, entry->pages);
- kfree(entry);
- }
- INIT_LIST_HEAD(&dev->agp->memory);
-
- if (dev->agp->acquired)
- drm_agp_release(dev);
+ drm_agp_clear(dev);
- dev->agp->acquired = 0;
- dev->agp->enabled = 0;
- }
if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
!drm_core_check_feature(dev, DRIVER_MODESET)) {
drm_sg_cleanup(dev->sg);
@@ -485,19 +466,4 @@ long drm_ioctl(struct file *filp,
DRM_DEBUG("ret = %d\n", retcode);
return retcode;
}
-
EXPORT_SYMBOL(drm_ioctl);
-
-struct drm_local_map *drm_getsarea(struct drm_device *dev)
-{
- struct drm_map_list *entry;
-
- list_for_each_entry(entry, &dev->maplist, head) {
- if (entry->map && entry->map->type == _DRM_SHM &&
- (entry->map->flags & _DRM_CONTAINS_LOCK)) {
- return entry->map;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(drm_getsarea);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3a24385e0368..72acae908a7d 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -76,20 +76,10 @@ static int drm_setup(struct drm_device * dev)
dev->sigdata.lock = NULL;
dev->context_flag = 0;
- dev->interrupt_flag = 0;
- dev->dma_flag = 0;
dev->last_context = 0;
- dev->last_switch = 0;
- dev->last_checked = 0;
- init_waitqueue_head(&dev->context_wait);
dev->if_version = 0;
- dev->ctx_start = 0;
- dev->lck_start = 0;
-
dev->buf_async = NULL;
- init_waitqueue_head(&dev->buf_readers);
- init_waitqueue_head(&dev->buf_writers);
DRM_DEBUG("\n");
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 603f256152ef..9ab038c8dd5f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -37,6 +37,7 @@
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
/** @file drm_gem.c
*
@@ -102,14 +103,9 @@ drm_gem_init(struct drm_device *dev)
}
dev->mm_private = mm;
-
- if (drm_ht_create(&mm->offset_hash, 12)) {
- kfree(mm);
- return -ENOMEM;
- }
-
- drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE);
+ drm_vma_offset_manager_init(&mm->vma_manager,
+ DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE);
return 0;
}
@@ -119,8 +115,7 @@ drm_gem_destroy(struct drm_device *dev)
{
struct drm_gem_mm *mm = dev->mm_private;
- drm_mm_takedown(&mm->offset_manager);
- drm_ht_remove(&mm->offset_hash);
+ drm_vma_offset_manager_destroy(&mm->vma_manager);
kfree(mm);
dev->mm_private = NULL;
}
@@ -132,16 +127,14 @@ drm_gem_destroy(struct drm_device *dev)
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size)
{
- BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+ struct file *filp;
- obj->dev = dev;
- obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
- if (IS_ERR(obj->filp))
- return PTR_ERR(obj->filp);
+ filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+ if (IS_ERR(filp))
+ return PTR_ERR(filp);
- kref_init(&obj->refcount);
- atomic_set(&obj->handle_count, 0);
- obj->size = size;
+ drm_gem_private_object_init(dev, obj, size);
+ obj->filp = filp;
return 0;
}
@@ -152,8 +145,8 @@ EXPORT_SYMBOL(drm_gem_object_init);
* no GEM provided backing store. Instead the caller is responsible for
* backing the object and handling it.
*/
-int drm_gem_private_object_init(struct drm_device *dev,
- struct drm_gem_object *obj, size_t size)
+void drm_gem_private_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
{
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
@@ -163,8 +156,6 @@ int drm_gem_private_object_init(struct drm_device *dev,
kref_init(&obj->refcount);
atomic_set(&obj->handle_count, 0);
obj->size = size;
-
- return 0;
}
EXPORT_SYMBOL(drm_gem_private_object_init);
@@ -253,6 +244,20 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
+ * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
+ *
+ * This implements the ->dumb_destroy kms driver callback for drivers which use
+ * gem to manage their backing storage.
+ */
+int drm_gem_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+EXPORT_SYMBOL(drm_gem_dumb_destroy);
+
+/**
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
@@ -306,12 +311,8 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list = &obj->map_list;
- drm_ht_remove_item(&mm->offset_hash, &list->hash);
- drm_mm_put_block(list->file_offset_node);
- kfree(list->map);
- list->map = NULL;
+ drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
@@ -331,54 +332,9 @@ drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list;
- struct drm_local_map *map;
- int ret;
- /* Set the object up for mmap'ing */
- list = &obj->map_list;
- list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
- if (!list->map)
- return -ENOMEM;
-
- map = list->map;
- map->type = _DRM_GEM;
- map->size = obj->size;
- map->handle = obj;
-
- /* Get a DRM GEM mmap offset allocated... */
- list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- obj->size / PAGE_SIZE, 0, false);
-
- if (!list->file_offset_node) {
- DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
- ret = -ENOSPC;
- goto out_free_list;
- }
-
- list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- obj->size / PAGE_SIZE, 0);
- if (!list->file_offset_node) {
- ret = -ENOMEM;
- goto out_free_list;
- }
-
- list->hash.key = list->file_offset_node->start;
- ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
- if (ret) {
- DRM_ERROR("failed to add to map hash\n");
- goto out_free_mm;
- }
-
- return 0;
-
-out_free_mm:
- drm_mm_put_block(list->file_offset_node);
-out_free_list:
- kfree(list->map);
- list->map = NULL;
-
- return ret;
+ return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
+ obj->size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
@@ -707,8 +663,8 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_local_map *map = NULL;
- struct drm_hash_item *hash;
+ struct drm_gem_object *obj;
+ struct drm_vma_offset_node *node;
int ret = 0;
if (drm_device_is_unplugged(dev))
@@ -716,21 +672,16 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
mutex_lock(&dev->struct_mutex);
- if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+ node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
+ vma_pages(vma));
+ if (!node) {
mutex_unlock(&dev->struct_mutex);
return drm_mmap(filp, vma);
}
- map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
- if (!map ||
- ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
- ret = -EPERM;
- goto out_unlock;
- }
-
- ret = drm_gem_mmap_obj(map->handle, map->size, vma);
+ obj = container_of(node, struct drm_gem_object, vma_node);
+ ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
-out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
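With the offset hash and drm_map_list gone, every ->dumb_map_offset implementation converted below (ast, cirrus, exynos, gma500, ...) reduces to the same two calls. A hedged sketch of the pattern, assuming the lookup helpers of this kernel version (the function name is hypothetical):

int example_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file, handle);
	if (!obj)
		return -ENOENT;

	/* Idempotent: drm_vma_offset_add() returns 0 if already added. */
	ret = drm_gem_create_mmap_offset(obj);
	if (!ret)
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_unreference_unlocked(obj);
	return ret;
}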
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index ece72a8ac245..0a4f80574eb4 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -27,11 +27,7 @@
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_gem_cma_helper.h>
-
-static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
-{
- return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
-}
+#include <drm/drm_vma_manager.h>
/*
* __drm_gem_cma_create - Create a GEM CMA object without allocating memory
@@ -172,8 +168,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
struct drm_gem_cma_object *cma_obj;
- if (gem_obj->map_list.map)
- drm_gem_free_mmap_offset(gem_obj);
+ drm_gem_free_mmap_offset(gem_obj);
cma_obj = to_drm_gem_cma_obj(gem_obj);
@@ -237,7 +232,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
return -EINVAL;
}
- *offset = get_gem_mmap_offset(gem_obj);
+ *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
drm_gem_object_unreference(gem_obj);
@@ -286,27 +281,16 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
-/*
- * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
- */
-int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *drm, unsigned int handle)
-{
- return drm_gem_handle_delete(file_priv, handle);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
-
#ifdef CONFIG_DEBUG_FS
void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
{
struct drm_gem_object *obj = &cma_obj->base;
struct drm_device *dev = obj->dev;
- uint64_t off = 0;
+ uint64_t off;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- if (obj->map_list.map)
- off = (uint64_t)obj->map_list.hash.key;
+ off = drm_vma_node_start(&obj->vma_node);
seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d",
obj->name, obj->refcount.refcount.counter,
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 543b9b3171d3..aded1e11e8ff 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -49,58 +49,18 @@
#define MM_UNUSED_TARGET 4
-static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
-{
- struct drm_mm_node *child;
-
- if (atomic)
- child = kzalloc(sizeof(*child), GFP_ATOMIC);
- else
- child = kzalloc(sizeof(*child), GFP_KERNEL);
-
- if (unlikely(child == NULL)) {
- spin_lock(&mm->unused_lock);
- if (list_empty(&mm->unused_nodes))
- child = NULL;
- else {
- child =
- list_entry(mm->unused_nodes.next,
- struct drm_mm_node, node_list);
- list_del(&child->node_list);
- --mm->num_unused;
- }
- spin_unlock(&mm->unused_lock);
- }
- return child;
-}
-
-/* drm_mm_pre_get() - pre allocate drm_mm_node structure
- * drm_mm: memory manager struct we are pre-allocating for
- *
- * Returns 0 on success or -ENOMEM if allocation fails.
- */
-int drm_mm_pre_get(struct drm_mm *mm)
-{
- struct drm_mm_node *node;
-
- spin_lock(&mm->unused_lock);
- while (mm->num_unused < MM_UNUSED_TARGET) {
- spin_unlock(&mm->unused_lock);
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- spin_lock(&mm->unused_lock);
-
- if (unlikely(node == NULL)) {
- int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
- spin_unlock(&mm->unused_lock);
- return ret;
- }
- ++mm->num_unused;
- list_add_tail(&node->node_list, &mm->unused_nodes);
- }
- spin_unlock(&mm->unused_lock);
- return 0;
-}
-EXPORT_SYMBOL(drm_mm_pre_get);
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ enum drm_mm_search_flags flags);
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ enum drm_mm_search_flags flags);
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
@@ -147,33 +107,27 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
}
}
-struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
- unsigned long start,
- unsigned long size,
- bool atomic)
+int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
- struct drm_mm_node *hole, *node;
- unsigned long end = start + size;
+ struct drm_mm_node *hole;
+ unsigned long end = node->start + node->size;
unsigned long hole_start;
unsigned long hole_end;
+ BUG_ON(node == NULL);
+
+ /* Find the relevant hole to add our node to */
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
- if (hole_start > start || hole_end < end)
+ if (hole_start > node->start || hole_end < end)
continue;
- node = drm_mm_kmalloc(mm, atomic);
- if (unlikely(node == NULL))
- return NULL;
-
- node->start = start;
- node->size = size;
node->mm = mm;
node->allocated = 1;
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole->node_list);
- if (start == hole_start) {
+ if (node->start == hole_start) {
hole->hole_follows = 0;
list_del_init(&hole->hole_stack);
}
@@ -184,31 +138,14 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
node->hole_follows = 1;
}
- return node;
+ return 0;
}
- WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
- return NULL;
-}
-EXPORT_SYMBOL(drm_mm_create_block);
-
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- int atomic)
-{
- struct drm_mm_node *node;
-
- node = drm_mm_kmalloc(hole_node->mm, atomic);
- if (unlikely(node == NULL))
- return NULL;
-
- drm_mm_insert_helper(hole_node, node, size, alignment, color);
-
- return node;
+ WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
+ node->start, node->size);
+ return -ENOSPC;
}
-EXPORT_SYMBOL(drm_mm_get_block_generic);
+EXPORT_SYMBOL(drm_mm_reserve_node);
/**
* Search for free space and insert a preallocated memory node. Returns
@@ -217,12 +154,13 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
*/
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
- unsigned long color)
+ unsigned long color,
+ enum drm_mm_search_flags flags)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_generic(mm, size, alignment,
- color, 0);
+ color, flags);
if (!hole_node)
return -ENOSPC;
@@ -231,13 +169,6 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
-int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment)
-{
- return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
-}
-EXPORT_SYMBOL(drm_mm_insert_node);
-
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
@@ -290,27 +221,6 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
}
}
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- unsigned long start,
- unsigned long end,
- int atomic)
-{
- struct drm_mm_node *node;
-
- node = drm_mm_kmalloc(hole_node->mm, atomic);
- if (unlikely(node == NULL))
- return NULL;
-
- drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
- start, end);
-
- return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_range_generic);
-
/**
* Search for free space and insert a preallocated memory node. Returns
* -ENOSPC if no suitable free area is available. This is for range
@@ -318,13 +228,14 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
*/
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment, unsigned long color,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end,
+ enum drm_mm_search_flags flags)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_in_range_generic(mm,
size, alignment, color,
- start, end, 0);
+ start, end, flags);
if (!hole_node)
return -ENOSPC;
@@ -335,14 +246,6 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
-int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment,
- unsigned long start, unsigned long end)
-{
- return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
-}
-EXPORT_SYMBOL(drm_mm_insert_node_in_range);
-
/**
* Remove a memory node from the allocator.
*/
@@ -377,28 +280,6 @@ void drm_mm_remove_node(struct drm_mm_node *node)
}
EXPORT_SYMBOL(drm_mm_remove_node);
-/*
- * Remove a memory node from the allocator and free the allocated struct
- * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
- * drm_mm_get_block functions.
- */
-void drm_mm_put_block(struct drm_mm_node *node)
-{
-
- struct drm_mm *mm = node->mm;
-
- drm_mm_remove_node(node);
-
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&node->node_list, &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(node);
- spin_unlock(&mm->unused_lock);
-}
-EXPORT_SYMBOL(drm_mm_put_block);
-
static int check_free_hole(unsigned long start, unsigned long end,
unsigned long size, unsigned alignment)
{
@@ -414,11 +295,11 @@ static int check_free_hole(unsigned long start, unsigned long end,
return end >= start + size;
}
-struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- bool best_match)
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ enum drm_mm_search_flags flags)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
@@ -441,7 +322,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
- if (!best_match)
+ if (!(flags & DRM_MM_SEARCH_BEST))
return entry;
if (entry->size < best_size) {
@@ -452,15 +333,14 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
return best;
}
-EXPORT_SYMBOL(drm_mm_search_free_generic);
-struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
- bool best_match)
+ enum drm_mm_search_flags flags)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
@@ -488,7 +368,7 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
- if (!best_match)
+ if (!(flags & DRM_MM_SEARCH_BEST))
return entry;
if (entry->size < best_size) {
@@ -499,7 +379,6 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
return best;
}
-EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
/**
* Moves an allocation. To be used with embedded struct drm_mm_node.
@@ -634,8 +513,8 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
* corrupted.
*
* When the scan list is empty, the selected memory nodes can be freed. An
- * immediately following drm_mm_search_free with best_match = 0 will then return
- * the just freed block (because its at the top of the free_stack list).
+ * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
+ * return the just freed block (because it's at the top of the free_stack list).
*
* Returns one if this block should be evicted, zero otherwise. Will always
* return zero when no hole has been found.
@@ -672,10 +551,7 @@ EXPORT_SYMBOL(drm_mm_clean);
void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->hole_stack);
- INIT_LIST_HEAD(&mm->unused_nodes);
- mm->num_unused = 0;
mm->scanned_blocks = 0;
- spin_lock_init(&mm->unused_lock);
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
@@ -695,22 +571,8 @@ EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm * mm)
{
- struct drm_mm_node *entry, *next;
-
- if (WARN(!list_empty(&mm->head_node.node_list),
- "Memory manager not clean. Delaying takedown\n")) {
- return;
- }
-
- spin_lock(&mm->unused_lock);
- list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
- list_del(&entry->node_list);
- kfree(entry);
- --mm->num_unused;
- }
- spin_unlock(&mm->unused_lock);
-
- BUG_ON(mm->num_unused != 0);
+ WARN(!list_empty(&mm->head_node.node_list),
+ "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);
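The drm_mm rework above removes all allocator-owned nodes: callers now embed a struct drm_mm_node and either reserve an exact, prefilled range or search for free space, with best-fit selected through search flags instead of a bool. A minimal sketch of both paths (mm, size and alignment are assumed to exist in the caller):

struct drm_mm_node node;
int ret;

/* Reserve a fixed range: fill start/size, then hand it to drm_mm. */
memset(&node, 0, sizeof(node));
node.start = 0x100000;	/* hypothetical offset */
node.size  = 0x10000;	/* hypothetical size */
ret = drm_mm_reserve_node(&mm, &node);	/* -ENOSPC if no hole covers it */

/* Or let drm_mm search; DRM_MM_SEARCH_BEST replaces best_match. */
ret = drm_mm_insert_node_generic(&mm, &node, size, alignment,
				 0 /* color */, DRM_MM_SEARCH_DEFAULT);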
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 80c0b2b29801..0f54ad8a9ced 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -52,10 +52,8 @@
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
-#if 1
unsigned long addr;
size_t sz;
-#endif
/* pci_alloc_consistent only guarantees alignment to the smallest
* PAGE_SIZE order which is greater than or equal to the requested size.
@@ -97,10 +95,8 @@ EXPORT_SYMBOL(drm_pci_alloc);
*/
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
-#if 1
unsigned long addr;
size_t sz;
-#endif
if (dmah->vaddr) {
/* XXX - Is virt_to_page() legal for consistent mem? */
@@ -287,6 +283,17 @@ static int drm_pci_agp_init(struct drm_device *dev)
return 0;
}
+static void drm_pci_agp_destroy(struct drm_device *dev)
+{
+ if (drm_core_has_AGP(dev) && dev->agp) {
+ if (drm_core_has_MTRR(dev))
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ drm_agp_clear(dev);
+ drm_agp_destroy(dev->agp);
+ dev->agp = NULL;
+ }
+}
+
static struct drm_bus drm_pci_bus = {
.bus_type = DRIVER_BUS_PCI,
.get_irq = drm_pci_get_irq,
@@ -295,6 +302,7 @@ static struct drm_bus drm_pci_bus = {
.set_unique = drm_pci_set_unique,
.irq_by_busid = drm_pci_irq_by_busid,
.agp_init = drm_pci_agp_init,
+ .agp_destroy = drm_pci_agp_destroy,
};
/**
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index d87f60bbc330..a4a076ff1757 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -70,8 +70,10 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
# define ScatterHandle(x) (unsigned int)(x)
#endif
-int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
+int drm_sg_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
+ struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
unsigned long pages, i, j;
@@ -181,15 +183,6 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
return -ENOMEM;
}
-int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_scatter_gather *request = data;
-
- return drm_sg_alloc(dev, request);
-
-}
-
int drm_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 327ca19cda85..d663f7d66dab 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -451,16 +451,11 @@ void drm_put_dev(struct drm_device *dev)
drm_lastclose(dev);
- if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp)
- arch_phys_wc_del(dev->agp->agp_mtrr);
-
if (dev->driver->unload)
dev->driver->unload(dev);
- if (drm_core_has_AGP(dev) && dev->agp) {
- kfree(dev->agp);
- dev->agp = NULL;
- }
+ if (dev->driver->bus->agp_destroy)
+ dev->driver->bus->agp_destroy(dev);
drm_vblank_cleanup(dev);
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
new file mode 100644
index 000000000000..3837481d5607
--- /dev/null
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2012 David Airlie <airlied@linux.ie>
+ * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_vma_manager.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/**
+ * DOC: vma offset manager
+ *
+ * The vma-manager is responsible for mapping arbitrary driver-dependent memory
+ * regions into the linear user address-space. It provides offsets to the
+ * caller which can then be used on the address_space of the drm-device. It
+ * takes care to not overlap regions, size them appropriately and to not
+ * confuse mm-core by inconsistent fake vm_pgoff fields.
+ * Drivers shouldn't use this for object placement in VMEM. This manager should
+ * only be used to manage mappings into linear user-space VMs.
+ *
+ * We use drm_mm as backend to manage object allocations. But it is highly
+ * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
+ * speed up offset lookups.
+ *
+ * You must not use multiple offset managers on a single address_space.
+ * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
+ * no longer be linear. Please use VM_NONLINEAR in that case and implement your
+ * own offset managers.
+ *
+ * This offset manager works on page-based addresses. That is, every argument
+ * and return code (with the exception of drm_vma_node_offset_addr()) is given
+ * in number of pages, not number of bytes. That means, object sizes and offsets
+ * must always be page-aligned (as usual).
+ * If you want to get a valid byte-based user-space address for a given offset,
+ * please see drm_vma_node_offset_addr().
+ */
+
+/**
+ * drm_vma_offset_manager_init - Initialize new offset-manager
+ * @mgr: Manager object
+ * @page_offset: Offset of available memory area (page-based)
+ * @size: Size of available address space range (page-based)
+ *
+ * Initialize a new offset-manager. The offset and area size available for the
+ * manager are given as @page_offset and @size. Both are interpreted as
+ * page-numbers, not bytes.
+ *
+ * Adding/removing nodes from the manager is locked internally and protected
+ * against concurrent access. However, node allocation and destruction is left
+ * for the caller. While calling into the vma-manager, a given node must
+ * always be guaranteed to be referenced.
+ */
+void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
+ unsigned long page_offset, unsigned long size)
+{
+ rwlock_init(&mgr->vm_lock);
+ mgr->vm_addr_space_rb = RB_ROOT;
+ drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
+}
+EXPORT_SYMBOL(drm_vma_offset_manager_init);
+
+/**
+ * drm_vma_offset_manager_destroy() - Destroy offset manager
+ * @mgr: Manager object
+ *
+ * Destroy an object manager which was previously created via
+ * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
+ * before destroying the manager. Otherwise, drm_mm will refuse to free the
+ * requested resources.
+ *
+ * The manager must not be accessed after this function is called.
+ */
+void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
+{
+ /* take the lock to protect against buggy drivers */
+ write_lock(&mgr->vm_lock);
+ drm_mm_takedown(&mgr->vm_addr_space_mm);
+ write_unlock(&mgr->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
+
+/**
+ * drm_vma_offset_lookup() - Find node in offset space
+ * @mgr: Manager object
+ * @start: Start address for object (page-based)
+ * @pages: Size of object (page-based)
+ *
+ * Find a node given a start address and object size. This returns the _best_
+ * match for the given node. That is, @start may point somewhere into a valid
+ * region and the given node will be returned, as long as the node spans the
+ * whole requested area (given the size in number of pages as @pages).
+ *
+ * RETURNS:
+ * Returns NULL if no suitable node can be found. Otherwise, the best match
+ * is returned. It's the caller's responsibility to make sure the node doesn't
+ * get destroyed before the caller can access it.
+ */
+struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages)
+{
+ struct drm_vma_offset_node *node;
+
+ read_lock(&mgr->vm_lock);
+ node = drm_vma_offset_lookup_locked(mgr, start, pages);
+ read_unlock(&mgr->vm_lock);
+
+ return node;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup);
+
+/**
+ * drm_vma_offset_lookup_locked() - Find node in offset space
+ * @mgr: Manager object
+ * @start: Start address for object (page-based)
+ * @pages: Size of object (page-based)
+ *
+ * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
+ * manually. See drm_vma_offset_lock_lookup() for an example.
+ *
+ * RETURNS:
+ * Returns NULL if no suitable node can be found. Otherwise, the best match
+ * is returned.
+ */
+struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages)
+{
+ struct drm_vma_offset_node *node, *best;
+ struct rb_node *iter;
+ unsigned long offset;
+
+ iter = mgr->vm_addr_space_rb.rb_node;
+ best = NULL;
+
+ while (likely(iter)) {
+ node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
+ offset = node->vm_node.start;
+ if (start >= offset) {
+ iter = iter->rb_right;
+ best = node;
+ if (start == offset)
+ break;
+ } else {
+ iter = iter->rb_left;
+ }
+ }
+
+ /* verify that the node spans the requested area */
+ if (best) {
+ offset = best->vm_node.start + best->vm_node.size;
+ if (offset < start + pages)
+ best = NULL;
+ }
+
+ return best;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
+
+/* internal helper to link @node into the rb-tree */
+static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node)
+{
+ struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
+ struct rb_node *parent = NULL;
+ struct drm_vma_offset_node *iter_node;
+
+ while (likely(*iter)) {
+ parent = *iter;
+ iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
+
+ if (node->vm_node.start < iter_node->vm_node.start)
+ iter = &(*iter)->rb_left;
+ else if (node->vm_node.start > iter_node->vm_node.start)
+ iter = &(*iter)->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&node->vm_rb, parent, iter);
+ rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
+}
+
+/**
+ * drm_vma_offset_add() - Add offset node to manager
+ * @mgr: Manager object
+ * @node: Node to be added
+ * @pages: Allocation size visible to user-space (in number of pages)
+ *
+ * Add a node to the offset-manager. If the node was already added, this does
+ * nothing and returns 0. @pages is the size of the object given in number of
+ * pages.
+ * After this call succeeds, you can access the offset of the node until it
+ * is removed again.
+ *
+ * If this call fails, it is safe to retry the operation or call
+ * drm_vma_offset_remove(), anyway. However, no cleanup is required in that
+ * case.
+ *
+ * @pages is not required to be the same size as the underlying memory object
+ * that you want to map. It only limits the size that user-space can map into
+ * their address space.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node, unsigned long pages)
+{
+ int ret;
+
+ write_lock(&mgr->vm_lock);
+
+ if (drm_mm_node_allocated(&node->vm_node)) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
+ pages, 0, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ goto out_unlock;
+
+ _drm_vma_offset_add_rb(mgr, node);
+
+out_unlock:
+ write_unlock(&mgr->vm_lock);
+ return ret;
+}
+EXPORT_SYMBOL(drm_vma_offset_add);
+
+/**
+ * drm_vma_offset_remove() - Remove offset node from manager
+ * @mgr: Manager object
+ * @node: Node to be removed
+ *
+ * Remove a node from the offset manager. If the node wasn't added before, this
+ * does nothing. After this call returns, the offset and size will be 0 until a
+ * new offset is allocated via drm_vma_offset_add() again. Helper functions like
+ * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
+ * offset is allocated.
+ */
+void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node)
+{
+ write_lock(&mgr->vm_lock);
+
+ if (drm_mm_node_allocated(&node->vm_node)) {
+ rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
+ drm_mm_remove_node(&node->vm_node);
+ memset(&node->vm_node, 0, sizeof(node->vm_node));
+ }
+
+ write_unlock(&mgr->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_remove);
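For reference, a hedged end-to-end sketch of the new manager as drm_gem.c above uses it; obj and vma stand in for a driver's GEM object and the mmap VMA, and all arguments are in pages except the final byte address:

struct drm_vma_offset_manager mgr;
struct drm_vma_offset_node *node;

drm_vma_offset_manager_init(&mgr, DRM_FILE_PAGE_OFFSET_START,
			    DRM_FILE_PAGE_OFFSET_SIZE);

/* Publish a fake offset covering the object (size in pages). */
ret = drm_vma_offset_add(&mgr, &obj->vma_node, obj->size >> PAGE_SHIFT);

/* In ->mmap: resolve the fake vm_pgoff back to the object. */
node = drm_vma_offset_lookup(&mgr, vma->vm_pgoff, vma_pages(vma));
if (node)
	obj = container_of(node, struct drm_gem_object, vma_node);

/* Byte offset handed to user-space for the mmap(2) call. */
offset = drm_vma_node_offset_addr(&obj->vma_node);

drm_vma_offset_remove(&mgr, &obj->vma_node);
drm_vma_offset_manager_destroy(&mgr);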
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ca2729a85129..df81d3c959b4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -213,7 +213,7 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
.close = drm_gem_vm_close,
};
-static struct drm_ioctl_desc exynos_ioctls[] = {
+static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
@@ -271,12 +271,13 @@ static struct drm_driver exynos_drm_driver = {
.gem_vm_ops = &exynos_drm_gem_vm_ops,
.dumb_create = exynos_drm_gem_dumb_create,
.dumb_map_offset = exynos_drm_gem_dumb_map_offset,
- .dumb_destroy = exynos_drm_gem_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = exynos_dmabuf_prime_export,
.gem_prime_import = exynos_dmabuf_prime_import,
.ioctls = exynos_ioctls,
+ .num_ioctls = ARRAY_SIZE(exynos_ioctls),
.fops = &exynos_drm_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -288,7 +289,6 @@ static struct drm_driver exynos_drm_driver = {
static int exynos_drm_platform_probe(struct platform_device *pdev)
{
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
return drm_platform_init(&exynos_drm_driver, pdev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 24c22a8c3364..b904633863e8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -10,6 +10,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>
@@ -152,8 +153,7 @@ out:
exynos_drm_fini_buf(obj->dev, buf);
exynos_gem_obj->buffer = NULL;
- if (obj->map_list.map)
- drm_gem_free_mmap_offset(obj);
+ drm_gem_free_mmap_offset(obj);
/* release file pointer to gem object. */
drm_gem_object_release(obj);
@@ -703,13 +703,11 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
goto unlock;
}
- if (!obj->map_list.map) {
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
- }
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
- *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
out:
@@ -719,26 +717,6 @@ unlock:
return ret;
}
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- unsigned int handle)
-{
- int ret;
-
- /*
- * obj->refcount and obj->handle_count are decreased and
- * if both them are 0 then exynos_drm_gem_free_object()
- * would be called by callback to release resources.
- */
- ret = drm_gem_handle_delete(file_priv, handle);
- if (ret < 0) {
- DRM_ERROR("failed to delete drm_gem_handle.\n");
- return ret;
- }
-
- return 0;
-}
-
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 468766bee450..09555afdfe9c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -151,15 +151,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset);
-/*
- * destroy memory region allocated.
- * - a gem handle and physical memory region pointed by a gem object
- * would be released by drm_gem_handle_delete().
- */
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- unsigned int handle);
-
/* page fault handler and mmap fault address(virtual) to physical memory. */
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 8b1b6d923abe..362dd2ad286f 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -321,10 +321,8 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
/* Begin by trying to use stolen memory backing */
backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
if (backing) {
- if (drm_gem_private_object_init(dev,
- &backing->gem, aligned_size) == 0)
- return backing;
- psb_gtt_free_range(dev, backing);
+ drm_gem_private_object_init(dev, &backing->gem, aligned_size);
+ return backing;
}
return NULL;
}
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index eefd6cc5b80d..10ae8c52d06f 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -26,6 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/gma_drm.h>
+#include <drm/drm_vma_manager.h>
#include "psb_drv.h"
int psb_gem_init_object(struct drm_gem_object *obj)
@@ -38,8 +39,7 @@ void psb_gem_free_object(struct drm_gem_object *obj)
struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
/* Remove the list map if one is present */
- if (obj->map_list.map)
- drm_gem_free_mmap_offset(obj);
+ drm_gem_free_mmap_offset(obj);
drm_gem_object_release(obj);
/* This must occur last as it frees up the memory of the GEM object */
@@ -81,13 +81,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
/* What validation is needed here ? */
/* Make it mmapable */
- if (!obj->map_list.map) {
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
- }
- /* GEM should really work out the hash offsets for us */
- *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
drm_gem_object_unreference(obj);
unlock:
@@ -165,23 +162,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
}
/**
- * psb_gem_dumb_destroy - destroy a dumb buffer
- * @file: client file
- * @dev: our DRM device
- * @handle: the object handle
- *
- * Destroy a handle that was created via psb_gem_dumb_create, at least
- * we hope it was created that way. i915 seems to assume the caller
- * does the checking but that might be worth review ! FIXME
- */
-int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle)
-{
- /* No special work needed, drop the reference and see what falls out */
- return drm_gem_handle_delete(file, handle);
-}
-
-/**
* psb_gem_fault - pagefault handler for GEM objects
* @vma: the VMA of the GEM object
* @vmf: fault detail
@@ -261,11 +241,12 @@ static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
if (gtt == NULL)
return -ENOMEM;
- if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0)
- goto free_gtt;
+
+ drm_gem_private_object_init(dev, &gtt->gem, size);
if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
return 0;
-free_gtt:
+
+ drm_gem_object_release(&gtt->gem);
psb_gtt_free_range(dev, gtt);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index bddea5807442..d13c2fc848bc 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -131,7 +131,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-static struct drm_ioctl_desc psb_ioctls[] = {
+static const struct drm_ioctl_desc psb_ioctls[] = {
DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl,
DRM_AUTH),
@@ -652,7 +652,7 @@ static struct drm_driver driver = {
.gem_vm_ops = &psb_gem_vm_ops,
.dumb_create = psb_gem_dumb_create,
.dumb_map_offset = psb_gem_dumb_map_gtt,
- .dumb_destroy = psb_gem_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.fops = &psb_gem_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 6053b8abcd12..984cacfcbaf2 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -838,8 +838,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
struct drm_file *file);
extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle);
extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index e68b58a1aaf9..c2bd711e86e9 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -23,7 +23,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_edid.h>
-
+#include <drm/i2c/tda998x.h>
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
@@ -32,6 +32,11 @@ struct tda998x_priv {
uint16_t rev;
uint8_t current_page;
int dpms;
+ bool is_hdmi_sink;
+ u8 vip_cntrl_0;
+ u8 vip_cntrl_1;
+ u8 vip_cntrl_2;
+ struct tda998x_encoder_params params;
};
#define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
@@ -68,10 +73,13 @@ struct tda998x_priv {
# define I2C_MASTER_DIS_MM (1 << 0)
# define I2C_MASTER_DIS_FILT (1 << 1)
# define I2C_MASTER_APP_STRT_LAT (1 << 2)
+#define REG_FEAT_POWERDOWN REG(0x00, 0x0e) /* read/write */
+# define FEAT_POWERDOWN_SPDIF (1 << 3)
#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */
#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */
#define REG_INT_FLAGS_2 REG(0x00, 0x11) /* read/write */
# define INT_FLAGS_2_EDID_BLK_RD (1 << 1)
+#define REG_ENA_ACLK REG(0x00, 0x16) /* read/write */
#define REG_ENA_VP_0 REG(0x00, 0x18) /* read/write */
#define REG_ENA_VP_1 REG(0x00, 0x19) /* read/write */
#define REG_ENA_VP_2 REG(0x00, 0x1a) /* read/write */
@@ -110,6 +118,8 @@ struct tda998x_priv {
#define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */
# define VIP_CNTRL_5_CKCASE (1 << 0)
# define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1)
+#define REG_MUX_AP REG(0x00, 0x26) /* read/write */
+#define REG_MUX_VP_VIP_OUT REG(0x00, 0x27) /* read/write */
#define REG_MAT_CONTRL REG(0x00, 0x80) /* write */
# define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0)
# define MAT_CONTRL_MAT_BP (1 << 2)
@@ -130,8 +140,12 @@ struct tda998x_priv {
#define REG_VS_LINE_END_1_LSB REG(0x00, 0xae) /* write */
#define REG_VS_PIX_END_1_MSB REG(0x00, 0xaf) /* write */
#define REG_VS_PIX_END_1_LSB REG(0x00, 0xb0) /* write */
+#define REG_VS_LINE_STRT_2_MSB REG(0x00, 0xb1) /* write */
+#define REG_VS_LINE_STRT_2_LSB REG(0x00, 0xb2) /* write */
#define REG_VS_PIX_STRT_2_MSB REG(0x00, 0xb3) /* write */
#define REG_VS_PIX_STRT_2_LSB REG(0x00, 0xb4) /* write */
+#define REG_VS_LINE_END_2_MSB REG(0x00, 0xb5) /* write */
+#define REG_VS_LINE_END_2_LSB REG(0x00, 0xb6) /* write */
#define REG_VS_PIX_END_2_MSB REG(0x00, 0xb7) /* write */
#define REG_VS_PIX_END_2_LSB REG(0x00, 0xb8) /* write */
#define REG_HS_PIX_START_MSB REG(0x00, 0xb9) /* write */
@@ -142,21 +156,29 @@ struct tda998x_priv {
#define REG_VWIN_START_1_LSB REG(0x00, 0xbe) /* write */
#define REG_VWIN_END_1_MSB REG(0x00, 0xbf) /* write */
#define REG_VWIN_END_1_LSB REG(0x00, 0xc0) /* write */
+#define REG_VWIN_START_2_MSB REG(0x00, 0xc1) /* write */
+#define REG_VWIN_START_2_LSB REG(0x00, 0xc2) /* write */
+#define REG_VWIN_END_2_MSB REG(0x00, 0xc3) /* write */
+#define REG_VWIN_END_2_LSB REG(0x00, 0xc4) /* write */
#define REG_DE_START_MSB REG(0x00, 0xc5) /* write */
#define REG_DE_START_LSB REG(0x00, 0xc6) /* write */
#define REG_DE_STOP_MSB REG(0x00, 0xc7) /* write */
#define REG_DE_STOP_LSB REG(0x00, 0xc8) /* write */
#define REG_TBG_CNTRL_0 REG(0x00, 0xca) /* write */
+# define TBG_CNTRL_0_TOP_TGL (1 << 0)
+# define TBG_CNTRL_0_TOP_SEL (1 << 1)
+# define TBG_CNTRL_0_DE_EXT (1 << 2)
+# define TBG_CNTRL_0_TOP_EXT (1 << 3)
# define TBG_CNTRL_0_FRAME_DIS (1 << 5)
# define TBG_CNTRL_0_SYNC_MTHD (1 << 6)
# define TBG_CNTRL_0_SYNC_ONCE (1 << 7)
#define REG_TBG_CNTRL_1 REG(0x00, 0xcb) /* write */
-# define TBG_CNTRL_1_VH_TGL_0 (1 << 0)
-# define TBG_CNTRL_1_VH_TGL_1 (1 << 1)
-# define TBG_CNTRL_1_VH_TGL_2 (1 << 2)
-# define TBG_CNTRL_1_VHX_EXT_DE (1 << 3)
-# define TBG_CNTRL_1_VHX_EXT_HS (1 << 4)
-# define TBG_CNTRL_1_VHX_EXT_VS (1 << 5)
+# define TBG_CNTRL_1_H_TGL (1 << 0)
+# define TBG_CNTRL_1_V_TGL (1 << 1)
+# define TBG_CNTRL_1_TGL_EN (1 << 2)
+# define TBG_CNTRL_1_X_EXT (1 << 3)
+# define TBG_CNTRL_1_H_EXT (1 << 4)
+# define TBG_CNTRL_1_V_EXT (1 << 5)
# define TBG_CNTRL_1_DWIN_DIS (1 << 6)
#define REG_ENABLE_SPACE REG(0x00, 0xd6) /* write */
#define REG_HVF_CNTRL_0 REG(0x00, 0xe4) /* write */
@@ -171,6 +193,12 @@ struct tda998x_priv {
# define HVF_CNTRL_1_PAD(x) (((x) & 3) << 4)
# define HVF_CNTRL_1_SEMI_PLANAR (1 << 6)
#define REG_RPT_CNTRL REG(0x00, 0xf0) /* write */
+#define REG_I2S_FORMAT REG(0x00, 0xfc) /* read/write */
+# define I2S_FORMAT(x) (((x) & 3) << 0)
+#define REG_AIP_CLKSEL REG(0x00, 0xfd) /* write */
+# define AIP_CLKSEL_FS(x) (((x) & 3) << 0)
+# define AIP_CLKSEL_CLK_POL(x) (((x) & 1) << 2)
+# define AIP_CLKSEL_AIP(x) (((x) & 7) << 3)
/* Page 02h: PLL settings */
@@ -194,6 +222,12 @@ struct tda998x_priv {
#define REG_PLL_SCGR1 REG(0x02, 0x09) /* read/write */
#define REG_PLL_SCGR2 REG(0x02, 0x0a) /* read/write */
#define REG_AUDIO_DIV REG(0x02, 0x0e) /* read/write */
+# define AUDIO_DIV_SERCLK_1 0
+# define AUDIO_DIV_SERCLK_2 1
+# define AUDIO_DIV_SERCLK_4 2
+# define AUDIO_DIV_SERCLK_8 3
+# define AUDIO_DIV_SERCLK_16 4
+# define AUDIO_DIV_SERCLK_32 5
#define REG_SEL_CLK REG(0x02, 0x11) /* read/write */
# define SEL_CLK_SEL_CLK1 (1 << 0)
# define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1)
@@ -212,6 +246,11 @@ struct tda998x_priv {
/* Page 10h: information frames and packets */
+#define REG_IF1_HB0 REG(0x10, 0x20) /* read/write */
+#define REG_IF2_HB0 REG(0x10, 0x40) /* read/write */
+#define REG_IF3_HB0 REG(0x10, 0x60) /* read/write */
+#define REG_IF4_HB0 REG(0x10, 0x80) /* read/write */
+#define REG_IF5_HB0 REG(0x10, 0xa0) /* read/write */
/* Page 11h: audio settings and content info packets */
@@ -221,14 +260,39 @@ struct tda998x_priv {
# define AIP_CNTRL_0_LAYOUT (1 << 2)
# define AIP_CNTRL_0_ACR_MAN (1 << 5)
# define AIP_CNTRL_0_RST_CTS (1 << 6)
+#define REG_CA_I2S REG(0x11, 0x01) /* read/write */
+# define CA_I2S_CA_I2S(x) (((x) & 31) << 0)
+# define CA_I2S_HBR_CHSTAT (1 << 6)
+#define REG_LATENCY_RD REG(0x11, 0x04) /* read/write */
+#define REG_ACR_CTS_0 REG(0x11, 0x05) /* read/write */
+#define REG_ACR_CTS_1 REG(0x11, 0x06) /* read/write */
+#define REG_ACR_CTS_2 REG(0x11, 0x07) /* read/write */
+#define REG_ACR_N_0 REG(0x11, 0x08) /* read/write */
+#define REG_ACR_N_1 REG(0x11, 0x09) /* read/write */
+#define REG_ACR_N_2 REG(0x11, 0x0a) /* read/write */
+#define REG_CTS_N REG(0x11, 0x0c) /* read/write */
+# define CTS_N_K(x) (((x) & 7) << 0)
+# define CTS_N_M(x) (((x) & 3) << 4)
#define REG_ENC_CNTRL REG(0x11, 0x0d) /* read/write */
# define ENC_CNTRL_RST_ENC (1 << 0)
# define ENC_CNTRL_RST_SEL (1 << 1)
# define ENC_CNTRL_CTL_CODE(x) (((x) & 3) << 2)
+#define REG_DIP_FLAGS REG(0x11, 0x0e) /* read/write */
+# define DIP_FLAGS_ACR (1 << 0)
+# define DIP_FLAGS_GC (1 << 1)
+#define REG_DIP_IF_FLAGS REG(0x11, 0x0f) /* read/write */
+# define DIP_IF_FLAGS_IF1 (1 << 1)
+# define DIP_IF_FLAGS_IF2 (1 << 2)
+# define DIP_IF_FLAGS_IF3 (1 << 3)
+# define DIP_IF_FLAGS_IF4 (1 << 4)
+# define DIP_IF_FLAGS_IF5 (1 << 5)
+#define REG_CH_STAT_B(x) REG(0x11, 0x14 + (x)) /* read/write */
/* Page 12h: HDCP and OTP */
#define REG_TX3 REG(0x12, 0x9a) /* read/write */
+#define REG_TX4 REG(0x12, 0x9b) /* read/write */
+# define TX4_PD_RAM (1 << 1)
#define REG_TX33 REG(0x12, 0xb8) /* read/write */
# define TX33_HDMI (1 << 1)
@@ -338,6 +402,23 @@ fail:
return ret;
}
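+/*
+ * Editor's note: buf[] prepends the register address to the payload so
+ * the whole write goes out as a single I2C transaction; the VLA is
+ * sized cnt+1 to make room for that extra address byte.
+ */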
+static void
+reg_write_range(struct drm_encoder *encoder, uint16_t reg, uint8_t *p, int cnt)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ uint8_t buf[cnt+1];
+ int ret;
+
+ buf[0] = REG2ADDR(reg);
+ memcpy(&buf[1], p, cnt);
+
+ set_page(encoder, reg);
+
+ ret = i2c_master_send(client, buf, cnt + 1);
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+}
+
static uint8_t
reg_read(struct drm_encoder *encoder, uint16_t reg)
{
@@ -406,13 +487,172 @@ tda998x_reset(struct drm_encoder *encoder)
reg_write(encoder, REG_SERIALIZER, 0x00);
reg_write(encoder, REG_BUFFER_OUT, 0x00);
reg_write(encoder, REG_PLL_SCG1, 0x00);
- reg_write(encoder, REG_AUDIO_DIV, 0x03);
+ reg_write(encoder, REG_AUDIO_DIV, AUDIO_DIV_SERCLK_8);
reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
reg_write(encoder, REG_PLL_SCGN1, 0xfa);
reg_write(encoder, REG_PLL_SCGN2, 0x00);
reg_write(encoder, REG_PLL_SCGR1, 0x5b);
reg_write(encoder, REG_PLL_SCGR2, 0x00);
reg_write(encoder, REG_PLL_SCG2, 0x10);
+
+	/* Write the default value to the MUX register */
+ reg_write(encoder, REG_MUX_VP_VIP_OUT, 0x24);
+}
+
+static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
+{
+ uint8_t sum = 0;
+
+ while (bytes--)
+ sum += *buf++;
+ return (255 - sum) + 1;
+}
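+
+/*
+ * Editor's note (illustration): (255 - sum) + 1 is the two's-complement
+ * negation of the byte sum, so once the result is stored in the frame
+ * the bytes sum to zero modulo 256, as HDMI infoframe checksums
+ * require.  E.g. bytes summing to 0x9c get checksum 0x100 - 0x9c = 0x64.
+ */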
+
+#define HB(x) (x)
+#define PB(x) (HB(2) + 1 + (x))
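+/*
+ * Editor's note: HB(x) indexes the three infoframe header bytes and
+ * PB(x) the payload bytes that follow, so PB(0) -- the checksum slot
+ * filled in by tda998x_write_if() -- lands at buffer offset 3.
+ */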
+
+static void
+tda998x_write_if(struct drm_encoder *encoder, uint8_t bit, uint16_t addr,
+ uint8_t *buf, size_t size)
+{
+ buf[PB(0)] = tda998x_cksum(buf, size);
+
+ reg_clear(encoder, REG_DIP_IF_FLAGS, bit);
+ reg_write_range(encoder, addr, buf, size);
+ reg_set(encoder, REG_DIP_IF_FLAGS, bit);
+}
+
+static void
+tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
+{
+ uint8_t buf[PB(5) + 1];
+
+ buf[HB(0)] = 0x84;
+ buf[HB(1)] = 0x01;
+ buf[HB(2)] = 10;
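+	/* HB(0..2) above: CEA-861 audio infoframe type 0x84, version 1, length 10 */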
+ buf[PB(0)] = 0;
+ buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
+ buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
+ buf[PB(4)] = p->audio_frame[4];
+ buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
+
+ tda998x_write_if(encoder, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
+ sizeof(buf));
+}
+
+static void
+tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ uint8_t buf[PB(13) + 1];
+
+ memset(buf, 0, sizeof(buf));
+ buf[HB(0)] = 0x82;
+ buf[HB(1)] = 0x02;
+ buf[HB(2)] = 13;
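+	/* HB(0..2) above: CEA-861 AVI infoframe type 0x82, version 2, length 13 */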
+ buf[PB(4)] = drm_match_cea_mode(mode);
+
+ tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
+ sizeof(buf));
+}
+
+static void tda998x_audio_mute(struct drm_encoder *encoder, bool on)
+{
+ if (on) {
+ reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
+ reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
+ reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+ } else {
+ reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+ }
+}
+
+static void
+tda998x_configure_audio(struct drm_encoder *encoder,
+ struct drm_display_mode *mode, struct tda998x_encoder_params *p)
+{
+ uint8_t buf[6], clksel_aip, clksel_fs, ca_i2s, cts_n, adiv;
+ uint32_t n;
+
+ /* Enable audio ports */
+ reg_write(encoder, REG_ENA_AP, p->audio_cfg);
+ reg_write(encoder, REG_ENA_ACLK, p->audio_clk_cfg);
+
+ /* Set audio input source */
+ switch (p->audio_format) {
+ case AFMT_SPDIF:
+ reg_write(encoder, REG_MUX_AP, 0x40);
+ clksel_aip = AIP_CLKSEL_AIP(0);
+ /* FS64SPDIF */
+ clksel_fs = AIP_CLKSEL_FS(2);
+ cts_n = CTS_N_M(3) | CTS_N_K(3);
+ ca_i2s = 0;
+ break;
+
+ case AFMT_I2S:
+ reg_write(encoder, REG_MUX_AP, 0x64);
+ clksel_aip = AIP_CLKSEL_AIP(1);
+ /* ACLK */
+ clksel_fs = AIP_CLKSEL_FS(0);
+ cts_n = CTS_N_M(3) | CTS_N_K(3);
+ ca_i2s = CA_I2S_CA_I2S(0);
+ break;
+ }
+
+ reg_write(encoder, REG_AIP_CLKSEL, clksel_aip);
+ reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT);
+
+ /* Enable automatic CTS generation */
+ reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_ACR_MAN);
+ reg_write(encoder, REG_CTS_N, cts_n);
+
+ /*
+	 * Audio input somehow depends on the HDMI line rate, which is
+	 * related to pixclk. Testing showed that modes with pixclk
+	 * >100MHz need a larger divider while <40MHz need the default.
+	 * There is no detailed info in the datasheet, so we just
+	 * assume anything above 100MHz requires the larger divider.
+ */
+ if (mode->clock > 100000)
+ adiv = AUDIO_DIV_SERCLK_16;
+ else
+ adiv = AUDIO_DIV_SERCLK_8;
+ reg_write(encoder, REG_AUDIO_DIV, adiv);
+
+ /*
+ * This is the approximate value of N, which happens to be
+	 * the recommended value for non-coherent clocks.
+ */
+ n = 128 * p->audio_sample_rate / 1000;
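+	/*
+	 * Editor's note (worked example): 48 kHz gives n = 128 * 48000 /
+	 * 1000 = 6144 and 32 kHz gives 4096, matching the HDMI-recommended
+	 * N for those rates; 44.1 kHz gives 5644 rather than the
+	 * recommended 6272, hence "approximate" above.
+	 */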
+
+ /* Write the CTS and N values */
+ buf[0] = 0x44;
+ buf[1] = 0x42;
+ buf[2] = 0x01;
+ buf[3] = n;
+ buf[4] = n >> 8;
+ buf[5] = n >> 16;
+ reg_write_range(encoder, REG_ACR_CTS_0, buf, 6);
+
+ /* Set CTS clock reference */
+ reg_write(encoder, REG_AIP_CLKSEL, clksel_aip | clksel_fs);
+
+ /* Reset CTS generator */
+ reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
+ reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
+
+ /* Write the channel status */
+ buf[0] = 0x04;
+ buf[1] = 0x00;
+ buf[2] = 0x00;
+ buf[3] = 0xf1;
+ reg_write_range(encoder, REG_CH_STAT_B(0), buf, 4);
+
+ tda998x_audio_mute(encoder, true);
+ mdelay(20);
+ tda998x_audio_mute(encoder, false);
+
+ /* Write the audio information packet */
+ tda998x_write_aif(encoder, p);
}
/* DRM encoder functions */
@@ -420,6 +660,23 @@ tda998x_reset(struct drm_encoder *encoder)
static void
tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+ struct tda998x_encoder_params *p = params;
+
+ priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
+ (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
+ VIP_CNTRL_0_SWAP_B(p->swap_b) |
+ (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
+ priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
+ (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
+ VIP_CNTRL_1_SWAP_D(p->swap_d) |
+ (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
+ priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
+ (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
+ VIP_CNTRL_2_SWAP_F(p->swap_f) |
+ (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
+
+ priv->params = *p;
}
static void
@@ -436,18 +693,14 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
- /* enable audio and video ports */
- reg_write(encoder, REG_ENA_AP, 0xff);
+ /* enable video ports, audio will be enabled later */
reg_write(encoder, REG_ENA_VP_0, 0xff);
reg_write(encoder, REG_ENA_VP_1, 0xff);
reg_write(encoder, REG_ENA_VP_2, 0xff);
/* set muxing after enabling ports: */
- reg_write(encoder, REG_VIP_CNTRL_0,
- VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3));
- reg_write(encoder, REG_VIP_CNTRL_1,
- VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1));
- reg_write(encoder, REG_VIP_CNTRL_2,
- VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5));
+ reg_write(encoder, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
+ reg_write(encoder, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
+ reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
break;
case DRM_MODE_DPMS_OFF:
/* disable audio and video ports */
@@ -494,43 +747,78 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct tda998x_priv *priv = to_tda998x_priv(encoder);
- uint16_t hs_start, hs_end, line_start, line_end;
- uint16_t vwin_start, vwin_end, de_start, de_end;
- uint16_t ref_pix, ref_line, pix_start2;
+ uint16_t ref_pix, ref_line, n_pix, n_line;
+ uint16_t hs_pix_s, hs_pix_e;
+ uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
+ uint16_t vs2_pix_s, vs2_pix_e, vs2_line_s, vs2_line_e;
+ uint16_t vwin1_line_s, vwin1_line_e;
+ uint16_t vwin2_line_s, vwin2_line_e;
+ uint16_t de_pix_s, de_pix_e;
uint8_t reg, div, rep;
- hs_start = mode->hsync_start - mode->hdisplay;
- hs_end = mode->hsync_end - mode->hdisplay;
- line_start = 1;
- line_end = 1 + mode->vsync_end - mode->vsync_start;
- vwin_start = mode->vtotal - mode->vsync_start;
- vwin_end = vwin_start + mode->vdisplay;
- de_start = mode->htotal - mode->hdisplay;
- de_end = mode->htotal;
-
- pix_start2 = 0;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- pix_start2 = (mode->htotal / 2) + hs_start;
-
- /* TODO how is this value calculated? It is 2 for all common
- * formats in the tables in out of tree nxp driver (assuming
- * I've properly deciphered their byzantine table system)
+ /*
+ * Internally TDA998x is using ITU-R BT.656 style sync but
+ * we get VESA style sync. TDA998x is using a reference pixel
+ * relative to ITU to sync to the input frame and for output
+ * sync generation. Currently, we are using reference detection
+	 * from HS/VS, i.e. REFPIX/REFLINE denote the frame start sync point,
+	 * which is the position of rising VS with coincident rising HS.
+	 *
+	 * Now there are some issues to take care of:
+	 * - HDMI data islands require sync-before-active
+	 * - TDA998x register values must be > 0 to be enabled
+	 * - REFLINE needs an additional offset of +1
+	 * - REFPIX needs an additional offset of +1 for UYUV and +3 for RGB
+ *
+ * So we add +1 to all horizontal and vertical register values,
+ * plus an additional +3 for REFPIX as we are using RGB input only.
*/
- ref_line = 2;
-
- /* this might changes for other color formats from the CRTC: */
- ref_pix = 3 + hs_start;
+ n_pix = mode->htotal;
+ n_line = mode->vtotal;
+
+ hs_pix_e = mode->hsync_end - mode->hdisplay;
+ hs_pix_s = mode->hsync_start - mode->hdisplay;
+ de_pix_e = mode->htotal;
+ de_pix_s = mode->htotal - mode->hdisplay;
+ ref_pix = 3 + hs_pix_s;
+
+ /*
+ * Attached LCD controllers may generate broken sync. Allow
+ * those to adjust the position of the rising VS edge by adding
+ * HSKEW to ref_pix.
+ */
+ if (adjusted_mode->flags & DRM_MODE_FLAG_HSKEW)
+ ref_pix += adjusted_mode->hskew;
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0) {
+ ref_line = 1 + mode->vsync_start - mode->vdisplay;
+ vwin1_line_s = mode->vtotal - mode->vdisplay - 1;
+ vwin1_line_e = vwin1_line_s + mode->vdisplay;
+ vs1_pix_s = vs1_pix_e = hs_pix_s;
+ vs1_line_s = mode->vsync_start - mode->vdisplay;
+ vs1_line_e = vs1_line_s +
+ mode->vsync_end - mode->vsync_start;
+ vwin2_line_s = vwin2_line_e = 0;
+ vs2_pix_s = vs2_pix_e = 0;
+ vs2_line_s = vs2_line_e = 0;
+ } else {
+ ref_line = 1 + (mode->vsync_start - mode->vdisplay)/2;
+ vwin1_line_s = (mode->vtotal - mode->vdisplay)/2;
+ vwin1_line_e = vwin1_line_s + mode->vdisplay/2;
+ vs1_pix_s = vs1_pix_e = hs_pix_s;
+ vs1_line_s = (mode->vsync_start - mode->vdisplay)/2;
+ vs1_line_e = vs1_line_s +
+ (mode->vsync_end - mode->vsync_start)/2;
+ vwin2_line_s = vwin1_line_s + mode->vtotal/2;
+ vwin2_line_e = vwin2_line_s + mode->vdisplay/2;
+ vs2_pix_s = vs2_pix_e = hs_pix_s + mode->htotal/2;
+		vs2_line_s = vs1_line_s + mode->vtotal/2;
+ vs2_line_e = vs2_line_s +
+ (mode->vsync_end - mode->vsync_start)/2;
+ }
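+
+	/*
+	 * Editor's note (worked example, CEA 1280x720@60: hdisplay 1280,
+	 * hsync 1390-1430, htotal 1650, vdisplay 720, vsync 725-730,
+	 * vtotal 750, clock 74250): the progressive branch above yields
+	 * hs_pix_s = 110, hs_pix_e = 150, de_pix_s = 370, de_pix_e = 1650,
+	 * ref_pix = 113, ref_line = 6, vwin1 lines 29..749 and vs1 lines
+	 * 5..10; div below evaluates to 148500/74250 = 2.
+	 */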
div = 148500 / mode->clock;
- DBG("clock=%d, div=%u", mode->clock, div);
- DBG("hs_start=%u, hs_end=%u, line_start=%u, line_end=%u",
- hs_start, hs_end, line_start, line_end);
- DBG("vwin_start=%u, vwin_end=%u, de_start=%u, de_end=%u",
- vwin_start, vwin_end, de_start, de_end);
- DBG("ref_line=%u, ref_pix=%u, pix_start2=%u",
- ref_line, ref_pix, pix_start2);
-
/* mute the audio FIFO: */
reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -561,9 +849,6 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
PLL_SERIAL_2_SRL_PR(rep));
- reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, pix_start2);
- reg_write16(encoder, REG_VS_PIX_END_2_MSB, pix_start2);
-
/* set color matrix bypass flag: */
reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP);
@@ -572,47 +857,75 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD);
+ /*
+ * Sync on rising HSYNC/VSYNC
+ */
reg_write(encoder, REG_VIP_CNTRL_3, 0);
reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS);
+
+ /*
+ * TDA19988 requires high-active sync at input stage,
+ * so invert low-active sync provided by master encoder here
+ */
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL);
+ /*
+ * Always generate sync polarity relative to input sync and
+ * revert input stage toggled sync at output stage
+ */
+ reg = TBG_CNTRL_1_TGL_EN;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
+ reg |= TBG_CNTRL_1_H_TGL;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ reg |= TBG_CNTRL_1_V_TGL;
+ reg_write(encoder, REG_TBG_CNTRL_1, reg);
reg_write(encoder, REG_VIDFORMAT, 0x00);
- reg_write16(encoder, REG_NPIX_MSB, mode->hdisplay - 1);
- reg_write16(encoder, REG_NLINE_MSB, mode->vdisplay - 1);
- reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, line_start);
- reg_write16(encoder, REG_VS_LINE_END_1_MSB, line_end);
- reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, hs_start);
- reg_write16(encoder, REG_VS_PIX_END_1_MSB, hs_start);
- reg_write16(encoder, REG_HS_PIX_START_MSB, hs_start);
- reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_end);
- reg_write16(encoder, REG_VWIN_START_1_MSB, vwin_start);
- reg_write16(encoder, REG_VWIN_END_1_MSB, vwin_end);
- reg_write16(encoder, REG_DE_START_MSB, de_start);
- reg_write16(encoder, REG_DE_STOP_MSB, de_end);
+ reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
+ reg_write16(encoder, REG_REFLINE_MSB, ref_line);
+ reg_write16(encoder, REG_NPIX_MSB, n_pix);
+ reg_write16(encoder, REG_NLINE_MSB, n_line);
+ reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, vs1_line_s);
+ reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, vs1_pix_s);
+ reg_write16(encoder, REG_VS_LINE_END_1_MSB, vs1_line_e);
+ reg_write16(encoder, REG_VS_PIX_END_1_MSB, vs1_pix_e);
+ reg_write16(encoder, REG_VS_LINE_STRT_2_MSB, vs2_line_s);
+ reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, vs2_pix_s);
+ reg_write16(encoder, REG_VS_LINE_END_2_MSB, vs2_line_e);
+ reg_write16(encoder, REG_VS_PIX_END_2_MSB, vs2_pix_e);
+ reg_write16(encoder, REG_HS_PIX_START_MSB, hs_pix_s);
+ reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_pix_e);
+ reg_write16(encoder, REG_VWIN_START_1_MSB, vwin1_line_s);
+ reg_write16(encoder, REG_VWIN_END_1_MSB, vwin1_line_e);
+ reg_write16(encoder, REG_VWIN_START_2_MSB, vwin2_line_s);
+ reg_write16(encoder, REG_VWIN_END_2_MSB, vwin2_line_e);
+ reg_write16(encoder, REG_DE_START_MSB, de_pix_s);
+ reg_write16(encoder, REG_DE_STOP_MSB, de_pix_e);
if (priv->rev == TDA19988) {
/* let incoming pixels fill the active space (if any) */
reg_write(encoder, REG_ENABLE_SPACE, 0x01);
}
- reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
- reg_write16(encoder, REG_REFLINE_MSB, ref_line);
-
- reg = TBG_CNTRL_1_VHX_EXT_DE |
- TBG_CNTRL_1_VHX_EXT_HS |
- TBG_CNTRL_1_VHX_EXT_VS |
- TBG_CNTRL_1_DWIN_DIS | /* HDCP off */
- TBG_CNTRL_1_VH_TGL_2;
- if (mode->flags & (DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC))
- reg |= TBG_CNTRL_1_VH_TGL_0;
- reg_set(encoder, REG_TBG_CNTRL_1, reg);
-
/* must be last register set: */
reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE);
+
+	/* Only set up the info frames if the sink is HDMI */
+ if (priv->is_hdmi_sink) {
+ /* We need to turn HDMI HDCP stuff on to get audio through */
+ reg_clear(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
+ reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1));
+ reg_set(encoder, REG_TX33, TX33_HDMI);
+
+ tda998x_write_avi(encoder, adjusted_mode);
+
+ if (priv->params.audio_cfg)
+ tda998x_configure_audio(encoder, adjusted_mode,
+ &priv->params);
+ }
}
static enum drm_connector_status
@@ -673,6 +986,7 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
static uint8_t *
do_get_edid(struct drm_encoder *encoder)
{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
int j = 0, valid_extensions = 0;
uint8_t *block, *new;
bool print_bad_edid = drm_debug & DRM_UT_KMS;
@@ -680,6 +994,9 @@ do_get_edid(struct drm_encoder *encoder)
if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
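+	/*
+	 * Editor's note: TX4_PD_RAM presumably powers down the EDID RAM
+	 * on the TDA19988; it is cleared here so the blocks can be read,
+	 * and set again on both the done and fail paths below.
+	 */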
+ if (priv->rev == TDA19988)
+ reg_clear(encoder, REG_TX4, TX4_PD_RAM);
+
/* base block fetch */
if (read_edid_block(encoder, block, 0))
goto fail;
@@ -689,7 +1006,7 @@ do_get_edid(struct drm_encoder *encoder)
/* if there's no extensions, we're done */
if (block[0x7e] == 0)
- return block;
+ goto done;
new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
@@ -716,9 +1033,15 @@ do_get_edid(struct drm_encoder *encoder)
block = new;
}
+done:
+ if (priv->rev == TDA19988)
+ reg_set(encoder, REG_TX4, TX4_PD_RAM);
+
return block;
fail:
+ if (priv->rev == TDA19988)
+ reg_set(encoder, REG_TX4, TX4_PD_RAM);
dev_warn(encoder->dev->dev, "failed to read EDID\n");
kfree(block);
return NULL;
@@ -728,12 +1051,14 @@ static int
tda998x_encoder_get_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
struct edid *edid = (struct edid *)do_get_edid(encoder);
int n = 0;
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
n = drm_add_edid_modes(connector, edid);
+ priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
kfree(edid);
}
@@ -807,6 +1132,10 @@ tda998x_encoder_init(struct i2c_client *client,
if (!priv)
return -ENOMEM;
+ priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
+ priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
+ priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
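+	/*
+	 * Editor's note: these defaults reproduce the swap muxing the
+	 * DPMS path used to hard-code, so setups that never call
+	 * set_config() keep the previous behaviour.
+	 */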
+
priv->current_page = 0;
priv->cec = i2c_new_dummy(client->adapter, 0x34);
priv->dpms = DRM_MODE_DPMS_OFF;
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ada49eda489f..eac755bb8f9b 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -1241,7 +1241,7 @@ int i810_driver_dma_quiescent(struct drm_device *dev)
return 0;
}
-struct drm_ioctl_desc i810_ioctls[] = {
+const struct drm_ioctl_desc i810_ioctls[] = {
DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index 6e0acad9e0f5..d4d16eddd651 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -125,7 +125,7 @@ extern void i810_driver_preclose(struct drm_device *dev,
extern int i810_driver_device_is_agp(struct drm_device *dev);
extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-extern struct drm_ioctl_desc i810_ioctls[];
+extern const struct drm_ioctl_desc i810_ioctls[];
extern int i810_max_ioctl;
#define I810_BASE(reg) ((unsigned long) \
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 40034ecefd3b..b8449a84a0dc 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -5,6 +5,7 @@
ccflags-y := -Iinclude/drm
i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_debugfs.o \
+ i915_gpu_error.o \
i915_suspend.o \
i915_gem.o \
i915_gem_context.o \
@@ -37,6 +38,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
intel_sprite.o \
intel_opregion.o \
intel_sideband.o \
+ intel_uncore.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
dvo_ivch.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 757e0fa11043..af42e94f6846 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -307,7 +307,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
idf |= CH7xxx_IDF_HSP;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- idf |= CH7xxx_IDF_HSP;
+ idf |= CH7xxx_IDF_VSP;
ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 47d6c748057e..ed72fe08217c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,7 +30,6 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
-#include <generated/utsrelease.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
@@ -90,16 +89,6 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
}
}
-static const char *cache_level_str(int type)
-{
- switch (type) {
- case I915_CACHE_NONE: return " uncached";
- case I915_CACHE_LLC: return " snooped (LLC)";
- case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
- default: return "";
- }
-}
-
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
@@ -113,7 +102,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->last_read_seqno,
obj->last_write_seqno,
obj->last_fenced_seqno,
- cache_level_str(obj->cache_level),
+ i915_cache_level_str(obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
@@ -122,9 +111,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (pinned x %d)", obj->pin_count);
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
- if (obj->gtt_space != NULL)
- seq_printf(m, " (gtt offset: %08x, size: %08x)",
- obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+ if (i915_gem_obj_ggtt_bound(obj))
+ seq_printf(m, " (gtt offset: %08lx, size: %08x)",
+ i915_gem_obj_ggtt_offset(obj), (unsigned int)i915_gem_obj_ggtt_size(obj));
if (obj->stolen)
seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
if (obj->pin_mappable || obj->fault_mappable) {
@@ -146,7 +135,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
uintptr_t list = (uintptr_t) node->info_ent->data;
struct list_head *head;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
size_t total_obj_size, total_gtt_size;
int count, ret;
@@ -157,12 +147,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
switch (list) {
case ACTIVE_LIST:
- seq_printf(m, "Active:\n");
- head = &dev_priv->mm.active_list;
+ seq_puts(m, "Active:\n");
+ head = &vm->active_list;
break;
case INACTIVE_LIST:
- seq_printf(m, "Inactive:\n");
- head = &dev_priv->mm.inactive_list;
+ seq_puts(m, "Inactive:\n");
+ head = &vm->inactive_list;
break;
default:
mutex_unlock(&dev->struct_mutex);
@@ -171,11 +161,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, head, mm_list) {
- seq_printf(m, " ");
+ seq_puts(m, " ");
describe_obj(m, obj);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
total_obj_size += obj->base.size;
- total_gtt_size += obj->gtt_space->size;
+ total_gtt_size += i915_gem_obj_ggtt_size(obj);
count++;
}
mutex_unlock(&dev->struct_mutex);
@@ -187,10 +177,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
#define count_objects(list, member) do { \
list_for_each_entry(obj, list, member) { \
- size += obj->gtt_space->size; \
+ size += i915_gem_obj_ggtt_size(obj); \
++count; \
if (obj->map_and_fenceable) { \
- mappable_size += obj->gtt_space->size; \
+ mappable_size += i915_gem_obj_ggtt_size(obj); \
++mappable_count; \
} \
} \
@@ -209,7 +199,7 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->count++;
stats->total += obj->base.size;
- if (obj->gtt_space) {
+ if (i915_gem_obj_ggtt_bound(obj)) {
if (!list_empty(&obj->ring_list))
stats->active += obj->base.size;
else
@@ -222,7 +212,7 @@ static int per_file_stats(int id, void *ptr, void *data)
return 0;
}
-static int i915_gem_object_info(struct seq_file *m, void* data)
+static int i915_gem_object_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
@@ -230,6 +220,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
u32 count, mappable_count, purgeable_count;
size_t size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_file *file;
int ret;
@@ -247,12 +238,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.active_list, mm_list);
+ count_objects(&vm->active_list, mm_list);
seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.inactive_list, mm_list);
+ count_objects(&vm->inactive_list, mm_list);
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
@@ -267,11 +258,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
size = count = mappable_size = mappable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->fault_mappable) {
- size += obj->gtt_space->size;
+ size += i915_gem_obj_ggtt_size(obj);
++count;
}
if (obj->pin_mappable) {
- mappable_size += obj->gtt_space->size;
+ mappable_size += i915_gem_obj_ggtt_size(obj);
++mappable_count;
}
if (obj->madv == I915_MADV_DONTNEED) {
@@ -287,10 +278,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, size);
seq_printf(m, "%zu [%lu] gtt total\n",
- dev_priv->gtt.total,
- dev_priv->gtt.mappable_end - dev_priv->gtt.start);
+ dev_priv->gtt.base.total,
+ dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
@@ -310,7 +301,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
return 0;
}
-static int i915_gem_gtt_info(struct seq_file *m, void* data)
+static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
@@ -329,11 +320,11 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
if (list == PINNED_LIST && obj->pin_count == 0)
continue;
- seq_printf(m, " ");
+ seq_puts(m, " ");
describe_obj(m, obj);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
total_obj_size += obj->base.size;
- total_gtt_size += obj->gtt_space->size;
+ total_gtt_size += i915_gem_obj_ggtt_size(obj);
count++;
}
@@ -371,20 +362,22 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
pipe, plane);
}
if (work->enable_stall_check)
- seq_printf(m, "Stall check enabled, ");
+ seq_puts(m, "Stall check enabled, ");
else
- seq_printf(m, "Stall check waiting for page flip ioctl, ");
+ seq_puts(m, "Stall check waiting for page flip ioctl, ");
seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
if (work->old_fb_obj) {
struct drm_i915_gem_object *obj = work->old_fb_obj;
if (obj)
- seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+ seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
+ i915_gem_obj_ggtt_offset(obj));
}
if (work->pending_flip_obj) {
struct drm_i915_gem_object *obj = work->pending_flip_obj;
if (obj)
- seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+ seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
+ i915_gem_obj_ggtt_offset(obj));
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -424,7 +417,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
mutex_unlock(&dev->struct_mutex);
if (count == 0)
- seq_printf(m, "No requests\n");
+ seq_puts(m, "No requests\n");
return 0;
}
@@ -574,10 +567,10 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
seq_printf(m, "Fence %d, pin count = %d, object = ",
i, dev_priv->fence_regs[i].pin_count);
if (obj == NULL)
- seq_printf(m, "unused");
+ seq_puts(m, "unused");
else
describe_obj(m, obj);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
mutex_unlock(&dev->struct_mutex);
@@ -606,361 +599,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
-static const char *ring_str(int ring)
-{
- switch (ring) {
- case RCS: return "render";
- case VCS: return "bsd";
- case BCS: return "blt";
- case VECS: return "vebox";
- default: return "";
- }
-}
-
-static const char *pin_flag(int pinned)
-{
- if (pinned > 0)
- return " P";
- else if (pinned < 0)
- return " p";
- else
- return "";
-}
-
-static const char *tiling_flag(int tiling)
-{
- switch (tiling) {
- default:
- case I915_TILING_NONE: return "";
- case I915_TILING_X: return " X";
- case I915_TILING_Y: return " Y";
- }
-}
-
-static const char *dirty_flag(int dirty)
-{
- return dirty ? " dirty" : "";
-}
-
-static const char *purgeable_flag(int purgeable)
-{
- return purgeable ? " purgeable" : "";
-}
-
-static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
-{
-
- if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
- e->err = -ENOSPC;
- return false;
- }
-
- if (e->bytes == e->size - 1 || e->err)
- return false;
-
- return true;
-}
-
-static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
- unsigned len)
-{
- if (e->pos + len <= e->start) {
- e->pos += len;
- return false;
- }
-
- /* First vsnprintf needs to fit in its entirety for memmove */
- if (len >= e->size) {
- e->err = -EIO;
- return false;
- }
-
- return true;
-}
-
-static void __i915_error_advance(struct drm_i915_error_state_buf *e,
- unsigned len)
-{
- /* If this is first printf in this window, adjust it so that
- * start position matches start of the buffer
- */
-
- if (e->pos < e->start) {
- const size_t off = e->start - e->pos;
-
- /* Should not happen but be paranoid */
- if (off > len || e->bytes) {
- e->err = -EIO;
- return;
- }
-
- memmove(e->buf, e->buf + off, len - off);
- e->bytes = len - off;
- e->pos = e->start;
- return;
- }
-
- e->bytes += len;
- e->pos += len;
-}
-
-static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
- const char *f, va_list args)
-{
- unsigned len;
-
- if (!__i915_error_ok(e))
- return;
-
- /* Seek the first printf which is hits start position */
- if (e->pos < e->start) {
- len = vsnprintf(NULL, 0, f, args);
- if (!__i915_error_seek(e, len))
- return;
- }
-
- len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
- if (len >= e->size - e->bytes)
- len = e->size - e->bytes - 1;
-
- __i915_error_advance(e, len);
-}
-
-static void i915_error_puts(struct drm_i915_error_state_buf *e,
- const char *str)
-{
- unsigned len;
-
- if (!__i915_error_ok(e))
- return;
-
- len = strlen(str);
-
- /* Seek the first printf which is hits start position */
- if (e->pos < e->start) {
- if (!__i915_error_seek(e, len))
- return;
- }
-
- if (len >= e->size - e->bytes)
- len = e->size - e->bytes - 1;
- memcpy(e->buf + e->bytes, str, len);
-
- __i915_error_advance(e, len);
-}
-
-void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
-{
- va_list args;
-
- va_start(args, f);
- i915_error_vprintf(e, f, args);
- va_end(args);
-}
-
-#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
-#define err_puts(e, s) i915_error_puts(e, s)
-
-static void print_error_buffers(struct drm_i915_error_state_buf *m,
- const char *name,
- struct drm_i915_error_buffer *err,
- int count)
-{
- err_printf(m, "%s [%d]:\n", name, count);
-
- while (count--) {
- err_printf(m, " %08x %8u %02x %02x %x %x",
- err->gtt_offset,
- err->size,
- err->read_domains,
- err->write_domain,
- err->rseqno, err->wseqno);
- err_puts(m, pin_flag(err->pinned));
- err_puts(m, tiling_flag(err->tiling));
- err_puts(m, dirty_flag(err->dirty));
- err_puts(m, purgeable_flag(err->purgeable));
- err_puts(m, err->ring != -1 ? " " : "");
- err_puts(m, ring_str(err->ring));
- err_puts(m, cache_level_str(err->cache_level));
-
- if (err->name)
- err_printf(m, " (name: %d)", err->name);
- if (err->fence_reg != I915_FENCE_REG_NONE)
- err_printf(m, " (fence: %d)", err->fence_reg);
-
- err_puts(m, "\n");
- err++;
- }
-}
-
-static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
- struct drm_device *dev,
- struct drm_i915_error_state *error,
- unsigned ring)
-{
- BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
- err_printf(m, "%s command stream:\n", ring_str(ring));
- err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
- err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
- err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
- err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
- err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
- err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
- err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
- if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
- err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
-
- if (INTEL_INFO(dev)->gen >= 4)
- err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
- err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
- err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
- if (INTEL_INFO(dev)->gen >= 6) {
- err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
- err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
- err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
- error->semaphore_mboxes[ring][0],
- error->semaphore_seqno[ring][0]);
- err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
- error->semaphore_mboxes[ring][1],
- error->semaphore_seqno[ring][1]);
- }
- err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
- err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
- err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
- err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
-}
-
-struct i915_error_state_file_priv {
- struct drm_device *dev;
- struct drm_i915_error_state *error;
-};
-
-
-static int i915_error_state(struct i915_error_state_file_priv *error_priv,
- struct drm_i915_error_state_buf *m)
-
-{
- struct drm_device *dev = error_priv->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_error_state *error = error_priv->error;
- struct intel_ring_buffer *ring;
- int i, j, page, offset, elt;
-
- if (!error) {
- err_printf(m, "no error state collected\n");
- return 0;
- }
-
- err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
- error->time.tv_usec);
- err_printf(m, "Kernel: " UTS_RELEASE "\n");
- err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
- err_printf(m, "EIR: 0x%08x\n", error->eir);
- err_printf(m, "IER: 0x%08x\n", error->ier);
- err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
- err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
- err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
- err_printf(m, "CCID: 0x%08x\n", error->ccid);
-
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
-
- for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
- err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
- error->extra_instdone[i]);
-
- if (INTEL_INFO(dev)->gen >= 6) {
- err_printf(m, "ERROR: 0x%08x\n", error->error);
- err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
- }
-
- if (INTEL_INFO(dev)->gen == 7)
- err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
-
- for_each_ring(ring, dev_priv, i)
- i915_ring_error_state(m, dev, error, i);
-
- if (error->active_bo)
- print_error_buffers(m, "Active",
- error->active_bo,
- error->active_bo_count);
-
- if (error->pinned_bo)
- print_error_buffers(m, "Pinned",
- error->pinned_bo,
- error->pinned_bo_count);
-
- for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- struct drm_i915_error_object *obj;
-
- if ((obj = error->ring[i].batchbuffer)) {
- err_printf(m, "%s --- gtt_offset = 0x%08x\n",
- dev_priv->ring[i].name,
- obj->gtt_offset);
- offset = 0;
- for (page = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- err_printf(m, "%08x : %08x\n", offset,
- obj->pages[page][elt]);
- offset += 4;
- }
- }
- }
-
- if (error->ring[i].num_requests) {
- err_printf(m, "%s --- %d requests\n",
- dev_priv->ring[i].name,
- error->ring[i].num_requests);
- for (j = 0; j < error->ring[i].num_requests; j++) {
- err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
- error->ring[i].requests[j].seqno,
- error->ring[i].requests[j].jiffies,
- error->ring[i].requests[j].tail);
- }
- }
-
- if ((obj = error->ring[i].ringbuffer)) {
- err_printf(m, "%s --- ringbuffer = 0x%08x\n",
- dev_priv->ring[i].name,
- obj->gtt_offset);
- offset = 0;
- for (page = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- err_printf(m, "%08x : %08x\n",
- offset,
- obj->pages[page][elt]);
- offset += 4;
- }
- }
- }
-
- obj = error->ring[i].ctx;
- if (obj) {
- err_printf(m, "%s --- HW Context = 0x%08x\n",
- dev_priv->ring[i].name,
- obj->gtt_offset);
- offset = 0;
- for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
- err_printf(m, "[%04x] %08x %08x %08x %08x\n",
- offset,
- obj->pages[0][elt],
- obj->pages[0][elt+1],
- obj->pages[0][elt+2],
- obj->pages[0][elt+3]);
- offset += 16;
- }
- }
- }
-
- if (error->overlay)
- intel_overlay_print_error_state(m, error->overlay);
-
- if (error->display)
- intel_display_print_error_state(m, dev, error->display);
-
- return 0;
-}
-
static ssize_t
i915_error_state_write(struct file *filp,
const char __user *ubuf,
@@ -986,9 +624,7 @@ i915_error_state_write(struct file *filp,
static int i915_error_state_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_error_state_file_priv *error_priv;
- unsigned long flags;
error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
if (!error_priv)
@@ -996,11 +632,7 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
error_priv->dev = dev;
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- error_priv->error = dev_priv->gpu_error.first_error;
- if (error_priv->error)
- kref_get(&error_priv->error->ref);
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+ i915_error_state_get(dev, error_priv);
file->private_data = error_priv;
@@ -1011,8 +643,7 @@ static int i915_error_state_release(struct inode *inode, struct file *file)
{
struct i915_error_state_file_priv *error_priv = file->private_data;
- if (error_priv->error)
- kref_put(&error_priv->error->ref, i915_error_state_free);
+ i915_error_state_put(error_priv);
kfree(error_priv);
return 0;
@@ -1025,40 +656,15 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
struct drm_i915_error_state_buf error_str;
loff_t tmp_pos = 0;
ssize_t ret_count = 0;
- int ret = 0;
-
- memset(&error_str, 0, sizeof(error_str));
-
- /* We need to have enough room to store any i915_error_state printf
- * so that we can move it to start position.
- */
- error_str.size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
- error_str.buf = kmalloc(error_str.size,
- GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
-
- if (error_str.buf == NULL) {
- error_str.size = PAGE_SIZE;
- error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
- }
-
- if (error_str.buf == NULL) {
- error_str.size = 128;
- error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
- }
-
- if (error_str.buf == NULL)
- return -ENOMEM;
-
- error_str.start = *pos;
+ int ret;
- ret = i915_error_state(error_priv, &error_str);
+ ret = i915_error_state_buf_init(&error_str, count, *pos);
if (ret)
- goto out;
+ return ret;
- if (error_str.bytes == 0 && error_str.err) {
- ret = error_str.err;
+ ret = i915_error_state_to_str(&error_str, error_priv);
+ if (ret)
goto out;
- }
ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
error_str.buf,
@@ -1069,7 +675,7 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
else
*pos = error_str.start + ret_count;
out:
- kfree(error_str.buf);
+ i915_error_state_buf_release(&error_str);
return ret ?: ret_count;
}
@@ -1246,7 +852,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
(freq_sts >> 8) & 0xff));
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
- seq_printf(m, "no P-state info available\n");
+ seq_puts(m, "no P-state info available\n");
}
return 0;
@@ -1341,28 +947,28 @@ static int ironlake_drpc_info(struct seq_file *m)
seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
seq_printf(m, "Render standby enabled: %s\n",
(rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
- seq_printf(m, "Current RS state: ");
+ seq_puts(m, "Current RS state: ");
switch (rstdbyctl & RSX_STATUS_MASK) {
case RSX_STATUS_ON:
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
break;
case RSX_STATUS_RC1:
- seq_printf(m, "RC1\n");
+ seq_puts(m, "RC1\n");
break;
case RSX_STATUS_RC1E:
- seq_printf(m, "RC1E\n");
+ seq_puts(m, "RC1E\n");
break;
case RSX_STATUS_RS1:
- seq_printf(m, "RS1\n");
+ seq_puts(m, "RS1\n");
break;
case RSX_STATUS_RS2:
- seq_printf(m, "RS2 (RC6)\n");
+ seq_puts(m, "RS2 (RC6)\n");
break;
case RSX_STATUS_RS3:
- seq_printf(m, "RC3 (RC6+)\n");
+ seq_puts(m, "RC3 (RC6+)\n");
break;
default:
- seq_printf(m, "unknown\n");
+ seq_puts(m, "unknown\n");
break;
}
@@ -1377,20 +983,19 @@ static int gen6_drpc_info(struct seq_file *m)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
unsigned forcewake_count;
- int count=0, ret;
-
+ int count = 0, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- spin_lock_irq(&dev_priv->gt_lock);
- forcewake_count = dev_priv->forcewake_count;
- spin_unlock_irq(&dev_priv->gt_lock);
+ spin_lock_irq(&dev_priv->uncore.lock);
+ forcewake_count = dev_priv->uncore.forcewake_count;
+ spin_unlock_irq(&dev_priv->uncore.lock);
if (forcewake_count) {
- seq_printf(m, "RC information inaccurate because somebody "
- "holds a forcewake reference \n");
+ seq_puts(m, "RC information inaccurate because somebody "
+			"holds a forcewake reference\n");
} else {
/* NB: we cannot use forcewake, else we read the wrong values */
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
@@ -1399,7 +1004,7 @@ static int gen6_drpc_info(struct seq_file *m)
}
gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
- trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
+ trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
@@ -1423,25 +1028,25 @@ static int gen6_drpc_info(struct seq_file *m)
yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
seq_printf(m, "Deepest RC6 Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
- seq_printf(m, "Current RC state: ");
+ seq_puts(m, "Current RC state: ");
switch (gt_core_status & GEN6_RCn_MASK) {
case GEN6_RC0:
if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
- seq_printf(m, "Core Power Down\n");
+ seq_puts(m, "Core Power Down\n");
else
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
break;
case GEN6_RC3:
- seq_printf(m, "RC3\n");
+ seq_puts(m, "RC3\n");
break;
case GEN6_RC6:
- seq_printf(m, "RC6\n");
+ seq_puts(m, "RC6\n");
break;
case GEN6_RC7:
- seq_printf(m, "RC7\n");
+ seq_puts(m, "RC7\n");
break;
default:
- seq_printf(m, "Unknown\n");
+ seq_puts(m, "Unknown\n");
break;
}
@@ -1485,43 +1090,46 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
if (!I915_HAS_FBC(dev)) {
- seq_printf(m, "FBC unsupported on this chipset\n");
+ seq_puts(m, "FBC unsupported on this chipset\n");
return 0;
}
if (intel_fbc_enabled(dev)) {
- seq_printf(m, "FBC enabled\n");
+ seq_puts(m, "FBC enabled\n");
} else {
- seq_printf(m, "FBC disabled: ");
- switch (dev_priv->no_fbc_reason) {
+ seq_puts(m, "FBC disabled: ");
+ switch (dev_priv->fbc.no_fbc_reason) {
case FBC_NO_OUTPUT:
- seq_printf(m, "no outputs");
+ seq_puts(m, "no outputs");
break;
case FBC_STOLEN_TOO_SMALL:
- seq_printf(m, "not enough stolen memory");
+ seq_puts(m, "not enough stolen memory");
break;
case FBC_UNSUPPORTED_MODE:
- seq_printf(m, "mode not supported");
+ seq_puts(m, "mode not supported");
break;
case FBC_MODE_TOO_LARGE:
- seq_printf(m, "mode too large");
+ seq_puts(m, "mode too large");
break;
case FBC_BAD_PLANE:
- seq_printf(m, "FBC unsupported on plane");
+ seq_puts(m, "FBC unsupported on plane");
break;
case FBC_NOT_TILED:
- seq_printf(m, "scanout buffer not tiled");
+ seq_puts(m, "scanout buffer not tiled");
break;
case FBC_MULTIPLE_PIPES:
- seq_printf(m, "multiple pipes are enabled");
+ seq_puts(m, "multiple pipes are enabled");
break;
case FBC_MODULE_PARAM:
- seq_printf(m, "disabled per module param (default off)");
+ seq_puts(m, "disabled per module param (default off)");
+ break;
+ case FBC_CHIP_DEFAULT:
+ seq_puts(m, "disabled per chip default");
break;
default:
- seq_printf(m, "unknown reason");
+ seq_puts(m, "unknown reason");
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
return 0;
}
@@ -1604,7 +1212,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
int gpu_freq, ia_freq;
if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
- seq_printf(m, "unsupported on this chipset\n");
+ seq_puts(m, "unsupported on this chipset\n");
return 0;
}
@@ -1612,7 +1220,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
if (ret)
return ret;
- seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+ seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
for (gpu_freq = dev_priv->rps.min_delay;
gpu_freq <= dev_priv->rps.max_delay;
@@ -1701,7 +1309,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.bits_per_pixel,
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
mutex_unlock(&dev->mode_config.mutex);
mutex_lock(&dev->mode_config.fb_lock);
@@ -1716,7 +1324,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.bits_per_pixel,
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.fb_lock);
@@ -1736,22 +1344,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
return ret;
if (dev_priv->ips.pwrctx) {
- seq_printf(m, "power context ");
+ seq_puts(m, "power context ");
describe_obj(m, dev_priv->ips.pwrctx);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
if (dev_priv->ips.renderctx) {
- seq_printf(m, "render context ");
+ seq_puts(m, "render context ");
describe_obj(m, dev_priv->ips.renderctx);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
for_each_ring(ring, dev_priv, i) {
if (ring->default_context) {
seq_printf(m, "HW default context %s ring ", ring->name);
describe_obj(m, ring->default_context->obj);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
}
@@ -1767,9 +1375,9 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned forcewake_count;
- spin_lock_irq(&dev_priv->gt_lock);
- forcewake_count = dev_priv->forcewake_count;
- spin_unlock_irq(&dev_priv->gt_lock);
+ spin_lock_irq(&dev_priv->uncore.lock);
+ forcewake_count = dev_priv->uncore.forcewake_count;
+ spin_unlock_irq(&dev_priv->uncore.lock);
seq_printf(m, "forcewake count = %u\n", forcewake_count);
@@ -1778,7 +1386,7 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
static const char *swizzle_string(unsigned swizzle)
{
- switch(swizzle) {
+ switch (swizzle) {
case I915_BIT_6_SWIZZLE_NONE:
return "none";
case I915_BIT_6_SWIZZLE_9:
@@ -1868,7 +1476,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- seq_printf(m, "aliasing PPGTT:\n");
+ seq_puts(m, "aliasing PPGTT:\n");
seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
}
seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
@@ -1886,7 +1494,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
if (!IS_VALLEYVIEW(dev)) {
- seq_printf(m, "unsupported\n");
+ seq_puts(m, "unsupported\n");
return 0;
}
@@ -1924,6 +1532,148 @@ static int i915_dpio_info(struct seq_file *m, void *data)
return 0;
}
+static int i915_llc(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Size calculation for LLC is a bit of a pain. Ignore for now. */
+ seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
+ seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
+
+ return 0;
+}
+
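+
The new i915_llc() entry leans on the driver's yesno() string helper. For reference, a sketch of what such a helper amounts to (the real one lives elsewhere in i915_debugfs.c):

/* Sketch of a yes/no string helper as used by i915_llc() above. */
static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
}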
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 psrstat, psrperf;
+
+ if (!IS_HASWELL(dev)) {
+ seq_puts(m, "PSR not supported on this platform\n");
+ return 0;
+ } else if (I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
+ seq_puts(m, "PSR enabled\n");
+ } else {
+ seq_puts(m, "PSR disabled: ");
+ switch (dev_priv->no_psr_reason) {
+ case PSR_NO_SOURCE:
+ seq_puts(m, "not supported on this platform");
+ break;
+ case PSR_NO_SINK:
+ seq_puts(m, "not supported by panel");
+ break;
+ case PSR_MODULE_PARAM:
+ seq_puts(m, "disabled by flag");
+ break;
+ case PSR_CRTC_NOT_ACTIVE:
+ seq_puts(m, "crtc not active");
+ break;
+ case PSR_PWR_WELL_ENABLED:
+ seq_puts(m, "power well enabled");
+ break;
+ case PSR_NOT_TILED:
+ seq_puts(m, "not tiled");
+ break;
+ case PSR_SPRITE_ENABLED:
+ seq_puts(m, "sprite enabled");
+ break;
+ case PSR_S3D_ENABLED:
+ seq_puts(m, "stereo 3d enabled");
+ break;
+ case PSR_INTERLACED_ENABLED:
+ seq_puts(m, "interlaced enabled");
+ break;
+ case PSR_HSW_NOT_DDIA:
+ seq_puts(m, "HSW ties PSR to DDI A (eDP)");
+ break;
+ default:
+ seq_puts(m, "unknown reason");
+ }
+ seq_puts(m, "\n");
+ return 0;
+ }
+
+ psrstat = I915_READ(EDP_PSR_STATUS_CTL);
+
+ seq_puts(m, "PSR Current State: ");
+ switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
+ case EDP_PSR_STATUS_STATE_IDLE:
+ seq_puts(m, "Reset state\n");
+ break;
+ case EDP_PSR_STATUS_STATE_SRDONACK:
+ seq_puts(m, "Wait for TG/Stream to send one frame of data after SRD conditions are met\n");
+ break;
+ case EDP_PSR_STATUS_STATE_SRDENT:
+ seq_puts(m, "SRD entry\n");
+ break;
+ case EDP_PSR_STATUS_STATE_BUFOFF:
+ seq_puts(m, "Wait for buffer turn off\n");
+ break;
+ case EDP_PSR_STATUS_STATE_BUFON:
+ seq_puts(m, "Wait for buffer turn on\n");
+ break;
+ case EDP_PSR_STATUS_STATE_AUXACK:
+ seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
+ break;
+ case EDP_PSR_STATUS_STATE_SRDOFFACK:
+ seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
+ break;
+ default:
+ seq_puts(m, "Unknown\n");
+ break;
+ }
+
+ seq_puts(m, "Link Status: ");
+ switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
+ case EDP_PSR_STATUS_LINK_FULL_OFF:
+ seq_puts(m, "Link is fully off\n");
+ break;
+ case EDP_PSR_STATUS_LINK_FULL_ON:
+ seq_puts(m, "Link is fully on\n");
+ break;
+ case EDP_PSR_STATUS_LINK_STANDBY:
+ seq_puts(m, "Link is in standby\n");
+ break;
+ default:
+ seq_puts(m, "Unknown\n");
+ break;
+ }
+
+ seq_printf(m, "PSR Entry Count: %u\n",
+ psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
+ EDP_PSR_STATUS_COUNT_MASK);
+
+ seq_printf(m, "Max Sleep Timer Counter: %u\n",
+ psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
+ EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
+
+ seq_printf(m, "Had AUX error: %s\n",
+ yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
+
+ seq_printf(m, "Sending AUX: %s\n",
+ yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
+
+ seq_printf(m, "Sending Idle: %s\n",
+ yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
+
+ seq_printf(m, "Sending TP2 TP3: %s\n",
+ yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
+
+ seq_printf(m, "Sending TP1: %s\n",
+ yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
+
+ seq_printf(m, "Idle Count: %u\n",
+ psrstat & EDP_PSR_STATUS_IDLE_MASK);
+
+ psrperf = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
+ seq_printf(m, "Performance Counter: %u\n", psrperf);
+
+ return 0;
+}
+
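+
The entry-count and sleep-timer reads above rely on C operator precedence: `>>` binds tighter than `&`, so `psrstat >> SHIFT & MASK` extracts the shifted field without extra parentheses. A hypothetical standalone example of the same idiom:

#include <linux/types.h>

#define EXAMPLE_COUNT_SHIFT	16	/* hypothetical field layout */
#define EXAMPLE_COUNT_MASK	0xf

/* Reads as (status >> SHIFT) & MASK — the grouping is implied by precedence. */
static inline unsigned int example_count(u32 status)
{
	return status >> EXAMPLE_COUNT_SHIFT & EXAMPLE_COUNT_MASK;
}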
static int
i915_wedged_get(void *data, u64 *val)
{
@@ -2006,6 +1756,7 @@ i915_drop_caches_set(void *data, u64 val)
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
int ret;
DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
@@ -2026,7 +1777,8 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_retire_requests(dev);
if (val & DROP_BOUND) {
- list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
+ list_for_each_entry_safe(obj, next, &vm->inactive_list,
+ mm_list)
if (obj->pin_count == 0) {
ret = i915_gem_object_unbind(obj);
if (ret)
@@ -2353,64 +2105,40 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
{"i915_dpio", i915_dpio_info, 0},
+ {"i915_llc", i915_llc, 0},
+ {"i915_edp_psr_status", i915_edp_psr_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
+static const struct i915_debugfs_files {
+ const char *name;
+ const struct file_operations *fops;
+} i915_debugfs_files[] = {
+ {"i915_wedged", &i915_wedged_fops},
+ {"i915_max_freq", &i915_max_freq_fops},
+ {"i915_min_freq", &i915_min_freq_fops},
+ {"i915_cache_sharing", &i915_cache_sharing_fops},
+ {"i915_ring_stop", &i915_ring_stop_fops},
+ {"i915_gem_drop_caches", &i915_drop_caches_fops},
+ {"i915_error_state", &i915_error_state_fops},
+ {"i915_next_seqno", &i915_next_seqno_fops},
+};
+
int i915_debugfs_init(struct drm_minor *minor)
{
- int ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_wedged",
- &i915_wedged_fops);
- if (ret)
- return ret;
+ int ret, i;
ret = i915_forcewake_create(minor->debugfs_root, minor);
if (ret)
return ret;
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_max_freq",
- &i915_max_freq_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_min_freq",
- &i915_min_freq_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_cache_sharing",
- &i915_cache_sharing_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_ring_stop",
- &i915_ring_stop_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_gem_drop_caches",
- &i915_drop_caches_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_error_state",
- &i915_error_state_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_next_seqno",
- &i915_next_seqno_fops);
- if (ret)
- return ret;
+ for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ i915_debugfs_files[i].name,
+ i915_debugfs_files[i].fops);
+ if (ret)
+ return ret;
+ }
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
@@ -2419,26 +2147,18 @@ int i915_debugfs_init(struct drm_minor *minor)
void i915_debugfs_cleanup(struct drm_minor *minor)
{
+ int i;
+
drm_debugfs_remove_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
- 1, minor);
+ for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+ struct drm_info_list *info_list =
+ (struct drm_info_list *) i915_debugfs_files[i].fops;
+
+ drm_debugfs_remove_files(info_list, 1, minor);
+ }
}
#endif /* CONFIG_DEBUG_FS */
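
The rework above replaces eight copy-pasted create/remove pairs with one name/fops table that both init and cleanup walk. A generic sketch of the idea against the plain debugfs API, with hypothetical names:

#include <linux/debugfs.h>
#include <linux/stat.h>

struct dbg_entry {
	const char *name;
	const struct file_operations *fops;
};

/* One loop registers the whole table; the same table can drive removal. */
static int dbg_register_all(struct dentry *root, void *priv,
			    const struct dbg_entry *tbl, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (!debugfs_create_file(tbl[i].name, S_IRUSR | S_IWUSR,
					 root, priv, tbl[i].fops))
			return -ENOMEM;
	return 0;
}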
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 66c63808fa35..f44c1a004f95 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1323,10 +1323,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
- if (INTEL_INFO(dev)->num_pipes == 0) {
- dev_priv->mm.suspended = 0;
+ if (INTEL_INFO(dev)->num_pipes == 0)
return 0;
- }
ret = intel_fbdev_init(dev);
if (ret)
@@ -1352,9 +1350,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
drm_kms_helper_poll_init(dev);
- /* We're off and running w/KMS */
- dev_priv->mm.suspended = 0;
-
return 0;
cleanup_gem:
@@ -1363,7 +1358,7 @@ cleanup_gem:
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
- drm_mm_takedown(&dev_priv->mm.gtt_space);
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_irq:
drm_irq_uninstall(dev);
cleanup_gem_stolen:
@@ -1441,22 +1436,6 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
}
/**
- * intel_early_sanitize_regs - clean up BIOS state
- * @dev: DRM device
- *
- * This function must be called before we do any I915_READ or I915_WRITE. Its
- * purpose is to clean up any state left by the BIOS that may affect us when
- * reading and/or writing registers.
- */
-static void intel_early_sanitize_regs(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (HAS_FPGA_DBG_UNCLAIMED(dev))
- I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-}
-
-/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
* @flags: startup flags
@@ -1497,15 +1476,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
- spin_lock_init(&dev_priv->rps.lock);
- spin_lock_init(&dev_priv->gt_lock);
spin_lock_init(&dev_priv->backlight.lock);
+ spin_lock_init(&dev_priv->uncore.lock);
+ spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);
i915_dump_device_info(dev_priv);
+ INIT_LIST_HEAD(&dev_priv->vm_list);
+ INIT_LIST_HEAD(&dev_priv->gtt.base.global_link);
+ list_add(&dev_priv->gtt.base.global_link, &dev_priv->vm_list);
+
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
@@ -1531,7 +1514,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_bridge;
}
- intel_early_sanitize_regs(dev);
+ intel_uncore_early_sanitize(dev);
+
+ if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
+ /* The docs do not explain exactly how the calculation can be
+ * made. It is somewhat guessable, but for now, it's always
+ * 128MB.
+ * NB: We can't write IDICR yet because we do not have gt funcs
+ * set up */
+ dev_priv->ellc_size = 128;
+ DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
+ }
ret = i915_gem_gtt_init(dev);
if (ret)
@@ -1567,8 +1560,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_rmmap;
}
- dev_priv->mm.gtt_mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
- aperture_size);
+ dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+ aperture_size);
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
@@ -1594,8 +1587,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_detect_pch(dev);
intel_irq_init(dev);
- intel_gt_sanitize(dev);
- intel_gt_init(dev);
+ intel_pm_init(dev);
+ intel_uncore_sanitize(dev);
+ intel_uncore_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
@@ -1630,9 +1624,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_gem_unload;
}
- /* Start out suspended */
- dev_priv->mm.suspended = 1;
-
if (HAS_POWER_WELL(dev))
i915_init_power_well(dev);
@@ -1642,6 +1633,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
DRM_ERROR("failed to init modeset\n");
goto out_gem_unload;
}
+ } else {
+ /* Start out suspended in ums mode. */
+ dev_priv->ums.mm_suspended = 1;
}
i915_setup_sysfs(dev);
@@ -1668,9 +1662,9 @@ out_gem_unload:
intel_teardown_mchbar(dev);
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
- arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+ arch_phys_wc_del(dev_priv->gtt.mtrr);
io_mapping_free(dev_priv->gtt.mappable);
- dev_priv->gtt.gtt_remove(dev);
+ dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
@@ -1706,7 +1700,7 @@ int i915_driver_unload(struct drm_device *dev)
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
io_mapping_free(dev_priv->gtt.mappable);
- arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+ arch_phys_wc_del(dev_priv->gtt.mtrr);
acpi_video_unregister();
@@ -1755,7 +1749,9 @@ int i915_driver_unload(struct drm_device *dev)
i915_free_hws(dev);
}
- drm_mm_takedown(&dev_priv->mm.gtt_space);
+ list_del(&dev_priv->gtt.base.global_link);
+ WARN_ON(!list_empty(&dev_priv->vm_list));
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
@@ -1765,7 +1761,7 @@ int i915_driver_unload(struct drm_device *dev)
destroy_workqueue(dev_priv->wq);
pm_qos_remove_request(&dev_priv->pm_qos);
- dev_priv->gtt.gtt_remove(dev);
+ dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
if (dev_priv->slab)
kmem_cache_destroy(dev_priv->slab);
@@ -1841,7 +1837,7 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
kfree(file_priv);
}
-struct drm_ioctl_desc i915_ioctls[] = {
+const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 45b3c030f483..13457e3e9cad 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -118,6 +118,10 @@ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");
+int i915_enable_psr __read_mostly = 0;
+module_param_named(enable_psr, i915_enable_psr, int, 0600);
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+
unsigned int i915_preliminary_hw_support __read_mostly = 0;
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
@@ -132,6 +136,16 @@ int i915_enable_ips __read_mostly = 1;
module_param_named(enable_ips, i915_enable_ips, int, 0600);
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
+bool i915_fastboot __read_mostly = 0;
+module_param_named(fastboot, i915_fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
+ "(default: false)");
+
+bool i915_prefault_disable __read_mostly;
+module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
+MODULE_PARM_DESC(prefault_disable,
+ "Disable page prefaulting for pread/pwrite/reloc (default: false). For developers only.");
+
static struct drm_driver driver;
extern int intel_agp_enabled;
@@ -551,7 +565,11 @@ static int i915_drm_freeze(struct drm_device *dev)
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- int error = i915_gem_idle(dev);
+ int error;
+
+ mutex_lock(&dev->struct_mutex);
+ error = i915_gem_idle(dev);
+ mutex_unlock(&dev->struct_mutex);
if (error) {
dev_err(&dev->pdev->dev,
"GEM idle failed, resume might fail\n");
@@ -656,7 +674,6 @@ static int __i915_drm_thaw(struct drm_device *dev)
intel_init_pch_refclk(dev);
mutex_lock(&dev->struct_mutex);
- dev_priv->mm.suspended = 0;
error = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
@@ -706,7 +723,7 @@ static int i915_drm_thaw(struct drm_device *dev)
{
int error = 0;
- intel_gt_sanitize(dev);
+ intel_uncore_sanitize(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
@@ -732,7 +749,7 @@ int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
- intel_gt_sanitize(dev);
+ intel_uncore_sanitize(dev);
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -753,139 +770,6 @@ int i915_resume(struct drm_device *dev)
return 0;
}
-static int i8xx_do_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_I85X(dev))
- return -ENODEV;
-
- I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
- POSTING_READ(D_STATE);
-
- if (IS_I830(dev) || IS_845G(dev)) {
- I915_WRITE(DEBUG_RESET_I830,
- DEBUG_RESET_DISPLAY |
- DEBUG_RESET_RENDER |
- DEBUG_RESET_FULL);
- POSTING_READ(DEBUG_RESET_I830);
- msleep(1);
-
- I915_WRITE(DEBUG_RESET_I830, 0);
- POSTING_READ(DEBUG_RESET_I830);
- }
-
- msleep(1);
-
- I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
- POSTING_READ(D_STATE);
-
- return 0;
-}
-
-static int i965_reset_complete(struct drm_device *dev)
-{
- u8 gdrst;
- pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- return (gdrst & GRDOM_RESET_ENABLE) == 0;
-}
-
-static int i965_do_reset(struct drm_device *dev)
-{
- int ret;
- u8 gdrst;
-
- /*
- * Set the domains we want to reset (GRDOM/bits 2 and 3) as
- * well as the reset bit (GR/bit 0). Setting the GR bit
- * triggers the reset; when done, the hardware will clear it.
- */
- pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- pci_write_config_byte(dev->pdev, I965_GDRST,
- gdrst | GRDOM_RENDER |
- GRDOM_RESET_ENABLE);
- ret = wait_for(i965_reset_complete(dev), 500);
- if (ret)
- return ret;
-
- /* We can't reset render&media without also resetting display ... */
- pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- pci_write_config_byte(dev->pdev, I965_GDRST,
- gdrst | GRDOM_MEDIA |
- GRDOM_RESET_ENABLE);
-
- return wait_for(i965_reset_complete(dev), 500);
-}
-
-static int ironlake_do_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 gdrst;
- int ret;
-
- gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
- gdrst &= ~GRDOM_MASK;
- I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
- gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
- if (ret)
- return ret;
-
- /* We can't reset render&media without also resetting display ... */
- gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
- gdrst &= ~GRDOM_MASK;
- I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
- gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
- return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
-}
-
-static int gen6_do_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
- unsigned long irqflags;
-
- /* Hold gt_lock across reset to prevent any register access
- * with forcewake not set correctly
- */
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
-
- /* Reset the chip */
-
- /* GEN6_GDRST is not in the gt power well, no need to check
- * for fifo space for the write or forcewake the chip for
- * the read
- */
- I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
-
- /* Spin waiting for the device to ack the reset request */
- ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
-
- /* If reset with a user forcewake, try to restore, otherwise turn it off */
- if (dev_priv->forcewake_count)
- dev_priv->gt.force_wake_get(dev_priv);
- else
- dev_priv->gt.force_wake_put(dev_priv);
-
- /* Restore fifo count */
- dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
-
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
- return ret;
-}
-
-int intel_gpu_reset(struct drm_device *dev)
-{
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6: return gen6_do_reset(dev);
- case 5: return ironlake_do_reset(dev);
- case 4: return i965_do_reset(dev);
- case 2: return i8xx_do_reset(dev);
- default: return -ENODEV;
- }
-}
-
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
@@ -955,11 +839,11 @@ int i915_reset(struct drm_device *dev)
* switched away).
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
- !dev_priv->mm.suspended) {
+ !dev_priv->ums.mm_suspended) {
struct intel_ring_buffer *ring;
int i;
- dev_priv->mm.suspended = 0;
+ dev_priv->ums.mm_suspended = 0;
i915_gem_init_swizzling(dev);
@@ -1154,7 +1038,7 @@ static struct drm_driver driver = {
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_mmap_gtt,
- .dumb_destroy = i915_gem_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = &i915_driver_fops,
.name = DRIVER_NAME,
@@ -1215,136 +1099,3 @@ module_exit(i915_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
-
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
- ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
-static void
-ilk_dummy_write(struct drm_i915_private *dev_priv)
-{
- /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
- * the chip from rc6 before touching it for real. MI_MODE is masked,
- * hence harmless to write 0 into. */
- I915_WRITE_NOTRACE(MI_MODE, 0);
-}
-
-static void
-hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
-{
- if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
- (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
- DRM_ERROR("Unknown unclaimed register before writing to %x\n",
- reg);
- I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- }
-}
-
-static void
-hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
-{
- if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
- (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
- DRM_ERROR("Unclaimed write to %x\n", reg);
- I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- }
-}
-
-#define __i915_read(x, y) \
-u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
- unsigned long irqflags; \
- u##x val = 0; \
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
- if (IS_GEN5(dev_priv->dev)) \
- ilk_dummy_write(dev_priv); \
- if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- if (dev_priv->forcewake_count == 0) \
- dev_priv->gt.force_wake_get(dev_priv); \
- val = read##y(dev_priv->regs + reg); \
- if (dev_priv->forcewake_count == 0) \
- dev_priv->gt.force_wake_put(dev_priv); \
- } else { \
- val = read##y(dev_priv->regs + reg); \
- } \
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
- trace_i915_reg_rw(false, reg, val, sizeof(val)); \
- return val; \
-}
-
-__i915_read(8, b)
-__i915_read(16, w)
-__i915_read(32, l)
-__i915_read(64, q)
-#undef __i915_read
-
-#define __i915_write(x, y) \
-void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
- unsigned long irqflags; \
- u32 __fifo_ret = 0; \
- trace_i915_reg_rw(true, reg, val, sizeof(val)); \
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
- if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
- } \
- if (IS_GEN5(dev_priv->dev)) \
- ilk_dummy_write(dev_priv); \
- hsw_unclaimed_reg_clear(dev_priv, reg); \
- write##y(val, dev_priv->regs + reg); \
- if (unlikely(__fifo_ret)) { \
- gen6_gt_check_fifodbg(dev_priv); \
- } \
- hsw_unclaimed_reg_check(dev_priv, reg); \
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
-}
-__i915_write(8, b)
-__i915_write(16, w)
-__i915_write(32, l)
-__i915_write(64, q)
-#undef __i915_write
-
-static const struct register_whitelist {
- uint64_t offset;
- uint32_t size;
- uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
-} whitelist[] = {
- { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
-};
-
-int i915_reg_read_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_reg_read *reg = data;
- struct register_whitelist const *entry = whitelist;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
- if (entry->offset == reg->offset &&
- (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
- break;
- }
-
- if (i == ARRAY_SIZE(whitelist))
- return -EINVAL;
-
- switch (entry->size) {
- case 8:
- reg->val = I915_READ64(reg->offset);
- break;
- case 4:
- reg->val = I915_READ(reg->offset);
- break;
- case 2:
- reg->val = I915_READ16(reg->offset);
- break;
- case 1:
- reg->val = I915_READ8(reg->offset);
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
-
- return 0;
-}
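
The removed i915_reg_read_ioctl() (it moves to intel_uncore.c in this series) gates each whitelist entry on a per-generation bitmask: bit N set means gen N may read the register, so 0xF0 covers gens 4 through 7. The test in isolation, as a hypothetical helper:

#include <linux/types.h>

/* True if 'gen' is enabled in the whitelist entry's generation mask. */
static inline bool gen_allowed(int gen, u32 gen_bitmask)
{
	return (1u << gen) & gen_bitmask;
}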
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d2ee3343c943..ab568201b932 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -144,6 +144,7 @@ enum intel_dpll_id {
struct intel_dpll_hw_state {
uint32_t dpll;
+ uint32_t dpll_md;
uint32_t fp0;
uint32_t fp1;
};
@@ -156,6 +157,8 @@ struct intel_shared_dpll {
/* should match the index in the dev_priv->shared_dplls array */
enum intel_dpll_id id;
struct intel_dpll_hw_state hw_state;
+ void (*mode_set)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
void (*enable)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
void (*disable)(struct drm_i915_private *dev_priv,
@@ -364,6 +367,7 @@ struct drm_i915_display_funcs {
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
struct intel_crtc_config *);
+ void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
int (*crtc_mode_set)(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *old_fb);
@@ -387,11 +391,20 @@ struct drm_i915_display_funcs {
/* pll clock increase/decrease */
};
-struct drm_i915_gt_funcs {
+struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv);
void (*force_wake_put)(struct drm_i915_private *dev_priv);
};
+struct intel_uncore {
+ spinlock_t lock; /** lock is also taken in irq contexts. */
+
+ struct intel_uncore_funcs funcs;
+
+ unsigned fifo_count;
+ unsigned forcewake_count;
+};
+
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
func(is_mobile) sep \
func(is_i85x) sep \
@@ -442,6 +455,54 @@ enum i915_cache_level {
typedef uint32_t gen6_gtt_pte_t;
+struct i915_address_space {
+ struct drm_mm mm;
+ struct drm_device *dev;
+ struct list_head global_link;
+ unsigned long start; /* Start offset always 0 for dri2 */
+ size_t total; /* size addr space maps (ex. 2GB for ggtt) */
+
+ struct {
+ dma_addr_t addr;
+ struct page *page;
+ } scratch;
+
+ /**
+ * List of objects currently involved in rendering.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /* FIXME: Need a more generic return type */
+ gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
+ enum i915_cache_level level);
+ void (*clear_range)(struct i915_address_space *vm,
+ unsigned int first_entry,
+ unsigned int num_entries);
+ void (*insert_entries)(struct i915_address_space *vm,
+ struct sg_table *st,
+ unsigned int first_entry,
+ enum i915_cache_level cache_level);
+ void (*cleanup)(struct i915_address_space *vm);
+};
+
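+
The new i915_address_space abstracts PTE management behind vfuncs so GGTT and PPGTT can share callers. A sketch of the shape of an insert_entries backend, assuming a hypothetical write_pte() in place of the gen-specific PTE writers in i915_gem_gtt.c:

#include <linux/scatterlist.h>

/* write_pte() is a stand-in for the real gen-specific PTE write. */
static void example_insert_entries(struct i915_address_space *vm,
				   struct sg_table *st,
				   unsigned int first_entry,
				   enum i915_cache_level level)
{
	struct sg_page_iter sg_iter;
	unsigned int i = first_entry;

	/* One PTE per backing page, in address-space order. */
	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
		write_pte(vm, i++, sg_page_iter_dma_address(&sg_iter), level);
}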
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
@@ -450,8 +511,7 @@ typedef uint32_t gen6_gtt_pte_t;
* the spec.
*/
struct i915_gtt {
- unsigned long start; /* Start offset of used GTT */
- size_t total; /* Total size GTT can map */
+ struct i915_address_space base;
size_t stolen_size; /* Total size of stolen memory */
unsigned long mappable_end; /* End offset that we can CPU map */
@@ -462,50 +522,35 @@ struct i915_gtt {
void __iomem *gsm;
bool do_idle_maps;
- dma_addr_t scratch_page_dma;
- struct page *scratch_page;
+
+ int mtrr;
/* global gtt ops */
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
size_t *stolen, phys_addr_t *mappable_base,
unsigned long *mappable_end);
- void (*gtt_remove)(struct drm_device *dev);
- void (*gtt_clear_range)(struct drm_device *dev,
- unsigned int first_entry,
- unsigned int num_entries);
- void (*gtt_insert_entries)(struct drm_device *dev,
- struct sg_table *st,
- unsigned int pg_start,
- enum i915_cache_level cache_level);
- gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level);
};
-#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
+#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
-#define I915_PPGTT_PD_ENTRIES 512
-#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
- struct drm_device *dev;
+ struct i915_address_space base;
unsigned num_pd_entries;
struct page **pt_pages;
uint32_t pd_offset;
dma_addr_t *pt_dma_addr;
- dma_addr_t scratch_page_dma_addr;
- /* pte functions, mirroring the interface of the global gtt. */
- void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
- unsigned int first_entry,
- unsigned int num_entries);
- void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
- struct sg_table *st,
- unsigned int pg_start,
- enum i915_cache_level cache_level);
- gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level);
int (*enable)(struct drm_device *dev);
- void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
+};
+
+/* To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+ struct drm_mm_node node;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+
+ struct list_head vma_link; /* Link in the object's VMA list */
};
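
struct i915_vma ties one object to one address space through vma_link. A sketch of constructing such a link, modeled on (but not identical to) the i915_gem_vma_create() declared further down:

#include <linux/err.h>
#include <linux/slab.h>

/* Sketch: allocate a VMA and hang it off the object's VMA list. */
static struct i915_vma *example_vma_create(struct drm_i915_gem_object *obj,
					   struct i915_address_space *vm)
{
	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);

	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	vma->vm = vm;
	vma->obj = obj;
	list_add_tail(&vma->vma_link, &obj->vma_list);

	return vma;
}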
struct i915_ctx_hang_stats {
@@ -528,15 +573,46 @@ struct i915_hw_context {
struct i915_ctx_hang_stats hang_stats;
};
-enum no_fbc_reason {
- FBC_NO_OUTPUT, /* no outputs enabled to compress */
- FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
- FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
- FBC_MODE_TOO_LARGE, /* mode too large for compression */
- FBC_BAD_PLANE, /* fbc not supported on plane */
- FBC_NOT_TILED, /* buffer not tiled */
- FBC_MULTIPLE_PIPES, /* more than one pipe active */
- FBC_MODULE_PARAM,
+struct i915_fbc {
+ unsigned long size;
+ unsigned int fb_id;
+ enum plane plane;
+ int y;
+
+ struct drm_mm_node *compressed_fb;
+ struct drm_mm_node *compressed_llb;
+
+ struct intel_fbc_work {
+ struct delayed_work work;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ int interval;
+ } *fbc_work;
+
+ enum {
+ FBC_NO_OUTPUT, /* no outputs enabled to compress */
+ FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
+ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+ FBC_MODE_TOO_LARGE, /* mode too large for compression */
+ FBC_BAD_PLANE, /* fbc not supported on plane */
+ FBC_NOT_TILED, /* buffer not tiled */
+ FBC_MULTIPLE_PIPES, /* more than one pipe active */
+ FBC_MODULE_PARAM,
+ FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+ } no_fbc_reason;
+};
+
+enum no_psr_reason {
+ PSR_NO_SOURCE, /* Not supported on platform */
+ PSR_NO_SINK, /* Not supported by panel */
+ PSR_MODULE_PARAM,
+ PSR_CRTC_NOT_ACTIVE,
+ PSR_PWR_WELL_ENABLED,
+ PSR_NOT_TILED,
+ PSR_SPRITE_ENABLED,
+ PSR_S3D_ENABLED,
+ PSR_INTERLACED_ENABLED,
+ PSR_HSW_NOT_DDIA,
};
enum intel_pch {
@@ -722,12 +798,12 @@ struct i915_suspend_saved_registers {
};
struct intel_gen6_power_mgmt {
+ /* work and pm_iir are protected by dev_priv->irq_lock */
struct work_struct work;
- struct delayed_work vlv_work;
u32 pm_iir;
- /* lock - irqsave spinlock that protectects the work_struct and
- * pm_iir. */
- spinlock_t lock;
+
+ /* On vlv we need to manually drop to Vmin with a delayed work. */
+ struct delayed_work vlv_work;
/* The below variables and all the rps hw state are protected by
* dev->struct_mutex. */
@@ -793,6 +869,18 @@ struct i915_dri1_state {
uint32_t counter;
};
+struct i915_ums_state {
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int mm_suspended;
+};
+
struct intel_l3_parity {
u32 *remap_info;
struct work_struct error_work;
@@ -801,8 +889,6 @@ struct intel_l3_parity {
struct i915_gem_mm {
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
- /** Memory allocator for GTT */
- struct drm_mm gtt_space;
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
struct list_head bound_list;
@@ -816,37 +902,12 @@ struct i915_gem_mm {
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
- int gtt_mtrr;
-
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
struct shrinker inactive_shrinker;
bool shrinker_no_lock_stealing;
- /**
- * List of objects currently involved in rendering.
- *
- * Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
- * represents when the rendering involved will be completed.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head active_list;
-
- /**
- * LRU list of objects which are not in the ringbuffer and
- * are ready to unbind, but are still in the GTT.
- *
- * last_rendering_seqno is 0 while an object is in this list.
- *
- * A reference is not held on the buffer while on this list,
- * as merely being GTT-bound shouldn't prevent its being
- * freed, and we'll pull it off the list in the free path.
- */
- struct list_head inactive_list;
-
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
@@ -865,16 +926,6 @@ struct i915_gem_mm {
*/
bool interruptible;
- /**
- * Flag if the X Server, and thus DRM, is not currently in
- * control of the device.
- *
- * This is set between LeaveVT and EnterVT. It needs to be
- * replaced with a semaphore. It also needs to be
- * transitioned away from for kernel modesetting.
- */
- int suspended;
-
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
@@ -884,6 +935,7 @@ struct i915_gem_mm {
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
/* accounting, useful for userland debugging */
+ spinlock_t object_stat_lock;
size_t object_memory;
u32 object_count;
};
@@ -897,6 +949,11 @@ struct drm_i915_error_state_buf {
loff_t pos;
};
+struct i915_error_state_file_priv {
+ struct drm_device *dev;
+ struct drm_i915_error_state *error;
+};
+
struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -998,14 +1055,7 @@ typedef struct drm_i915_private {
void __iomem *regs;
- struct drm_i915_gt_funcs gt;
- /** gt_fifo_count and the subsequent register write are synchronized
- * with dev->struct_mutex. */
- unsigned gt_fifo_count;
- /** forcewake_count is protected by gt_lock */
- unsigned forcewake_count;
- /** gt_lock is also taken in irq contexts. */
- spinlock_t gt_lock;
+ struct intel_uncore uncore;
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
@@ -1059,12 +1109,7 @@ typedef struct drm_i915_private {
int num_plane;
- unsigned long cfb_size;
- unsigned int cfb_fb;
- enum plane cfb_plane;
- int cfb_y;
- struct intel_fbc_work *fbc_work;
-
+ struct i915_fbc fbc;
struct intel_opregion opregion;
struct intel_vbt_data vbt;
@@ -1081,8 +1126,6 @@ typedef struct drm_i915_private {
} backlight;
/* LVDS info */
- struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
- struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
bool no_aux_handshake;
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
@@ -1105,7 +1148,8 @@ typedef struct drm_i915_private {
enum modeset_restore modeset_restore;
struct mutex modeset_restore_lock;
- struct i915_gtt gtt;
+ struct list_head vm_list; /* Global list of all address spaces */
+ struct i915_gtt gtt; /* VMA representing the global address space */
struct i915_gem_mm mm;
@@ -1132,6 +1176,9 @@ typedef struct drm_i915_private {
struct intel_l3_parity l3_parity;
+ /* Cannot be determined by PCIID. You must always read a register. */
+ size_t ellc_size;
+
/* gen6+ rps state */
struct intel_gen6_power_mgmt rps;
@@ -1142,10 +1189,7 @@ typedef struct drm_i915_private {
/* Haswell power well */
struct i915_power_well power_well;
- enum no_fbc_reason no_fbc_reason;
-
- struct drm_mm_node *compressed_fb;
- struct drm_mm_node *compressed_llb;
+ enum no_psr_reason no_psr_reason;
struct i915_gpu_error gpu_error;
@@ -1173,6 +1217,8 @@ typedef struct drm_i915_private {
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
+ /* Old ums support infrastructure, same warning applies. */
+ struct i915_ums_state ums;
} drm_i915_private_t;
/* Iterate over initialised rings */
@@ -1187,7 +1233,7 @@ enum hdmi_force_audio {
HDMI_AUDIO_ON, /* force turn on HDMI audio */
};
-#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
+#define I915_GTT_OFFSET_NONE ((u32)-1)
struct drm_i915_gem_object_ops {
/* Interface between the GEM object and its backing storage.
@@ -1212,8 +1258,9 @@ struct drm_i915_gem_object {
const struct drm_i915_gem_object_ops *ops;
- /** Current space allocated to this object in the GTT, if any. */
- struct drm_mm_node *gtt_space;
+ /** List of VMAs backed by this object */
+ struct list_head vma_list;
+
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
struct list_head global_list;
@@ -1314,13 +1361,6 @@ struct drm_i915_gem_object {
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
- /**
- * Current offset of the object in GTT space.
- *
- * This is the same as gtt_space->start
- */
- uint32_t gtt_offset;
-
struct intel_ring_buffer *ring;
/** Breadcrumb of last rendering to the buffer. */
@@ -1346,6 +1386,52 @@ struct drm_i915_gem_object {
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+/* This is a temporary define to help transition us to real VMAs. If you see
+ * this, you're either reviewing code, or bisecting it. */
+static inline struct i915_vma *
+__i915_gem_obj_to_vma(struct drm_i915_gem_object *obj)
+{
+ if (list_empty(&obj->vma_list))
+ return NULL;
+ return list_first_entry(&obj->vma_list, struct i915_vma, vma_link);
+}
+
+/* Whether or not this object is currently mapped by the translation tables */
+static inline bool
+i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
+{
+ struct i915_vma *vma = __i915_gem_obj_to_vma(o);
+ if (vma == NULL)
+ return false;
+ return drm_mm_node_allocated(&vma->node);
+}
+
+/* Offset of the first PTE pointing to this object */
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
+{
+ BUG_ON(list_empty(&o->vma_list));
+ return __i915_gem_obj_to_vma(o)->node.start;
+}
+
+/* The size used in the translation tables may be larger than the actual size of
+ * the object on GEN2/GEN3 because of the way tiling is handled. See
+ * i915_gem_get_gtt_size() for more details.
+ */
+static inline unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
+{
+ BUG_ON(list_empty(&o->vma_list));
+ return __i915_gem_obj_to_vma(o)->node.size;
+}
+
+static inline void
+i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
+ enum i915_cache_level color)
+{
+ __i915_gem_obj_to_vma(o)->node.color = color;
+}
+
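+
A hypothetical caller of the helpers above, replacing what used to be direct obj->gtt_space / obj->gtt_offset accesses:

/* Sketch: dump an object's GGTT binding via the new accessors. */
static void example_dump_binding(struct drm_i915_gem_object *obj)
{
	if (!i915_gem_obj_ggtt_bound(obj))
		return;

	DRM_DEBUG("bound at 0x%08lx, size %lu\n",
		  i915_gem_obj_ggtt_offset(obj),
		  i915_gem_obj_ggtt_size(obj));
}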
/**
* Request queue structure.
*
@@ -1526,7 +1612,7 @@ struct drm_i915_file_private {
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
-extern struct drm_ioctl_desc i915_ioctls[];
+extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
@@ -1540,9 +1626,12 @@ extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
+extern int i915_enable_psr __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;
extern int i915_enable_ips __read_mostly;
+extern bool i915_fastboot __read_mostly;
+extern bool i915_prefault_disable __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
@@ -1578,15 +1667,20 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
+void i915_queue_hangcheck(struct drm_device *dev);
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_gt_init(struct drm_device *dev);
-extern void intel_gt_sanitize(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
-void i915_error_state_free(struct kref *error_ref);
+extern void intel_uncore_sanitize(struct drm_device *dev);
+extern void intel_uncore_early_sanitize(struct drm_device *dev);
+extern void intel_uncore_init(struct drm_device *dev);
+extern void intel_uncore_reset(struct drm_device *dev);
+extern void intel_uncore_clear_errors(struct drm_device *dev);
+extern void intel_uncore_check_errors(struct drm_device *dev);
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1594,13 +1688,6 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
-#ifdef CONFIG_DEBUG_FS
-extern void i915_destroy_error_state(struct drm_device *dev);
-#else
-#define i915_destroy_error_state(x)
-#endif
-
-
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1657,6 +1744,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
+void i915_gem_vma_destroy(struct i915_vma *vma);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
@@ -1700,8 +1790,6 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
-int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle);
/**
* Returns true if seq1 is later than seq2.
*/
@@ -1827,7 +1915,7 @@ static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
}
struct i915_ctx_hang_stats * __must_check
-i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
+i915_gem_context_get_hang_stats(struct drm_device *dev,
struct drm_file *file,
u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -1911,8 +1999,27 @@ void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
+
+/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
+int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
+ const struct i915_error_state_file_priv *error);
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
+ size_t count, loff_t pos);
+static inline void i915_error_state_buf_release(
+ struct drm_i915_error_state_buf *eb)
+{
+ kfree(eb->buf);
+}
+void i915_capture_error_state(struct drm_device *dev);
+void i915_error_state_get(struct drm_device *dev,
+ struct i915_error_state_file_priv *error_priv);
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
+void i915_destroy_error_state(struct drm_device *dev);
+
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+const char *i915_cache_level_str(int type);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
@@ -1992,7 +2099,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* overlay */
-#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_overlay_error_state *error);
@@ -2001,7 +2107,6 @@ extern struct intel_display_error_state *intel_display_capture_error_state(struc
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct drm_device *dev,
struct intel_display_error_state *error);
-#endif
/* On SNB platform, before reading ring registers forcewake bit
* must be set to prevent GT core from power down and stale values being
@@ -2009,7 +2114,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
-int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@@ -2028,39 +2132,37 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);
-#define __i915_read(x, y) \
- u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
-
-__i915_read(8, b)
-__i915_read(16, w)
-__i915_read(32, l)
-__i915_read(64, q)
+#define __i915_read(x) \
+ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
+__i915_read(8)
+__i915_read(16)
+__i915_read(32)
+__i915_read(64)
#undef __i915_read
-#define __i915_write(x, y) \
- void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
-
-__i915_write(8, b)
-__i915_write(16, w)
-__i915_write(32, l)
-__i915_write(64, q)
+#define __i915_write(x) \
+ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
+__i915_write(8)
+__i915_write(16)
+__i915_write(32)
+__i915_write(64)
#undef __i915_write
-#define I915_READ8(reg) i915_read8(dev_priv, (reg))
-#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))
+#define I915_READ8(reg) i915_read8(dev_priv, (reg), true)
+#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true)
-#define I915_READ16(reg) i915_read16(dev_priv, (reg))
-#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
-#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg))
-#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg))
+#define I915_READ16(reg) i915_read16(dev_priv, (reg), true)
+#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true)
+#define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false)
+#define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false)
-#define I915_READ(reg) i915_read32(dev_priv, (reg))
-#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
-#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg))
-#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg))
+#define I915_READ(reg) i915_read32(dev_priv, (reg), true)
+#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true)
+#define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false)
+#define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false)
-#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
-#define I915_READ64(reg) i915_read64(dev_priv, (reg))
+#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true)
+#define I915_READ64(reg) i915_read64(dev_priv, (reg), true)
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
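
With the trace parameter added above, the NOTRACE variants become ordinary calls with trace=false rather than raw readl()/writel(), so every MMIO access funnels through one forcewake-aware path. A simplified sketch of the read side, omitting all of the forcewake handling the real intel_uncore.c version performs:

/* Simplified sketch of the trace-aware read path (no forcewake logic). */
u32 example_read32(struct drm_i915_private *dev_priv, u32 reg, bool trace)
{
	u32 val = readl(dev_priv->regs + reg);

	if (trace)
		trace_i915_reg_rw(false, reg, val, sizeof(val));
	return val;
}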
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d9e2208cfe98..26c5f802a9df 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -26,6 +26,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
@@ -75,15 +76,19 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
size_t size)
{
+ spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count++;
dev_priv->mm.object_memory += size;
+ spin_unlock(&dev_priv->mm.object_stat_lock);
}
static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
size_t size)
{
+ spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count--;
dev_priv->mm.object_memory -= size;
+ spin_unlock(&dev_priv->mm.object_stat_lock);
}
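
The new object_stat_lock lets these counters be updated — and, by the same token, read — without holding dev->struct_mutex. A hypothetical reader following the same locking rule:

/* Sketch: snapshot the object-accounting counters consistently. */
static void example_read_stats(struct drm_i915_private *dev_priv,
			       u32 *count, size_t *memory)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	*count = dev_priv->mm.object_count;
	*memory = dev_priv->mm.object_memory;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}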
static int
@@ -135,7 +140,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return obj->gtt_space && !obj->active;
+ return i915_gem_obj_ggtt_bound(obj) && !obj->active;
}
int
@@ -178,10 +183,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (obj->pin_count)
- pinned += obj->gtt_space->size;
+ pinned += i915_gem_obj_ggtt_size(obj);
mutex_unlock(&dev->struct_mutex);
- args->aper_size = dev_priv->gtt.total;
+ args->aper_size = dev_priv->gtt.base.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
@@ -219,16 +224,10 @@ i915_gem_create(struct drm_file *file,
return -ENOMEM;
ret = drm_gem_handle_create(file, &obj->base, &handle);
- if (ret) {
- drm_gem_object_release(&obj->base);
- i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
- i915_gem_object_free(obj);
- return ret;
- }
-
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference(&obj->base);
- trace_i915_gem_object_create(obj);
+ drm_gem_object_unreference_unlocked(&obj->base);
+ if (ret)
+ return ret;
*handle_p = handle;
return 0;
@@ -246,13 +245,6 @@ i915_gem_dumb_create(struct drm_file *file,
args->size, &args->handle);
}
-int i915_gem_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
/**
* Creates a new mm object and returns a handle to it.
*/
@@ -422,7 +414,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
* anyway again before the next pread happens. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush = 1;
- if (obj->gtt_space) {
+ if (i915_gem_obj_ggtt_bound(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret)
return ret;
@@ -465,7 +457,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
mutex_unlock(&dev->struct_mutex);
- if (!prefaulted) {
+ if (likely(!i915_prefault_disable) && !prefaulted) {
ret = fault_in_multipages_writeable(user_data, remain);
/* Userspace is tricking us, but we've already clobbered
* its pages with the prefault and promised to write the
@@ -609,7 +601,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
user_data = to_user_ptr(args->data_ptr);
remain = args->size;
- offset = obj->gtt_offset + args->offset;
+ offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -739,7 +731,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
* right away and we therefore have to clflush anyway. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush_after = 1;
- if (obj->gtt_space) {
+ if (i915_gem_obj_ggtt_bound(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
return ret;
@@ -860,10 +852,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
- args->size);
- if (ret)
- return -EFAULT;
+ if (likely(!i915_prefault_disable)) {
+ ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+ args->size);
+ if (ret)
+ return -EFAULT;
+ }
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@@ -1360,8 +1354,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
obj->fault_mappable = true;
- pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
- page_offset;
+ pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
+ pfn >>= PAGE_SHIFT;
+ pfn += page_offset;
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
@@ -1425,11 +1420,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
if (!obj->fault_mappable)
return;
- if (obj->base.dev->dev_mapping)
- unmap_mapping_range(obj->base.dev->dev_mapping,
- (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
- obj->base.size, 1);
-
+ drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
obj->fault_mappable = false;
}
@@ -1485,7 +1476,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- if (obj->base.map_list.map)
+ if (drm_vma_node_has_offset(&obj->base.vma_node))
return 0;
dev_priv->mm.shrinker_no_lock_stealing = true;
@@ -1516,9 +1507,6 @@ out:
static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
- if (!obj->base.map_list.map)
- return;
-
drm_gem_free_mmap_offset(&obj->base);
}
@@ -1557,7 +1545,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
if (ret)
goto out;
- *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
+ *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
out:
drm_gem_object_unreference(&obj->base);
@@ -1667,7 +1655,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (obj->pages == NULL)
return 0;
- BUG_ON(obj->gtt_space);
+ BUG_ON(i915_gem_obj_ggtt_bound(obj));
if (obj->pages_pin_count)
return -EBUSY;
@@ -1691,6 +1679,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
bool purgeable_only)
{
struct drm_i915_gem_object *obj, *next;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
long count = 0;
list_for_each_entry_safe(obj, next,
@@ -1704,9 +1693,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
}
}
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list,
- mm_list) {
+ list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) {
if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
i915_gem_object_unbind(obj) == 0 &&
i915_gem_object_put_pages(obj) == 0) {
@@ -1877,6 +1864,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
u32 seqno = intel_ring_get_seqno(ring);
BUG_ON(ring == NULL);
@@ -1893,7 +1881,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}
/* Move from whatever list we were on to the tail of execution. */
- list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+ list_move_tail(&obj->mm_list, &vm->active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
obj->last_read_seqno = seqno;
@@ -1917,11 +1905,12 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj->mm_list, &vm->inactive_list);
list_del_init(&obj->ring_list);
obj->ring = NULL;
@@ -2085,11 +2074,9 @@ int __i915_add_request(struct intel_ring_buffer *ring,
trace_i915_gem_request_add(ring, request->seqno);
ring->outstanding_lazy_request = 0;
- if (!dev_priv->mm.suspended) {
- if (i915_enable_hangcheck) {
- mod_timer(&dev_priv->gpu_error.hangcheck_timer,
- round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
- }
+ if (!dev_priv->ums.mm_suspended) {
+ i915_queue_hangcheck(ring->dev);
+
if (was_empty) {
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
@@ -2121,8 +2108,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
{
- if (acthd >= obj->gtt_offset &&
- acthd < obj->gtt_offset + obj->base.size)
+ if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+ acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
return true;
return false;
@@ -2180,11 +2167,11 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
if (ring->hangcheck.action != wait &&
i915_request_guilty(request, acthd, &inside)) {
- DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
+ DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
ring->name,
inside ? "inside" : "flushing",
request->batch_obj ?
- request->batch_obj->gtt_offset : 0,
+ i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
request->ctx ? request->ctx->id : 0,
acthd);
@@ -2275,6 +2262,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring;
int i;
@@ -2285,12 +2273,8 @@ void i915_gem_reset(struct drm_device *dev)
/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
- list_for_each_entry(obj,
- &dev_priv->mm.inactive_list,
- mm_list)
- {
+ list_for_each_entry(obj, &vm->inactive_list, mm_list)
obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
- }
i915_gem_restore_fences(dev);
}
@@ -2400,7 +2384,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
idle &= list_empty(&ring->request_list);
}
- if (!dev_priv->mm.suspended && !idle)
+ if (!dev_priv->ums.mm_suspended && !idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
if (idle)
@@ -2593,9 +2577,10 @@ int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+ struct i915_vma *vma;
int ret;
- if (obj->gtt_space == NULL)
+ if (!i915_gem_obj_ggtt_bound(obj))
return 0;
if (obj->pin_count)
@@ -2630,13 +2615,20 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
list_del(&obj->mm_list);
- list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
- drm_mm_put_block(obj->gtt_space);
- obj->gtt_space = NULL;
- obj->gtt_offset = 0;
+ vma = __i915_gem_obj_to_vma(obj);
+ list_del(&vma->vma_link);
+ drm_mm_remove_node(&vma->node);
+ i915_gem_vma_destroy(vma);
+
+ /* Since the unbound list is global, only move to that list if
+ * no more VMAs exist.
+ * NB: Until we have real VMAs there will only ever be one */
+ WARN_ON(!list_empty(&obj->vma_list));
+ if (list_empty(&obj->vma_list))
+ list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
return 0;
}
@@ -2691,12 +2683,12 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
POSTING_READ(fence_reg);
if (obj) {
- u32 size = obj->gtt_space->size;
+ u32 size = i915_gem_obj_ggtt_size(obj);
uint64_t val;
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
+ val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
@@ -2720,15 +2712,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
u32 val;
if (obj) {
- u32 size = obj->gtt_space->size;
+ u32 size = i915_gem_obj_ggtt_size(obj);
int pitch_val;
int tile_width;
- WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+ WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
(size & -size) != size ||
- (obj->gtt_offset & (size - 1)),
- "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
- obj->gtt_offset, obj->map_and_fenceable, size);
+ (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+ "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128;
@@ -2739,7 +2731,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
pitch_val = obj->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
- val = obj->gtt_offset;
+ val = i915_gem_obj_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I915_FENCE_SIZE_BITS(size);
@@ -2764,19 +2756,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
uint32_t val;
if (obj) {
- u32 size = obj->gtt_space->size;
+ u32 size = i915_gem_obj_ggtt_size(obj);
uint32_t pitch_val;
- WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+ WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
(size & -size) != size ||
- (obj->gtt_offset & (size - 1)),
- "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
- obj->gtt_offset, size);
+ (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+ "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+ i915_gem_obj_ggtt_offset(obj), size);
pitch_val = obj->stride / 128;
pitch_val = ffs(pitch_val) - 1;
- val = obj->gtt_offset;
+ val = i915_gem_obj_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I830_FENCE_SIZE_BITS(size);
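The pitch encoding in the fence writers above relies on the tile count per row being a power of two: dividing the stride by the tile width gives that count, and ffs(count) - 1 turns it into the log2 field the hardware expects. A runnable check of the identity (the 2048-byte stride and 128-byte tile width are example values):

#include <stdio.h>
#include <strings.h> /* ffs() */

int main(void)
{
    unsigned stride = 2048;    /* hypothetical buffer stride in bytes */
    unsigned tile_width = 128; /* Y-tile width on these parts */

    unsigned pitch_val = stride / tile_width; /* 16 tiles per row */
    pitch_val = ffs(pitch_val) - 1;           /* log2(16) == 4 */

    printf("encoded pitch field = %u\n", pitch_val);
    return 0;
}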
@@ -2997,7 +2989,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
if (HAS_LLC(dev))
return true;
- if (gtt_space == NULL)
+ if (!drm_mm_node_allocated(gtt_space))
return true;
if (list_empty(&gtt_space->node_list))
@@ -3030,8 +3022,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
if (obj->cache_level != obj->gtt_space->color) {
printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
- obj->gtt_space->start,
- obj->gtt_space->start + obj->gtt_space->size,
+ i915_gem_obj_ggtt_offset(obj),
+ i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
obj->cache_level,
obj->gtt_space->color);
err++;
@@ -3042,8 +3034,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
obj->gtt_space,
obj->cache_level)) {
printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
- obj->gtt_space->start,
- obj->gtt_space->start + obj->gtt_space->size,
+ i915_gem_obj_ggtt_offset(obj),
+ i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
obj->cache_level);
err++;
continue;
@@ -3065,13 +3057,17 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_mm_node *node;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
size_t gtt_max = map_and_fenceable ?
- dev_priv->gtt.mappable_end : dev_priv->gtt.total;
+ dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
+ struct i915_vma *vma;
int ret;
+ if (WARN_ON(!list_empty(&obj->vma_list)))
+ return -EBUSY;
+
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
@@ -3110,16 +3106,18 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (node == NULL) {
- i915_gem_object_unpin_pages(obj);
- return -ENOMEM;
+ vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unpin;
}
search_free:
- ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
+ &vma->node,
size, alignment,
- obj->cache_level, 0, gtt_max);
+ obj->cache_level, 0, gtt_max,
+ DRM_MM_SEARCH_DEFAULT);
if (ret) {
ret = i915_gem_evict_something(dev, size, alignment,
obj->cache_level,
@@ -3128,41 +3126,42 @@ search_free:
if (ret == 0)
goto search_free;
- i915_gem_object_unpin_pages(obj);
- kfree(node);
- return ret;
+ goto err_free_vma;
}
- if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
- i915_gem_object_unpin_pages(obj);
- drm_mm_put_block(node);
- return -EINVAL;
+ if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
+ obj->cache_level))) {
+ ret = -EINVAL;
+ goto err_remove_node;
}
ret = i915_gem_gtt_prepare_object(obj);
- if (ret) {
- i915_gem_object_unpin_pages(obj);
- drm_mm_put_block(node);
- return ret;
- }
+ if (ret)
+ goto err_remove_node;
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
- list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
-
- obj->gtt_space = node;
- obj->gtt_offset = node->start;
+ list_add_tail(&obj->mm_list, &vm->inactive_list);
+ list_add(&vma->vma_link, &obj->vma_list);
fenceable =
- node->size == fence_size &&
- (node->start & (fence_alignment - 1)) == 0;
+ i915_gem_obj_ggtt_size(obj) == fence_size &&
+ (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
- mappable =
- obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
+ mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
+ dev_priv->gtt.mappable_end;
obj->map_and_fenceable = mappable && fenceable;
trace_i915_gem_object_bind(obj, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
+
+err_remove_node:
+ drm_mm_remove_node(&vma->node);
+err_free_vma:
+ i915_gem_vma_destroy(vma);
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+ return ret;
}
void
@@ -3258,7 +3257,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int ret;
/* Not valid to be called on unbound objects. */
- if (obj->gtt_space == NULL)
+ if (!i915_gem_obj_ggtt_bound(obj))
return -EINVAL;
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3297,7 +3296,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/* And bump the LRU for this access */
if (i915_gem_object_is_inactive(obj))
- list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj->mm_list,
+ &dev_priv->gtt.base.inactive_list);
return 0;
}
@@ -3307,6 +3307,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
int ret;
if (obj->cache_level == cache_level)
@@ -3317,13 +3318,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
- if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+ if (vma && !i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
}
- if (obj->gtt_space) {
+ if (i915_gem_obj_ggtt_bound(obj)) {
ret = i915_gem_object_finish_gpu(obj);
if (ret)
return ret;
@@ -3346,7 +3347,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
- obj->gtt_space->color = cache_level;
+ i915_gem_obj_ggtt_set_color(obj, cache_level);
}
if (cache_level == I915_CACHE_NONE) {
@@ -3627,14 +3628,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
- if (obj->gtt_space != NULL) {
- if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+ if (i915_gem_obj_ggtt_bound(obj)) {
+ if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
(map_and_fenceable && !obj->map_and_fenceable)) {
WARN(obj->pin_count,
"bo is already pinned with incorrect alignment:"
- " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+ " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
- obj->gtt_offset, alignment,
+ i915_gem_obj_ggtt_offset(obj), alignment,
map_and_fenceable,
obj->map_and_fenceable);
ret = i915_gem_object_unbind(obj);
@@ -3643,7 +3644,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
}
- if (obj->gtt_space == NULL) {
+ if (!i915_gem_obj_ggtt_bound(obj)) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
ret = i915_gem_object_bind_to_gtt(obj, alignment,
@@ -3669,7 +3670,7 @@ void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pin_count == 0);
- BUG_ON(obj->gtt_space == NULL);
+ BUG_ON(!i915_gem_obj_ggtt_bound(obj));
if (--obj->pin_count == 0)
obj->pin_mappable = false;
@@ -3719,7 +3720,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
* as the X server doesn't manage domains yet
*/
i915_gem_object_flush_cpu_write_domain(obj);
- args->offset = obj->gtt_offset;
+ args->offset = i915_gem_obj_ggtt_offset(obj);
out:
drm_gem_object_unreference(&obj->base);
unlock:
@@ -3862,6 +3863,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->global_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
+ INIT_LIST_HEAD(&obj->vma_list);
obj->ops = ops;
@@ -3926,6 +3928,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
} else
obj->cache_level = I915_CACHE_NONE;
+ trace_i915_gem_object_create(obj);
+
return obj;
}
@@ -3982,15 +3986,33 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_object_free(obj);
}
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (vma == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&vma->vma_link);
+ vma->vm = vm;
+ vma->obj = obj;
+
+ return vma;
+}
+
+void i915_gem_vma_destroy(struct i915_vma *vma)
+{
+ WARN_ON(vma->node.allocated);
+ kfree(vma);
+}
+
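The create/destroy pair above encodes a simple ownership rule: a VMA may only be freed once its drm_mm node has been removed, hence the WARN_ON(vma->node.allocated). A user-space analogue of that contract, with stand-in types rather than the real drm_mm structures:

#include <assert.h>
#include <stdlib.h>

struct node { int allocated; };    /* stand-in for struct drm_mm_node */
struct vma  { struct node node; }; /* stand-in for struct i915_vma */

static struct vma *vma_create(void)
{
    return calloc(1, sizeof(struct vma)); /* node starts out unallocated */
}

static void vma_destroy(struct vma *vma)
{
    assert(!vma->node.allocated); /* caller must remove the node first */
    free(vma);
}

int main(void)
{
    struct vma *vma = vma_create();
    vma->node.allocated = 1; /* bind:   drm_mm_insert_node_in_range_generic() */
    vma->node.allocated = 0; /* unbind: drm_mm_remove_node() */
    vma_destroy(vma);
    return 0;
}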
int
i915_gem_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- mutex_lock(&dev->struct_mutex);
-
- if (dev_priv->mm.suspended) {
+ if (dev_priv->ums.mm_suspended) {
mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -4006,18 +4028,11 @@ i915_gem_idle(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- * And not confound mm.suspended!
- */
- dev_priv->mm.suspended = 1;
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
- mutex_unlock(&dev->struct_mutex);
-
/* Cancel the retire work handler, which should be idle now. */
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
@@ -4150,8 +4165,8 @@ i915_gem_init_hw(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
- if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
- I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+ if (dev_priv->ellc_size)
+ I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
if (HAS_PCH_NOP(dev)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
@@ -4227,7 +4242,7 @@ int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -4239,7 +4254,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
- dev_priv->mm.suspended = 0;
+ dev_priv->ums.mm_suspended = 0;
ret = i915_gem_init_hw(dev);
if (ret != 0) {
@@ -4247,7 +4262,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
return ret;
}
- BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
@@ -4259,7 +4274,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
cleanup_ringbuffer:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
- dev_priv->mm.suspended = 1;
+ dev_priv->ums.mm_suspended = 1;
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4269,11 +4284,26 @@ int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
drm_irq_uninstall(dev);
- return i915_gem_idle(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_idle(dev);
+
+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
+ * We need to replace this with a semaphore, or something.
+ * And not confound ums.mm_suspended!
+ */
+ if (ret != 0)
+ dev_priv->ums.mm_suspended = 1;
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
}
void
@@ -4284,9 +4314,11 @@ i915_gem_lastclose(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+ mutex_lock(&dev->struct_mutex);
ret = i915_gem_idle(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
+ mutex_unlock(&dev->struct_mutex);
}
static void
@@ -4308,8 +4340,8 @@ i915_gem_load(struct drm_device *dev)
SLAB_HWCACHE_ALIGN,
NULL);
- INIT_LIST_HEAD(&dev_priv->mm.active_list);
- INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->gtt.base.active_list);
+ INIT_LIST_HEAD(&dev_priv->gtt.base.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4580,6 +4612,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_i915_private,
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
int nr_to_scan = sc->nr_to_scan;
bool unlock = true;
@@ -4608,7 +4641,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
+ list_for_each_entry(obj, &vm->inactive_list, mm_list)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 51b7a2171cae..2470206a4d07 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -304,31 +304,24 @@ static int context_idr_cleanup(int id, void *p, void *data)
}
struct i915_ctx_hang_stats *
-i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
+i915_gem_context_get_hang_stats(struct drm_device *dev,
struct drm_file *file,
u32 id)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_hw_context *to;
-
- if (dev_priv->hw_contexts_disabled)
- return ERR_PTR(-ENOENT);
-
- if (ring->id != RCS)
- return ERR_PTR(-EINVAL);
-
- if (file == NULL)
- return ERR_PTR(-EINVAL);
+ struct i915_hw_context *ctx;
if (id == DEFAULT_CONTEXT_ID)
return &file_priv->hang_stats;
- to = i915_gem_context_get(file->driver_priv, id);
- if (to == NULL)
+ ctx = NULL;
+ if (!dev_priv->hw_contexts_disabled)
+ ctx = i915_gem_context_get(file->driver_priv, id);
+ if (ctx == NULL)
return ERR_PTR(-ENOENT);
- return &to->hang_stats;
+ return &ctx->hang_stats;
}
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
@@ -377,7 +370,7 @@ mi_set_context(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, new_context->obj->gtt_offset |
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 582e6a5f3dac..bf945a39fbb1 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -97,7 +97,7 @@ i915_verify_lists(struct drm_device *dev)
}
}
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+ list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) {
if (obj->base.dev != dev ||
!atomic_read(&obj->base.refcount.refcount)) {
DRM_ERROR("freed inactive %p\n", obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index dc53a527126b..f2e185c9038f 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -289,12 +289,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
goto fail_detach;
}
- ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
- if (ret) {
- i915_gem_object_free(obj);
- goto fail_detach;
- }
-
+ drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index c86d5d9356fd..df61f338dea1 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -34,11 +34,13 @@
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
+ struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
+
if (obj->pin_count)
return false;
list_add(&obj->exec_list, unwind);
- return drm_mm_scan_add_block(obj->gtt_space);
+ return drm_mm_scan_add_block(&vma->node);
}
int
@@ -47,7 +49,9 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
bool mappable, bool nonblocking)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct list_head eviction_list, unwind_list;
+ struct i915_vma *vma;
struct drm_i915_gem_object *obj;
int ret = 0;
@@ -78,15 +82,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
INIT_LIST_HEAD(&unwind_list);
if (mappable)
- drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
- min_size, alignment, cache_level,
- 0, dev_priv->gtt.mappable_end);
+ drm_mm_init_scan_with_range(&vm->mm, min_size,
+ alignment, cache_level, 0,
+ dev_priv->gtt.mappable_end);
else
- drm_mm_init_scan(&dev_priv->mm.gtt_space,
- min_size, alignment, cache_level);
+ drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
/* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+ list_for_each_entry(obj, &vm->inactive_list, mm_list) {
if (mark_free(obj, &unwind_list))
goto found;
}
@@ -95,7 +98,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
goto none;
/* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ list_for_each_entry(obj, &vm->active_list, mm_list) {
if (mark_free(obj, &unwind_list))
goto found;
}
@@ -106,8 +109,8 @@ none:
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
-
- ret = drm_mm_scan_remove_block(obj->gtt_space);
+ vma = __i915_gem_obj_to_vma(obj);
+ ret = drm_mm_scan_remove_block(&vma->node);
BUG_ON(ret);
list_del_init(&obj->exec_list);
@@ -127,7 +130,8 @@ found:
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
- if (drm_mm_scan_remove_block(obj->gtt_space)) {
+ vma = __i915_gem_obj_to_vma(obj);
+ if (drm_mm_scan_remove_block(&vma->node)) {
list_move(&obj->exec_list, &eviction_list);
drm_gem_object_reference(&obj->base);
continue;
@@ -154,12 +158,13 @@ int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj, *next;
bool lists_empty;
int ret;
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.active_list));
+ lists_empty = (list_empty(&vm->inactive_list) &&
+ list_empty(&vm->active_list));
if (lists_empty)
return -ENOSPC;
@@ -176,8 +181,7 @@ i915_gem_evict_everything(struct drm_device *dev)
i915_gem_retire_requests(dev);
/* Having flushed everything, unbind() should never raise an error */
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list, mm_list)
+ list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
if (obj->pin_count == 0)
WARN_ON(i915_gem_object_unbind(obj));
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 87a3227e5179..5b6d764e9bb2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -188,7 +188,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return -ENOENT;
target_i915_obj = to_intel_bo(target_obj);
- target_offset = target_i915_obj->gtt_offset;
+ target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
@@ -255,7 +255,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
reloc->delta += target_offset;
if (use_cpu_reloc(obj)) {
- uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+ uint32_t page_offset = offset_in_page(reloc->offset);
char *vaddr;
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
@@ -280,11 +280,11 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
/* Map the page containing the relocation we're going to perform. */
- reloc->offset += obj->gtt_offset;
+ reloc->offset += i915_gem_obj_ggtt_offset(obj);
reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc->offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *)
- (reloc_page + (reloc->offset & ~PAGE_MASK));
+ (reloc_page + offset_in_page(reloc->offset));
iowrite32(reloc->delta, reloc_entry);
io_mapping_unmap_atomic(reloc_page);
}
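The switch to offset_in_page() above is cosmetic, but the split it names is central to the relocation write: the page-aligned part of the offset selects which page to map, the in-page part indexes into the mapping. A quick standalone check (0x12345 is an arbitrary example offset):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Same meaning as the kernel's offset_in_page(). */
#define offset_in_page(p) ((uint32_t)(p) & (PAGE_SIZE - 1))

int main(void)
{
    uint32_t reloc_offset = 0x12345;
    printf("page base: 0x%x\n", reloc_offset & PAGE_MASK);     /* 0x12000 */
    printf("in page:   0x%x\n", offset_in_page(reloc_offset)); /* 0x345 */
    return 0;
}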
@@ -436,8 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
- if (entry->offset != obj->gtt_offset) {
- entry->offset = obj->gtt_offset;
+ if (entry->offset != i915_gem_obj_ggtt_offset(obj)) {
+ entry->offset = i915_gem_obj_ggtt_offset(obj);
*need_reloc = true;
}
@@ -458,7 +458,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
struct drm_i915_gem_exec_object2 *entry;
- if (!obj->gtt_space)
+ if (!i915_gem_obj_ggtt_bound(obj))
return;
entry = obj->exec_entry;
@@ -530,7 +530,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
- if (!obj->gtt_space)
+ if (!i915_gem_obj_ggtt_bound(obj))
continue;
need_fence =
@@ -539,7 +539,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
- if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+ if ((entry->alignment &&
+ i915_gem_obj_ggtt_offset(obj) & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
@@ -550,7 +551,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
- if (obj->gtt_space)
+ if (i915_gem_obj_ggtt_bound(obj))
continue;
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
@@ -758,8 +759,10 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
- if (fault_in_multipages_readable(ptr, length))
- return -EFAULT;
+ if (likely(!i915_prefault_disable)) {
+ if (fault_in_multipages_readable(ptr, length))
+ return -EFAULT;
+ }
}
return 0;
@@ -872,7 +875,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
break;
case I915_EXEC_BSD:
ring = &dev_priv->ring[VCS];
- if (ctx_id != 0) {
+ if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
return -EPERM;
@@ -880,7 +883,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
break;
case I915_EXEC_BLT:
ring = &dev_priv->ring[BCS];
- if (ctx_id != 0) {
+ if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
return -EPERM;
@@ -888,7 +891,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
break;
case I915_EXEC_VEBOX:
ring = &dev_priv->ring[VECS];
- if (ctx_id != 0) {
+ if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
return -EPERM;
@@ -972,7 +975,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto pre_mutex_err;
- if (dev_priv->mm.suspended) {
+ if (dev_priv->ums.mm_suspended) {
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
goto pre_mutex_err;
@@ -1058,7 +1061,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+ exec_start = i915_gem_obj_ggtt_offset(batch_obj) + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5101ab6869b4..3b639a94dddf 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,8 +28,12 @@
#include "i915_trace.h"
#include "intel_drv.h"
+#define GEN6_PPGTT_PD_ENTRIES 512
+#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define GEN6_PDE_VALID (1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
@@ -41,9 +45,17 @@
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
+
+/* Cacheability Control is a 4-bit value. The low three bits are stored in
+ * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
+ */
+#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
+ (((bits) & 0x8) << (11 - 3)))
+#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
+#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
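The comment above describes a split bitfield, and the macro is worth sanity-checking by hand: for a 4-bit value such as 0xb, the low three bits land in PTE bits 3:1 and the fourth bit in PTE bit 11. A standalone check of that packing:

#include <stdio.h>

/* Same packing as HSW_CACHEABILITY_CONTROL() in the hunk above. */
#define CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
                                    (((bits) & 0x8) << (11 - 3)))

int main(void)
{
    /* 0x3 -> 0b011 in bits 3:1           == 0x006 */
    /* 0xb -> 0b011 in bits 3:1 + bit 11  == 0x806 */
    printf("AGE0 LLC:      0x%03x\n", CACHEABILITY_CONTROL(0x3));
    printf("AGE0 eLLC+LLC: 0x%03x\n", CACHEABILITY_CONTROL(0xb));
    return 0;
}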
-static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
- dma_addr_t addr,
+static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
@@ -69,8 +81,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
#define BYT_PTE_WRITEABLE (1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
-static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
- dma_addr_t addr,
+static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
@@ -87,22 +98,33 @@ static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
return pte;
}
-static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev,
- dma_addr_t addr,
+static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
- pte |= GEN6_PTE_ADDR_ENCODE(addr);
+ pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
- pte |= GEN6_PTE_CACHE_LLC;
+ pte |= HSW_WB_LLC_AGE0;
+
+ return pte;
+}
+
+static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level)
+{
+ gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ pte |= HSW_PTE_ADDR_ENCODE(addr);
+
+ if (level != I915_CACHE_NONE)
+ pte |= HSW_WB_ELLC_LLC_AGE0;
return pte;
}
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
+ struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
gen6_gtt_pte_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
@@ -181,18 +203,18 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
}
/* PPGTT support for Sandybridge/Gen6 and later */
-static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_entry,
unsigned num_entries)
{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr, scratch_pte;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
- scratch_pte = ppgtt->pte_encode(ppgtt->dev,
- ppgtt->scratch_page_dma_addr,
- I915_CACHE_LLC);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -212,11 +234,13 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
}
}
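The conversion above leans on the container_of() idiom throughout: the address-space callbacks now receive the embedded i915_address_space and recover the enclosing i915_hw_ppgtt from it. A self-contained illustration of the idiom with stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct address_space { int total; }; /* stand-in for i915_address_space */
struct hw_ppgtt {
    int num_pd_entries;
    struct address_space base;       /* embedded, as in i915_hw_ppgtt */
};

static void clear_range(struct address_space *vm)
{
    /* Recover the enclosing object from the embedded member. */
    struct hw_ppgtt *ppgtt = container_of(vm, struct hw_ppgtt, base);
    printf("pd entries: %d\n", ppgtt->num_pd_entries);
}

int main(void)
{
    struct hw_ppgtt ppgtt = { .num_pd_entries = 512 };
    clear_range(&ppgtt.base);
    return 0;
}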
-static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
+static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
unsigned first_entry,
enum i915_cache_level cache_level)
{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
@@ -227,8 +251,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
dma_addr_t page_addr;
page_addr = sg_page_iter_dma_address(&sg_iter);
- pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr,
- cache_level);
+ pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr);
act_pt++;
@@ -240,13 +263,17 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
kunmap_atomic(pt_vaddr);
}
-static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
int i;
+ drm_mm_takedown(&ppgtt->base.mm);
+
if (ppgtt->pt_dma_addr) {
for (i = 0; i < ppgtt->num_pd_entries; i++)
- pci_unmap_page(ppgtt->dev->pdev,
+ pci_unmap_page(ppgtt->base.dev->pdev,
ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
@@ -260,7 +287,7 @@ static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_device *dev = ppgtt->dev;
+ struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned first_pd_entry_in_global_pt;
int i;
@@ -272,17 +299,18 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
if (IS_HASWELL(dev)) {
- ppgtt->pte_encode = hsw_pte_encode;
+ ppgtt->base.pte_encode = hsw_pte_encode;
} else if (IS_VALLEYVIEW(dev)) {
- ppgtt->pte_encode = byt_pte_encode;
+ ppgtt->base.pte_encode = byt_pte_encode;
} else {
- ppgtt->pte_encode = gen6_pte_encode;
+ ppgtt->base.pte_encode = gen6_pte_encode;
}
- ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+ ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
ppgtt->enable = gen6_ppgtt_enable;
- ppgtt->clear_range = gen6_ppgtt_clear_range;
- ppgtt->insert_entries = gen6_ppgtt_insert_entries;
- ppgtt->cleanup = gen6_ppgtt_cleanup;
+ ppgtt->base.clear_range = gen6_ppgtt_clear_range;
+ ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->base.cleanup = gen6_ppgtt_cleanup;
+ ppgtt->base.scratch = dev_priv->gtt.base.scratch;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_pages)
@@ -313,8 +341,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->pt_dma_addr[i] = pt_addr;
}
- ppgtt->clear_range(ppgtt, 0,
- ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+ ppgtt->base.clear_range(&ppgtt->base, 0,
+ ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);
ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
@@ -347,8 +375,7 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
if (!ppgtt)
return -ENOMEM;
- ppgtt->dev = dev;
- ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
+ ppgtt->base.dev = dev;
if (INTEL_INFO(dev)->gen < 8)
ret = gen6_ppgtt_init(ppgtt);
@@ -357,8 +384,11 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
if (ret)
kfree(ppgtt);
- else
+ else {
dev_priv->mm.aliasing_ppgtt = ppgtt;
+ drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
+ ppgtt->base.total);
+ }
return ret;
}
@@ -371,7 +401,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
if (!ppgtt)
return;
- ppgtt->cleanup(ppgtt);
+ ppgtt->base.cleanup(&ppgtt->base);
dev_priv->mm.aliasing_ppgtt = NULL;
}
@@ -379,17 +409,17 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- ppgtt->insert_entries(ppgtt, obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
- cache_level);
+ ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
+ i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
+ cache_level);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj)
{
- ppgtt->clear_range(ppgtt,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ ppgtt->base.clear_range(&ppgtt->base,
+ i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
}
extern int intel_iommu_gfx_mapped;
@@ -436,8 +466,9 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
- dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
- dev_priv->gtt.total / PAGE_SIZE);
+ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+ dev_priv->gtt.base.start / PAGE_SIZE,
+ dev_priv->gtt.base.total / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
i915_gem_clflush_object(obj);
@@ -466,12 +497,12 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
* within the global GTT as well as accessible by the GPU through the GMADR
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
-static void gen6_ggtt_insert_entries(struct drm_device *dev,
+static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int first_entry,
enum i915_cache_level level)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen6_gtt_pte_t __iomem *gtt_entries =
(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
@@ -480,8 +511,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
- iowrite32(dev_priv->gtt.pte_encode(dev, addr, level),
- &gtt_entries[i]);
+ iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
i++;
}
@@ -492,8 +522,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
- WARN_ON(readl(&gtt_entries[i-1])
- != dev_priv->gtt.pte_encode(dev, addr, level));
+ WARN_ON(readl(&gtt_entries[i-1]) !=
+ vm->pte_encode(addr, level));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -503,11 +533,11 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
-static void gen6_ggtt_clear_range(struct drm_device *dev,
+static void gen6_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -518,16 +548,14 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = dev_priv->gtt.pte_encode(dev,
- dev_priv->gtt.scratch_page_dma,
- I915_CACHE_LLC);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
}
-static void i915_ggtt_insert_entries(struct drm_device *dev,
+static void i915_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level)
@@ -539,7 +567,7 @@ static void i915_ggtt_insert_entries(struct drm_device *dev,
}
-static void i915_ggtt_clear_range(struct drm_device *dev,
+static void i915_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries)
{
@@ -552,10 +580,11 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
- dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
- cache_level);
+ dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
+ entry,
+ cache_level);
obj->has_global_gtt_mapping = 1;
}
@@ -564,10 +593,11 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
- dev_priv->gtt.gtt_clear_range(obj->base.dev,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+ entry,
+ obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
}
@@ -626,37 +656,42 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
BUG_ON(mappable_end > end);
/* Subtract the guard page ... */
- drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+ drm_mm_init(&dev_priv->gtt.base.mm, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
- dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
+ dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
- obj->gtt_offset, obj->base.size);
-
- BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
- obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
- obj->gtt_offset,
- obj->base.size,
- false);
+ struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
+ int ret;
+ DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
+ i915_gem_obj_ggtt_offset(obj), obj->base.size);
+
+ WARN_ON(i915_gem_obj_ggtt_bound(obj));
+ ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node);
+ if (ret)
+ DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
+ list_add(&vma->vma_link, &obj->vma_list);
}
- dev_priv->gtt.start = start;
- dev_priv->gtt.total = end - start;
+ dev_priv->gtt.base.start = start;
+ dev_priv->gtt.base.total = end - start;
/* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
+ drm_mm_for_each_hole(entry, &dev_priv->gtt.base.mm,
hole_start, hole_end) {
+ const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
- (hole_end-hole_start) / PAGE_SIZE);
+ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+ hole_start / PAGE_SIZE,
+ count);
}
/* And finally clear the reserved guard page */
- dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
+ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+ end / PAGE_SIZE - 1, 1);
}
static bool
@@ -679,7 +714,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long gtt_size, mappable_size;
- gtt_size = dev_priv->gtt.total;
+ gtt_size = dev_priv->gtt.base.total;
mappable_size = dev_priv->gtt.mappable_end;
if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
@@ -688,7 +723,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
if (INTEL_INFO(dev)->gen <= 7) {
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */
- gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+ gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
}
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
@@ -698,8 +733,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
return;
DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
- drm_mm_takedown(&dev_priv->mm.gtt_space);
- gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
+ gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
}
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
@@ -724,8 +759,8 @@ static int setup_scratch_page(struct drm_device *dev)
#else
dma_addr = page_to_phys(page);
#endif
- dev_priv->gtt.scratch_page = page;
- dev_priv->gtt.scratch_page_dma = dma_addr;
+ dev_priv->gtt.base.scratch.page = page;
+ dev_priv->gtt.base.scratch.addr = dma_addr;
return 0;
}
@@ -733,11 +768,13 @@ static int setup_scratch_page(struct drm_device *dev)
static void teardown_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- set_pages_wb(dev_priv->gtt.scratch_page, 1);
- pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
+ struct page *page = dev_priv->gtt.base.scratch.page;
+
+ set_pages_wb(page, 1);
+ pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- put_page(dev_priv->gtt.scratch_page);
- __free_page(dev_priv->gtt.scratch_page);
+ put_page(page);
+ __free_page(page);
}
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -800,17 +837,18 @@ static int gen6_gmch_probe(struct drm_device *dev,
if (ret)
DRM_ERROR("Scratch setup failed\n");
- dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
- dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
+ dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
+ dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
return ret;
}
-static void gen6_gmch_remove(struct drm_device *dev)
+static void gen6_gmch_remove(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- iounmap(dev_priv->gtt.gsm);
- teardown_scratch_page(dev_priv->dev);
+
+ struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
+ iounmap(gtt->gsm);
+ teardown_scratch_page(vm->dev);
}
static int i915_gmch_probe(struct drm_device *dev,
@@ -831,13 +869,13 @@ static int i915_gmch_probe(struct drm_device *dev,
intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
- dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
- dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
+ dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
+ dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
return 0;
}
-static void i915_gmch_remove(struct drm_device *dev)
+static void i915_gmch_remove(struct i915_address_space *vm)
{
intel_gmch_remove();
}
@@ -849,34 +887,33 @@ int i915_gem_gtt_init(struct drm_device *dev)
int ret;
if (INTEL_INFO(dev)->gen <= 5) {
- dev_priv->gtt.gtt_probe = i915_gmch_probe;
- dev_priv->gtt.gtt_remove = i915_gmch_remove;
+ gtt->gtt_probe = i915_gmch_probe;
+ gtt->base.cleanup = i915_gmch_remove;
} else {
- dev_priv->gtt.gtt_probe = gen6_gmch_probe;
- dev_priv->gtt.gtt_remove = gen6_gmch_remove;
- if (IS_HASWELL(dev)) {
- dev_priv->gtt.pte_encode = hsw_pte_encode;
- } else if (IS_VALLEYVIEW(dev)) {
- dev_priv->gtt.pte_encode = byt_pte_encode;
- } else {
- dev_priv->gtt.pte_encode = gen6_pte_encode;
- }
+ gtt->gtt_probe = gen6_gmch_probe;
+ gtt->base.cleanup = gen6_gmch_remove;
+ if (IS_HASWELL(dev) && dev_priv->ellc_size)
+ gtt->base.pte_encode = iris_pte_encode;
+ else if (IS_HASWELL(dev))
+ gtt->base.pte_encode = hsw_pte_encode;
+ else if (IS_VALLEYVIEW(dev))
+ gtt->base.pte_encode = byt_pte_encode;
+ else
+ gtt->base.pte_encode = gen6_pte_encode;
}
- ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
- &dev_priv->gtt.stolen_size,
- &gtt->mappable_base,
- &gtt->mappable_end);
+ ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
+ &gtt->mappable_base, &gtt->mappable_end);
if (ret)
return ret;
+ gtt->base.dev = dev;
+
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %zdM\n",
- dev_priv->gtt.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
- dev_priv->gtt.mappable_end >> 20);
- DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
- dev_priv->gtt.stolen_size >> 20);
+ gtt->base.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 982d4732cecf..38afadf5eaf6 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -45,49 +45,48 @@
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct pci_dev *pdev = dev_priv->bridge_dev;
+ struct resource *r;
u32 base;
- /* On the machines I have tested the Graphics Base of Stolen Memory
- * is unreliable, so on those compute the base by subtracting the
- * stolen memory from the Top of Low Usable DRAM which is where the
- * BIOS places the graphics stolen memory.
+ /* Almost universally we can find the Graphics Base of Stolen Memory
+ * at offset 0x5c in the igfx configuration space. On a few (desktop)
+ * machines this is also mirrored in the bridge device at different
+ * locations, or in the MCHBAR. On gen2, the layout is again slightly
+ * different with the Graphics Segment immediately following Top of
+ * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
+ * reported by 865g, so we just use the top of memory as determined
+ * by the e820 probe.
*
- * On gen2, the layout is slightly different with the Graphics Segment
- * immediately following Top of Memory (or Top of Usable DRAM). Note
- * it appears that TOUD is only reported by 865g, so we just use the
- * top of memory as determined by the e820 probe.
- *
- * XXX gen2 requires an unavailable symbol and 945gm fails with
- * its value of TOLUD.
+ * XXX However gen2 requires an unavailable symbol.
*/
base = 0;
- if (IS_VALLEYVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen >= 3) {
+ /* Read Graphics Base of Stolen Memory directly */
pci_read_config_dword(dev->pdev, 0x5c, &base);
base &= ~((1<<20) - 1);
- } else if (INTEL_INFO(dev)->gen >= 6) {
- /* Read Base Data of Stolen Memory Register (BDSM) directly.
- * Note that there is also a MCHBAR miror at 0x1080c0 or
- * we could use device 2:0x5c instead.
- */
- pci_read_config_dword(pdev, 0xB0, &base);
- base &= ~4095; /* lower bits used for locking register */
- } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- /* Read Graphics Base of Stolen Memory directly */
- pci_read_config_dword(pdev, 0xA4, &base);
+ } else { /* GEN2 */
#if 0
- } else if (IS_GEN3(dev)) {
- u8 val;
- /* Stolen is immediately below Top of Low Usable DRAM */
- pci_read_config_byte(pdev, 0x9c, &val);
- base = val >> 3 << 27;
- base -= dev_priv->mm.gtt->stolen_size;
- } else {
/* Stolen is immediately above Top of Memory */
base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
}
+ if (base == 0)
+ return 0;
+
+ /* Verify that nothing else uses this physical address. Stolen
+ * memory should be reserved by the BIOS and hidden from the
+ * kernel. So if the region is already marked as busy, something
+ * is seriously wrong.
+ */
+ r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+ "Graphics Stolen Memory");
+ if (r == NULL) {
+ DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+ base, base + (uint32_t)dev_priv->gtt.stolen_size);
+ base = 0;
+ }
+
return base;
}
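Two details above are easy to gloss over: the config-space dword is masked down to a 1 MiB boundary before use, and a zero base is treated as "no stolen memory". A small check of the masking (0x7f8c2000 is an arbitrary example register value):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t raw  = 0x7f8c2000u;             /* hypothetical dword at offset 0x5c */
    uint32_t base = raw & ~((1u << 20) - 1); /* round down to 1 MiB */
    printf("stolen base = 0x%08x\n", base);  /* 0x7f800000 */
    return 0;
}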
@@ -95,32 +94,37 @@ static int i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+ int ret;
- /* Try to over-allocate to reduce reallocations and fragmentation */
- compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
- size <<= 1, 4096, 0);
+ compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
if (!compressed_fb)
- compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
- size >>= 1, 4096, 0);
- if (compressed_fb)
- compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
- if (!compressed_fb)
- goto err;
+ goto err_llb;
+
+ /* Try to over-allocate to reduce reallocations and fragmentation */
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+ size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+ size >>= 1, 4096,
+ DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ goto err_llb;
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
- compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
- 4096, 4096, 0);
- if (compressed_llb)
- compressed_llb = drm_mm_get_block(compressed_llb,
- 4096, 4096);
+ compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
goto err_fb;
- dev_priv->compressed_llb = compressed_llb;
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
+ 4096, 4096, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ goto err_fb;
+
+ dev_priv->fbc.compressed_llb = compressed_llb;
I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + compressed_fb->start);
@@ -128,8 +132,8 @@ static int i915_setup_compression(struct drm_device *dev, int size)
dev_priv->mm.stolen_base + compressed_llb->start);
}
- dev_priv->compressed_fb = compressed_fb;
- dev_priv->cfb_size = size;
+ dev_priv->fbc.compressed_fb = compressed_fb;
+ dev_priv->fbc.size = size;
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
@@ -137,8 +141,10 @@ static int i915_setup_compression(struct drm_device *dev, int size)
return 0;
err_fb:
- drm_mm_put_block(compressed_fb);
-err:
+ kfree(compressed_llb);
+ drm_mm_remove_node(compressed_fb);
+err_llb:
+ kfree(compressed_fb);
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
@@ -150,7 +156,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
- if (size < dev_priv->cfb_size)
+ if (size < dev_priv->fbc.size)
return 0;
/* Release any current block */
@@ -163,16 +169,20 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->cfb_size == 0)
+ if (dev_priv->fbc.size == 0)
return;
- if (dev_priv->compressed_fb)
- drm_mm_put_block(dev_priv->compressed_fb);
+ if (dev_priv->fbc.compressed_fb) {
+ drm_mm_remove_node(dev_priv->fbc.compressed_fb);
+ kfree(dev_priv->fbc.compressed_fb);
+ }
- if (dev_priv->compressed_llb)
- drm_mm_put_block(dev_priv->compressed_llb);
+ if (dev_priv->fbc.compressed_llb) {
+ drm_mm_remove_node(dev_priv->fbc.compressed_llb);
+ kfree(dev_priv->fbc.compressed_llb);
+ }
- dev_priv->cfb_size = 0;
+ dev_priv->fbc.size = 0;
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
@@ -201,6 +211,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
if (IS_VALLEYVIEW(dev))
bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
+ if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
+ return 0;
+
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
bios_reserved);
@@ -271,9 +284,7 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
if (obj == NULL)
return NULL;
- if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
- goto cleanup;
-
+ drm_gem_private_object_init(dev, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->pages = i915_pages_create_for_stolen(dev,
@@ -302,6 +313,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
+ int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
@@ -310,17 +322,23 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
if (size == 0)
return NULL;
- stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
- if (stolen)
- stolen = drm_mm_get_block(stolen, size, 4096);
- if (stolen == NULL)
+ stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+ if (!stolen)
+ return NULL;
+
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
+ 4096, DRM_MM_SEARCH_DEFAULT);
+ if (ret) {
+ kfree(stolen);
return NULL;
+ }
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj)
return obj;
- drm_mm_put_block(stolen);
+ drm_mm_remove_node(stolen);
+ kfree(stolen);
return NULL;
}
@@ -331,8 +349,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
+ struct i915_vma *vma;
+ int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
@@ -347,56 +368,73 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (WARN_ON(size == 0))
return NULL;
- stolen = drm_mm_create_block(&dev_priv->mm.stolen,
- stolen_offset, size,
- false);
- if (stolen == NULL) {
+ stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+ if (!stolen)
+ return NULL;
+
+ stolen->start = stolen_offset;
+ stolen->size = size;
+ ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+ if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen space\n");
+ kfree(stolen);
return NULL;
}
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj == NULL) {
DRM_DEBUG_KMS("failed to allocate stolen object\n");
- drm_mm_put_block(stolen);
+ drm_mm_remove_node(stolen);
+ kfree(stolen);
return NULL;
}
/* Some objects just need physical mem from stolen space */
- if (gtt_offset == -1)
+ if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
+ vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_out;
+ }
+
/* To simplify the initialisation sequence between KMS and GTT,
* we allow construction of the stolen object prior to
* setting up the GTT space. The actual reservation will occur
* later.
*/
- if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
- obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
- gtt_offset, size,
- false);
- if (obj->gtt_space == NULL) {
+ vma->node.start = gtt_offset;
+ vma->node.size = size;
+ if (drm_mm_initialized(&dev_priv->gtt.base.mm)) {
+ ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node);
+ if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
- drm_gem_object_unreference(&obj->base);
- return NULL;
+ i915_gem_vma_destroy(vma);
+ goto err_out;
}
- } else
- obj->gtt_space = I915_GTT_RESERVED;
+ }
- obj->gtt_offset = gtt_offset;
obj->has_global_gtt_mapping = 1;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
- list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_add_tail(&obj->mm_list, &vm->inactive_list);
return obj;
+
+err_out:
+ drm_mm_remove_node(stolen);
+ kfree(stolen);
+ drm_gem_object_unreference(&obj->base);
+ return NULL;
}
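For BIOS-preallocated ranges the driver no longer searches at all: it fills in the node's start/size and asks the allocator to reserve exactly that range. A sketch under the same assumptions (mm initialized; fixed_offset is a caller-known, e.g. BIOS-provided, offset):

        struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        node->start = fixed_offset;
        node->size = size;
        if (drm_mm_reserve_node(&mm, node)) {   /* fails if the range is taken */
                kfree(node);
                return -ENOSPC;
        }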
void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
if (obj->stolen) {
- drm_mm_put_block(obj->stolen);
+ drm_mm_remove_node(obj->stolen);
+ kfree(obj->stolen);
obj->stolen = NULL;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 537545be69db..92a8d279ca39 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
return true;
if (INTEL_INFO(obj->base.dev)->gen == 3) {
- if (obj->gtt_offset & ~I915_FENCE_START_MASK)
+ if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
return false;
} else {
- if (obj->gtt_offset & ~I830_FENCE_START_MASK)
+ if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
return false;
}
size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
- if (obj->gtt_space->size != size)
+ if (i915_gem_obj_ggtt_size(obj) != size)
return false;
- if (obj->gtt_offset & (size - 1))
+ if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
return false;
return true;
@@ -359,8 +359,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
*/
obj->map_and_fenceable =
- obj->gtt_space == NULL ||
- (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
+ !i915_gem_obj_ggtt_bound(obj) ||
+ (i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
/* Rebind if we need a change of alignment */
@@ -369,7 +369,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
i915_gem_get_gtt_alignment(dev, obj->base.size,
args->tiling_mode,
false);
- if (obj->gtt_offset & (unfenced_alignment - 1))
+ if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
new file mode 100644
index 000000000000..d970d84da65f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -0,0 +1,973 @@
+/*
+ * Copyright (c) 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Keith Packard <keithp@keithp.com>
+ * Mika Kuoppala <mika.kuoppala@intel.com>
+ *
+ */
+
+#include <generated/utsrelease.h>
+#include "i915_drv.h"
+
+static const char *yesno(int v)
+{
+ return v ? "yes" : "no";
+}
+
+static const char *ring_str(int ring)
+{
+ switch (ring) {
+ case RCS: return "render";
+ case VCS: return "bsd";
+ case BCS: return "blt";
+ case VECS: return "vebox";
+ default: return "";
+ }
+}
+
+static const char *pin_flag(int pinned)
+{
+ if (pinned > 0)
+ return " P";
+ else if (pinned < 0)
+ return " p";
+ else
+ return "";
+}
+
+static const char *tiling_flag(int tiling)
+{
+ switch (tiling) {
+ default:
+ case I915_TILING_NONE: return "";
+ case I915_TILING_X: return " X";
+ case I915_TILING_Y: return " Y";
+ }
+}
+
+static const char *dirty_flag(int dirty)
+{
+ return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+ return purgeable ? " purgeable" : "";
+}
+
+static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
+{
+ if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
+ e->err = -ENOSPC;
+ return false;
+ }
+
+ if (e->bytes == e->size - 1 || e->err)
+ return false;
+
+ return true;
+}
+
+static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
+ unsigned len)
+{
+ if (e->pos + len <= e->start) {
+ e->pos += len;
+ return false;
+ }
+
+ /* First vsnprintf needs to fit in its entirety for memmove */
+ if (len >= e->size) {
+ e->err = -EIO;
+ return false;
+ }
+
+ return true;
+}
+
+static void __i915_error_advance(struct drm_i915_error_state_buf *e,
+ unsigned len)
+{
+ /* If this is first printf in this window, adjust it so that
+ * start position matches start of the buffer
+ */
+
+ if (e->pos < e->start) {
+ const size_t off = e->start - e->pos;
+
+ /* Should not happen but be paranoid */
+ if (off > len || e->bytes) {
+ e->err = -EIO;
+ return;
+ }
+
+ memmove(e->buf, e->buf + off, len - off);
+ e->bytes = len - off;
+ e->pos = e->start;
+ return;
+ }
+
+ e->bytes += len;
+ e->pos += len;
+}
+
+static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
+ const char *f, va_list args)
+{
+ unsigned len;
+
+ if (!__i915_error_ok(e))
+ return;
+
+ /* Seek to the first printf that hits the start position */
+ if (e->pos < e->start) {
+ len = vsnprintf(NULL, 0, f, args);
+ if (!__i915_error_seek(e, len))
+ return;
+ }
+
+ len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
+ if (len >= e->size - e->bytes)
+ len = e->size - e->bytes - 1;
+
+ __i915_error_advance(e, len);
+}
+
+static void i915_error_puts(struct drm_i915_error_state_buf *e,
+ const char *str)
+{
+ unsigned len;
+
+ if (!__i915_error_ok(e))
+ return;
+
+ len = strlen(str);
+
+ /* Seek to the first printf that hits the start position */
+ if (e->pos < e->start) {
+ if (!__i915_error_seek(e, len))
+ return;
+ }
+
+ if (len >= e->size - e->bytes)
+ len = e->size - e->bytes - 1;
+ memcpy(e->buf + e->bytes, str, len);
+
+ __i915_error_advance(e, len);
+}
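A worked example of the windowing, assuming e->start = 10 and two 8-byte strings: the first string spans positions 0..7, so it ends before the start and __i915_error_seek() skips it entirely (e->pos becomes 8, nothing is copied); the second spans 8..15 and straddles the start, so it is formatted into the buffer and __i915_error_advance() memmoves off the first e->start - e->pos = 2 bytes, leaving e->bytes = 6 valid bytes with e->pos = e->start = 10. Every later string is simply appended, advancing both counters.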
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+#define err_puts(e, s) i915_error_puts(e, s)
+
+static void print_error_buffers(struct drm_i915_error_state_buf *m,
+ const char *name,
+ struct drm_i915_error_buffer *err,
+ int count)
+{
+ err_printf(m, "%s [%d]:\n", name, count);
+
+ while (count--) {
+ err_printf(m, " %08x %8u %02x %02x %x %x",
+ err->gtt_offset,
+ err->size,
+ err->read_domains,
+ err->write_domain,
+ err->rseqno, err->wseqno);
+ err_puts(m, pin_flag(err->pinned));
+ err_puts(m, tiling_flag(err->tiling));
+ err_puts(m, dirty_flag(err->dirty));
+ err_puts(m, purgeable_flag(err->purgeable));
+ err_puts(m, err->ring != -1 ? " " : "");
+ err_puts(m, ring_str(err->ring));
+ err_puts(m, i915_cache_level_str(err->cache_level));
+
+ if (err->name)
+ err_printf(m, " (name: %d)", err->name);
+ if (err->fence_reg != I915_FENCE_REG_NONE)
+ err_printf(m, " (fence: %d)", err->fence_reg);
+
+ err_puts(m, "\n");
+ err++;
+ }
+}
+
+static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
+ struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ unsigned ring)
+{
+ BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
+ err_printf(m, "%s command stream:\n", ring_str(ring));
+ err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
+ err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
+ err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
+ err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
+ err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
+ err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
+ err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
+ if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
+ err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
+ err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
+ err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
+ err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+ err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][0],
+ error->semaphore_seqno[ring][0]);
+ err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][1],
+ error->semaphore_seqno[ring][1]);
+ }
+ err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
+ err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
+ err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
+ err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+}
+
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
+{
+ va_list args;
+
+ va_start(args, f);
+ i915_error_vprintf(e, f, args);
+ va_end(args);
+}
+
+int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
+ const struct i915_error_state_file_priv *error_priv)
+{
+ struct drm_device *dev = error_priv->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error = error_priv->error;
+ struct intel_ring_buffer *ring;
+ int i, j, page, offset, elt;
+
+ if (!error) {
+ err_printf(m, "no error state collected\n");
+ goto out;
+ }
+
+ err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
+ error->time.tv_usec);
+ err_printf(m, "Kernel: " UTS_RELEASE "\n");
+ err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+ err_printf(m, "EIR: 0x%08x\n", error->eir);
+ err_printf(m, "IER: 0x%08x\n", error->ier);
+ err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+ err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
+ err_printf(m, "CCID: 0x%08x\n", error->ccid);
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+
+ for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
+ err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
+ error->extra_instdone[i]);
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ err_printf(m, "ERROR: 0x%08x\n", error->error);
+ err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
+ }
+
+ if (INTEL_INFO(dev)->gen == 7)
+ err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+
+ for_each_ring(ring, dev_priv, i)
+ i915_ring_error_state(m, dev, error, i);
+
+ if (error->active_bo)
+ print_error_buffers(m, "Active",
+ error->active_bo,
+ error->active_bo_count);
+
+ if (error->pinned_bo)
+ print_error_buffers(m, "Pinned",
+ error->pinned_bo,
+ error->pinned_bo_count);
+
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ struct drm_i915_error_object *obj;
+
+ if ((obj = error->ring[i].batchbuffer)) {
+ err_printf(m, "%s --- gtt_offset = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ err_printf(m, "%08x : %08x\n", offset,
+ obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+ }
+
+ if (error->ring[i].num_requests) {
+ err_printf(m, "%s --- %d requests\n",
+ dev_priv->ring[i].name,
+ error->ring[i].num_requests);
+ for (j = 0; j < error->ring[i].num_requests; j++) {
+ err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
+ error->ring[i].requests[j].seqno,
+ error->ring[i].requests[j].jiffies,
+ error->ring[i].requests[j].tail);
+ }
+ }
+
+ if ((obj = error->ring[i].ringbuffer)) {
+ err_printf(m, "%s --- ringbuffer = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ err_printf(m, "%08x : %08x\n",
+ offset,
+ obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+ }
+
+ obj = error->ring[i].ctx;
+ if (obj) {
+ err_printf(m, "%s --- HW Context = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+ err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+ offset,
+ obj->pages[0][elt],
+ obj->pages[0][elt+1],
+ obj->pages[0][elt+2],
+ obj->pages[0][elt+3]);
+ offset += 16;
+ }
+ }
+ }
+
+ if (error->overlay)
+ intel_overlay_print_error_state(m, error->overlay);
+
+ if (error->display)
+ intel_display_print_error_state(m, dev, error->display);
+
+out:
+ if (m->bytes == 0 && m->err)
+ return m->err;
+
+ return 0;
+}
+
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
+ size_t count, loff_t pos)
+{
+ memset(ebuf, 0, sizeof(*ebuf));
+
+ /* We need to have enough room to store any i915_error_state printf
+ * so that we can move it to the start position.
+ */
+ ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
+ ebuf->buf = kmalloc(ebuf->size,
+ GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
+
+ if (ebuf->buf == NULL) {
+ ebuf->size = PAGE_SIZE;
+ ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
+ }
+
+ if (ebuf->buf == NULL) {
+ ebuf->size = 128;
+ ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
+ }
+
+ if (ebuf->buf == NULL)
+ return -ENOMEM;
+
+ ebuf->start = pos;
+
+ return 0;
+}
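A hypothetical consumer-side setup for a 4 KiB read at file offset 0 (everything beyond the init helper itself is illustrative):

        struct drm_i915_error_state_buf ebuf;
        int ret;

        ret = i915_error_state_buf_init(&ebuf, 4096, 0);        /* count, pos */
        if (ret)
                return ret;

        /* ... fill via i915_error_state_to_str(), copy the ebuf.bytes
         * valid bytes out of ebuf.buf, then kfree(ebuf.buf). */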
+
+static void i915_error_object_free(struct drm_i915_error_object *obj)
+{
+ int page;
+
+ if (obj == NULL)
+ return;
+
+ for (page = 0; page < obj->page_count; page++)
+ kfree(obj->pages[page]);
+
+ kfree(obj);
+}
+
+static void i915_error_state_free(struct kref *error_ref)
+{
+ struct drm_i915_error_state *error = container_of(error_ref,
+ typeof(*error), ref);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ i915_error_object_free(error->ring[i].batchbuffer);
+ i915_error_object_free(error->ring[i].ringbuffer);
+ i915_error_object_free(error->ring[i].ctx);
+ kfree(error->ring[i].requests);
+ }
+
+ kfree(error->active_bo);
+ kfree(error->overlay);
+ kfree(error->display);
+ kfree(error);
+}
+
+static struct drm_i915_error_object *
+i915_error_object_create_sized(struct drm_i915_private *dev_priv,
+ struct drm_i915_gem_object *src,
+ const int num_pages)
+{
+ struct drm_i915_error_object *dst;
+ int i;
+ u32 reloc_offset;
+
+ if (src == NULL || src->pages == NULL)
+ return NULL;
+
+ dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
+ if (dst == NULL)
+ return NULL;
+
+ reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
+ for (i = 0; i < num_pages; i++) {
+ unsigned long flags;
+ void *d;
+
+ d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (d == NULL)
+ goto unwind;
+
+ local_irq_save(flags);
+ if (reloc_offset < dev_priv->gtt.mappable_end &&
+ src->has_global_gtt_mapping) {
+ void __iomem *s;
+
+ /* Simply ignore tiling or any overlapping fence.
+ * It's part of the error state, and this hopefully
+ * captures what the GPU read.
+ */
+
+ s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ reloc_offset);
+ memcpy_fromio(d, s, PAGE_SIZE);
+ io_mapping_unmap_atomic(s);
+ } else if (src->stolen) {
+ unsigned long offset;
+
+ offset = dev_priv->mm.stolen_base;
+ offset += src->stolen->start;
+ offset += i << PAGE_SHIFT;
+
+ memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
+ } else {
+ struct page *page;
+ void *s;
+
+ page = i915_gem_object_get_page(src, i);
+
+ drm_clflush_pages(&page, 1);
+
+ s = kmap_atomic(page);
+ memcpy(d, s, PAGE_SIZE);
+ kunmap_atomic(s);
+
+ drm_clflush_pages(&page, 1);
+ }
+ local_irq_restore(flags);
+
+ dst->pages[i] = d;
+
+ reloc_offset += PAGE_SIZE;
+ }
+ dst->page_count = num_pages;
+
+ return dst;
+
+unwind:
+ while (i--)
+ kfree(dst->pages[i]);
+ kfree(dst);
+ return NULL;
+}
+#define i915_error_object_create(dev_priv, src) \
+ i915_error_object_create_sized((dev_priv), (src), \
+ (src)->base.size>>PAGE_SHIFT)
+
+static void capture_bo(struct drm_i915_error_buffer *err,
+ struct drm_i915_gem_object *obj)
+{
+ err->size = obj->base.size;
+ err->name = obj->base.name;
+ err->rseqno = obj->last_read_seqno;
+ err->wseqno = obj->last_write_seqno;
+ err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
+ err->read_domains = obj->base.read_domains;
+ err->write_domain = obj->base.write_domain;
+ err->fence_reg = obj->fence_reg;
+ err->pinned = 0;
+ if (obj->pin_count > 0)
+ err->pinned = 1;
+ if (obj->user_pin_count > 0)
+ err->pinned = -1;
+ err->tiling = obj->tiling_mode;
+ err->dirty = obj->dirty;
+ err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->ring = obj->ring ? obj->ring->id : -1;
+ err->cache_level = obj->cache_level;
+}
+
+static u32 capture_active_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, mm_list) {
+ capture_bo(err++, obj);
+ if (++i == count)
+ break;
+ }
+
+ return i;
+}
+
+static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, global_list) {
+ if (obj->pin_count == 0)
+ continue;
+
+ capture_bo(err++, obj);
+ if (++i == count)
+ break;
+ }
+
+ return i;
+}
+
+static void i915_gem_record_fences(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ case 2:
+ for (i = 0; i < 8; i++)
+ error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+static struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
+ struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct drm_i915_gem_object *obj;
+ u32 seqno;
+
+ if (!ring->get_seqno)
+ return NULL;
+
+ if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+ u32 acthd = I915_READ(ACTHD);
+
+ if (WARN_ON(ring->id != RCS))
+ return NULL;
+
+ obj = ring->private;
+ if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+ acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
+ return i915_error_object_create(dev_priv, obj);
+ }
+
+ seqno = ring->get_seqno(ring, false);
+ list_for_each_entry(obj, &vm->active_list, mm_list) {
+ if (obj->ring != ring)
+ continue;
+
+ if (i915_seqno_passed(seqno, obj->last_read_seqno))
+ continue;
+
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+ continue;
+
+ /* We need to copy these to an anonymous buffer as the simplest
+ * method to avoid being overwritten by userspace.
+ */
+ return i915_error_object_create(dev_priv, obj);
+ }
+
+ return NULL;
+}
+
+static void i915_record_ring_state(struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
+ error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
+ error->semaphore_mboxes[ring->id][0]
+ = I915_READ(RING_SYNC_0(ring->mmio_base));
+ error->semaphore_mboxes[ring->id][1]
+ = I915_READ(RING_SYNC_1(ring->mmio_base));
+ error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+ error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
+ error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
+ error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
+ error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
+ error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
+ if (ring->id == RCS)
+ error->bbaddr = I915_READ64(BB_ADDR);
+ } else {
+ error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
+ error->ipeir[ring->id] = I915_READ(IPEIR);
+ error->ipehr[ring->id] = I915_READ(IPEHR);
+ error->instdone[ring->id] = I915_READ(INSTDONE);
+ }
+
+ error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
+ error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
+ error->seqno[ring->id] = ring->get_seqno(ring, false);
+ error->acthd[ring->id] = intel_ring_get_active_head(ring);
+ error->head[ring->id] = I915_READ_HEAD(ring);
+ error->tail[ring->id] = I915_READ_TAIL(ring);
+ error->ctl[ring->id] = I915_READ_CTL(ring);
+
+ error->cpu_ring_head[ring->id] = ring->head;
+ error->cpu_ring_tail[ring->id] = ring->tail;
+}
+
+static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
+ struct drm_i915_error_state *error,
+ struct drm_i915_error_ring *ering)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
+ /* Currently render ring is the only HW context user */
+ if (ring->id != RCS || !error->ccid)
+ return;
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
+ ering->ctx = i915_error_object_create_sized(dev_priv,
+ obj, 1);
+ break;
+ }
+ }
+}
+
+static void i915_gem_record_rings(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ struct drm_i915_gem_request *request;
+ int i, count;
+
+ for_each_ring(ring, dev_priv, i) {
+ i915_record_ring_state(dev, error, ring);
+
+ error->ring[i].batchbuffer =
+ i915_error_first_batchbuffer(dev_priv, ring);
+
+ error->ring[i].ringbuffer =
+ i915_error_object_create(dev_priv, ring->obj);
+
+ i915_gem_record_active_context(ring, error, &error->ring[i]);
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list)
+ count++;
+
+ error->ring[i].num_requests = count;
+ error->ring[i].requests =
+ kmalloc(count*sizeof(struct drm_i915_error_request),
+ GFP_ATOMIC);
+ if (error->ring[i].requests == NULL) {
+ error->ring[i].num_requests = 0;
+ continue;
+ }
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list) {
+ struct drm_i915_error_request *erq;
+
+ erq = &error->ring[i].requests[count++];
+ erq->seqno = request->seqno;
+ erq->jiffies = request->emitted_jiffies;
+ erq->tail = request->tail;
+ }
+ }
+}
+
+static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct drm_i915_gem_object *obj;
+ int i;
+
+ i = 0;
+ list_for_each_entry(obj, &vm->active_list, mm_list)
+ i++;
+ error->active_bo_count = i;
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+ if (obj->pin_count)
+ i++;
+ error->pinned_bo_count = i - error->active_bo_count;
+
+ if (i) {
+ error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
+ GFP_ATOMIC);
+ if (error->active_bo)
+ error->pinned_bo =
+ error->active_bo + error->active_bo_count;
+ }
+
+ if (error->active_bo)
+ error->active_bo_count =
+ capture_active_bo(error->active_bo,
+ error->active_bo_count,
+ &vm->active_list);
+
+ if (error->pinned_bo)
+ error->pinned_bo_count =
+ capture_pinned_bo(error->pinned_bo,
+ error->pinned_bo_count,
+ &dev_priv->mm.bound_list);
+}
+
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @dev: drm device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error. Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
+void i915_capture_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+ unsigned long flags;
+ int pipe;
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error = dev_priv->gpu_error.first_error;
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+ if (error)
+ return;
+
+ /* Account for pipe specific data like PIPE*STAT */
+ error = kzalloc(sizeof(*error), GFP_ATOMIC);
+ if (!error) {
+ DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+ return;
+ }
+
+ DRM_INFO("capturing error event; look for more information in "
+ "/sys/class/drm/card%d/error\n", dev->primary->index);
+
+ kref_init(&error->ref);
+ error->eir = I915_READ(EIR);
+ error->pgtbl_er = I915_READ(PGTBL_ER);
+ if (HAS_HW_CONTEXTS(dev))
+ error->ccid = I915_READ(CCID);
+
+ if (HAS_PCH_SPLIT(dev))
+ error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else if (IS_VALLEYVIEW(dev))
+ error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+ else if (IS_GEN2(dev))
+ error->ier = I915_READ16(IER);
+ else
+ error->ier = I915_READ(IER);
+
+ if (INTEL_INFO(dev)->gen >= 6)
+ error->derrmr = I915_READ(DERRMR);
+
+ if (IS_VALLEYVIEW(dev))
+ error->forcewake = I915_READ(FORCEWAKE_VLV);
+ else if (INTEL_INFO(dev)->gen >= 7)
+ error->forcewake = I915_READ(FORCEWAKE_MT);
+ else if (INTEL_INFO(dev)->gen == 6)
+ error->forcewake = I915_READ(FORCEWAKE);
+
+ if (!HAS_PCH_SPLIT(dev))
+ for_each_pipe(pipe)
+ error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->error = I915_READ(ERROR_GEN6);
+ error->done_reg = I915_READ(DONE_REG);
+ }
+
+ if (INTEL_INFO(dev)->gen == 7)
+ error->err_int = I915_READ(GEN7_ERR_INT);
+
+ i915_get_extra_instdone(dev, error->extra_instdone);
+
+ i915_gem_capture_buffers(dev_priv, error);
+ i915_gem_record_fences(dev, error);
+ i915_gem_record_rings(dev, error);
+
+ do_gettimeofday(&error->time);
+
+ error->overlay = intel_overlay_capture_error_state(dev);
+ error->display = intel_display_capture_error_state(dev);
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ if (dev_priv->gpu_error.first_error == NULL) {
+ dev_priv->gpu_error.first_error = error;
+ error = NULL;
+ }
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+ if (error)
+ i915_error_state_free(&error->ref);
+}
+
+void i915_error_state_get(struct drm_device *dev,
+ struct i915_error_state_file_priv *error_priv)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error_priv->error = dev_priv->gpu_error.first_error;
+ if (error_priv->error)
+ kref_get(&error_priv->error->ref);
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+}
+
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
+{
+ if (error_priv->error)
+ kref_put(&error_priv->error->ref, i915_error_state_free);
+}
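Taken together, get/to_str/put give a reader a stable snapshot without holding the spinlock across formatting. A sketch of the expected reader sequence, assuming dev and an ebuf initialized as above:

        struct i915_error_state_file_priv error_priv = { .dev = dev };
        int ret;

        i915_error_state_get(dev, &error_priv); /* takes a ref if a capture exists */
        ret = i915_error_state_to_str(&ebuf, &error_priv);
        i915_error_state_put(&error_priv);      /* drops the ref; may free the capture */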
+
+void i915_destroy_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error = dev_priv->gpu_error.first_error;
+ dev_priv->gpu_error.first_error = NULL;
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+ if (error)
+ kref_put(&error->ref, i915_error_state_free);
+}
+
+const char *i915_cache_level_str(int type)
+{
+ switch (type) {
+ case I915_CACHE_NONE: return " uncached";
+ case I915_CACHE_LLC: return " snooped (LLC)";
+ case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
+ default: return "";
+ }
+}
+
+/* NB: note the memset; instdone slots not read for this gen stay zeroed */
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ case 3:
+ instdone[0] = I915_READ(INSTDONE);
+ break;
+ case 4:
+ case 5:
+ case 6:
+ instdone[0] = I915_READ(INSTDONE_I965);
+ instdone[1] = I915_READ(INSTDONE1);
+ break;
+ default:
+ WARN_ONCE(1, "Unsupported platform\n");
+ case 7:
+ instdone[0] = I915_READ(GEN7_INSTDONE_1);
+ instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+ instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+ instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+ break;
+ }
+}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3d92a7cef154..6a1c207a296b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -128,6 +128,8 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *crtc;
+ assert_spin_locked(&dev_priv->irq_lock);
+
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
@@ -152,38 +154,66 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
}
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
- bool enable)
+ enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
-
if (enable) {
+ I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+
if (!ivb_can_enable_err_int(dev))
return;
- I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
- ERR_INT_FIFO_UNDERRUN_B |
- ERR_INT_FIFO_UNDERRUN_C);
-
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
} else {
+ bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
+
+ /* Change the state _after_ we've read out the current one. */
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
+ if (!was_enabled &&
+ (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
+ DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
+ pipe_name(pipe));
+ }
}
}
-static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
+/**
+ * ibx_display_interrupt_update - update SDEIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
+{
+ uint32_t sdeimr = I915_READ(SDEIMR);
+ sdeimr &= ~interrupt_mask;
+ sdeimr |= (~enabled_irq_mask & interrupt_mask);
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ I915_WRITE(SDEIMR, sdeimr);
+ POSTING_READ(SDEIMR);
+}
+#define ibx_enable_display_interrupt(dev_priv, bits) \
+ ibx_display_interrupt_update((dev_priv), (bits), (bits))
+#define ibx_disable_display_interrupt(dev_priv, bits) \
+ ibx_display_interrupt_update((dev_priv), (bits), 0)
+
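The update is two mask operations: first clear every bit under update, then re-set the subset that must stay masked. Worked through with small hypothetical values:

        uint32_t sdeimr = 0xff;                 /* everything masked */
        uint32_t interrupt_mask = 0x0c;         /* update bits 2 and 3 */
        uint32_t enabled_irq_mask = 0x04;       /* enable bit 2 only */

        sdeimr &= ~interrupt_mask;                      /* 0xf3: both bits unmasked */
        sdeimr |= ~enabled_irq_mask & interrupt_mask;   /* 0xfb: re-mask bit 3 */
        /* result: bit 2 enabled (IMR clear), bit 3 still disabled (IMR set) */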
+static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
bool enable)
{
- struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
- SDE_TRANSB_FIFO_UNDER;
+ uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
+ SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
if (enable)
- I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
+ ibx_enable_display_interrupt(dev_priv, bit);
else
- I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);
-
- POSTING_READ(SDEIMR);
+ ibx_disable_display_interrupt(dev_priv, bit);
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -193,19 +223,26 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
if (enable) {
+ I915_WRITE(SERR_INT,
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+
if (!cpt_can_enable_serr_int(dev))
return;
- I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
- SERR_INT_TRANS_B_FIFO_UNDERRUN |
- SERR_INT_TRANS_C_FIFO_UNDERRUN);
-
- I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
+ ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
} else {
- I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
- }
+ uint32_t tmp = I915_READ(SERR_INT);
+ bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
- POSTING_READ(SDEIMR);
+ /* Change the state _after_ we've read out the current one. */
+ ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+
+ if (!was_enabled &&
+ (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
+ DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
+ transcoder_name(pch_transcoder));
+ }
+ }
}
/**
@@ -243,7 +280,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
if (IS_GEN5(dev) || IS_GEN6(dev))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev))
- ivybridge_set_fifo_underrun_reporting(dev, enable);
+ ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
done:
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -269,29 +306,19 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe p;
- struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool ret;
- if (HAS_PCH_LPT(dev)) {
- crtc = NULL;
- for_each_pipe(p) {
- struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
- if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
- crtc = c;
- break;
- }
- }
- if (!crtc) {
- DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
- return false;
- }
- } else {
- crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
- }
- intel_crtc = to_intel_crtc(crtc);
+ /*
+ * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
+ * has only one pch transcoder A that all pipes can use. To avoid racy
+ * pch transcoder -> pipe lookups from interrupt code, simply store the
+ * underrun statistics in crtc A. Since we never expose this anywhere
+ * nor use it outside of the fifo underrun code here, using the "wrong"
+ * crtc on LPT won't cause issues.
+ */
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -303,7 +330,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
intel_crtc->pch_fifo_underrun_disabled = !enable;
if (HAS_PCH_IBX(dev))
- ibx_set_fifo_underrun_reporting(intel_crtc, enable);
+ ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
else
cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
@@ -319,6 +346,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
+ assert_spin_locked(&dev_priv->irq_lock);
+
if ((pipestat & mask) == mask)
return;
@@ -334,6 +363,8 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
+ assert_spin_locked(&dev_priv->irq_lock);
+
if ((pipestat & mask) == 0)
return;
@@ -625,14 +656,13 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_kms_helper_hotplug_event(dev);
}
-static void ironlake_handle_rps_change(struct drm_device *dev)
+static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
u8 new_delay;
- unsigned long flags;
- spin_lock_irqsave(&mchdev_lock, flags);
+ spin_lock(&mchdev_lock);
I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
@@ -660,7 +690,7 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
if (ironlake_set_drps(dev, new_delay))
dev_priv->ips.cur_delay = new_delay;
- spin_unlock_irqrestore(&mchdev_lock, flags);
+ spin_unlock(&mchdev_lock);
return;
}
@@ -668,18 +698,13 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (ring->obj == NULL)
return;
trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
wake_up_all(&ring->irq_queue);
- if (i915_enable_hangcheck) {
- mod_timer(&dev_priv->gpu_error.hangcheck_timer,
- round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
- }
+ i915_queue_hangcheck(dev);
}
static void gen6_pm_rps_work(struct work_struct *work)
@@ -689,13 +714,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
u32 pm_iir, pm_imr;
u8 new_delay;
- spin_lock_irq(&dev_priv->rps.lock);
+ spin_lock_irq(&dev_priv->irq_lock);
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
pm_imr = I915_READ(GEN6_PMIMR);
/* Make sure not to corrupt PMIMR state used by ringbuffer code */
I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
- spin_unlock_irq(&dev_priv->rps.lock);
+ spin_unlock_irq(&dev_priv->irq_lock);
if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
return;
@@ -787,7 +812,7 @@ static void ivybridge_parity_work(struct work_struct *work)
mutex_unlock(&dev_priv->dev->struct_mutex);
- parity_event[0] = "L3_PARITY_ERROR=1";
+ parity_event[0] = I915_L3_PARITY_UEVENT "=1";
parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
@@ -804,22 +829,32 @@ static void ivybridge_parity_work(struct work_struct *work)
kfree(parity_event[1]);
}
-static void ivybridge_handle_parity_error(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long flags;
if (!HAS_L3_GPU_CACHE(dev))
return;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ spin_lock(&dev_priv->irq_lock);
dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ spin_unlock(&dev_priv->irq_lock);
queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
+static void ilk_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 gt_iir)
+{
+ if (gt_iir &
+ (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (gt_iir & ILK_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+}
+
static void snb_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 gt_iir)
@@ -841,15 +876,13 @@ static void snb_gt_irq_handler(struct drm_device *dev,
}
if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
- ivybridge_handle_parity_error(dev);
+ ivybridge_parity_error_irq_handler(dev);
}
/* Legacy way of handling PM interrupts */
-static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
- u32 pm_iir)
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
+ u32 pm_iir)
{
- unsigned long flags;
-
/*
* IIR bits should never already be set because IMR should
* prevent an interrupt from being shown in IIR. The warning
@@ -860,11 +893,11 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
* The mask bit in IMR is cleared by dev_priv->rps.work.
*/
- spin_lock_irqsave(&dev_priv->rps.lock, flags);
+ spin_lock(&dev_priv->irq_lock);
dev_priv->rps.pm_iir |= pm_iir;
I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
POSTING_READ(GEN6_PMIMR);
- spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+ spin_unlock(&dev_priv->irq_lock);
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
@@ -886,6 +919,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
+ WARN(((hpd[i] & hotplug_trigger) &&
+ dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
+ "Received HPD interrupt although disabled\n");
+
if (!(hpd[i] & hotplug_trigger) ||
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
continue;
@@ -896,6 +933,7 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
+ msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
dev_priv->hpd_stats[i].hpd_cnt = 0;
+ DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
dev_priv->hpd_event_bits &= ~(1 << i);
@@ -903,6 +941,8 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
storm_detected = true;
} else {
dev_priv->hpd_stats[i].hpd_cnt++;
+ DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
+ dev_priv->hpd_stats[i].hpd_cnt);
}
}
@@ -928,7 +968,7 @@ static void dp_aux_irq_handler(struct drm_device *dev)
wake_up_all(&dev_priv->gmbus_wait_queue);
}
-/* Unlike gen6_queue_rps_work() from which this function is originally derived,
+/* Unlike gen6_rps_irq_handler() from which this function is originally derived,
* we must be able to deal with other PM interrupts. This is complicated because
* of the way in which we use the masks to defer the RPS work (which for
* posterity is necessary because of forcewake).
@@ -936,27 +976,23 @@ static void dp_aux_irq_handler(struct drm_device *dev)
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
u32 pm_iir)
{
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->rps.lock, flags);
- dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
- if (dev_priv->rps.pm_iir) {
+ if (pm_iir & GEN6_PM_RPS_EVENTS) {
+ spin_lock(&dev_priv->irq_lock);
+ dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
/* never want to mask useful interrupts. (also posting read) */
WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
- /* TODO: if queue_work is slow, move it out of the spinlock */
+ spin_unlock(&dev_priv->irq_lock);
+
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
- spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
- if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
- if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
+ if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+ notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
- if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
- DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
- i915_handle_error(dev_priv->dev, false);
- }
+ if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
+ DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
+ i915_handle_error(dev_priv->dev, false);
}
}
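Both gen6_rps_irq_handler() and hsw_pm_irq_handler() defer the real work the same way: latch the fired IIR bits under irq_lock, mask them in PMIMR so they cannot refire, and let the scheduled worker unmask and process them. A generic sketch of that defer-to-work shape (mask_events(), unmask_events() and process() are hypothetical stand-ins, not driver functions):

        /* hard-irq side: interrupts already off, plain spin_lock */
        spin_lock(&state->lock);
        state->pending |= iir_bits;     /* latch what fired */
        mask_events(state->pending);    /* hypothetical: stop refires */
        spin_unlock(&state->lock);
        queue_work(wq, &state->work);

        /* worker side */
        spin_lock_irq(&state->lock);
        bits = state->pending;
        state->pending = 0;
        unmask_events(bits);            /* hypothetical */
        spin_unlock_irq(&state->lock);
        process(bits);                  /* hypothetical */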
@@ -1029,7 +1065,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
gmbus_irq_handler(dev);
if (pm_iir & GEN6_PM_RPS_EVENTS)
- gen6_queue_rps_work(dev_priv, pm_iir);
+ gen6_rps_irq_handler(dev_priv, pm_iir);
I915_WRITE(GTIIR, gt_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -1179,27 +1215,111 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
cpt_serr_int_handler(dev);
}
-static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
+static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (de_iir & DE_AUX_CHANNEL_A)
+ dp_aux_irq_handler(dev);
+
+ if (de_iir & DE_GSE)
+ intel_opregion_asle_intr(dev);
+
+ if (de_iir & DE_PIPEA_VBLANK)
+ drm_handle_vblank(dev, 0);
+
+ if (de_iir & DE_PIPEB_VBLANK)
+ drm_handle_vblank(dev, 1);
+
+ if (de_iir & DE_POISON)
+ DRM_ERROR("Poison interrupt\n");
+
+ if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
+ if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
+ DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+
+ if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
+ if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
+ DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+
+ if (de_iir & DE_PLANEA_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip_plane(dev, 0);
+ }
+
+ if (de_iir & DE_PLANEB_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 1);
+ intel_finish_page_flip_plane(dev, 1);
+ }
+
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT) {
+ u32 pch_iir = I915_READ(SDEIIR);
+
+ if (HAS_PCH_CPT(dev))
+ cpt_irq_handler(dev, pch_iir);
+ else
+ ibx_irq_handler(dev, pch_iir);
+
+ /* should clear the PCH hotplug event before clearing the CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ }
+
+ if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
+ ironlake_rps_change_irq_handler(dev);
+}
+
+static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ if (de_iir & DE_ERR_INT_IVB)
+ ivb_err_int_handler(dev);
+
+ if (de_iir & DE_AUX_CHANNEL_A_IVB)
+ dp_aux_irq_handler(dev);
+
+ if (de_iir & DE_GSE_IVB)
+ intel_opregion_asle_intr(dev);
+
+ for (i = 0; i < 3; i++) {
+ if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+ drm_handle_vblank(dev, i);
+ if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+ intel_prepare_page_flip(dev, i);
+ intel_finish_page_flip_plane(dev, i);
+ }
+ }
+
+ /* check event from PCH */
+ if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
+ u32 pch_iir = I915_READ(SDEIIR);
+
+ cpt_irq_handler(dev, pch_iir);
+
+ /* clear the PCH hotplug event before clearing the CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ }
+}
+
+static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
+ u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
- int i;
atomic_inc(&dev_priv->irq_received);
/* We get interrupts on unclaimed registers, so check for this before we
* do any I915_{READ,WRITE}. */
- if (IS_HASWELL(dev) &&
- (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
- DRM_ERROR("Unclaimed register before interrupt\n");
- I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- }
+ intel_uncore_check_errors(dev);
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+ POSTING_READ(DEIER);
/* Disable south interrupts. We'll only write to SDEIIR once, so further
* interrupts will be stored on its back queue, and then we'll be
@@ -1223,53 +1343,34 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
- snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ if (INTEL_INFO(dev)->gen >= 6)
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ else
+ ilk_gt_irq_handler(dev, dev_priv, gt_iir);
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
}
de_iir = I915_READ(DEIIR);
if (de_iir) {
- if (de_iir & DE_ERR_INT_IVB)
- ivb_err_int_handler(dev);
-
- if (de_iir & DE_AUX_CHANNEL_A_IVB)
- dp_aux_irq_handler(dev);
-
- if (de_iir & DE_GSE_IVB)
- intel_opregion_asle_intr(dev);
-
- for (i = 0; i < 3; i++) {
- if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
- drm_handle_vblank(dev, i);
- if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
- intel_prepare_page_flip(dev, i);
- intel_finish_page_flip_plane(dev, i);
- }
- }
-
- /* check event from PCH */
- if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
- u32 pch_iir = I915_READ(SDEIIR);
-
- cpt_irq_handler(dev, pch_iir);
-
- /* clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
- }
-
+ if (INTEL_INFO(dev)->gen >= 7)
+ ivb_display_irq_handler(dev, de_iir);
+ else
+ ilk_display_irq_handler(dev, de_iir);
I915_WRITE(DEIIR, de_iir);
ret = IRQ_HANDLED;
}
- pm_iir = I915_READ(GEN6_PMIIR);
- if (pm_iir) {
- if (IS_HASWELL(dev))
- hsw_pm_irq_handler(dev_priv, pm_iir);
- else if (pm_iir & GEN6_PM_RPS_EVENTS)
- gen6_queue_rps_work(dev_priv, pm_iir);
- I915_WRITE(GEN6_PMIIR, pm_iir);
- ret = IRQ_HANDLED;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ u32 pm_iir = I915_READ(GEN6_PMIIR);
+ if (pm_iir) {
+ if (IS_HASWELL(dev))
+ hsw_pm_irq_handler(dev_priv, pm_iir);
+ else if (pm_iir & GEN6_PM_RPS_EVENTS)
+ gen6_rps_irq_handler(dev_priv, pm_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ ret = IRQ_HANDLED;
+ }
}
if (IS_HASWELL(dev)) {
@@ -1289,119 +1390,6 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
return ret;
}
-static void ilk_gt_irq_handler(struct drm_device *dev,
- struct drm_i915_private *dev_priv,
- u32 gt_iir)
-{
- if (gt_iir &
- (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
- notify_ring(dev, &dev_priv->ring[RCS]);
- if (gt_iir & ILK_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
-}
-
-static irqreturn_t ironlake_irq_handler(int irq, void *arg)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
-
- atomic_inc(&dev_priv->irq_received);
-
- /* disable master interrupt before clearing iir */
- de_ier = I915_READ(DEIER);
- I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- POSTING_READ(DEIER);
-
- /* Disable south interrupts. We'll only write to SDEIIR once, so further
- * interrupts will will be stored on its back queue, and then we'll be
- * able to process them after we restore SDEIER (as soon as we restore
- * it, we'll get an interrupt if SDEIIR still has something to process
- * due to its back queue). */
- sde_ier = I915_READ(SDEIER);
- I915_WRITE(SDEIER, 0);
- POSTING_READ(SDEIER);
-
- de_iir = I915_READ(DEIIR);
- gt_iir = I915_READ(GTIIR);
- pm_iir = I915_READ(GEN6_PMIIR);
-
- if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
- goto done;
-
- ret = IRQ_HANDLED;
-
- if (IS_GEN5(dev))
- ilk_gt_irq_handler(dev, dev_priv, gt_iir);
- else
- snb_gt_irq_handler(dev, dev_priv, gt_iir);
-
- if (de_iir & DE_AUX_CHANNEL_A)
- dp_aux_irq_handler(dev);
-
- if (de_iir & DE_GSE)
- intel_opregion_asle_intr(dev);
-
- if (de_iir & DE_PIPEA_VBLANK)
- drm_handle_vblank(dev, 0);
-
- if (de_iir & DE_PIPEB_VBLANK)
- drm_handle_vblank(dev, 1);
-
- if (de_iir & DE_POISON)
- DRM_ERROR("Poison interrupt\n");
-
- if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
- if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
- DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
-
- if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
- if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
- DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
-
- if (de_iir & DE_PLANEA_FLIP_DONE) {
- intel_prepare_page_flip(dev, 0);
- intel_finish_page_flip_plane(dev, 0);
- }
-
- if (de_iir & DE_PLANEB_FLIP_DONE) {
- intel_prepare_page_flip(dev, 1);
- intel_finish_page_flip_plane(dev, 1);
- }
-
- /* check event from PCH */
- if (de_iir & DE_PCH_EVENT) {
- u32 pch_iir = I915_READ(SDEIIR);
-
- if (HAS_PCH_CPT(dev))
- cpt_irq_handler(dev, pch_iir);
- else
- ibx_irq_handler(dev, pch_iir);
-
- /* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
- }
-
- if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
- ironlake_handle_rps_change(dev);
-
- if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
- gen6_queue_rps_work(dev_priv, pm_iir);
-
- I915_WRITE(GTIIR, gt_iir);
- I915_WRITE(DEIIR, de_iir);
- I915_WRITE(GEN6_PMIIR, pm_iir);
-
-done:
- I915_WRITE(DEIER, de_ier);
- POSTING_READ(DEIER);
- I915_WRITE(SDEIER, sde_ier);
- POSTING_READ(SDEIER);
-
- return ret;
-}
-
/**
* i915_error_work_func - do process context error handling work
* @work: work struct
@@ -1417,9 +1405,9 @@ static void i915_error_work_func(struct work_struct *work)
gpu_error);
struct drm_device *dev = dev_priv->dev;
struct intel_ring_buffer *ring;
- char *error_event[] = { "ERROR=1", NULL };
- char *reset_event[] = { "RESET=1", NULL };
- char *reset_done_event[] = { "ERROR=0", NULL };
+ char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
+ char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
+ char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
int i, ret;
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
@@ -1470,535 +1458,6 @@ static void i915_error_work_func(struct work_struct *work)
}
}
-/* NB: please notice the memset */
-static void i915_get_extra_instdone(struct drm_device *dev,
- uint32_t *instdone)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
-
- switch(INTEL_INFO(dev)->gen) {
- case 2:
- case 3:
- instdone[0] = I915_READ(INSTDONE);
- break;
- case 4:
- case 5:
- case 6:
- instdone[0] = I915_READ(INSTDONE_I965);
- instdone[1] = I915_READ(INSTDONE1);
- break;
- default:
- WARN_ONCE(1, "Unsupported platform\n");
- case 7:
- instdone[0] = I915_READ(GEN7_INSTDONE_1);
- instdone[1] = I915_READ(GEN7_SC_INSTDONE);
- instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
- instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
- break;
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-static struct drm_i915_error_object *
-i915_error_object_create_sized(struct drm_i915_private *dev_priv,
- struct drm_i915_gem_object *src,
- const int num_pages)
-{
- struct drm_i915_error_object *dst;
- int i;
- u32 reloc_offset;
-
- if (src == NULL || src->pages == NULL)
- return NULL;
-
- dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
- if (dst == NULL)
- return NULL;
-
- reloc_offset = src->gtt_offset;
- for (i = 0; i < num_pages; i++) {
- unsigned long flags;
- void *d;
-
- d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
- if (d == NULL)
- goto unwind;
-
- local_irq_save(flags);
- if (reloc_offset < dev_priv->gtt.mappable_end &&
- src->has_global_gtt_mapping) {
- void __iomem *s;
-
- /* Simply ignore tiling or any overlapping fence.
- * It's part of the error state, and this hopefully
- * captures what the GPU read.
- */
-
- s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
- reloc_offset);
- memcpy_fromio(d, s, PAGE_SIZE);
- io_mapping_unmap_atomic(s);
- } else if (src->stolen) {
- unsigned long offset;
-
- offset = dev_priv->mm.stolen_base;
- offset += src->stolen->start;
- offset += i << PAGE_SHIFT;
-
- memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
- } else {
- struct page *page;
- void *s;
-
- page = i915_gem_object_get_page(src, i);
-
- drm_clflush_pages(&page, 1);
-
- s = kmap_atomic(page);
- memcpy(d, s, PAGE_SIZE);
- kunmap_atomic(s);
-
- drm_clflush_pages(&page, 1);
- }
- local_irq_restore(flags);
-
- dst->pages[i] = d;
-
- reloc_offset += PAGE_SIZE;
- }
- dst->page_count = num_pages;
- dst->gtt_offset = src->gtt_offset;
-
- return dst;
-
-unwind:
- while (i--)
- kfree(dst->pages[i]);
- kfree(dst);
- return NULL;
-}
-#define i915_error_object_create(dev_priv, src) \
- i915_error_object_create_sized((dev_priv), (src), \
- (src)->base.size>>PAGE_SHIFT)
-
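i915_error_object_create_sized uses the classic goto-unwind shape for partial failure: on the first failed page allocation it walks back over exactly the pages already allocated, then frees the container. The same idiom reduced to plain malloc/free so it compiles and runs anywhere:

    #include <stdio.h>
    #include <stdlib.h>

    struct pages { int count; void *page[8]; };

    static struct pages *alloc_pages_or_unwind(int n)
    {
        struct pages *dst = malloc(sizeof(*dst));
        int i;

        if (!dst || n > 8)
            goto fail;
        for (i = 0; i < n; i++) {
            dst->page[i] = malloc(4096);
            if (!dst->page[i])
                goto unwind;
        }
        dst->count = n;
        return dst;

    unwind:
        while (i--)                 /* free only what we got */
            free(dst->page[i]);
    fail:
        free(dst);                  /* free(NULL) is a no-op */
        return NULL;
    }

    int main(void)
    {
        struct pages *p = alloc_pages_or_unwind(4);
        printf("%s\n", p ? "allocated" : "failed");
        return 0;
    }
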
-static void
-i915_error_object_free(struct drm_i915_error_object *obj)
-{
- int page;
-
- if (obj == NULL)
- return;
-
- for (page = 0; page < obj->page_count; page++)
- kfree(obj->pages[page]);
-
- kfree(obj);
-}
-
-void
-i915_error_state_free(struct kref *error_ref)
-{
- struct drm_i915_error_state *error = container_of(error_ref,
- typeof(*error), ref);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- i915_error_object_free(error->ring[i].batchbuffer);
- i915_error_object_free(error->ring[i].ringbuffer);
- i915_error_object_free(error->ring[i].ctx);
- kfree(error->ring[i].requests);
- }
-
- kfree(error->active_bo);
- kfree(error->overlay);
- kfree(error->display);
- kfree(error);
-}
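i915_error_state_free is a kref release callback: it receives a pointer to the embedded refcount and recovers the enclosing error state with container_of. That pointer arithmetic in standalone form; the struct layout here is illustrative, not the driver's:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ref { int count; };
    struct error_state { int id; struct ref ref; };

    /* Called with the embedded ref; steps back to the container. */
    static void release(struct ref *r)
    {
        struct error_state *e = container_of(r, struct error_state, ref);
        printf("freeing error state %d\n", e->id);
    }

    int main(void)
    {
        struct error_state e = { .id = 7 };
        release(&e.ref);
        return 0;
    }
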
-static void capture_bo(struct drm_i915_error_buffer *err,
- struct drm_i915_gem_object *obj)
-{
- err->size = obj->base.size;
- err->name = obj->base.name;
- err->rseqno = obj->last_read_seqno;
- err->wseqno = obj->last_write_seqno;
- err->gtt_offset = obj->gtt_offset;
- err->read_domains = obj->base.read_domains;
- err->write_domain = obj->base.write_domain;
- err->fence_reg = obj->fence_reg;
- err->pinned = 0;
- if (obj->pin_count > 0)
- err->pinned = 1;
- if (obj->user_pin_count > 0)
- err->pinned = -1;
- err->tiling = obj->tiling_mode;
- err->dirty = obj->dirty;
- err->purgeable = obj->madv != I915_MADV_WILLNEED;
- err->ring = obj->ring ? obj->ring->id : -1;
- err->cache_level = obj->cache_level;
-}
-
-static u32 capture_active_bo(struct drm_i915_error_buffer *err,
- int count, struct list_head *head)
-{
- struct drm_i915_gem_object *obj;
- int i = 0;
-
- list_for_each_entry(obj, head, mm_list) {
- capture_bo(err++, obj);
- if (++i == count)
- break;
- }
-
- return i;
-}
-
-static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
- int count, struct list_head *head)
-{
- struct drm_i915_gem_object *obj;
- int i = 0;
-
- list_for_each_entry(obj, head, global_list) {
- if (obj->pin_count == 0)
- continue;
-
- capture_bo(err++, obj);
- if (++i == count)
- break;
- }
-
- return i;
-}
-
-static void i915_gem_record_fences(struct drm_device *dev,
- struct drm_i915_error_state *error)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- case 2:
- for (i = 0; i < 8; i++)
- error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
-
- default:
- BUG();
- }
-}
-
-static struct drm_i915_error_object *
-i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
-{
- struct drm_i915_gem_object *obj;
- u32 seqno;
-
- if (!ring->get_seqno)
- return NULL;
-
- if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
- u32 acthd = I915_READ(ACTHD);
-
- if (WARN_ON(ring->id != RCS))
- return NULL;
-
- obj = ring->private;
- if (acthd >= obj->gtt_offset &&
- acthd < obj->gtt_offset + obj->base.size)
- return i915_error_object_create(dev_priv, obj);
- }
-
- seqno = ring->get_seqno(ring, false);
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (obj->ring != ring)
- continue;
-
- if (i915_seqno_passed(seqno, obj->last_read_seqno))
- continue;
-
- if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
- continue;
-
- /* We need to copy these to an anonymous buffer as the simplest
- * method to avoid being overwritten by userspace.
- */
- return i915_error_object_create(dev_priv, obj);
- }
-
- return NULL;
-}
-
-static void i915_record_ring_state(struct drm_device *dev,
- struct drm_i915_error_state *error,
- struct intel_ring_buffer *ring)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen >= 6) {
- error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
- error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
- error->semaphore_mboxes[ring->id][0]
- = I915_READ(RING_SYNC_0(ring->mmio_base));
- error->semaphore_mboxes[ring->id][1]
- = I915_READ(RING_SYNC_1(ring->mmio_base));
- error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
- error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
- }
-
- if (INTEL_INFO(dev)->gen >= 4) {
- error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
- error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
- error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
- error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
- error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
- if (ring->id == RCS)
- error->bbaddr = I915_READ64(BB_ADDR);
- } else {
- error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
- error->ipeir[ring->id] = I915_READ(IPEIR);
- error->ipehr[ring->id] = I915_READ(IPEHR);
- error->instdone[ring->id] = I915_READ(INSTDONE);
- }
-
- error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
- error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
- error->seqno[ring->id] = ring->get_seqno(ring, false);
- error->acthd[ring->id] = intel_ring_get_active_head(ring);
- error->head[ring->id] = I915_READ_HEAD(ring);
- error->tail[ring->id] = I915_READ_TAIL(ring);
- error->ctl[ring->id] = I915_READ_CTL(ring);
-
- error->cpu_ring_head[ring->id] = ring->head;
- error->cpu_ring_tail[ring->id] = ring->tail;
-}
-
-
-static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
- struct drm_i915_error_state *error,
- struct drm_i915_error_ring *ering)
-{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct drm_i915_gem_object *obj;
-
- /* Currently render ring is the only HW context user */
- if (ring->id != RCS || !error->ccid)
- return;
-
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
- ering->ctx = i915_error_object_create_sized(dev_priv,
- obj, 1);
- }
- }
-}
-
-static void i915_gem_record_rings(struct drm_device *dev,
- struct drm_i915_error_state *error)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- struct drm_i915_gem_request *request;
- int i, count;
-
- for_each_ring(ring, dev_priv, i) {
- i915_record_ring_state(dev, error, ring);
-
- error->ring[i].batchbuffer =
- i915_error_first_batchbuffer(dev_priv, ring);
-
- error->ring[i].ringbuffer =
- i915_error_object_create(dev_priv, ring->obj);
-
-
- i915_gem_record_active_context(ring, error, &error->ring[i]);
-
- count = 0;
- list_for_each_entry(request, &ring->request_list, list)
- count++;
-
- error->ring[i].num_requests = count;
- error->ring[i].requests =
- kmalloc(count*sizeof(struct drm_i915_error_request),
- GFP_ATOMIC);
- if (error->ring[i].requests == NULL) {
- error->ring[i].num_requests = 0;
- continue;
- }
-
- count = 0;
- list_for_each_entry(request, &ring->request_list, list) {
- struct drm_i915_error_request *erq;
-
- erq = &error->ring[i].requests[count++];
- erq->seqno = request->seqno;
- erq->jiffies = request->emitted_jiffies;
- erq->tail = request->tail;
- }
- }
-}
-
-/**
- * i915_capture_error_state - capture an error record for later analysis
- * @dev: drm device
- *
- * Should be called when an error is detected (either a hang or an error
- * interrupt) to capture error state from the time of the error. Fills
- * out a structure which becomes available in debugfs for user level tools
- * to pick up.
- */
-static void i915_capture_error_state(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj;
- struct drm_i915_error_state *error;
- unsigned long flags;
- int i, pipe;
-
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- error = dev_priv->gpu_error.first_error;
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
- if (error)
- return;
-
- /* Account for pipe specific data like PIPE*STAT */
- error = kzalloc(sizeof(*error), GFP_ATOMIC);
- if (!error) {
- DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
- return;
- }
-
- DRM_INFO("capturing error event; look for more information in "
- "/sys/kernel/debug/dri/%d/i915_error_state\n",
- dev->primary->index);
-
- kref_init(&error->ref);
- error->eir = I915_READ(EIR);
- error->pgtbl_er = I915_READ(PGTBL_ER);
- if (HAS_HW_CONTEXTS(dev))
- error->ccid = I915_READ(CCID);
-
- if (HAS_PCH_SPLIT(dev))
- error->ier = I915_READ(DEIER) | I915_READ(GTIER);
- else if (IS_VALLEYVIEW(dev))
- error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
- else if (IS_GEN2(dev))
- error->ier = I915_READ16(IER);
- else
- error->ier = I915_READ(IER);
-
- if (INTEL_INFO(dev)->gen >= 6)
- error->derrmr = I915_READ(DERRMR);
-
- if (IS_VALLEYVIEW(dev))
- error->forcewake = I915_READ(FORCEWAKE_VLV);
- else if (INTEL_INFO(dev)->gen >= 7)
- error->forcewake = I915_READ(FORCEWAKE_MT);
- else if (INTEL_INFO(dev)->gen == 6)
- error->forcewake = I915_READ(FORCEWAKE);
-
- if (!HAS_PCH_SPLIT(dev))
- for_each_pipe(pipe)
- error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
-
- if (INTEL_INFO(dev)->gen >= 6) {
- error->error = I915_READ(ERROR_GEN6);
- error->done_reg = I915_READ(DONE_REG);
- }
-
- if (INTEL_INFO(dev)->gen == 7)
- error->err_int = I915_READ(GEN7_ERR_INT);
-
- i915_get_extra_instdone(dev, error->extra_instdone);
-
- i915_gem_record_fences(dev, error);
- i915_gem_record_rings(dev, error);
-
- /* Record buffers on the active and pinned lists. */
- error->active_bo = NULL;
- error->pinned_bo = NULL;
-
- i = 0;
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
- i++;
- error->active_bo_count = i;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
- if (obj->pin_count)
- i++;
- error->pinned_bo_count = i - error->active_bo_count;
-
- error->active_bo = NULL;
- error->pinned_bo = NULL;
- if (i) {
- error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
- GFP_ATOMIC);
- if (error->active_bo)
- error->pinned_bo =
- error->active_bo + error->active_bo_count;
- }
-
- if (error->active_bo)
- error->active_bo_count =
- capture_active_bo(error->active_bo,
- error->active_bo_count,
- &dev_priv->mm.active_list);
-
- if (error->pinned_bo)
- error->pinned_bo_count =
- capture_pinned_bo(error->pinned_bo,
- error->pinned_bo_count,
- &dev_priv->mm.bound_list);
-
- do_gettimeofday(&error->time);
-
- error->overlay = intel_overlay_capture_error_state(dev);
- error->display = intel_display_capture_error_state(dev);
-
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- if (dev_priv->gpu_error.first_error == NULL) {
- dev_priv->gpu_error.first_error = error;
- error = NULL;
- }
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
-
- if (error)
- i915_error_state_free(&error->ref);
-}
-
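The capture path keeps only the first error state: it peeks under the spinlock, does the expensive capture outside it, then publishes under the lock and frees its copy if another CPU won the race. A pthread model of that check/capture/publish sequence; the payload is a stand-in:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static void *first_error;

    static void capture(void)
    {
        void *err;

        pthread_mutex_lock(&lock);
        err = first_error;
        pthread_mutex_unlock(&lock);
        if (err)
            return;             /* already captured, keep the first */

        err = malloc(64);       /* expensive capture happens here */
        if (!err)
            return;

        pthread_mutex_lock(&lock);
        if (!first_error) {
            first_error = err;  /* we won the publish race */
            err = NULL;
        }
        pthread_mutex_unlock(&lock);
        free(err);              /* lost the race: drop our copy */
    }

    int main(void)
    {
        capture();
        return 0;
    }
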
-void i915_destroy_error_state(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_error_state *error;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- error = dev_priv->gpu_error.first_error;
- dev_priv->gpu_error.first_error = NULL;
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
-
- if (error)
- kref_put(&error->ref, i915_error_state_free);
-}
-#else
-#define i915_capture_error_state(x)
-#endif
-
static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2155,10 +1614,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = DSPSURF(intel_crtc->plane);
stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
- obj->gtt_offset;
+ i915_gem_obj_ggtt_offset(obj);
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
- stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
+ stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
crtc->y * crtc->fb->pitches[0] +
crtc->x * crtc->fb->bits_per_pixel/8);
}
@@ -2202,29 +1661,14 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
+ uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
+ DE_PIPE_VBLANK_ILK(pipe);
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
- return 0;
-}
-
-static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
-
- if (!i915_pipe_enabled(dev, pipe))
- return -EINVAL;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_enable_display_irq(dev_priv,
- DE_PIPEA_VBLANK_IVB << (5 * pipe));
+ ironlake_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -2275,21 +1719,11 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
+ uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
+ DE_PIPE_VBLANK_ILK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-}
-
-static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_disable_display_irq(dev_priv,
- DE_PIPEA_VBLANK_IVB << (pipe * 5));
+ ironlake_disable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -2536,9 +1970,17 @@ void i915_hangcheck_elapsed(unsigned long data)
if (busy_count)
/* Reset timer in case the chip hangs without another
* request being added */
- mod_timer(&dev_priv->gpu_error.hangcheck_timer,
- round_jiffies_up(jiffies +
- DRM_I915_HANGCHECK_JIFFIES));
+ i915_queue_hangcheck(dev);
+}
+
+void i915_queue_hangcheck(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (!i915_enable_hangcheck)
+ return;
+
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+ round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
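i915_queue_hangcheck arms the timer with round_jiffies_up() so that many near-simultaneous deadlines expire on the same tick boundary and the CPU wakes once instead of several times. The rounding itself, modeled in plain C with an invented step and sample values:

    #include <stdio.h>

    /* Round t up to the next multiple of step. */
    static unsigned long round_up_to(unsigned long t, unsigned long step)
    {
        return ((t + step - 1) / step) * step;
    }

    int main(void)
    {
        /* deadlines at 1001..1003 all coalesce onto tick 1250 */
        for (unsigned long t = 1001; t <= 1003; t++)
            printf("%lu -> %lu\n", t, round_up_to(t, 250));
        return 0;
    }
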
static void ibx_irq_preinstall(struct drm_device *dev)
@@ -2560,31 +2002,26 @@ static void ibx_irq_preinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
-/* drm_dma.h hooks
-*/
-static void ironlake_irq_preinstall(struct drm_device *dev)
+static void gen5_gt_irq_preinstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- atomic_set(&dev_priv->irq_received, 0);
-
- I915_WRITE(HWSTAM, 0xeffe);
-
- /* XXX hotplug from PCH */
-
- I915_WRITE(DEIMR, 0xffffffff);
- I915_WRITE(DEIER, 0x0);
- POSTING_READ(DEIER);
+ struct drm_i915_private *dev_priv = dev->dev_private;
/* and GT */
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
POSTING_READ(GTIER);
- ibx_irq_preinstall(dev);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ /* and PM */
+ I915_WRITE(GEN6_PMIMR, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, 0x0);
+ POSTING_READ(GEN6_PMIER);
+ }
}
-static void ivybridge_irq_preinstall(struct drm_device *dev)
+/* drm_dma.h hooks */
+static void ironlake_irq_preinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2592,21 +2029,11 @@ static void ivybridge_irq_preinstall(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xeffe);
- /* XXX hotplug from PCH */
-
I915_WRITE(DEIMR, 0xffffffff);
I915_WRITE(DEIER, 0x0);
POSTING_READ(DEIER);
- /* and GT */
- I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- POSTING_READ(GTIER);
-
- /* Power management */
- I915_WRITE(GEN6_PMIMR, 0xffffffff);
- I915_WRITE(GEN6_PMIER, 0x0);
- POSTING_READ(GEN6_PMIER);
+ gen5_gt_irq_preinstall(dev);
ibx_irq_preinstall(dev);
}
@@ -2627,9 +2054,8 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
/* and GT */
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- POSTING_READ(GTIER);
+
+ gen5_gt_irq_preinstall(dev);
I915_WRITE(DPINVGTT, 0xff);
@@ -2648,22 +2074,21 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *intel_encoder;
- u32 mask = ~I915_READ(SDEIMR);
- u32 hotplug;
+ u32 hotplug_irqs, hotplug, enabled_irqs = 0;
if (HAS_PCH_IBX(dev)) {
- mask &= ~SDE_HOTPLUG_MASK;
+ hotplug_irqs = SDE_HOTPLUG_MASK;
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
- mask |= hpd_ibx[intel_encoder->hpd_pin];
+ enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
} else {
- mask &= ~SDE_HOTPLUG_MASK_CPT;
+ hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
- mask |= hpd_cpt[intel_encoder->hpd_pin];
+ enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
}
- I915_WRITE(SDEIMR, ~mask);
+ ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
@@ -2700,123 +2125,102 @@ static void ibx_irq_postinstall(struct drm_device *dev)
I915_WRITE(SDEIMR, ~mask);
}
-static int ironlake_irq_postinstall(struct drm_device *dev)
+static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
- unsigned long irqflags;
-
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- /* enable kind of interrupts always enabled */
- u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
- DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
- DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
- DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
- u32 gt_irqs;
-
- dev_priv->irq_mask = ~display_mask;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pm_irqs, gt_irqs;
- /* should always can generate irq */
- I915_WRITE(DEIIR, I915_READ(DEIIR));
- I915_WRITE(DEIMR, dev_priv->irq_mask);
- I915_WRITE(DEIER, display_mask |
- DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
- POSTING_READ(DEIER);
+ pm_irqs = gt_irqs = 0;
dev_priv->gt_irq_mask = ~0;
+ if (HAS_L3_GPU_CACHE(dev)) {
+ /* L3 parity interrupt is always unmasked. */
+ dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ }
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
- gt_irqs = GT_RENDER_USER_INTERRUPT;
-
- if (IS_GEN6(dev))
- gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
- else
+ gt_irqs |= GT_RENDER_USER_INTERRUPT;
+ if (IS_GEN5(dev)) {
gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
ILK_BSD_USER_INTERRUPT;
+ } else {
+ gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
+ }
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
I915_WRITE(GTIER, gt_irqs);
POSTING_READ(GTIER);
- ibx_irq_postinstall(dev);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ pm_irqs |= GEN6_PM_RPS_EVENTS;
- if (IS_IRONLAKE_M(dev)) {
- /* Enable PCU event interrupts
- *
- * spinlocking not required here for correctness since interrupt
- * setup is guaranteed to run in single-threaded context. But we
- * need it to make the assert_spin_locked happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- }
+ if (HAS_VEBOX(dev))
+ pm_irqs |= PM_VEBOX_USER_INTERRUPT;
- return 0;
+ I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+ I915_WRITE(GEN6_PMIMR, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, pm_irqs);
+ POSTING_READ(GEN6_PMIER);
+ }
}
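The GT and PM blocks follow the same three-register scheme: IER enables a source, IMR gates whether it raises a CPU interrupt, and IIR latches events with write-1-to-clear semantics; postinstall acks stale events, unmasks exactly the wanted set, then enables it. A toy model of that scheme, all names invented:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct irq_regs { uint32_t ier, imr, iir; };

    static void iir_write(struct irq_regs *r, uint32_t v)
    {
        r->iir &= ~v;           /* write-1-to-clear */
    }

    static bool cpu_sees_irq(const struct irq_regs *r, uint32_t bit)
    {
        /* must be enabled, latched, and unmasked */
        return (r->ier & bit) && (r->iir & bit) && !(r->imr & bit);
    }

    static void postinstall(struct irq_regs *r, uint32_t wanted)
    {
        iir_write(r, r->iir);   /* ack anything stale first */
        r->imr = ~wanted;       /* unmask the wanted sources */
        r->ier = wanted;        /* then enable them */
    }

    int main(void)
    {
        struct irq_regs gt = { .iir = 0x1 };    /* stale latched event */
        postinstall(&gt, 0x3);
        printf("stale bit visible: %d\n", cpu_sees_irq(&gt, 0x1));
        return 0;
    }
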
-static int ivybridge_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
{
+ unsigned long irqflags;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- /* enable kind of interrupts always enabled */
- u32 display_mask =
- DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
- DE_PLANEC_FLIP_DONE_IVB |
- DE_PLANEB_FLIP_DONE_IVB |
- DE_PLANEA_FLIP_DONE_IVB |
- DE_AUX_CHANNEL_A_IVB |
- DE_ERR_INT_IVB;
- u32 pm_irqs = GEN6_PM_RPS_EVENTS;
- u32 gt_irqs;
+ u32 display_mask, extra_mask;
+
+ if (INTEL_INFO(dev)->gen >= 7) {
+ display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
+ DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
+ DE_PLANEB_FLIP_DONE_IVB |
+ DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
+ DE_ERR_INT_IVB);
+ extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
+ DE_PIPEA_VBLANK_IVB);
+
+ I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
+ } else {
+ display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
+ DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
+ DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
+ extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
+ }
dev_priv->irq_mask = ~display_mask;
/* should always be able to generate irqs */
- I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
I915_WRITE(DEIIR, I915_READ(DEIIR));
I915_WRITE(DEIMR, dev_priv->irq_mask);
- I915_WRITE(DEIER,
- display_mask |
- DE_PIPEC_VBLANK_IVB |
- DE_PIPEB_VBLANK_IVB |
- DE_PIPEA_VBLANK_IVB);
+ I915_WRITE(DEIER, display_mask | extra_mask);
POSTING_READ(DEIER);
- dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
- gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- I915_WRITE(GTIER, gt_irqs);
- POSTING_READ(GTIER);
-
- I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
- if (HAS_VEBOX(dev))
- pm_irqs |= PM_VEBOX_USER_INTERRUPT |
- PM_VEBOX_CS_ERROR_INTERRUPT;
-
- /* Our enable/disable rps functions may touch these registers so
- * make sure to set a known state for only the non-RPS bits.
- * The RMW is extra paranoia since this should be called after being set
- * to a known state in preinstall.
- * */
- I915_WRITE(GEN6_PMIMR,
- (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
- I915_WRITE(GEN6_PMIER,
- (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
- POSTING_READ(GEN6_PMIER);
+ gen5_gt_irq_postinstall(dev);
ibx_irq_postinstall(dev);
+ if (IS_IRONLAKE_M(dev)) {
+ /* Enable PCU event interrupts
+ *
+ * spinlocking not required here for correctness since interrupt
+ * setup is guaranteed to run in single-threaded context. But we
+ * need it to make the assert_spin_locked happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+
return 0;
}
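Note the display_mask/extra_mask split above: both sets go into DEIER, but dev_priv->irq_mask = ~display_mask leaves the extra_mask bits (vblank, PCU) masked in DEIMR, so enable_vblank later only has to clear an IMR bit. A small demonstration of this enabled-but-masked state, with made-up bit values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t display_mask = 0x00f0, extra_mask = 0x0003;
        uint32_t deimr = ~display_mask;             /* vblank masked */
        uint32_t deier = display_mask | extra_mask; /* but enabled */

        /* what ironlake_enable_vblank does: unmask one bit */
        deimr &= ~0x0001;
        printf("pipe A vblank live: %d\n",
               !!((deier & 0x0001) && !(deimr & 0x0001)));
        return 0;
    }
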
static int valleyview_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 gt_irqs;
u32 enable_mask;
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+ unsigned long irqflags;
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -2842,20 +2246,18 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_WRITE(PIPESTAT(1), 0xffff);
POSTING_READ(VLV_IER);
+ /* Interrupt setup is already guaranteed to be single-threaded; this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0, pipestat_enable);
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
- gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT;
- I915_WRITE(GTIER, gt_irqs);
- POSTING_READ(GTIER);
+ gen5_gt_irq_postinstall(dev);
/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
@@ -3323,6 +2725,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
u32 error_mask;
+ unsigned long irqflags;
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
@@ -3341,7 +2744,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
if (IS_G4X(dev))
enable_mask |= I915_BSD_USER_INTERRUPT;
+ /* Interrupt setup is already guaranteed to be single-threaded; this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
/*
* Enable some error detection, note the instruction error mask
@@ -3616,15 +3023,6 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- /* Share uninstall handlers with ILK/SNB */
- dev->driver->irq_handler = ivybridge_irq_handler;
- dev->driver->irq_preinstall = ivybridge_irq_preinstall;
- dev->driver->irq_postinstall = ivybridge_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_uninstall;
- dev->driver->enable_vblank = ivybridge_enable_vblank;
- dev->driver->disable_vblank = ivybridge_disable_vblank;
- dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f2326fc60ac9..3aebe5dee4df 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -61,6 +61,12 @@
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
+#define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4)
+#define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4)
+#define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4)
+#define GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4)
+#define GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4)
+#define GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4)
#define GC_DISPLAY_CLOCK_MASK (7 << 4)
#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0)
#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0)
@@ -363,6 +369,7 @@
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
+#define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
@@ -680,6 +687,7 @@
#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
+#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
#define FPGA_DBG 0x42300
#define FPGA_DBG_RM_NOCLAIM (1<<31)
@@ -1125,7 +1133,8 @@
#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
-#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_SDVO_HIGH_SPEED (1 << 30)
+#define DPLL_DVO_2X_MODE (1 << 30)
#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
@@ -1776,6 +1785,71 @@
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+/* HSW eDP PSR registers */
+#define EDP_PSR_CTL 0x64800
+#define EDP_PSR_ENABLE (1<<31)
+#define EDP_PSR_LINK_DISABLE (0<<27)
+#define EDP_PSR_LINK_STANDBY (1<<27)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25)
+#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20
+#define EDP_PSR_SKIP_AUX_EXIT (1<<12)
+#define EDP_PSR_TP1_TP2_SEL (0<<11)
+#define EDP_PSR_TP1_TP3_SEL (1<<11)
+#define EDP_PSR_TP2_TP3_TIME_500us (0<<8)
+#define EDP_PSR_TP2_TP3_TIME_100us (1<<8)
+#define EDP_PSR_TP2_TP3_TIME_2500us (2<<8)
+#define EDP_PSR_TP2_TP3_TIME_0us (3<<8)
+#define EDP_PSR_TP1_TIME_500us (0<<4)
+#define EDP_PSR_TP1_TIME_100us (1<<4)
+#define EDP_PSR_TP1_TIME_2500us (2<<4)
+#define EDP_PSR_TP1_TIME_0us (3<<4)
+#define EDP_PSR_IDLE_FRAME_SHIFT 0
+
+#define EDP_PSR_AUX_CTL 0x64810
+#define EDP_PSR_AUX_DATA1 0x64814
+#define EDP_PSR_DPCD_COMMAND 0x80060000
+#define EDP_PSR_AUX_DATA2 0x64818
+#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
+#define EDP_PSR_AUX_DATA3 0x6481c
+#define EDP_PSR_AUX_DATA4 0x64820
+#define EDP_PSR_AUX_DATA5 0x64824
+
+#define EDP_PSR_STATUS_CTL 0x64840
+#define EDP_PSR_STATUS_STATE_MASK (7<<29)
+#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
+#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
+#define EDP_PSR_STATUS_STATE_SRDENT (2<<29)
+#define EDP_PSR_STATUS_STATE_BUFOFF (3<<29)
+#define EDP_PSR_STATUS_STATE_BUFON (4<<29)
+#define EDP_PSR_STATUS_STATE_AUXACK (5<<29)
+#define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29)
+#define EDP_PSR_STATUS_LINK_MASK (3<<26)
+#define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26)
+#define EDP_PSR_STATUS_LINK_FULL_ON (1<<26)
+#define EDP_PSR_STATUS_LINK_STANDBY (2<<26)
+#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
+#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f
+#define EDP_PSR_STATUS_COUNT_SHIFT 16
+#define EDP_PSR_STATUS_COUNT_MASK 0xf
+#define EDP_PSR_STATUS_AUX_ERROR (1<<15)
+#define EDP_PSR_STATUS_AUX_SENDING (1<<12)
+#define EDP_PSR_STATUS_SENDING_IDLE (1<<9)
+#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8)
+#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
+#define EDP_PSR_STATUS_IDLE_MASK 0xf
+
+#define EDP_PSR_PERF_CNT 0x64844
+#define EDP_PSR_PERF_CNT_MASK 0xffffff
+
+#define EDP_PSR_DEBUG_CTL 0x64860
+#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
+#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
+#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
+
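The PSR status fields above are read out with the usual mask-then-shift pattern. A standalone sketch using a few of the new definitions; the sample register value is made up:

    #include <stdint.h>
    #include <stdio.h>

    #define EDP_PSR_STATUS_STATE_MASK   (7u << 29)
    #define EDP_PSR_STATUS_STATE_SRDENT (2u << 29)
    #define EDP_PSR_STATUS_COUNT_SHIFT  16
    #define EDP_PSR_STATUS_COUNT_MASK   0xfu

    int main(void)
    {
        uint32_t val = (2u << 29) | (5u << 16);   /* hypothetical readout */
        uint32_t state = val & EDP_PSR_STATUS_STATE_MASK;
        uint32_t count = (val >> EDP_PSR_STATUS_COUNT_SHIFT) &
                         EDP_PSR_STATUS_COUNT_MASK;

        printf("in SRD entry: %d, entry count: %u\n",
               state == EDP_PSR_STATUS_STATE_SRDENT, count);
        return 0;
    }
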
/* VGA port control */
#define ADPA 0x61100
#define PCH_ADPA 0xe1100
@@ -2045,6 +2119,7 @@
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_VSC_DATA_SIZE 36
#define VIDEO_DIP_CTL 0x61170
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
@@ -2192,6 +2267,8 @@
#define BLC_PWM_CPU_CTL2 0x48250
#define BLC_PWM_CPU_CTL 0x48254
+#define HSW_BLC_PWM2_CTL 0x48350
+
/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
* like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
#define BLC_PWM_PCH_CTL1 0xc8250
@@ -2200,6 +2277,12 @@
#define BLM_PCH_POLARITY (1 << 29)
#define BLC_PWM_PCH_CTL2 0xc8254
+#define UTIL_PIN_CTL 0x48400
+#define UTIL_PIN_ENABLE (1 << 31)
+
+#define PCH_GTC_CTL 0xe7000
+#define PCH_GTC_ENABLE (1 << 31)
+
/* TV port control */
#define TV_CTL 0x68000
/** Enables the TV encoder */
@@ -3718,6 +3801,9 @@
#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEA_VBLANK_IVB (1<<0)
+#define DE_PIPE_VBLANK_ILK(pipe) (1 << (((pipe) * 8) + 7))
+#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
+
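The two vblank macros encode the per-pipe bit spacing that the old hard-coded names spelled out: every 8 bits starting at bit 7 on ILK/SNB, every 5 bits starting at bit 0 on IVB/HSW. A quick self-check of the computed values:

    #include <stdio.h>

    #define DE_PIPE_VBLANK_ILK(pipe) (1 << (((pipe) * 8) + 7))
    #define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))

    int main(void)
    {
        /* pipes 0..2 land on bits 7/15/23 (ILK) and 0/5/10 (IVB) */
        for (int pipe = 0; pipe < 3; pipe++)
            printf("pipe %d: ILK 0x%08x IVB 0x%08x\n", pipe,
                   DE_PIPE_VBLANK_ILK(pipe), DE_PIPE_VBLANK_IVB(pipe));
        return 0;
    }
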
#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
#define MASTER_INTERRUPT_ENABLE (1<<31)
@@ -3880,6 +3966,7 @@
#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
+#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
/* digital port hotplug */
#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
@@ -4080,6 +4167,13 @@
#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
+#define HSW_STEREO_3D_CTL_A 0x70020
+#define S3D_ENABLE (1<<31)
+#define HSW_STEREO_3D_CTL_B 0x71020
+
+#define HSW_STEREO_3D_CTL(trans) \
+ _TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_B)
+
#define _PCH_TRANS_HTOTAL_B 0xe1000
#define _PCH_TRANS_HBLANK_B 0xe1004
#define _PCH_TRANS_HSYNC_B 0xe1008
@@ -4468,6 +4562,10 @@
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
+#define HSW_IDICR 0x9008
+#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
+#define HSW_EDRAM_PRESENT 0x120010
+
#define GEN6_UCGCTL1 0x9400
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
@@ -4858,7 +4956,8 @@
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
#define SBI_DBUFF0 0x2a00
-#define SBI_DBUFF0_ENABLE (1<<0)
+#define SBI_GEN0 0x1f00
+#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0)
/* LPT PIXCLK_GATE */
#define PIXCLK_GATE 0xC6020
@@ -4924,7 +5023,14 @@
#define LCPLL_CLK_FREQ_450 (0<<26)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
+#define LCPLL_POWER_DOWN_ALLOW (1<<22)
#define LCPLL_CD_SOURCE_FCLK (1<<21)
+#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
+
+#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
+#define D_COMP_COMP_FORCE (1<<8)
+#define D_COMP_COMP_DISABLE (1<<0)
/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 6875b5654c63..a777e7f3b0df 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -409,6 +409,71 @@ static const struct attribute *gen6_attrs[] = {
NULL,
};
+static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *kdev = container_of(kobj, struct device, kobj);
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct i915_error_state_file_priv error_priv;
+ struct drm_i915_error_state_buf error_str;
+ ssize_t ret_count = 0;
+ int ret;
+
+ memset(&error_priv, 0, sizeof(error_priv));
+
+ ret = i915_error_state_buf_init(&error_str, count, off);
+ if (ret)
+ return ret;
+
+ error_priv.dev = dev;
+ i915_error_state_get(dev, &error_priv);
+
+ ret = i915_error_state_to_str(&error_str, &error_priv);
+ if (ret)
+ goto out;
+
+ ret_count = count < error_str.bytes ? count : error_str.bytes;
+
+ memcpy(buf, error_str.buf, ret_count);
+out:
+ i915_error_state_put(&error_priv);
+ i915_error_state_buf_release(&error_str);
+
+ return ret ?: ret_count;
+}
+
+static ssize_t error_state_write(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *kdev = container_of(kobj, struct device, kobj);
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ int ret;
+
+ DRM_DEBUG_DRIVER("Resetting error state\n");
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ i915_destroy_error_state(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static struct bin_attribute error_state_attr = {
+ .attr.name = "error",
+ .attr.mode = S_IRUSR | S_IWUSR,
+ .size = 0,
+ .read = error_state_read,
+ .write = error_state_write,
+};
+
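The bin_attribute exposes the error state as a regular sysfs file, readable without debugfs. A hedged userspace consumer; the card0 path assumes the device is DRM minor 0, which varies per system:

    #include <stdio.h>

    int main(void)
    {
        /* assumed path, matching .attr.name = "error" above */
        FILE *f = fopen("/sys/class/drm/card0/error", "r");
        char buf[4096];
        size_t n;

        if (!f) {
            perror("fopen");
            return 1;
        }
        while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
            fwrite(buf, 1, n, stdout);
        fclose(f);
        return 0;
    }
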
void i915_setup_sysfs(struct drm_device *dev)
{
int ret;
@@ -432,10 +497,16 @@ void i915_setup_sysfs(struct drm_device *dev)
if (ret)
DRM_ERROR("gen6 sysfs setup failed\n");
}
+
+ ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
+ &error_state_attr);
+ if (ret)
+ DRM_ERROR("error_state sysfs setup failed\n");
}
void i915_teardown_sysfs(struct drm_device *dev)
{
+ sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 3db4a6817713..2933e2ffeaa4 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -46,8 +46,8 @@ TRACE_EVENT(i915_gem_object_bind,
TP_fast_assign(
__entry->obj = obj;
- __entry->offset = obj->gtt_space->start;
- __entry->size = obj->gtt_space->size;
+ __entry->offset = i915_gem_obj_ggtt_offset(obj);
+ __entry->size = i915_gem_obj_ggtt_size(obj);
__entry->mappable = mappable;
),
@@ -68,8 +68,8 @@ TRACE_EVENT(i915_gem_object_unbind,
TP_fast_assign(
__entry->obj = obj;
- __entry->offset = obj->gtt_space->start;
- __entry->size = obj->gtt_space->size;
+ __entry->offset = i915_gem_obj_ggtt_offset(obj);
+ __entry->size = i915_gem_obj_ggtt_size(obj);
),
TP_printk("obj=%p, offset=%08x size=%x",
@@ -406,10 +406,12 @@ TRACE_EVENT(i915_flip_complete,
TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);
-TRACE_EVENT(i915_reg_rw,
- TP_PROTO(bool write, u32 reg, u64 val, int len),
+TRACE_EVENT_CONDITION(i915_reg_rw,
+ TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),
- TP_ARGS(write, reg, val, len),
+ TP_ARGS(write, reg, val, len, trace),
+
+ TP_CONDITION(trace),
TP_STRUCT__entry(
__field(u64, val)
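TRACE_EVENT_CONDITION checks TP_CONDITION before any argument assignment, so untraced register accesses skip the event entirely rather than recording and discarding it. A plain-macro approximation of the new i915_reg_rw shape, with a printf stub for the trace sink:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static void emit_reg_rw(bool write, uint32_t reg, uint64_t val, int len)
    {
        printf("%s reg=0x%x len=%d val=0x%llx\n", write ? "W" : "R",
               reg, len, (unsigned long long)val);
    }

    /* the condition gates the whole event, like TP_CONDITION(trace) */
    #define trace_i915_reg_rw(write, reg, val, len, trace) \
        do { \
            if (trace) \
                emit_reg_rw(write, reg, val, len); \
        } while (0)

    int main(void)
    {
        trace_i915_reg_rw(true, 0x44004, 0xffffffffull, 4, true);
        trace_i915_reg_rw(false, 0x44004, 0, 4, false); /* suppressed */
        return 0;
    }
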
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3acec8c48166..0c0d4e8d768e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -613,6 +613,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
struct intel_load_detect_pipe tmp;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
+ connector->base.id, drm_get_connector_name(connector),
+ force);
+
if (I915_HAS_HOTPLUG(dev)) {
/* We can not rely on the HPD pin always being correctly wired
* up, for example many KVM do not pass it through, and so
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b042ee5c4070..931b4bb1f9dc 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1118,6 +1118,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
intel_dp_stop_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
+ intel_edp_psr_enable(intel_dp);
}
if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
@@ -1148,6 +1149,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_edp_psr_disable(intel_dp);
ironlake_edp_backlight_off(intel_dp);
}
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5fb305840db8..3e66f05ea342 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -45,6 +45,11 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config);
+static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config);
+
typedef struct {
int min, max;
} intel_range_t;
@@ -84,7 +89,7 @@ intel_fdi_link_freq(struct drm_device *dev)
return 27;
}
-static const intel_limit_t intel_limits_i8xx_dvo = {
+static const intel_limit_t intel_limits_i8xx_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
.n = { .min = 3, .max = 16 },
@@ -97,6 +102,19 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
.p2_slow = 4, .p2_fast = 2 },
};
+static const intel_limit_t intel_limits_i8xx_dvo = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 930000, .max = 1400000 },
+ .n = { .min = 3, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 4 },
+};
+
static const intel_limit_t intel_limits_i8xx_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
@@ -405,8 +423,10 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
- else
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
limit = &intel_limits_i8xx_dvo;
+ else
+ limit = &intel_limits_i8xx_dac;
}
return limit;
}
@@ -892,8 +912,8 @@ static const char *state_string(bool enabled)
}
/* Only for pre-ILK configs */
-static void assert_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
+void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
{
int reg;
u32 val;
@@ -906,10 +926,8 @@ static void assert_pll(struct drm_i915_private *dev_priv,
"PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
-#define assert_pll_enabled(d, p) assert_pll(d, p, true)
-#define assert_pll_disabled(d, p) assert_pll(d, p, false)
-static struct intel_shared_dpll *
+struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
@@ -921,9 +939,9 @@ intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
}
/* For ILK+ */
-static void assert_shared_dpll(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- bool state)
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ bool state)
{
bool cur_state;
struct intel_dpll_hw_state hw_state;
@@ -942,8 +960,6 @@ static void assert_shared_dpll(struct drm_i915_private *dev_priv,
"%s assertion failure (expected %s, current %s)\n",
pll->name, state_string(state), state_string(cur_state));
}
-#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
-#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
@@ -1007,15 +1023,19 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
-static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
{
int reg;
u32 val;
+ bool cur_state;
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
- WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
+ cur_state = !!(val & FDI_RX_PLL_ENABLE);
+ WARN(cur_state != state,
+ "FDI RX PLL assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
}
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@@ -1111,7 +1131,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
}
/* Need to check both planes against the pipe */
- for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+ for_each_pipe(i) {
reg = DSPCNTR(i);
val = I915_READ(reg);
cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
@@ -1301,51 +1321,92 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
-/**
- * intel_enable_pll - enable a PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to enable
- *
- * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
- * make sure the PLL reg is writable first though, since the panel write
- * protect mechanism may be enabled.
- *
- * Note! This is for pre-ILK only.
- *
- * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
- */
-static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void vlv_enable_pll(struct intel_crtc *crtc)
{
- int reg;
- u32 val;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int reg = DPLL(crtc->pipe);
+ u32 dpll = crtc->config.dpll_hw_state.dpll;
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, crtc->pipe);
/* No really, not for ILK+ */
- BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
+ BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
- assert_panel_unlocked(dev_priv, pipe);
+ assert_panel_unlocked(dev_priv, crtc->pipe);
- reg = DPLL(pipe);
- val = I915_READ(reg);
- val |= DPLL_VCO_ENABLE;
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150);
+
+ if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
+
+ I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
+ POSTING_READ(DPLL_MD(crtc->pipe));
/* We do this three times for luck */
- I915_WRITE(reg, val);
+ I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
- I915_WRITE(reg, val);
+ I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
- I915_WRITE(reg, val);
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+}
+
+static void i9xx_enable_pll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int reg = DPLL(crtc->pipe);
+ u32 dpll = crtc->config.dpll_hw_state.dpll;
+
+ assert_pipe_disabled(dev_priv, crtc->pipe);
+
+ /* No really, not for ILK+ */
+ BUG_ON(dev_priv->info->gen >= 5);
+
+ /* The PLL is protected by the panel; make sure we can write it */
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ assert_panel_unlocked(dev_priv, crtc->pipe);
+
+ I915_WRITE(reg, dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(reg);
+ udelay(150);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(DPLL_MD(crtc->pipe),
+ crtc->config.dpll_hw_state.dpll_md);
+ } else {
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(reg, dpll);
+ }
+
+ /* We do this three times for luck */
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
}
/**
- * intel_disable_pll - disable a PLL
+ * i9xx_disable_pll - disable a PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to disable
*
@@ -1353,11 +1414,8 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
*
* Note! This is for pre-ILK only.
*/
-static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- int reg;
- u32 val;
-
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
return;
@@ -1365,11 +1423,8 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
- reg = DPLL(pipe);
- val = I915_READ(reg);
- val &= ~DPLL_VCO_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ I915_WRITE(DPLL(pipe), 0);
+ POSTING_READ(DPLL(pipe));
}
void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
@@ -1942,16 +1997,17 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
intel_crtc->dspaddr_offset = linear_offset;
}
- DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
- obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
+ fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_MODIFY_DISPBASE(DSPSURF(plane),
- obj->gtt_offset + intel_crtc->dspaddr_offset);
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
- I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
+ I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
POSTING_READ(reg);
return 0;
@@ -2031,11 +2087,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
- DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
- obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
+ fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane),
- obj->gtt_offset + intel_crtc->dspaddr_offset);
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
@@ -2183,6 +2240,20 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
+ /* Update pipe size and adjust fitter if needed */
+ if (i915_fastboot) {
+ I915_WRITE(PIPESRC(intel_crtc->pipe),
+ ((crtc->mode.hdisplay - 1) << 16) |
+ (crtc->mode.vdisplay - 1));
+ if (!intel_crtc->config.pch_pfit.size &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+ I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
+ I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
+ I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
+ }
+ }
+
ret = dev_priv->display.update_plane(crtc, fb, x, y);
if (ret) {
intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
@@ -2203,6 +2274,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
}
intel_update_fbc(dev);
+ intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
intel_crtc_update_sarea_pos(crtc, x, y);
@@ -2927,15 +2999,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
- /* XXX: pch pll's can be enabled any time before we enable the PCH
- * transcoder, and we actually should do this to not upset any PCH
- * transcoder that already use the clock when we share it.
- *
- * Note that enable_shared_dpll tries to do the right thing, but
- * get_shared_dpll unconditionally resets the pll - we need that to have
- * the right LVDS enable sequence. */
- ironlake_enable_shared_dpll(intel_crtc);
-
+ /* We need to program the right clock selection before writing the pixel
+ * multiplier into the DPLL. */
if (HAS_PCH_CPT(dev)) {
u32 sel;
@@ -2949,6 +3014,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_DPLL_SEL, temp);
}
+ /* XXX: pch pll's can be enabled any time before we enable the PCH
+ * transcoder, and we actually should do this to not upset any PCH
+ * transcoder that already uses the clock when we share it.
+ *
+ * Note that enable_shared_dpll tries to do the right thing, but
+ * get_shared_dpll unconditionally resets the pll - we need that to have
+ * the right LVDS enable sequence. */
+ ironlake_enable_shared_dpll(intel_crtc);
+
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
@@ -3031,7 +3105,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc)
crtc->config.shared_dpll = DPLL_ID_PRIVATE;
}
-static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, u32 dpll, u32 fp)
+static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3045,7 +3119,7 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
if (HAS_PCH_IBX(dev_priv->dev)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
- i = crtc->pipe;
+ i = (enum intel_dpll_id) crtc->pipe;
pll = &dev_priv->shared_dplls[i];
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
@@ -3061,8 +3135,8 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
if (pll->refcount == 0)
continue;
- if (dpll == (I915_READ(PCH_DPLL(pll->id)) & 0x7fffffff) &&
- fp == I915_READ(PCH_FP0(pll->id))) {
+ if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
+ sizeof(pll->hw_state)) == 0) {
DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
crtc->base.base.id,
pll->name, pll->refcount, pll->active);
@@ -3096,13 +3170,7 @@ found:
WARN_ON(pll->on);
assert_shared_dpll_disabled(dev_priv, pll);
- /* Wait for the clocks to stabilize before rewriting the regs */
- I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
- POSTING_READ(PCH_DPLL(pll->id));
- udelay(150);
-
- I915_WRITE(PCH_FP0(pll->id), fp);
- I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
+ pll->mode_set(dev_priv, pll);
}
pll->refcount++;
@@ -3174,7 +3242,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- u32 temp;
WARN_ON(!crtc->enabled);
@@ -3188,12 +3255,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(dev);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
- if ((temp & LVDS_PORT_EN) == 0)
- I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
- }
-
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
if (intel_crtc->config.has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
@@ -3205,10 +3269,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
assert_fdi_rx_disabled(dev_priv, pipe);
}
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->pre_enable)
- encoder->pre_enable(encoder);
-
ironlake_pfit_enable(intel_crtc);
/*
@@ -3389,7 +3449,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_crtc_wait_for_pending_flips(crtc);
drm_vblank_off(dev, pipe);
- if (dev_priv->cfb_plane == plane)
+ if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
intel_crtc_update_cursor(crtc, false);
@@ -3462,7 +3522,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
drm_vblank_off(dev, pipe);
/* FBC must be disabled before disabling the plane on HSW. */
- if (dev_priv->cfb_plane == plane)
+ if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
hsw_disable_ips(intel_crtc);
@@ -3599,7 +3659,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
- intel_enable_pll(dev_priv, pipe);
+ vlv_enable_pll(intel_crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
@@ -3640,12 +3700,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_update_watermarks(dev);
- intel_enable_pll(dev_priv, pipe);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
+ i9xx_enable_pll(intel_crtc);
+
i9xx_pfit_enable(intel_crtc);
intel_crtc_load_lut(crtc);
@@ -3701,7 +3761,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_crtc_wait_for_pending_flips(crtc);
drm_vblank_off(dev, pipe);
- if (dev_priv->cfb_plane == plane)
+ if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
intel_crtc_dpms_overlay(intel_crtc, false);
@@ -3717,7 +3777,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (encoder->post_disable)
encoder->post_disable(encoder);
- intel_disable_pll(dev_priv, pipe);
+ i9xx_disable_pll(dev_priv, pipe);
intel_crtc->active = false;
intel_update_fbc(dev);
@@ -4103,6 +4163,30 @@ static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
return 200000;
}
+static int pnv_get_display_clock_speed(struct drm_device *dev)
+{
+ u16 gcfgc = 0;
+
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_267_MHZ_PNV:
+ return 267000;
+ case GC_DISPLAY_CLOCK_333_MHZ_PNV:
+ return 333000;
+ case GC_DISPLAY_CLOCK_444_MHZ_PNV:
+ return 444000;
+ case GC_DISPLAY_CLOCK_200_MHZ_PNV:
+ return 200000;
+ default:
+ DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
+ case GC_DISPLAY_CLOCK_133_MHZ_PNV:
+ return 133000;
+ case GC_DISPLAY_CLOCK_167_MHZ_PNV:
+ return 167000;
+ }
+}
+
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
u16 gcfgc = 0;
@@ -4266,14 +4350,17 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
}
I915_WRITE(FP0(pipe), fp);
+ crtc->config.dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915_powersave) {
I915_WRITE(FP1(pipe), fp2);
+ crtc->config.dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
I915_WRITE(FP1(pipe), fp);
+ crtc->config.dpll_hw_state.fp1 = fp;
}
}
@@ -4351,7 +4438,6 @@ static void vlv_update_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *encoder;
int pipe = crtc->pipe;
u32 dpll, mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
@@ -4407,7 +4493,7 @@ static void vlv_update_pll(struct intel_crtc *crtc)
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
- 0x005f0021);
+ 0x009f0003);
else
vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
0x00d0000f);
@@ -4440,10 +4526,6 @@ static void vlv_update_pll(struct intel_crtc *crtc)
vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
- for_each_encoder_on_crtc(dev, &crtc->base, encoder)
- if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder);
-
/* Enable DPIO clock input */
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
@@ -4451,17 +4533,11 @@ static void vlv_update_pll(struct intel_crtc *crtc)
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
dpll |= DPLL_VCO_ENABLE;
- I915_WRITE(DPLL(pipe), dpll);
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
- DRM_ERROR("DPLL %d failed to lock\n", pipe);
+ crtc->config.dpll_hw_state.dpll = dpll;
dpll_md = (crtc->config.pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
- I915_WRITE(DPLL_MD(pipe), dpll_md);
- POSTING_READ(DPLL_MD(pipe));
+ crtc->config.dpll_hw_state.dpll_md = dpll_md;
if (crtc->config.has_dp_encoder)
intel_dp_set_m_n(crtc);
@@ -4475,8 +4551,6 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *encoder;
- int pipe = crtc->pipe;
u32 dpll;
bool is_sdvo;
struct dpll *clock = &crtc->config.dpll;
@@ -4499,10 +4573,10 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
}
if (is_sdvo)
- dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |= DPLL_SDVO_HIGH_SPEED;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
- dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
if (IS_PINEVIEW(dev))
@@ -4538,35 +4612,16 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- for_each_encoder_on_crtc(dev, &crtc->base, encoder)
- if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder);
-
- if (crtc->config.has_dp_encoder)
- intel_dp_set_m_n(crtc);
-
- I915_WRITE(DPLL(pipe), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
- udelay(150);
+ crtc->config.dpll_hw_state.dpll = dpll;
if (INTEL_INFO(dev)->gen >= 4) {
u32 dpll_md = (crtc->config.pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
- I915_WRITE(DPLL_MD(pipe), dpll_md);
- } else {
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(DPLL(pipe), dpll);
+ crtc->config.dpll_hw_state.dpll_md = dpll_md;
}
+
+ if (crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(crtc);
}
static void i8xx_update_pll(struct intel_crtc *crtc,
@@ -4575,8 +4630,6 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *encoder;
- int pipe = crtc->pipe;
u32 dpll;
struct dpll *clock = &crtc->config.dpll;
@@ -4595,6 +4648,9 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
+ dpll |= DPLL_DVO_2X_MODE;
+
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
@@ -4602,26 +4658,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- for_each_encoder_on_crtc(dev, &crtc->base, encoder)
- if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder);
-
- I915_WRITE(DPLL(pipe), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(DPLL(pipe), dpll);
+ crtc->config.dpll_hw_state.dpll = dpll;
}
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
@@ -4727,6 +4764,27 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
}
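+/*
+ * Backfill the legacy crtc->mode from a read-out pipe config, so the
+ * fastboot path below can adopt the mode already programmed by the BIOS.
+ */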
+static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+
+ crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
+ crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
+ crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
+ crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
+
+ crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
+ crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
+ crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
+ crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
+
+ crtc->mode.flags = pipe_config->adjusted_mode.flags;
+
+ crtc->mode.clock = pipe_config->adjusted_mode.clock;
+}
+
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
@@ -4939,7 +4997,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
- pipe_config->cpu_transcoder = crtc->pipe;
+ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -4955,6 +5013,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier =
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
+ pipe_config->dpll_hw_state.dpll_md = tmp;
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
tmp = I915_READ(DPLL(crtc->pipe));
pipe_config->pixel_multiplier =
@@ -4966,6 +5025,16 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
* function. */
pipe_config->pixel_multiplier = 1;
}
+ pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
+ if (!IS_VALLEYVIEW(dev)) {
+ pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
+ pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
+ } else {
+ /* Mask out read-only status bits. */
+ pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
+ DPLL_PORTC_READY_MASK |
+ DPLL_PORTB_READY_MASK);
+ }
return true;
}
@@ -5119,74 +5188,37 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
BUG_ON(val != final);
}
-/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
-static void lpt_init_pch_refclk(struct drm_device *dev)
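+/*
+ * Pulse the FDI mPHY reset through SOUTH_CHICKEN2, waiting on the status
+ * bit for both the assert and the de-assert.
+ */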
+static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_encoder *encoder;
- bool has_vga = false;
- bool is_sdv = false;
- u32 tmp;
-
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- switch (encoder->type) {
- case INTEL_OUTPUT_ANALOG:
- has_vga = true;
- break;
- }
- }
-
- if (!has_vga)
- return;
-
- mutex_lock(&dev_priv->dpio_lock);
-
- /* XXX: Rip out SDV support once Haswell ships for real. */
- if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
- is_sdv = true;
-
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
- tmp &= ~SBI_SSCCTL_DISABLE;
- tmp |= SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
-
- udelay(24);
+ uint32_t tmp;
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
- tmp &= ~SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
- if (!is_sdv) {
- tmp = I915_READ(SOUTH_CHICKEN2);
- tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
- I915_WRITE(SOUTH_CHICKEN2, tmp);
+ if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ DRM_ERROR("FDI mPHY reset assert timeout\n");
- if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS, 100))
- DRM_ERROR("FDI mPHY reset assert timeout\n");
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
- tmp = I915_READ(SOUTH_CHICKEN2);
- tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
- I915_WRITE(SOUTH_CHICKEN2, tmp);
+ if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+ DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+}
- if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
- 100))
- DRM_ERROR("FDI mPHY reset de-assert timeout\n");
- }
+/* WaMPhyProgramming:hsw */
+static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
+{
+ uint32_t tmp;
tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
tmp |= (0x12 << 24);
intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
- if (is_sdv) {
- tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
- tmp |= 0x7FFF;
- intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
- }
-
tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
tmp |= (1 << 11);
intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
@@ -5195,24 +5227,6 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
tmp |= (1 << 11);
intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
- if (is_sdv) {
- tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
- tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
- intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
- tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
- intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
- tmp |= (0x3F << 8);
- intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
- tmp |= (0x3F << 8);
- intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
- }
-
tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
@@ -5221,17 +5235,15 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
- if (!is_sdv) {
- tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
- tmp &= ~(7 << 13);
- tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+ tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
- tmp &= ~(7 << 13);
- tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
- }
+ tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
tmp &= ~0xFF;
@@ -5253,34 +5265,120 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
tmp |= (0x1C << 16);
intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
- if (!is_sdv) {
- tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
- tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+ tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+}
+
+/* Implements 3 different sequences from BSpec chapter "Display iCLK
+ * Programming" based on the parameters passed:
+ * - Sequence to enable CLKOUT_DP
+ * - Sequence to enable CLKOUT_DP without spread
+ * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
+ */
+static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
+ bool with_fdi)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t reg, tmp;
+
+ if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
+ with_spread = true;
+ if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
+ with_fdi, "LP PCH doesn't have FDI\n"))
+ with_fdi = false;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_DISABLE;
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
- tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
- tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+ udelay(24);
- tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
- tmp &= ~(0xF << 28);
- tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+ if (with_spread) {
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
- tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
- tmp &= ~(0xF << 28);
- tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+ if (with_fdi) {
+ lpt_reset_fdi_mphy(dev_priv);
+ lpt_program_fdi_mphy(dev_priv);
+ }
}
- /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
- tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
- tmp |= SBI_DBUFF0_ENABLE;
- intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+ reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
+ SBI_GEN0 : SBI_DBUFF0;
+ tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+ intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
mutex_unlock(&dev_priv->dpio_lock);
}
+/* Sequence to disable CLKOUT_DP */
+static void lpt_disable_clkout_dp(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t reg, tmp;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
+ SBI_GEN0 : SBI_DBUFF0;
+ tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+ intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ if (!(tmp & SBI_SSCCTL_DISABLE)) {
+ if (!(tmp & SBI_SSCCTL_PATHALT)) {
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ udelay(32);
+ }
+ tmp |= SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ }
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+ bool has_vga = false;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ has_vga = true;
+ break;
+ }
+ }
+
+ if (has_vga)
+ lpt_enable_clkout_dp(dev, true, true);
+ else
+ lpt_disable_clkout_dp(dev);
+}
+
/*
* Initialize reference clocks when the driver loads
*/
@@ -5610,9 +5708,9 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
if (is_sdvo)
- dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |= DPLL_SDVO_HIGH_SPEED;
if (intel_crtc->config.has_dp_encoder)
- dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
@@ -5708,7 +5806,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
intel_crtc->config.dpll_hw_state.fp1 = fp;
- pll = intel_get_shared_dpll(intel_crtc, dpll, fp);
+ pll = intel_get_shared_dpll(intel_crtc);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(pipe));
@@ -5720,10 +5818,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder);
-
if (is_lvds && has_reduced_clock && i915_powersave)
intel_crtc->lowfreq_avail = true;
else
@@ -5732,23 +5826,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (intel_crtc->config.has_pch_encoder) {
pll = intel_crtc_to_shared_dpll(intel_crtc);
- I915_WRITE(PCH_DPLL(pll->id), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(PCH_DPLL(pll->id));
- udelay(150);
-
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(PCH_DPLL(pll->id), dpll);
-
- if (has_reduced_clock)
- I915_WRITE(PCH_FP1(pll->id), fp2);
- else
- I915_WRITE(PCH_FP1(pll->id), fp);
}
intel_set_pipe_timings(intel_crtc);
@@ -5820,7 +5897,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
- pipe_config->cpu_transcoder = crtc->pipe;
+ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -5838,12 +5915,9 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
ironlake_get_fdi_m_n_config(crtc, pipe_config);
- /* XXX: Can't properly read out the pch dpll pixel multiplier
- * since we don't have state tracking for pch clocks yet. */
- pipe_config->pixel_multiplier = 1;
-
if (HAS_PCH_IBX(dev_priv->dev)) {
- pipe_config->shared_dpll = crtc->pipe;
+ pipe_config->shared_dpll =
+ (enum intel_dpll_id) crtc->pipe;
} else {
tmp = I915_READ(PCH_DPLL_SEL);
if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
@@ -5856,6 +5930,11 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
WARN_ON(!pll->get_hw_state(dev_priv, pll,
&pipe_config->dpll_hw_state));
+
+ tmp = pipe_config->dpll_hw_state.dpll;
+ pipe_config->pixel_multiplier =
+ ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
+ >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
@@ -5867,6 +5946,142 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
return true;
}
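+/*
+ * Verify that nothing an LCPLL shutdown would clock-gate is still in use:
+ * no active CRTCs, power well, DDI PLLs, panel power or PWMs, and no
+ * unmasked interrupts beyond the PCH hotplug ones.
+ */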
+static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ struct intel_crtc *crtc;
+ unsigned long irqflags;
+ uint32_t val, pch_hpd_mask;
+
+ pch_hpd_mask = SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT;
+ if (!(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE))
+ pch_hpd_mask |= SDE_PORTD_HOTPLUG_CPT | SDE_CRT_HOTPLUG_CPT;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
+ WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
+ pipe_name(crtc->pipe));
+
+ WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
+ WARN(plls->spll_refcount, "SPLL enabled\n");
+ WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
+ WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
+ WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
+ WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+ "CPU PWM1 enabled\n");
+ WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ "CPU PWM2 enabled\n");
+ WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+ "PCH PWM1 enabled\n");
+ WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Utility pin enabled\n");
+ WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ val = I915_READ(DEIMR);
+ WARN((val & ~DE_PCH_EVENT_IVB) != val,
+ "Unexpected DEIMR bits enabled: 0x%x\n", val);
+ val = I915_READ(SDEIMR);
+ WARN((val & ~pch_hpd_mask) != val,
+ "Unexpected SDEIMR bits enabled: 0x%x\n", val);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+/*
+ * This function implements pieces of two sequences from BSpec:
+ * - Sequence for display software to disable LCPLL
+ * - Sequence for display software to allow package C8+
+ * The steps implemented here are just the steps that actually touch the LCPLL
+ * register. Callers should take care of disabling all the display engine
+ * functions, doing the mode unset, fixing interrupts, etc.
+ */
+void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+ bool switch_to_fclk, bool allow_power_down)
+{
+ uint32_t val;
+
+ assert_can_disable_lcpll(dev_priv);
+
+ val = I915_READ(LCPLL_CTL);
+
+ if (switch_to_fclk) {
+ val |= LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ DRM_ERROR("Switching to FCLK failed\n");
+
+ val = I915_READ(LCPLL_CTL);
+ }
+
+ val |= LCPLL_PLL_DISABLE;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+
+ if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
+ DRM_ERROR("LCPLL still locked\n");
+
+ val = I915_READ(D_COMP);
+ val |= D_COMP_COMP_DISABLE;
+ I915_WRITE(D_COMP, val);
+ POSTING_READ(D_COMP);
+ ndelay(100);
+
+ if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+ DRM_ERROR("D_COMP RCOMP still in progress\n");
+
+ if (allow_power_down) {
+ val = I915_READ(LCPLL_CTL);
+ val |= LCPLL_POWER_DOWN_ALLOW;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+ }
+}
+
+/*
+ * Fully restores LCPLL, disallowing power down and switching back to LCPLL
+ * source.
+ */
+void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ val = I915_READ(LCPLL_CTL);
+
+ if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
+ LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
+ return;
+
+ if (val & LCPLL_POWER_DOWN_ALLOW) {
+ val &= ~LCPLL_POWER_DOWN_ALLOW;
+ I915_WRITE(LCPLL_CTL, val);
+ }
+
+ val = I915_READ(D_COMP);
+ val |= D_COMP_COMP_FORCE;
+ val &= ~D_COMP_COMP_DISABLE;
+ I915_WRITE(D_COMP, val);
+ I915_READ(D_COMP);
+
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_PLL_DISABLE;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
+ DRM_ERROR("LCPLL not locked yet\n");
+
+ if (val & LCPLL_CD_SOURCE_FCLK) {
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ DRM_ERROR("Switching back to LCPLL failed\n");
+ }
+}
+
static void haswell_modeset_global_resources(struct drm_device *dev)
{
bool enable = false;
@@ -5935,7 +6150,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
enum intel_display_power_domain pfit_domain;
uint32_t tmp;
- pipe_config->cpu_transcoder = crtc->pipe;
+ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -6548,7 +6763,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
goto fail_unpin;
}
- addr = obj->gtt_offset;
+ addr = i915_gem_obj_ggtt_offset(obj);
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, obj,
@@ -6875,11 +7090,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
}
/* Returns the clock of the currently programmed mode of the given pipe. */
-static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
{
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ int pipe = pipe_config->cpu_transcoder;
u32 dpll = I915_READ(DPLL(pipe));
u32 fp;
intel_clock_t clock;
@@ -6918,7 +7134,8 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
default:
DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
- return 0;
+ pipe_config->adjusted_mode.clock = 0;
+ return;
}
if (IS_PINEVIEW(dev))
@@ -6955,12 +7172,55 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
}
}
- /* XXX: It would be nice to validate the clocks, but we can't reuse
- * i830PllIsValid() because it relies on the xf86_config connector
- * configuration being accurate, which it isn't necessarily.
+ pipe_config->adjusted_mode.clock = clock.dot *
+ pipe_config->pixel_multiplier;
+}
+
+static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ int link_freq, repeat;
+ u64 clock;
+ u32 link_m, link_n;
+
+ repeat = pipe_config->pixel_multiplier;
+
+ /*
+ * The calculation for the data clock is:
+ * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
+ * But we want to avoid losing precision if possible, so:
+ * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
+ *
+ * and the link clock is simpler:
+ * link_clock = (m * link_freq * repeat) / n
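+ *
+ * Worked example (illustrative register values): link_m = 0x20000 and
+ * link_n = 0x80000 give m/n = 1/4, so with link_freq = 270000 and
+ * repeat = 1 the readout below yields adjusted_mode.clock = 67500.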
+ */
+
+ /*
+ * We need to get the FDI or DP link clock here to derive
+ * the M/N dividers.
+ *
+ * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
+ * For DP, it's either 1.62GHz or 2.7GHz.
+ * We do our calculations in 10*MHz since we don't need much precision.
*/
+ if (pipe_config->has_pch_encoder)
+ link_freq = intel_fdi_link_freq(dev) * 10000;
+ else
+ link_freq = pipe_config->port_clock;
+
+ link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
+ link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
+
+ if (!link_m || !link_n)
+ return;
- return clock.dot;
+ clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
+ do_div(clock, link_n);
+
+ pipe_config->adjusted_mode.clock = clock;
}
/** Returns the currently programmed mode of the given pipe. */
@@ -6971,6 +7231,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct drm_display_mode *mode;
+ struct intel_crtc_config pipe_config;
int htot = I915_READ(HTOTAL(cpu_transcoder));
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
@@ -6980,7 +7241,18 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
if (!mode)
return NULL;
- mode->clock = intel_crtc_clock_get(dev, crtc);
+ /*
+ * Construct a pipe_config sufficient for getting the clock info
+ * back out of crtc_clock_get.
+ *
+ * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
+ * to use a real value here instead.
+ */
+ pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
+ pipe_config.pixel_multiplier = 1;
+ i9xx_crtc_clock_get(intel_crtc, &pipe_config);
+
+ mode->clock = pipe_config.adjusted_mode.clock;
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
@@ -7263,7 +7535,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc);
@@ -7304,7 +7576,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP);
intel_mark_page_flip_active(intel_crtc);
@@ -7344,7 +7616,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring,
- (obj->gtt_offset + intel_crtc->dspaddr_offset) |
+ (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -7387,7 +7659,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
- intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -7452,7 +7724,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
- intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc);
@@ -7806,7 +8078,8 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
drm_mode_copy(&pipe_config->adjusted_mode, mode);
drm_mode_copy(&pipe_config->requested_mode, mode);
- pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe;
+ pipe_config->cpu_transcoder =
+ (enum transcoder) to_intel_crtc(crtc)->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
/* Compute a starting value for pipe_config->pipe_bpp taking the source
@@ -8041,6 +8314,28 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
}
+static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
+ struct intel_crtc_config *new)
+{
+ int clock1, clock2, diff;
+
+ clock1 = cur->adjusted_mode.clock;
+ clock2 = new->adjusted_mode.clock;
+
+ if (clock1 == clock2)
+ return true;
+
+ if (!clock1 || !clock2)
+ return false;
+
+ diff = abs(clock1 - clock2);
+
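+ /*
+ * Treat the clocks as matching when they differ by less than 5% of
+ * their sum (~10% of their average): e.g. 100000 vs 104000 gives
+ * (4000 + 204000) * 100 / 204000 = 101, which is below 105.
+ */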
+ if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
+ return true;
+
+ return false;
+}
+
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
list_for_each_entry((intel_crtc), \
&(dev)->mode_config.crtc_list, \
@@ -8072,7 +8367,7 @@ intel_pipe_config_compare(struct drm_device *dev,
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
- DRM_ERROR("mismatch in " #name " " \
+ DRM_ERROR("mismatch in " #name "(" #mask ") " \
"(expected %i, found %i)\n", \
current_config->name & (mask), \
pipe_config->name & (mask)); \
@@ -8106,8 +8401,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
- if (!HAS_PCH_SPLIT(dev))
- PIPE_CONF_CHECK_I(pixel_multiplier);
+ PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
@@ -8138,6 +8432,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
+ PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
@@ -8146,6 +8441,15 @@ intel_pipe_config_compare(struct drm_device *dev,
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_QUIRK
+ if (!IS_HASWELL(dev)) {
+ if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
+ DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
+ current_config->adjusted_mode.clock,
+ pipe_config->adjusted_mode.clock);
+ return false;
+ }
+ }
+
return true;
}
@@ -8275,6 +8579,9 @@ check_crtc_state(struct drm_device *dev)
encoder->get_config(encoder, &pipe_config);
}
+ if (dev_priv->display.get_clock)
+ dev_priv->display.get_clock(crtc, &pipe_config);
+
WARN(crtc->active != active,
"crtc active state doesn't match with hw state "
"(expected %i, found %i)\n", crtc->active, active);
@@ -8571,8 +8878,16 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
} else if (set->crtc->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {
- DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
- config->mode_changed = true;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(set->crtc);
+
+ if (intel_crtc->active && i915_fastboot) {
+ DRM_DEBUG_KMS("crtc has no fb, will flip\n");
+ config->fb_changed = true;
+ } else {
+ DRM_DEBUG_KMS("inactive crtc, full mode set\n");
+ config->mode_changed = true;
+ }
} else if (set->fb == NULL) {
config->mode_changed = true;
} else if (set->fb->pixel_format !=
@@ -8802,19 +9117,32 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
return val & DPLL_VCO_ENABLE;
}
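+/*
+ * Program the FP dividers while the PLL is still disabled; the full DPLL
+ * control word (including VCO enable) is written later in ->enable().
+ */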
+static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
+ I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
+}
+
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- uint32_t reg, val;
-
/* PCH refclock must be enabled first */
assert_pch_refclk_enabled(dev_priv);
- reg = PCH_DPLL(pll->id);
- val = I915_READ(reg);
- val |= DPLL_VCO_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(PCH_DPLL(pll->id));
+ udelay(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+ POSTING_READ(PCH_DPLL(pll->id));
udelay(200);
}
@@ -8823,7 +9151,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *crtc;
- uint32_t reg, val;
/* Make sure no transcoder isn't still depending on us. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
@@ -8831,11 +9158,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
}
- reg = PCH_DPLL(pll->id);
- val = I915_READ(reg);
- val &= ~DPLL_VCO_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ I915_WRITE(PCH_DPLL(pll->id), 0);
+ POSTING_READ(PCH_DPLL(pll->id));
udelay(200);
}
@@ -8854,6 +9178,7 @@ static void ibx_pch_dpll_init(struct drm_device *dev)
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
dev_priv->shared_dplls[i].id = i;
dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
+ dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
dev_priv->shared_dplls[i].get_hw_state =
@@ -9270,6 +9595,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_plane = ironlake_update_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+ dev_priv->display.get_clock = ironlake_crtc_clock_get;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -9277,6 +9603,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_plane = ironlake_update_plane;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_clock = i9xx_crtc_clock_get;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9284,6 +9611,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_plane = i9xx_update_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_clock = i9xx_crtc_clock_get;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9301,9 +9629,12 @@ static void intel_init_display(struct drm_device *dev)
else if (IS_I915G(dev))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
- else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
+ else if (IS_I945GM(dev) || IS_845G(dev))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
+ else if (IS_PINEVIEW(dev))
+ dev_priv->display.get_display_clock_speed =
+ pnv_get_display_clock_speed;
else if (IS_I915GM(dev))
dev_priv->display.get_display_clock_speed =
i915gm_get_display_clock_speed;
@@ -9584,7 +9915,7 @@ void intel_modeset_init(struct drm_device *dev)
INTEL_INFO(dev)->num_pipes,
INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
- for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+ for_each_pipe(i) {
intel_crtc_init(dev, i);
for (j = 0; j < dev_priv->num_plane; j++) {
ret = intel_plane_init(dev, i, j);
@@ -9860,6 +10191,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pipe);
}
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ if (!crtc->active)
+ continue;
+ if (dev_priv->display.get_clock)
+ dev_priv->display.get_clock(crtc,
+ &crtc->config);
+ }
+
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (connector->get_hw_state(connector)) {
@@ -9891,6 +10231,22 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
intel_modeset_readout_hw_state(dev);
+ /*
+ * Now that we have the config, copy it to each CRTC struct
+ * Note that this could go away if we move to using crtc_config
+ * checking everywhere.
+ */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ if (crtc->active && i915_fastboot) {
+ intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
+
+ DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
+ crtc->base.base.id);
+ drm_mode_debug_printmodeline(&crtc->base.mode);
+ }
+ }
+
/* HW state is read out, now we need to sanitize this mess. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
@@ -10033,9 +10389,6 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
return 0;
}
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-
struct intel_display_error_state {
u32 power_well_driver;
@@ -10127,8 +10480,7 @@ intel_display_capture_error_state(struct drm_device *dev)
* well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
* prevent the next I915_WRITE from detecting it and printing an error
* message. */
- if (HAS_POWER_WELL(dev))
- I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ intel_uncore_clear_errors(dev);
return error;
}
@@ -10179,4 +10531,3 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " BASE: %08x\n", error->cursor[i].base);
}
}
-#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 26e162bb3a51..d0c3f9b08387 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -276,29 +276,13 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
return status;
}
-static int
-intel_dp_aux_ch(struct intel_dp *intel_dp,
- uint8_t *send, int send_bytes,
- uint8_t *recv, int recv_size)
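+/*
+ * Returns the index'th AUX channel bit-clock divider to try, or 0 once the
+ * platform's candidates are exhausted; only non-ULT HSW currently lists
+ * more than one.
+ */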
+static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
+ int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
- uint32_t ch_data = ch_ctl + 4;
- int i, ret, recv_bytes;
- uint32_t status;
- uint32_t aux_clock_divider;
- int try, precharge;
- bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
- /* dp aux is extremely sensitive to irq latency, hence request the
- * lowest possible wakeup latency and so prevent the cpu from going into
- * deep sleep states.
- */
- pm_qos_update_request(&dev_priv->pm_qos, 0);
-
- intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
@@ -307,23 +291,53 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (IS_VALLEYVIEW(dev)) {
- aux_clock_divider = 100;
+ return index ? 0 : 100;
} else if (intel_dig_port->port == PORT_A) {
+ if (index)
+ return 0;
if (HAS_DDI(dev))
- aux_clock_divider = DIV_ROUND_CLOSEST(
- intel_ddi_get_cdclk_freq(dev_priv), 2000);
+ return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
else if (IS_GEN6(dev) || IS_GEN7(dev))
- aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
+ return 200; /* SNB & IVB eDP input clock at 400MHz */
else
- aux_clock_divider = 225; /* eDP input clock at 450Mhz */
+ return 225; /* eDP input clock at 450MHz */
} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
/* Workaround for non-ULT HSW */
- aux_clock_divider = 74;
+ switch (index) {
+ case 0: return 63;
+ case 1: return 72;
+ default: return 0;
+ }
} else if (HAS_PCH_SPLIT(dev)) {
- aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+ return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
} else {
- aux_clock_divider = intel_hrawclk(dev) / 2;
+ return index ? 0 : intel_hrawclk(dev) / 2;
}
+}
+
+static int
+intel_dp_aux_ch(struct intel_dp *intel_dp,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_size)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+ uint32_t ch_data = ch_ctl + 4;
+ uint32_t aux_clock_divider;
+ int i, ret, recv_bytes;
+ uint32_t status;
+ int try, precharge, clock = 0;
+ bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+
+ /* dp aux is extremely sensitive to irq latency, hence request the
+ * lowest possible wakeup latency and so prevent the cpu from going into
+ * deep sleep states.
+ */
+ pm_qos_update_request(&dev_priv->pm_qos, 0);
+
+ intel_dp_check_edp(intel_dp);
if (IS_GEN6(dev))
precharge = 3;
@@ -345,37 +359,41 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
goto out;
}
- /* Must try at least 3 times according to DP spec */
- for (try = 0; try < 5; try++) {
- /* Load the send data into the aux channel data registers */
- for (i = 0; i < send_bytes; i += 4)
- I915_WRITE(ch_data + i,
- pack_aux(send + i, send_bytes - i));
-
- /* Send the command and wait for it to complete */
- I915_WRITE(ch_ctl,
- DP_AUX_CH_CTL_SEND_BUSY |
- (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
- DP_AUX_CH_CTL_TIME_OUT_400us |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
-
- status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
-
- /* Clear done status and any errors */
- I915_WRITE(ch_ctl,
- status |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
-
- if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR))
- continue;
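+ /* Walk the platform's AUX clock dividers in turn, retrying the
+ * transfer at each rate; get_aux_clock_divider() returns 0 once the
+ * candidates are exhausted. */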
+ while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
+ /* Must try at least 3 times according to DP spec */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4)
+ I915_WRITE(ch_data + i,
+ pack_aux(send + i, send_bytes - i));
+
+ /* Send the command and wait for it to complete */
+ I915_WRITE(ch_ctl,
+ DP_AUX_CH_CTL_SEND_BUSY |
+ (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
+
+ /* Clear done status and any errors */
+ I915_WRITE(ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR))
+ continue;
+ if (status & DP_AUX_CH_CTL_DONE)
+ break;
+ }
if (status & DP_AUX_CH_CTL_DONE)
break;
}
@@ -710,8 +728,11 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = pipe_config->pipe_bpp;
- if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
+ if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) {
+ DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
+ dev_priv->vbt.edp_bpp);
bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
+ }
for (; bpp >= 6*3; bpp -= 2*3) {
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
@@ -1360,6 +1381,275 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
pipe_config->adjusted_mode.flags |= flags;
+
+ if (dp_to_dig_port(intel_dp)->port == PORT_A) {
+ if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
+ pipe_config->port_clock = 162000;
+ else
+ pipe_config->port_clock = 270000;
+ }
+}
+
+static bool is_edp_psr(struct intel_dp *intel_dp)
+{
+ return is_edp(intel_dp) &&
+ intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+}
+
+static bool intel_edp_is_psr_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_HASWELL(dev))
+ return false;
+
+ return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
+}
+
+static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
+ struct edp_vsc_psr *vsc_psr)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
+ u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
+ uint32_t *data = (uint32_t *) vsc_psr;
+ unsigned int i;
+
+ /* As per BSpec (Pipe Video Data Island Packet), the video DIP must be
+ disabled before the video DIP data buffer registers are programmed
+ with the updated packet. */
+ I915_WRITE(ctl_reg, 0);
+ POSTING_READ(ctl_reg);
+
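+ /* Copy the VSC packet into the DIP data buffer and zero-pad the
+ * remainder. */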
+ for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
+ if (i < sizeof(struct edp_vsc_psr))
+ I915_WRITE(data_reg + i, *data++);
+ else
+ I915_WRITE(data_reg + i, 0);
+ }
+
+ I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
+ POSTING_READ(ctl_reg);
+}
+
+static void intel_edp_psr_setup(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct edp_vsc_psr psr_vsc;
+
+ if (intel_dp->psr_setup_done)
+ return;
+
+ /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
+ intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
+
+ /* Avoid continuous PSR exit by masking memup and hpd */
+ I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD);
+
+ intel_dp->psr_setup_done = true;
+}
+
+static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
+ int precharge = 0x3;
+ int msg_size = 5; /* Header(4) + Message(1) */
+
+ /* Enable PSR in sink */
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
+ intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE &
+ ~DP_PSR_MAIN_LINK_ACTIVE);
+ else
+ intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE |
+ DP_PSR_MAIN_LINK_ACTIVE);
+
+ /* Setup AUX registers */
+ I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
+ I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
+ I915_WRITE(EDP_PSR_AUX_CTL,
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+}
+
+static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t max_sleep_time = 0x1f;
+ uint32_t idle_frames = 1;
+ uint32_t val = 0x0;
+
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
+ val |= EDP_PSR_LINK_STANDBY;
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
+ val |= EDP_PSR_TP1_TIME_0us;
+ val |= EDP_PSR_SKIP_AUX_EXIT;
+ } else
+ val |= EDP_PSR_LINK_DISABLE;
+
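+ /* Longest allowed sleep period, enter PSR after a single idle frame. */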
+ I915_WRITE(EDP_PSR_CTL, val |
+ EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
+ max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
+ idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
+ EDP_PSR_ENABLE);
+}
+
+static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
+ struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+
+ if (!IS_HASWELL(dev)) {
+ DRM_DEBUG_KMS("PSR not supported on this platform\n");
+ dev_priv->no_psr_reason = PSR_NO_SOURCE;
+ return false;
+ }
+
+ if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
+ (dig_port->port != PORT_A)) {
+ DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
+ dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
+ return false;
+ }
+
+ if (!is_edp_psr(intel_dp)) {
+ DRM_DEBUG_KMS("PSR not supported by this panel\n");
+ dev_priv->no_psr_reason = PSR_NO_SINK;
+ return false;
+ }
+
+ if (!i915_enable_psr) {
+ DRM_DEBUG_KMS("PSR disable by flag\n");
+ dev_priv->no_psr_reason = PSR_MODULE_PARAM;
+ return false;
+ }
+
+ crtc = dig_port->base.base.crtc;
+ if (crtc == NULL) {
+ DRM_DEBUG_KMS("crtc not active for PSR\n");
+ dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
+ return false;
+ }
+
+ intel_crtc = to_intel_crtc(crtc);
+ if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
+ DRM_DEBUG_KMS("crtc not active for PSR\n");
+ dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
+ return false;
+ }
+
+ obj = to_intel_framebuffer(crtc->fb)->obj;
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
+ dev_priv->no_psr_reason = PSR_NOT_TILED;
+ return false;
+ }
+
+ if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
+ DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
+ dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
+ return false;
+ }
+
+ if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
+ S3D_ENABLE) {
+ DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+ dev_priv->no_psr_reason = PSR_S3D_ENABLED;
+ return false;
+ }
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
+ dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
+ return false;
+ }
+
+ return true;
+}
+
+static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ if (!intel_edp_psr_match_conditions(intel_dp) ||
+ intel_edp_is_psr_enabled(dev))
+ return;
+
+ /* Setup PSR once */
+ intel_edp_psr_setup(intel_dp);
+
+ /* Enable PSR on the panel */
+ intel_edp_psr_enable_sink(intel_dp);
+
+ /* Enable PSR on the host */
+ intel_edp_psr_enable_source(intel_dp);
+}
+
+void intel_edp_psr_enable(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ if (intel_edp_psr_match_conditions(intel_dp) &&
+ !intel_edp_is_psr_enabled(dev))
+ intel_edp_psr_do_enable(intel_dp);
+}
+
+void intel_edp_psr_disable(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!intel_edp_is_psr_enabled(dev))
+ return;
+
+ I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
+
+ /* Wait till PSR is idle */
+ if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+ DRM_ERROR("Timed out waiting for PSR Idle State\n");
+}
+
+void intel_edp_psr_update(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_dp *intel_dp = NULL;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
+ if (encoder->type == INTEL_OUTPUT_EDP) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (!is_edp_psr(intel_dp))
+ return;
+
+ if (!intel_edp_psr_match_conditions(intel_dp))
+ intel_edp_psr_disable(intel_dp);
+ else if (!intel_edp_is_psr_enabled(dev))
+ intel_edp_psr_do_enable(intel_dp);
+ }
}
static void intel_disable_dp(struct intel_encoder *encoder)
@@ -2275,6 +2565,13 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] == 0)
return false; /* DPCD not present */
+ /* Check if the panel supports PSR */
+ memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
+ intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
+ intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+ if (is_edp_psr(intel_dp))
+ DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
@@ -2542,6 +2839,9 @@ intel_dp_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
struct edid *edid = NULL;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
intel_dp->has_audio = false;
if (HAS_PCH_SPLIT(dev))
@@ -3166,6 +3466,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
error, port_name(port));
+ intel_dp->psr_setup_done = false;
+
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
i2c_del_adapter(&intel_dp->adapter);
if (is_edp(intel_dp)) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b7d6e09456ce..d9f50e368fe9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -487,6 +487,7 @@ struct intel_dp {
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+ uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
@@ -498,6 +499,7 @@ struct intel_dp {
int backlight_off_delay;
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
+ bool psr_setup_done;
struct intel_connector *attached_connector;
};
@@ -549,13 +551,6 @@ struct intel_unpin_work {
bool enable_stall_check;
};
-struct intel_fbc_work {
- struct delayed_work work;
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
- int interval;
-};
-
int intel_pch_rawclk(struct drm_device *dev);
int intel_connector_update_modes(struct drm_connector *connector,
@@ -747,6 +742,22 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
extern void intel_fb_output_poll_changed(struct drm_device *dev);
extern void intel_fb_restore_mode(struct drm_device *dev);
+struct intel_shared_dpll *
+intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
+
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ bool state);
+#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
+#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state);
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state);
+#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
+#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
@@ -780,7 +791,6 @@ extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
extern void intel_init_pm(struct drm_device *dev);
/* FBC */
extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern void intel_update_fbc(struct drm_device *dev);
/* IPS */
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -796,7 +806,6 @@ extern void intel_init_power_well(struct drm_device *dev);
extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_enable_gt_powersave(struct drm_device *dev);
extern void intel_disable_gt_powersave(struct drm_device *dev);
-extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
extern void ironlake_teardown_rc6(struct drm_device *dev);
extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -825,4 +834,11 @@ extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
+extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
+extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
+extern void intel_edp_psr_update(struct drm_device *dev);
+extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+ bool switch_to_fclk, bool allow_power_down);
+extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
+
#endif /* __INTEL_DRV_H__ */
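The assert_* declarations added above follow a recurring i915 shape: one function takes a bool state and warns when the hardware readout disagrees, and two thin macros spell out the enabled/disabled cases. A self-contained sketch of the pattern; assert_widget() and read_hw_enabled() are illustrative names, not the driver's:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the assert_<thing>(..., bool state) pattern above;
 * read_hw_enabled() stands in for a real register readout. */
static bool read_hw_enabled(void) { return true; }

static void assert_widget(bool state)
{
    bool cur = read_hw_enabled();
    if (cur != state)           /* i915 uses WARN(); printf here */
        fprintf(stderr, "widget state mismatch (expected %s, got %s)\n",
                state ? "on" : "off", cur ? "on" : "off");
}

#define assert_widget_enabled()  assert_widget(true)
#define assert_widget_disabled() assert_widget(false)

int main(void)
{
    assert_widget_enabled();    /* silent when hardware agrees */
    assert_widget_disabled();   /* warns: widget is actually on */
    return 0;
}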
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index eb2020eb2b7e..8b4ad27791f3 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -283,7 +283,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
int pipe = intel_crtc->pipe;
u32 dvo_val;
u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
- int dpll_reg = DPLL(pipe);
switch (dvo_reg) {
case DVOA:
@@ -314,8 +313,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
- I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
-
/*I915_WRITE(DVOB_SRCDIM,
(adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
@@ -335,6 +332,8 @@ static enum drm_connector_status
intel_dvo_detect(struct drm_connector *connector, bool force)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index dff669e2387f..f3c97e05b0d8 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
- info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
+ info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
info->fix.smem_len = size;
info->screen_base =
- ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+ ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
size);
if (!info->screen_base) {
ret = -ENOSPC;
@@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+ DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
fb->width, fb->height,
- obj->gtt_offset, obj);
+ i915_gem_obj_ggtt_offset(obj), obj);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2fd3fd5b943e..044d11d05944 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -879,6 +879,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 61348eae2f04..2110df24454b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -122,17 +122,25 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
-static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
+static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *fixed_mode =
lvds_encoder->attached_connector->base.panel.fixed_mode;
- int pipe = intel_crtc->pipe;
+ int pipe = crtc->pipe;
u32 temp;
+ if (HAS_PCH_SPLIT(dev)) {
+ assert_fdi_rx_pll_disabled(dev_priv, pipe);
+ assert_shared_dpll_disabled(dev_priv,
+ intel_crtc_to_shared_dpll(crtc));
+ } else {
+ assert_pll_disabled(dev_priv, pipe);
+ }
+
temp = I915_READ(lvds_encoder->reg);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
@@ -149,7 +157,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
/* set the corresponding LVDS_BORDER bit */
temp &= ~LVDS_BORDER_ENABLE;
- temp |= intel_crtc->config.gmch_pfit.lvds_border_bits;
+ temp |= crtc->config.gmch_pfit.lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
@@ -169,8 +177,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
if (INTEL_INFO(dev)->gen == 4) {
/* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels. */
- if (intel_crtc->config.dither &&
- intel_crtc->config.pipe_bpp == 18)
+ if (crtc->config.dither && crtc->config.pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
@@ -336,6 +343,9 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
enum drm_connector_status status;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
status = intel_panel_detect(dev);
if (status != connector_status_unknown)
return status;
@@ -959,7 +969,7 @@ void intel_lvds_init(struct drm_device *dev)
DRM_MODE_ENCODER_LVDS);
intel_encoder->enable = intel_enable_lvds;
- intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
+ intel_encoder->pre_enable = intel_pre_enable_lvds;
intel_encoder->compute_config = intel_lvds_compute_config;
intel_encoder->disable = intel_disable_lvds;
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a3698812e9c7..9ec5a4e12af2 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_wc(dev_priv->gtt.mappable,
- overlay->reg_bo->gtt_offset);
+ i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs;
}
@@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
swidth = params->src_w;
swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
sheight = params->src_h;
- iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
+ iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale);
swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16;
- iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
- iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
+ iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
+ iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
ostride |= params->stride_UV << 16;
}
@@ -1333,7 +1333,9 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->dev = dev;
- reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
+ reg_bo = NULL;
+ if (!OVERLAY_NEEDS_PHYSICAL(dev))
+ reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
if (reg_bo == NULL)
reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
if (reg_bo == NULL)
@@ -1355,7 +1357,7 @@ void intel_setup_overlay(struct drm_device *dev)
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
}
- overlay->flip_addr = reg_bo->gtt_offset;
+ overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) {
@@ -1412,9 +1414,6 @@ void intel_cleanup_overlay(struct drm_device *dev)
kfree(dev_priv->overlay);
}
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-
struct intel_overlay_error_state {
struct overlay_registers regs;
unsigned long base;
@@ -1435,7 +1434,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
- overlay->reg_bo->gtt_offset);
+ i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs;
}
@@ -1468,7 +1467,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
else
- error->base = overlay->reg_bo->gtt_offset;
+ error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
@@ -1537,4 +1536,3 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
P(UVSCALEV);
#undef P
}
-#endif
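Every obj->gtt_offset read in this file (and the others in this commit) is replaced by the i915_gem_obj_ggtt_offset(obj) accessor. The stand-in below sketches the shape of such a helper: callers stop poking a cached field and go through one lookup that can later resolve per-address-space placement. The types here are invented for illustration, not the driver's:

#include <stdio.h>

/* Simplified stand-in: the object records a node for the global GTT and
 * the accessor resolves the offset on demand through one path. */
struct vma_node { unsigned long start; };
struct gem_object { struct vma_node ggtt_node; };

static unsigned long obj_ggtt_offset(const struct gem_object *obj)
{
    return obj->ggtt_node.start;    /* one authoritative lookup */
}

int main(void)
{
    struct gem_object obj = { .ggtt_node = { .start = 0x10000 } };
    printf("ggtt offset: 0x%lx\n", obj_ggtt_offset(&obj));
    return 0;
}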
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 51a2a60f5bfc..0a5ba92a4b12 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,8 +30,7 @@
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
-
-#define FORCEWAKE_ACK_TIMEOUT_MS 2
+#include <drm/i915_powerwell.h>
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
* framebuffer contents in-memory, aiming at reducing the required bandwidth
@@ -86,7 +85,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
int plane, i;
u32 fbc_ctl, fbc_ctl2;
- cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+ cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
@@ -217,7 +216,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+ I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -274,7 +273,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
+ I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
IVB_DPFC_CTL_FENCE_EN |
@@ -325,7 +324,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
struct drm_i915_private *dev_priv = dev->dev_private;
mutex_lock(&dev->struct_mutex);
- if (work == dev_priv->fbc_work) {
+ if (work == dev_priv->fbc.fbc_work) {
/* Double check that we haven't switched fb without cancelling
* the prior work.
*/
@@ -333,12 +332,12 @@ static void intel_fbc_work_fn(struct work_struct *__work)
dev_priv->display.enable_fbc(work->crtc,
work->interval);
- dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
- dev_priv->cfb_fb = work->crtc->fb->base.id;
- dev_priv->cfb_y = work->crtc->y;
+ dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->fbc.fb_id = work->crtc->fb->base.id;
+ dev_priv->fbc.y = work->crtc->y;
}
- dev_priv->fbc_work = NULL;
+ dev_priv->fbc.fbc_work = NULL;
}
mutex_unlock(&dev->struct_mutex);
@@ -347,28 +346,28 @@ static void intel_fbc_work_fn(struct work_struct *__work)
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
- if (dev_priv->fbc_work == NULL)
+ if (dev_priv->fbc.fbc_work == NULL)
return;
DRM_DEBUG_KMS("cancelling pending FBC enable\n");
/* Synchronisation is provided by struct_mutex and checking of
- * dev_priv->fbc_work, so we can perform the cancellation
+ * dev_priv->fbc.fbc_work, so we can perform the cancellation
* entirely asynchronously.
*/
- if (cancel_delayed_work(&dev_priv->fbc_work->work))
+ if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
/* tasklet was killed before being run, clean up */
- kfree(dev_priv->fbc_work);
+ kfree(dev_priv->fbc.fbc_work);
/* Mark the work as no longer wanted so that if it does
* wake-up (because the work was already running and waiting
* for our mutex), it will discover that it is no longer
* necessary to run.
*/
- dev_priv->fbc_work = NULL;
+ dev_priv->fbc.fbc_work = NULL;
}
-void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct intel_fbc_work *work;
struct drm_device *dev = crtc->dev;
@@ -381,6 +380,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL) {
+ DRM_ERROR("Failed to allocate FBC work structure\n");
dev_priv->display.enable_fbc(crtc, interval);
return;
}
@@ -390,9 +390,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
work->interval = interval;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
- dev_priv->fbc_work = work;
-
- DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+ dev_priv->fbc.fbc_work = work;
/* Delay the actual enabling to let pageflipping cease and the
* display to settle before starting the compression. Note that
@@ -404,6 +402,8 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
* following the termination of the page-flipping sequence
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
+ *
+ * WaFbcWaitForVBlankBeforeEnable:ilk,snb
*/
schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
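The comment above carries the reasoning: the enable is pushed onto delayed work so page flipping can cease and the display settle (and a vblank pass) before compression starts, while the cancel path keeps a stale work item from enabling FBC against an old framebuffer. A condensed sketch of that schedule/cancel pairing, with placeholder names and free() standing in for the kernel's work lifetime handling:

#include <stdlib.h>

/* Condensed sketch of the fbc_work scheduling above; schedule_delayed()
 * and the ~50 ms delay are placeholders for kernel delayed work. */
struct fbc_work { int interval; };
static struct fbc_work *pending;    /* mirrors dev_priv->fbc.fbc_work */

static void schedule_delayed(struct fbc_work *w) { (void)w; /* ... */ }

static void cancel_fbc_work(void)
{
    if (!pending)
        return;
    free(pending);              /* work never ran; reclaim it */
    pending = NULL;             /* a racing worker sees this and bails */
}

static void enable_fbc(int interval)
{
    struct fbc_work *w;

    cancel_fbc_work();          /* never leave two enables in flight */
    w = calloc(1, sizeof(*w));
    if (!w)
        return;                 /* real code falls back to a synchronous enable */
    w->interval = interval;
    pending = w;
    schedule_delayed(w);        /* real code waits ~50 ms for flips to settle */
}

int main(void) { enable_fbc(1); cancel_fbc_work(); return 0; }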
@@ -418,7 +418,7 @@ void intel_disable_fbc(struct drm_device *dev)
return;
dev_priv->display.disable_fbc(dev);
- dev_priv->cfb_plane = -1;
+ dev_priv->fbc.plane = -1;
}
/**
@@ -448,7 +448,6 @@ void intel_update_fbc(struct drm_device *dev)
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
- int enable_fbc;
unsigned int max_hdisplay, max_vdisplay;
if (!i915_powersave)
@@ -471,7 +470,8 @@ void intel_update_fbc(struct drm_device *dev)
!to_intel_crtc(tmp_crtc)->primary_disabled) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+ dev_priv->fbc.no_fbc_reason =
+ FBC_MULTIPLE_PIPES;
goto out_disable;
}
crtc = tmp_crtc;
@@ -480,7 +480,7 @@ void intel_update_fbc(struct drm_device *dev)
if (!crtc || crtc->fb == NULL) {
DRM_DEBUG_KMS("no output, disabling\n");
- dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+ dev_priv->fbc.no_fbc_reason = FBC_NO_OUTPUT;
goto out_disable;
}
@@ -489,23 +489,22 @@ void intel_update_fbc(struct drm_device *dev)
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
- enable_fbc = i915_enable_fbc;
- if (enable_fbc < 0) {
- DRM_DEBUG_KMS("fbc set to per-chip default\n");
- enable_fbc = 1;
- if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
- enable_fbc = 0;
+ if (i915_enable_fbc < 0 &&
+ INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
+ DRM_DEBUG_KMS("disabled per chip default\n");
+ dev_priv->fbc.no_fbc_reason = FBC_CHIP_DEFAULT;
+ goto out_disable;
}
- if (!enable_fbc) {
+ if (!i915_enable_fbc) {
DRM_DEBUG_KMS("fbc disabled per module param\n");
- dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+ dev_priv->fbc.no_fbc_reason = FBC_MODULE_PARAM;
goto out_disable;
}
if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
(crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
DRM_DEBUG_KMS("mode incompatible with compression, "
"disabling\n");
- dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+ dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED_MODE;
goto out_disable;
}
@@ -519,13 +518,13 @@ void intel_update_fbc(struct drm_device *dev)
if ((crtc->mode.hdisplay > max_hdisplay) ||
(crtc->mode.vdisplay > max_vdisplay)) {
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
- dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+ dev_priv->fbc.no_fbc_reason = FBC_MODE_TOO_LARGE;
goto out_disable;
}
if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
intel_crtc->plane != 0) {
DRM_DEBUG_KMS("plane not 0, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+ dev_priv->fbc.no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
@@ -535,7 +534,7 @@ void intel_update_fbc(struct drm_device *dev)
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_NOT_TILED;
+ dev_priv->fbc.no_fbc_reason = FBC_NOT_TILED;
goto out_disable;
}
@@ -545,7 +544,7 @@ void intel_update_fbc(struct drm_device *dev)
if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ dev_priv->fbc.no_fbc_reason = FBC_STOLEN_TOO_SMALL;
goto out_disable;
}
@@ -554,9 +553,9 @@ void intel_update_fbc(struct drm_device *dev)
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
- if (dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_fb == fb->base.id &&
- dev_priv->cfb_y == crtc->y)
+ if (dev_priv->fbc.plane == intel_crtc->plane &&
+ dev_priv->fbc.fb_id == fb->base.id &&
+ dev_priv->fbc.y == crtc->y)
return;
if (intel_fbc_enabled(dev)) {
@@ -2468,8 +2467,8 @@ static void hsw_compute_wm_results(struct drm_device *dev,
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
* case both are at the same level. Prefer r1 in case they're the same. */
-struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
- struct hsw_wm_values *r2)
+static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
+ struct hsw_wm_values *r2)
{
int i, val_r1 = 0, val_r2 = 0;
@@ -3076,19 +3075,12 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
*/
static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
{
- unsigned long timeout = jiffies + msecs_to_jiffies(10);
u32 pval;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- do {
- pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- if (time_after(jiffies, timeout)) {
- DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
- break;
- }
- udelay(10);
- } while (pval & 1);
+ if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
+ DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
pval >>= 8;
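The hunk above swaps an open-coded jiffies/udelay loop for the driver's wait_for() macro, which polls a condition until it holds or a timeout in milliseconds expires, evaluating nonzero on timeout. A self-contained illustration of that polling idiom follows; it shows the shape of such a macro, not i915's exact definition:

#include <stdio.h>
#include <time.h>

/* Illustrative poll-with-timeout macro in the spirit of wait_for();
 * evaluates to 0 once COND holds, nonzero after TIMEOUT_MS elapse. */
#define WAIT_FOR(COND, TIMEOUT_MS) ({                                   \
    struct timespec ts;                                                 \
    clock_gettime(CLOCK_MONOTONIC, &ts);                                \
    long long deadline = ts.tv_sec * 1000LL + ts.tv_nsec / 1000000      \
                         + (TIMEOUT_MS);                                \
    int timed_out = 0;                                                  \
    while (!(COND)) {                                                   \
        clock_gettime(CLOCK_MONOTONIC, &ts);                            \
        if (ts.tv_sec * 1000LL + ts.tv_nsec / 1000000 > deadline) {     \
            timed_out = !(COND);        /* final re-check */            \
            break;                                                      \
        }                                                               \
    }                                                                   \
    timed_out;                                                          \
})

static int reg;                         /* stand-in for a status register */
static int read_status(void) { return ++reg; }

int main(void)
{
    if (WAIT_FOR((read_status() & 0x100) == 0x100, 10))
        fprintf(stderr, "timed out waiting for status\n");
    else
        puts("status bit set");
    return 0;
}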
@@ -3129,13 +3121,10 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
}
-
-static void gen6_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(GEN6_RC_CONTROL, 0);
- I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
/* Complete PM interrupt masking here doesn't race with the rps work
@@ -3143,30 +3132,30 @@ static void gen6_disable_rps(struct drm_device *dev)
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
- spin_lock_irq(&dev_priv->rps.lock);
+ spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->rps.lock);
+ spin_unlock_irq(&dev_priv->irq_lock);
I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
}
-static void valleyview_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RC_CONTROL, 0);
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
- I915_WRITE(GEN6_PMIER, 0);
- /* Complete PM interrupt masking here doesn't race with the rps work
- * item again unmasking PM interrupts because that is using a different
- * register (PMIMR) to mask PM interrupts. The only risk is in leaving
- * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+ I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
- spin_lock_irq(&dev_priv->rps.lock);
- dev_priv->rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->rps.lock);
+ gen6_disable_rps_interrupts(dev);
+}
- I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+static void valleyview_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ gen6_disable_rps_interrupts(dev);
if (dev_priv->vlv_pctx) {
drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
@@ -3176,6 +3165,10 @@ static void valleyview_disable_rps(struct drm_device *dev)
int intel_enable_rc6(const struct drm_device *dev)
{
+ /* No RC6 before Ironlake */
+ if (INTEL_INFO(dev)->gen < 5)
+ return 0;
+
/* Respect the kernel parameter if it is set */
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
@@ -3199,6 +3192,19 @@ int intel_enable_rc6(const struct drm_device *dev)
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
+static void gen6_enable_rps_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ WARN_ON(dev_priv->rps.pm_iir);
+ I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
+ I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
+ spin_unlock_irq(&dev_priv->irq_lock);
+ /* unmask all PM interrupts */
+ I915_WRITE(GEN6_PMINTRMSK, 0);
+}
+
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3327,17 +3333,7 @@ static void gen6_enable_rps(struct drm_device *dev)
gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
- /* requires MSI enabled */
- I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
- spin_lock_irq(&dev_priv->rps.lock);
- /* FIXME: Our interrupt enabling sequence is bonghits.
- * dev_priv->rps.pm_iir really should be 0 here. */
- dev_priv->rps.pm_iir = 0;
- I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
- I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
- spin_unlock_irq(&dev_priv->rps.lock);
- /* unmask all PM interrupts */
- I915_WRITE(GEN6_PMINTRMSK, 0);
+ gen6_enable_rps_interrupts(dev);
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -3482,7 +3478,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
pcbr_offset,
- -1,
+ I915_GTT_OFFSET_NONE,
pctx_size);
goto out;
}
@@ -3607,14 +3603,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
- /* requires MSI enabled */
- I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
- spin_lock_irq(&dev_priv->rps.lock);
- WARN_ON(dev_priv->rps.pm_iir != 0);
- I915_WRITE(GEN6_PMIMR, 0);
- spin_unlock_irq(&dev_priv->rps.lock);
- /* enable all PM interrupts */
- I915_WRITE(GEN6_PMINTRMSK, 0);
+ gen6_enable_rps_interrupts(dev);
gen6_gt_force_wake_put(dev_priv);
}
@@ -3708,7 +3697,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -3731,7 +3720,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
return;
}
- I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
@@ -4429,7 +4418,10 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
- /* Required for FBC */
+ /*
+ * Required for FBC
+ * WaFbcDisableDpfcClockGating:ilk
+ */
dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
@@ -4466,6 +4458,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
* The bit 7,8,9 of 0x42020.
*/
if (IS_IRONLAKE_M(dev)) {
+ /* WaFbcAsynchFlipDisableFbcQueue:ilk */
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
@@ -4602,6 +4595,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
* The bit5 and bit7 of 0x42020
* The bit14 of 0x70180
* The bit14 of 0x71180
+ *
+ * WaFbcAsynchFlipDisableFbcQueue:snb
*/
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
@@ -5292,254 +5287,6 @@ void intel_init_pm(struct drm_device *dev)
}
}
-static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
-{
- u32 gt_thread_status_mask;
-
- if (IS_HASWELL(dev_priv->dev))
- gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
- else
- gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
-
- /* w/a for a sporadic read returning 0 by waiting for the GT
- * thread to wake up.
- */
- if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
- DRM_ERROR("GT thread status wait timed out\n");
-}
-
-static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE, 0);
- POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
-}
-
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
-
- I915_WRITE_NOTRACE(FORCEWAKE, 1);
- POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
-
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
-
- /* WaRsForcewakeWaitTC0:snb */
- __gen6_gt_wait_for_thread_c0(dev_priv);
-}
-
-static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
- /* something from same cacheline, but !FORCEWAKE_MT */
- POSTING_READ(ECOBUS);
-}
-
-static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
-{
- u32 forcewake_ack;
-
- if (IS_HASWELL(dev_priv->dev))
- forcewake_ack = FORCEWAKE_ACK_HSW;
- else
- forcewake_ack = FORCEWAKE_MT_ACK;
-
- if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
-
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
- /* something from same cacheline, but !FORCEWAKE_MT */
- POSTING_READ(ECOBUS);
-
- if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
-
- /* WaRsForcewakeWaitTC0:ivb,hsw */
- __gen6_gt_wait_for_thread_c0(dev_priv);
-}
-
-/*
- * Generally this is called implicitly by the register read function. However,
- * if some sequence requires the GT to not power down then this function should
- * be called at the beginning of the sequence followed by a call to
- * gen6_gt_force_wake_put() at the end of the sequence.
- */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
- if (dev_priv->forcewake_count++ == 0)
- dev_priv->gt.force_wake_get(dev_priv);
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-}
-
-void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
-{
- u32 gtfifodbg;
- gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
- if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
- "MMIO read or write has been dropped %x\n", gtfifodbg))
- I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
-}
-
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE, 0);
- /* something from same cacheline, but !FORCEWAKE */
- POSTING_READ(ECOBUS);
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- /* something from same cacheline, but !FORCEWAKE_MT */
- POSTING_READ(ECOBUS);
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-/*
- * see gen6_gt_force_wake_get()
- */
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
- if (--dev_priv->forcewake_count == 0)
- dev_priv->gt.force_wake_put(dev_priv);
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-}
-
-int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
-{
- int ret = 0;
-
- if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
- int loop = 500;
- u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
- udelay(10);
- fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- }
- if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
- ++ret;
- dev_priv->gt_fifo_count = fifo;
- }
- dev_priv->gt_fifo_count--;
-
- return ret;
-}
-
-static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
- /* something from same cacheline, but !FORCEWAKE_VLV */
- POSTING_READ(FORCEWAKE_ACK_VLV);
-}
-
-static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
-{
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
-
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
- I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
-
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
-
- /* WaRsForcewakeWaitTC0:vlv */
- __gen6_gt_wait_for_thread_c0(dev_priv);
-}
-
-static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- /* The below doubles as a POSTING_READ */
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-void intel_gt_sanitize(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_VALLEYVIEW(dev)) {
- vlv_force_wake_reset(dev_priv);
- } else if (INTEL_INFO(dev)->gen >= 6) {
- __gen6_gt_force_wake_reset(dev_priv);
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- __gen6_gt_force_wake_mt_reset(dev_priv);
- }
-
- /* BIOS often leaves RC6 enabled, but disable it for hw init */
- if (INTEL_INFO(dev)->gen >= 6)
- intel_disable_gt_powersave(dev);
-}
-
-void intel_gt_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_VALLEYVIEW(dev)) {
- dev_priv->gt.force_wake_get = vlv_force_wake_get;
- dev_priv->gt.force_wake_put = vlv_force_wake_put;
- } else if (IS_HASWELL(dev)) {
- dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
- dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
- } else if (IS_IVYBRIDGE(dev)) {
- u32 ecobus;
-
- /* IVB configs may use multi-threaded forcewake */
-
- /* A small trick here - if the bios hasn't configured
- * MT forcewake, and if the device is in RC6, then
- * force_wake_mt_get will not wake the device and the
- * ECOBUS read will return zero. Which will be
- * (correctly) interpreted by the test below as MT
- * forcewake being disabled.
- */
- mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = I915_READ_NOTRACE(ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- dev_priv->gt.force_wake_get =
- __gen6_gt_force_wake_mt_get;
- dev_priv->gt.force_wake_put =
- __gen6_gt_force_wake_mt_put;
- } else {
- DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
- DRM_INFO("when using vblank-synced partial screen updates.\n");
- dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
- dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
- }
- } else if (IS_GEN6(dev)) {
- dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
- dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
- }
- INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
- intel_gen6_powersave_work);
-}
-
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -5642,3 +5389,11 @@ int vlv_freq_opcode(int ddr_freq, int val)
return val;
}
+void intel_pm_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
+ intel_gen6_powersave_work);
+}
+
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 664118d8c1d6..8527ea05124b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -440,14 +440,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
- I915_WRITE_START(ring, obj->gtt_offset);
+ I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
- I915_READ_START(ring) == obj->gtt_offset &&
+ I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
(I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
@@ -505,7 +505,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
if (ret)
goto err_unref;
- pc->gtt_offset = obj->gtt_offset;
+ pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
pc->cpu_page = kmap(sg_page(obj->pages->sgl));
if (pc->cpu_page == NULL) {
ret = -ENOMEM;
@@ -836,7 +836,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount.gt++ == 0) {
+ if (ring->irq_refcount++ == 0) {
dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
@@ -854,7 +854,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount.gt == 0) {
+ if (--ring->irq_refcount == 0) {
dev_priv->gt_irq_mask |= ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
@@ -873,7 +873,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount.gt++ == 0) {
+ if (ring->irq_refcount++ == 0) {
dev_priv->irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ(IMR);
@@ -891,7 +891,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount.gt == 0) {
+ if (--ring->irq_refcount == 0) {
dev_priv->irq_mask |= ring->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ(IMR);
@@ -910,7 +910,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount.gt++ == 0) {
+ if (ring->irq_refcount++ == 0) {
dev_priv->irq_mask &= ~ring->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(IMR);
@@ -928,7 +928,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount.gt == 0) {
+ if (--ring->irq_refcount == 0) {
dev_priv->irq_mask |= ring->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(IMR);
@@ -1021,7 +1021,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
gen6_gt_force_wake_get(dev_priv);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount.gt++ == 0) {
+ if (ring->irq_refcount++ == 0) {
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring,
~(ring->irq_enable_mask |
@@ -1045,7 +1045,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount.gt == 0) {
+ if (--ring->irq_refcount == 0) {
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring,
~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
@@ -1070,14 +1070,14 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
if (!dev->irq_enabled)
return false;
- spin_lock_irqsave(&dev_priv->rps.lock, flags);
- if (ring->irq_refcount.pm++ == 0) {
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
u32 pm_imr = I915_READ(GEN6_PMIMR);
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
POSTING_READ(GEN6_PMIMR);
}
- spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
@@ -1092,14 +1092,14 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
if (!dev->irq_enabled)
return;
- spin_lock_irqsave(&dev_priv->rps.lock, flags);
- if (--ring->irq_refcount.pm == 0) {
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
u32 pm_imr = I915_READ(GEN6_PMIMR);
I915_WRITE_IMR(ring, ~0);
I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
POSTING_READ(GEN6_PMIMR);
}
- spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static int
@@ -1144,7 +1144,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
intel_ring_advance(ring);
} else {
struct drm_i915_gem_object *obj = ring->private;
- u32 cs_offset = obj->gtt_offset;
+ u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
@@ -1229,7 +1229,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
goto err_unref;
}
- ring->status_page.gfx_addr = obj->gtt_offset;
+ ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM;
@@ -1316,7 +1316,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto err_unpin;
ring->virtual_start =
- ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+ ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
@@ -2008,8 +2008,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
- ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
- PM_VEBOX_CS_ERROR_INTERRUPT;
+ ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
ring->irq_get = hsw_vebox_get_irq;
ring->irq_put = hsw_vebox_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 799f04c9da45..6e38256d41e1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -78,10 +78,7 @@ struct intel_ring_buffer {
*/
u32 last_retired_head;
- struct {
- u32 gt; /* protected by dev_priv->irq_lock */
- u32 pm; /* protected by dev_priv->rps.lock (sucks) */
- } irq_refcount;
+ unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
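This collapses the split gt/pm reference counts into a single irq_refcount under one lock, matching the get/put sites rewritten in intel_ringbuffer.c above: the interrupt mask register is only touched on the 0 to 1 and 1 to 0 transitions. A sketch of that refcounted get/put idiom with placeholder names (a pthread mutex stands in for dev_priv->irq_lock):

#include <pthread.h>
#include <stdio.h>

/* Sketch of the refcounted irq get/put idiom; unmask_irq()/mask_irq()
 * stand in for the IMR register writes done under the irq lock. */
static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned irq_refcount;   /* protected by irq_lock */

static void unmask_irq(void) { puts("irq unmasked"); }
static void mask_irq(void)   { puts("irq masked"); }

static void ring_get_irq(void)
{
    pthread_mutex_lock(&irq_lock);
    if (irq_refcount++ == 0)    /* only the first user unmasks */
        unmask_irq();
    pthread_mutex_unlock(&irq_lock);
}

static void ring_put_irq(void)
{
    pthread_mutex_lock(&irq_lock);
    if (--irq_refcount == 0)    /* only the last user masks again */
        mask_irq();
    pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
    ring_get_irq();             /* unmask */
    ring_get_irq();             /* refcount only */
    ring_put_irq();             /* refcount only */
    ring_put_irq();             /* mask */
    return 0;
}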
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2628d5622449..c3b59b8593b9 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1357,22 +1357,21 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
/* Cross check the port pixel multiplier with the sdvo encoder state. */
- intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, &val, 1);
- switch (val) {
- case SDVO_CLOCK_RATE_MULT_1X:
- encoder_pixel_multiplier = 1;
- break;
- case SDVO_CLOCK_RATE_MULT_2X:
- encoder_pixel_multiplier = 2;
- break;
- case SDVO_CLOCK_RATE_MULT_4X:
- encoder_pixel_multiplier = 4;
- break;
+ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
+ &val, 1)) {
+ switch (val) {
+ case SDVO_CLOCK_RATE_MULT_1X:
+ encoder_pixel_multiplier = 1;
+ break;
+ case SDVO_CLOCK_RATE_MULT_2X:
+ encoder_pixel_multiplier = 2;
+ break;
+ case SDVO_CLOCK_RATE_MULT_4X:
+ encoder_pixel_multiplier = 4;
+ break;
+ }
}
- if(HAS_PCH_SPLIT(dev))
- return; /* no pixel multiplier readout support yet */
-
WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
"SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
pipe_config->pixel_multiplier, encoder_pixel_multiplier);
@@ -1697,6 +1696,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS,
&response, 2))
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 1fa5612a4572..55bdf70b548b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -133,7 +133,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
- I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
+ I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
}
@@ -308,7 +308,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
- I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe),
+ i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
/* potentially re-enable LP watermarks */
@@ -478,7 +479,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe),
+ i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 39debd80d190..b0b446f630f7 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1305,6 +1305,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
struct intel_tv *intel_tv = intel_attached_tv(connector);
int type;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
+ connector->base.id, drm_get_connector_name(connector),
+ force);
+
mode = reported_modes[0];
if (force) {
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
new file mode 100644
index 000000000000..8f5bc869c023
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -0,0 +1,595 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+#define FORCEWAKE_ACK_TIMEOUT_MS 2
+
+#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
+#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
+#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
+#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
+#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
+
+
+static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+{
+ u32 gt_thread_status_mask;
+
+ if (IS_HASWELL(dev_priv->dev))
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
+ else
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
+
+ /* w/a for a sporadic read returning 0 by waiting for the GT
+ * thread to wake up.
+ */
+ if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
+ DRM_ERROR("GT thread status wait timed out\n");
+}
+
+static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE, 0);
+ /* something from same cacheline, but !FORCEWAKE */
+ __raw_posting_read(dev_priv, ECOBUS);
+}
+
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE, 1);
+ /* something from same cacheline, but !FORCEWAKE */
+ __raw_posting_read(dev_priv, ECOBUS);
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+
+ /* WaRsForcewakeWaitTC0:snb */
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ __raw_posting_read(dev_priv, ECOBUS);
+}
+
+static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+ u32 forcewake_ack;
+
+ if (IS_HASWELL(dev_priv->dev))
+ forcewake_ack = FORCEWAKE_ACK_HSW;
+ else
+ forcewake_ack = FORCEWAKE_MT_ACK;
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_MT,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ __raw_posting_read(dev_priv, ECOBUS);
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+
+ /* WaRsForcewakeWaitTC0:ivb,hsw */
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
+{
+ u32 gtfifodbg;
+
+ gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
+ if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
+ "MMIO read or write has been dropped %x\n", gtfifodbg))
+ __raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+}
+
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE, 0);
+ /* something from same cacheline, but !FORCEWAKE */
+ __raw_posting_read(dev_priv, ECOBUS);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_MT,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ __raw_posting_read(dev_priv, ECOBUS);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+ int ret = 0;
+
+ if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+ int loop = 500;
+ u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+ while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+ udelay(10);
+ fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+ }
+ if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
+ ++ret;
+ dev_priv->uncore.fifo_count = fifo;
+ }
+ dev_priv->uncore.fifo_count--;
+
+ return ret;
+}
+
+static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
+}
+
+static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
+
+ /* WaRsForcewakeWaitTC0:vlv */
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* The below doubles as a POSTING_READ */
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+void intel_uncore_early_sanitize(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_FPGA_DBG_UNCLAIMED(dev))
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+}
+
+void intel_uncore_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
+ } else if (IS_HASWELL(dev)) {
+ dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
+ dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
+ } else if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ /* IVB configs may use multi-threaded forcewake */
+
+ /* A small trick here - if the bios hasn't configured
+ * MT forcewake, and if the device is in RC6, then
+ * force_wake_mt_get will not wake the device and the
+ * ECOBUS read will return zero. Which will be
+ * (correctly) interpreted by the test below as MT
+ * forcewake being disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = __raw_i915_read32(dev_priv, ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ } else {
+ DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+ DRM_INFO("when using vblank-synced partial screen updates.\n");
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_put;
+ }
+ } else if (IS_GEN6(dev)) {
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_put;
+ }
+}
+
+void intel_uncore_sanitize(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ vlv_force_wake_reset(dev_priv);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ __gen6_gt_force_wake_reset(dev_priv);
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ __gen6_gt_force_wake_mt_reset(dev_priv);
+ }
+
+ /* BIOS often leaves RC6 enabled, but disable it for hw init */
+ intel_disable_gt_powersave(dev);
+}
+
+/*
+ * Generally this is called implicitly by the register read function.
+ * However, if a sequence requires the GT not to power down, call this
+ * at the start of the sequence and gen6_gt_force_wake_put() at the end.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (dev_priv->uncore.forcewake_count++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+/*
+ * see gen6_gt_force_wake_get()
+ */
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (--dev_priv->uncore.forcewake_count == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
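+/*
+ * Illustrative usage (a sketch, not part of this patch): a sequence
+ * that must keep the GT awake brackets its register accesses with the
+ * reference-counted helpers above, for example:
+ *
+ *	gen6_gt_force_wake_get(dev_priv);
+ *	... registers touched here cannot race an RC6 power-down ...
+ *	gen6_gt_force_wake_put(dev_priv);
+ */
+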
+/* Registers below 0x40000 sit in the GT power well and need forcewake;
+ * everything else (and the FORCEWAKE register itself) takes the fast path. */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+ ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE))
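+
+/*
+ * Example, for illustration only: GEN6_RPNSWREQ (0xA008) falls below
+ * 0x40000 and is therefore accessed under forcewake, while a display
+ * register such as DSPACNTR (0x70180) is not.
+ */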
+
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+ /* WaIssueDummyWriteToWakeupFromRC6:ilk. Issue a dummy write to wake
+ * the chip from RC6 before touching it for real; MI_MODE is masked,
+ * hence harmless to write 0 into. */
+ __raw_i915_write32(dev_priv, MI_MODE, 0);
+}
+
+static void
+hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+{
+ if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
+ (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ DRM_ERROR("Unknown unclaimed register before writing to %x\n",
+ reg);
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+}
+
+static void
+hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+{
+ if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
+ (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ DRM_ERROR("Unclaimed write to %x\n", reg);
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+}
+
+#define __i915_read(x) \
+u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
+ unsigned long irqflags; \
+ u##x val = 0; \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+ if (dev_priv->info->gen == 5) \
+ ilk_dummy_write(dev_priv); \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ if (dev_priv->uncore.forcewake_count == 0) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ if (dev_priv->uncore.forcewake_count == 0) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+ } else { \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ } \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+ return val; \
+}
+
+__i915_read(8)
+__i915_read(16)
+__i915_read(32)
+__i915_read(64)
+#undef __i915_read
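+
+/*
+ * For illustration: __i915_read(32) above expands to a function roughly
+ * shaped like
+ *
+ *	u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg, bool trace);
+ *
+ * which takes uncore.lock, applies the ILK dummy-write and forcewake
+ * rules, performs the raw read, traces it and returns the value.
+ */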
+
+#define __i915_write(x) \
+void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
+ unsigned long irqflags; \
+ u32 __fifo_ret = 0; \
+ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+ } \
+ if (dev_priv->info->gen == 5) \
+ ilk_dummy_write(dev_priv); \
+ hsw_unclaimed_reg_clear(dev_priv, reg); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (unlikely(__fifo_ret)) { \
+ gen6_gt_check_fifodbg(dev_priv); \
+ } \
+ hsw_unclaimed_reg_check(dev_priv, reg); \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+__i915_write(8)
+__i915_write(16)
+__i915_write(32)
+__i915_write(64)
+#undef __i915_write
+
+static const struct register_whitelist {
+ uint64_t offset;
+ uint32_t size;
+ uint32_t gen_bitmask; /* supported gens: bit (1 << gen), e.g. 0x10 = gen4, 0x30 = gen4 and gen5 */
+} whitelist[] = {
+ { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+};
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_reg_read *reg = data;
+ struct register_whitelist const *entry = whitelist;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+ if (entry->offset == reg->offset &&
+ (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(whitelist))
+ return -EINVAL;
+
+ switch (entry->size) {
+ case 8:
+ reg->val = I915_READ64(reg->offset);
+ break;
+ case 4:
+ reg->val = I915_READ(reg->offset);
+ break;
+ case 2:
+ reg->val = I915_READ16(reg->offset);
+ break;
+ case 1:
+ reg->val = I915_READ8(reg->offset);
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
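+
+/*
+ * Userspace sketch (assumes libdrm's drmIoctl() and an open device fd;
+ * 0x2358 is RING_TIMESTAMP(RENDER_RING_BASE), the only whitelisted
+ * register):
+ *
+ *	struct drm_i915_reg_read r = { .offset = 0x2358 };
+ *
+ *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &r) == 0)
+ *		printf("render timestamp: %llu\n", (unsigned long long)r.val);
+ */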
+
+static int i8xx_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_I85X(dev))
+ return -ENODEV;
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ I915_WRITE(DEBUG_RESET_I830,
+ DEBUG_RESET_DISPLAY |
+ DEBUG_RESET_RENDER |
+ DEBUG_RESET_FULL);
+ POSTING_READ(DEBUG_RESET_I830);
+ msleep(1);
+
+ I915_WRITE(DEBUG_RESET_I830, 0);
+ POSTING_READ(DEBUG_RESET_I830);
+ }
+
+ msleep(1);
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ return 0;
+}
+
+static int i965_reset_complete(struct drm_device *dev)
+{
+ u8 gdrst;
+ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
+
+static int i965_do_reset(struct drm_device *dev)
+{
+ int ret;
+
+ /*
+ * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+ * well as the reset bit (GR/bit 0). Setting the GR bit
+ * triggers the reset; when done, the hardware will clear it.
+ */
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ /* We can't reset render & media without also resetting display ... */
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+
+ return 0;
+}
+
+static int ironlake_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 gdrst;
+ int ret;
+
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ gdrst &= ~GRDOM_MASK;
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+ if (ret)
+ return ret;
+
+ /* We can't reset render & media without also resetting display ... */
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ gdrst &= ~GRDOM_MASK;
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+ return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+}
+
+static int gen6_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+ unsigned long irqflags;
+
+ /* Hold uncore.lock across reset to prevent any register access
+ * with forcewake not set correctly
+ */
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /* Reset the chip */
+
+ /* GEN6_GDRST is not in the GT power well, so there is no need to
+ * check for FIFO space before the write or to hold forcewake for
+ * the read.
+ */
+ __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
+
+ /* Spin waiting for the device to ack the reset request */
+ ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+
+ /* If a user still holds forcewake across the reset, re-assert it; otherwise make sure it is off */
+ if (dev_priv->uncore.forcewake_count)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv);
+ else
+ dev_priv->uncore.funcs.force_wake_put(dev_priv);
+
+ /* Restore fifo count */
+ dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ return ret;
+}
+
+int intel_gpu_reset(struct drm_device *dev)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6: return gen6_do_reset(dev);
+ case 5: return ironlake_do_reset(dev);
+ case 4: return i965_do_reset(dev);
+ case 2: return i8xx_do_reset(dev);
+ default: return -ENODEV;
+ }
+}
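+
+/* Observation: there is no case 3 above, so gen3 parts (like anything
+ * else unhandled) fall through to -ENODEV. */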
+
+void intel_uncore_clear_errors(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* XXX needs spinlock around caller's grouping */
+ if (HAS_FPGA_DBG_UNCLAIMED(dev))
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+}
+
+void intel_uncore_check_errors(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
+ (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ DRM_ERROR("Unclaimed register before interrupt\n");
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+}
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index 54558a01969a..ca4bc54ea214 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -149,7 +149,7 @@ typedef struct drm_mga_private {
unsigned int agp_size;
} drm_mga_private_t;
-extern struct drm_ioctl_desc mga_ioctls[];
+extern const struct drm_ioctl_desc mga_ioctls[];
extern int mga_max_ioctl;
/* mga_dma.c */
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index 9c145143ad0f..37cc2fb4eadd 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1083,7 +1083,7 @@ file_priv)
return 0;
}
-struct drm_ioctl_desc mga_ioctls[] = {
+const struct drm_ioctl_desc mga_ioctls[] = {
DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 122b571ccc7c..bd9196478735 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -104,7 +104,7 @@ static struct drm_driver driver = {
.gem_free_object = mgag200_gem_free_object,
.dumb_create = mgag200_dumb_create,
.dumb_map_offset = mgag200_dumb_mmap_offset,
- .dumb_destroy = mgag200_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
};
static struct pci_driver mgag200_pci_driver = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 12e2499d9352..baaae19332e2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -264,9 +264,6 @@ int mgag200_gem_init_object(struct drm_gem_object *obj);
int mgag200_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int mgag200_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle);
void mgag200_gem_free_object(struct drm_gem_object *obj);
int
mgag200_dumb_mmap_offset(struct drm_file *file,
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 9fa5685baee0..0f8b861b10b3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -310,13 +310,6 @@ int mgag200_dumb_create(struct drm_file *file,
return 0;
}
-int mgag200_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
int mgag200_gem_init_object(struct drm_gem_object *obj)
{
BUG();
@@ -349,7 +342,7 @@ void mgag200_gem_free_object(struct drm_gem_object *obj)
static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
{
- return bo->bo.addr_space_offset;
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
}
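+
+/* Note on the new helper's contract: drm_vma_node_offset_addr() returns
+ * the node start in bytes (page offset << PAGE_SHIFT), which is exactly
+ * what the removed addr_space_offset field used to hold. */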
int
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 31cc8fe8e7f0..054d9cff4f53 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -150,7 +150,7 @@ dp_link_train_update(struct dp_state *dp, u32 delay)
if (ret)
return ret;
- DBG("status %*ph\n", 6, dp->stat);
+ DBG("status %6ph\n", dp->stat);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 907d20ef6d4d..78637afb9b94 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -674,13 +674,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
}
int
-nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file_priv, handle);
-}
-
-int
nouveau_display_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *poffset)
@@ -690,7 +683,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
gem = drm_gem_object_lookup(dev, file_priv, handle);
if (gem) {
struct nouveau_bo *bo = gem->driver_private;
- *poffset = bo->bo.addr_space_offset;
+ *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
drm_gem_object_unreference_unlocked(gem);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 1ea3e4734b62..185e74132a6d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -68,8 +68,6 @@ int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
struct drm_mode_create_dumb *args);
int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
u32 handle, u64 *offset);
-int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
- u32 handle);
void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 61972668fd05..1faa75f42393 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -649,7 +649,7 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
nouveau_cli_destroy(cli);
}
-static struct drm_ioctl_desc
+static const struct drm_ioctl_desc
nouveau_ioctls[] = {
DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -684,7 +684,7 @@ nouveau_driver_fops = {
static struct drm_driver
driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
+ DRIVER_USE_AGP |
DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
.load = nouveau_drm_load,
@@ -704,6 +704,7 @@ driver = {
.disable_vblank = nouveau_drm_vblank_disable,
.ioctls = nouveau_ioctls,
+ .num_ioctls = ARRAY_SIZE(nouveau_ioctls),
.fops = &nouveau_driver_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -724,7 +725,7 @@ driver = {
.dumb_create = nouveau_display_dumb_create,
.dumb_map_offset = nouveau_display_dumb_map_offset,
- .dumb_destroy = nouveau_display_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -774,8 +775,6 @@ nouveau_drm_pci_driver = {
static int __init
nouveau_drm_init(void)
{
- driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);
-
if (nouveau_modeset == -1) {
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force())
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 830cb7bad922..487242fb3fdc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -220,7 +220,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
}
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
- rep->map_handle = nvbo->bo.addr_space_offset;
+ rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
rep->tile_mode = nvbo->tile_mode;
rep->tile_flags = nvbo->tile_flags;
return 0;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index a3004f12b9a3..2f9e22e22bd4 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -419,7 +419,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
return ret;
}
-static struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
+static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
@@ -633,7 +633,7 @@ static struct drm_driver omap_drm_driver = {
.gem_vm_ops = &omap_gem_vm_ops,
.dumb_create = omap_gem_dumb_create,
.dumb_map_offset = omap_gem_dumb_map_offset,
- .dumb_destroy = omap_gem_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = ioctls,
.num_ioctls = DRM_OMAP_NUM_IOCTLS,
.fops = &omapdriver_fops,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 14f17da2ce25..f2ba425d80dd 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -225,8 +225,6 @@ int omap_gem_init_object(struct drm_gem_object *obj);
void *omap_gem_vaddr(struct drm_gem_object *obj);
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
-int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle);
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index ebbdf4132e9c..b1f19702550f 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -20,6 +20,7 @@
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
+#include <drm/drm_vma_manager.h>
#include "omap_drv.h"
#include "omap_dmm_tiler.h"
@@ -308,21 +309,20 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj)
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ int ret;
+ size_t size;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- if (!obj->map_list.map) {
- /* Make it mmapable */
- size_t size = omap_gem_mmap_size(obj);
- int ret = _drm_gem_create_mmap_offset_size(obj, size);
-
- if (ret) {
- dev_err(dev->dev, "could not allocate mmap offset\n");
- return 0;
- }
+ /* Make it mmapable */
+ size = omap_gem_mmap_size(obj);
+ ret = _drm_gem_create_mmap_offset_size(obj, size);
+ if (ret) {
+ dev_err(dev->dev, "could not allocate mmap offset\n");
+ return 0;
}
- return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
+ return drm_vma_node_offset_addr(&obj->vma_node);
}
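+
+/* Note on the dropped map_list check: drm_vma_offset_add(), which the
+ * helper above now wraps, is a no-op for a node that already has an
+ * offset, so calling it on every lookup is assumed safe here. */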
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
@@ -629,21 +629,6 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
}
/**
- * omap_gem_dumb_destroy - destroy a dumb buffer
- * @file: client file
- * @dev: our DRM device
- * @handle: the object handle
- *
- * Destroy a handle that was created via omap_gem_dumb_create.
- */
-int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle)
-{
- /* No special work needed, drop the reference and see what falls out */
- return drm_gem_handle_delete(file, handle);
-}
-
-/**
* omap_gem_dumb_map - buffer mapping for dumb interface
* @file: our drm client file
* @dev: drm device
@@ -997,12 +982,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct drm_device *dev = obj->dev;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- uint64_t off = 0;
+ uint64_t off;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- if (obj->map_list.map)
- off = (uint64_t)obj->map_list.hash.key;
+ off = drm_vma_node_start(&obj->vma_node);
seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
omap_obj->flags, obj->name, obj->refcount.refcount.counter,
@@ -1309,8 +1293,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
list_del(&omap_obj->mm_list);
- if (obj->map_list.map)
- drm_gem_free_mmap_offset(obj);
+ drm_gem_free_mmap_offset(obj);
/* this means the object is still pinned.. which really should
* not happen. I think..
@@ -1427,8 +1410,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
omap_obj->height = gsize.tiled.height;
}
+ ret = 0;
if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
- ret = drm_gem_private_object_init(dev, obj, size);
+ drm_gem_private_object_init(dev, obj, size);
else
ret = drm_gem_object_init(dev, obj, size);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
index f9eb679eb79b..dbb157542f8f 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
@@ -118,52 +118,7 @@ _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list;
- struct drm_local_map *map;
- int ret = 0;
-
- /* Set the object up for mmap'ing */
- list = &obj->map_list;
- list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
- if (!list->map)
- return -ENOMEM;
-
- map = list->map;
- map->type = _DRM_GEM;
- map->size = size;
- map->handle = obj;
-
- /* Get a DRM GEM mmap offset allocated... */
- list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- size / PAGE_SIZE, 0, 0);
-
- if (!list->file_offset_node) {
- DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
- ret = -ENOSPC;
- goto out_free_list;
- }
-
- list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- size / PAGE_SIZE, 0);
- if (!list->file_offset_node) {
- ret = -ENOMEM;
- goto out_free_list;
- }
-
- list->hash.key = list->file_offset_node->start;
- ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
- if (ret) {
- DRM_ERROR("failed to add to map hash\n");
- goto out_free_mm;
- }
-
- return 0;
-
-out_free_mm:
- drm_mm_put_block(list->file_offset_node);
-out_free_list:
- kfree(list->map);
- list->map = NULL;
- return ret;
+ return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
+ size / PAGE_SIZE);
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index df0b577a6608..48f2dfdeabcb 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -221,7 +221,7 @@ static struct drm_driver qxl_driver = {
.dumb_create = qxl_mode_dumb_create,
.dumb_map_offset = qxl_mode_dumb_mmap,
- .dumb_destroy = qxl_mode_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
.debugfs_cleanup = qxl_debugfs_takedown,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 7e96f4f11738..4708621fe720 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -328,7 +328,7 @@ struct qxl_device {
/* forward declaration for QXL_INFO_IO */
void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
-extern struct drm_ioctl_desc qxl_ioctls[];
+extern const struct drm_ioctl_desc qxl_ioctls[];
extern int qxl_max_ioctl;
int qxl_driver_load(struct drm_device *dev, unsigned long flags);
@@ -427,9 +427,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
int qxl_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int qxl_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
int qxl_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index 847c4ee798f7..d34bb4130ff0 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -68,13 +68,6 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
return 0;
}
-int qxl_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file_priv, handle);
-}
-
int qxl_mode_dumb_mmap(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p)
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 6de33563d6f1..7b95c75e9626 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -433,7 +433,7 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
return ret;
}
-struct drm_ioctl_desc qxl_ioctls[] = {
+const struct drm_ioctl_desc qxl_ioctls[] = {
DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 8cb6167038e5..d458a140c024 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -59,7 +59,7 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
{
- return bo->tbo.addr_space_offset;
+ return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}
static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b61449e52cd5..0109a9644cb2 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -88,7 +88,7 @@ qxl_release_free(struct qxl_device *qdev,
list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
QXL_INFO(qdev, "release %llx\n",
- entry->tv.bo->addr_space_offset
+ drm_vma_node_offset_addr(&entry->tv.bo->vma_node)
- DRM_FILE_OFFSET);
qxl_fence_remove_release(&bo->fence, release->id);
qxl_bo_unref(&bo);
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index d4660cf942a5..c451257f08fb 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -540,7 +540,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
- dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+ dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
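+ /* order_base_2() (include/linux/log2.h) rounds up to the next
+ * power-of-two exponent, matching the semantics of the drm_order()
+ * calls replaced throughout this series. */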
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 930c71b2fb5e..56eb5e3f5439 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -131,7 +131,7 @@ typedef struct drm_r128_buf_priv {
drm_r128_freelist_t *list_entry;
} drm_r128_buf_priv_t;
-extern struct drm_ioctl_desc r128_ioctls[];
+extern const struct drm_ioctl_desc r128_ioctls[];
extern int r128_max_ioctl;
/* r128_cce.c */
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 19bb7e6f3d9a..01dd9aef9f0e 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1643,7 +1643,7 @@ void r128_driver_lastclose(struct drm_device *dev)
r128_do_cleanup_cce(dev);
}
-struct drm_ioctl_desc r128_ioctls[] = {
+const struct drm_ioctl_desc r128_ioctls[] = {
DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 32501f6ec991..3569d89b9e41 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -585,7 +585,7 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
return false;
}
- DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
+ DRM_DEBUG_KMS("link status %6ph\n", link_status);
return true;
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 6dacec4e2090..6adbc998349e 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -2535,8 +2535,8 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
/* ring 0 - compute and gfx */
/* Set ring buffer size */
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -2915,7 +2915,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
tmp = RREG32(CP_HPD_EOP_CONTROL);
tmp &= ~EOP_SIZE_MASK;
- tmp |= drm_order(MEC_HPD_SIZE / 8);
+ tmp |= order_base_2(MEC_HPD_SIZE / 8);
WREG32(CP_HPD_EOP_CONTROL, tmp);
}
cik_srbm_select(rdev, 0, 0, 0, 0);
@@ -3030,9 +3030,9 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
mqd->queue_state.cp_hqd_pq_control |=
- drm_order(rdev->ring[idx].ring_size / 8);
+ order_base_2(rdev->ring[idx].ring_size / 8);
mqd->queue_state.cp_hqd_pq_control |=
- (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8);
+ (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
#ifdef __BIG_ENDIAN
mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
#endif
@@ -3375,7 +3375,7 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev)
WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
/* Set ring buffer size in dwords */
- rb_bufsz = drm_order(ring->ring_size / 4);
+ rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
@@ -5030,7 +5030,7 @@ static int cik_irq_init(struct radeon_device *rdev)
WREG32(INTERRUPT_CNTL, interrupt_cntl);
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
- rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+ rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 038dcac7670c..b67c9ec7f690 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2881,8 +2881,8 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
RREG32(GRBM_SOFT_RESET);
/* Set ring buffer size */
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 56bd4f3be4fe..5b6e47765656 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1560,8 +1560,8 @@ static int cayman_cp_resume(struct radeon_device *rdev)
/* Set ring buffer size */
ring = &rdev->ring[ridx[i]];
- rb_cntl = drm_order(ring->ring_size / 8);
- rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
+ rb_cntl = order_base_2(ring->ring_size / 8);
+ rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
rb_cntl |= BUF_SWAP_32BIT;
#endif
@@ -1720,7 +1720,7 @@ int cayman_dma_resume(struct radeon_device *rdev)
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
/* Set ring buffer size in dwords */
- rb_bufsz = drm_order(ring->ring_size / 4);
+ rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 75349cdaa84b..5625cf706f0c 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1097,7 +1097,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
}
/* Align ring size */
- rb_bufsz = drm_order(ring_size / 8);
+ rb_bufsz = order_base_2(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
r100_cp_load_microcode(rdev);
r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 10f712e37003..cfc1d28ade39 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2413,8 +2413,8 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(GRBM_SOFT_RESET, 0);
/* Set ring buffer size */
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -2467,7 +2467,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsign
int r;
/* Align ring size */
- rb_bufsz = drm_order(ring_size / 8);
+ rb_bufsz = order_base_2(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
ring->ring_size = ring_size;
ring->align_mask = 16 - 1;
@@ -2547,7 +2547,7 @@ int r600_dma_resume(struct radeon_device *rdev)
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
/* Set ring buffer size in dwords */
- rb_bufsz = drm_order(ring->ring_size / 4);
+ rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
@@ -2656,7 +2656,7 @@ int r600_uvd_rbc_start(struct radeon_device *rdev)
WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
/* Set ring buffer size */
- rb_bufsz = drm_order(ring->ring_size);
+ rb_bufsz = order_base_2(ring->ring_size);
rb_bufsz = (0x1 << 8) | rb_bufsz;
WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
@@ -3815,7 +3815,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
u32 rb_bufsz;
/* Align ring size */
- rb_bufsz = drm_order(ring_size / 4);
+ rb_bufsz = order_base_2(ring_size / 4);
ring_size = (1 << rb_bufsz) * 4;
rdev->ih.ring_size = ring_size;
rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
@@ -4052,7 +4052,7 @@ int r600_irq_init(struct radeon_device *rdev)
WREG32(INTERRUPT_CNTL, interrupt_cntl);
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
- rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+ rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 1c51c08b1fde..d8eb48bff0ed 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2200,13 +2200,13 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
- dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+ dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
- dev_priv->ring.rptr_update_l2qw = drm_order(/* init->rptr_update */ 4096 / 8);
+ dev_priv->ring.rptr_update_l2qw = order_base_2(/* init->rptr_update */ 4096 / 8);
dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
- dev_priv->ring.fetch_size_l2ow = drm_order(/* init->fetch_size */ 32 / 16);
+ dev_priv->ring.fetch_size_l2ow = order_base_2(/* init->fetch_size */ 32 / 16);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2f08219c39b6..19066d1dcb7d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -492,9 +492,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
int radeon_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
/*
* Semaphores.
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index efc4f6441ef4..3cae2bbc1854 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1444,13 +1444,13 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
- dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+ dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
- dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
+ dev_priv->ring.rptr_update_l2qw = order_base_2( /* init->rptr_update */ 4096 / 8);
dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
- dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
+ dev_priv->ring.fetch_size_l2ow = order_base_2( /* init->fetch_size */ 32 / 16);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 29876b1be8ec..fa7a7e13da6c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -101,8 +101,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
-int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int radeon_gem_object_init(struct drm_gem_object *obj);
void radeon_gem_object_free(struct drm_gem_object *obj);
int radeon_gem_object_open(struct drm_gem_object *obj,
@@ -111,7 +109,7 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
int *vpos, int *hpos);
-extern struct drm_ioctl_desc radeon_ioctls_kms[];
+extern const struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl;
int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
int radeon_mode_dumb_mmap(struct drm_file *filp,
@@ -120,9 +118,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
int radeon_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
size_t size,
@@ -390,8 +385,8 @@ static const struct file_operations radeon_driver_kms_fops = {
static struct drm_driver kms_driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
- DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM |
+ DRIVER_USE_AGP | DRIVER_USE_MTRR |
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
DRIVER_PRIME,
.dev_priv_size = 0,
.load = radeon_driver_load_kms,
@@ -421,10 +416,9 @@ static struct drm_driver kms_driver = {
.gem_free_object = radeon_gem_object_free,
.gem_open_object = radeon_gem_object_open,
.gem_close_object = radeon_gem_object_close,
- .dma_ioctl = radeon_dma_ioctl_kms,
.dumb_create = radeon_mode_dumb_create,
.dumb_map_offset = radeon_mode_dumb_mmap,
- .dumb_destroy = radeon_mode_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.fops = &radeon_driver_kms_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index aa796031ab65..dce99c8a5835 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -570,13 +570,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
return 0;
}
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file_priv, handle);
-}
-
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 49ff3d1a6102..866c2b70aa6f 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -683,16 +683,6 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
drmcrtc);
}
-/*
- * IOCTL.
- */
-int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* Not valid in KMS. */
- return -EINVAL;
-}
-
#define KMS_INVALID_IOCTL(name) \
int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
{ \
@@ -732,7 +722,7 @@ KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
KMS_INVALID_IOCTL(radeon_surface_free_kms)
-struct drm_ioctl_desc radeon_ioctls_kms[] = {
+const struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 49c82c480013..209b11150263 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -113,13 +113,10 @@ static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
* @bo: radeon object for which we query the offset
*
* Returns mmap offset of the object.
- *
- * Note: addr_space_offset is constant after ttm bo init thus isn't protected
- * by any lock.
*/
static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
{
- return bo->tbo.addr_space_offset;
+ return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}
extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d325280e2f9f..d71037f4f68f 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3383,8 +3383,8 @@ static int si_cp_resume(struct radeon_device *rdev)
/* ring 0 - compute and gfx */
/* Set ring buffer size */
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -3416,8 +3416,8 @@ static int si_cp_resume(struct radeon_device *rdev)
/* ring1 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -3442,8 +3442,8 @@ static int si_cp_resume(struct radeon_device *rdev)
/* ring2 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -5651,7 +5651,7 @@ static int si_irq_init(struct radeon_device *rdev)
WREG32(INTERRUPT_CNTL, interrupt_cntl);
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
- rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+ rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index e113352bacdc..0a9f1bb88337 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -175,7 +175,7 @@ static struct drm_driver rcar_du_driver = {
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.dumb_create = rcar_du_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_cma_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.fops = &rcar_du_fops,
.name = "rcar-du",
.desc = "Renesas R-Car Display Unit",
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index bd6b2cf508d5..b17d0710871a 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -1072,7 +1072,7 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
drm_idlelock_release(&file_priv->master->lock);
}
-struct drm_ioctl_desc savage_ioctls[] = {
+const struct drm_ioctl_desc savage_ioctls[] = {
DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
index c05082a59f6f..335f8fcf1041 100644
--- a/drivers/gpu/drm/savage/savage_drv.h
+++ b/drivers/gpu/drm/savage/savage_drv.h
@@ -104,7 +104,7 @@ enum savage_family {
S3_LAST
};
-extern struct drm_ioctl_desc savage_ioctls[];
+extern const struct drm_ioctl_desc savage_ioctls[];
extern int savage_max_ioctl;
#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 5f83f9a3ef59..7f2ea1a5a45f 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -285,7 +285,7 @@ static struct drm_driver shmob_drm_driver = {
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_cma_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.fops = &shmob_drm_fops,
.name = "shmob-drm",
.desc = "Renesas SH Mobile DRM",
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index 13b527bb83be..c31c0253054d 100644
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -70,7 +70,7 @@ extern void sis_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv);
extern void sis_lastclose(struct drm_device *dev);
-extern struct drm_ioctl_desc sis_ioctls[];
+extern const struct drm_ioctl_desc sis_ioctls[];
extern int sis_max_ioctl;
#endif
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 9a43d98e5003..01857d836350 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -109,7 +109,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
if (pool == AGP_TYPE) {
retval = drm_mm_insert_node(&dev_priv->agp_mm,
&item->mm_node,
- mem->size, 0);
+ mem->size, 0,
+ DRM_MM_SEARCH_DEFAULT);
offset = item->mm_node.start;
} else {
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
@@ -121,7 +122,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
#else
retval = drm_mm_insert_node(&dev_priv->vram_mm,
&item->mm_node,
- mem->size, 0);
+ mem->size, 0,
+ DRM_MM_SEARCH_DEFAULT);
offset = item->mm_node.start;
#endif
}
@@ -348,7 +350,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
return;
}
-struct drm_ioctl_desc sis_ioctls[] = {
+const struct drm_ioctl_desc sis_ioctls[] = {
DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 7418dcd986d3..6d0524095fe3 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -379,7 +379,12 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
else
tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ /*
+ * Use the flag from adjusted_mode here, as it may have been
+ * inverted by the slave-encoder fixup that works around the
+ * non-VESA-compliant tilcdc sync timings.
+ */
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
else
tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 40b71da5a214..14801c2235ae 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -519,7 +519,7 @@ static struct drm_driver tilcdc_driver = {
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_cma_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = tilcdc_debugfs_init,
.debugfs_cleanup = tilcdc_debugfs_cleanup,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index dfffaf014022..23b3203d8241 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -73,13 +73,38 @@ static void slave_encoder_prepare(struct drm_encoder *encoder)
tilcdc_crtc_set_panel_info(encoder->crtc, &slave_info);
}
+static bool slave_encoder_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /*
+ * tilcdc does not generate VESA-compliant sync: it aligns VS on the
+ * second edge of HS instead of the first. We use adjusted_mode to
+ * fix up the sync by aligning both rising edges, and add an HSKEW
+ * offset so the slave encoder can correct for it.
+ */
+ adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
+ adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
+ adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
+ } else {
+ adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+ adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
+ }
+
+ return drm_i2c_encoder_mode_fixup(encoder, mode, adjusted_mode);
+}
+
static const struct drm_encoder_funcs slave_encoder_funcs = {
.destroy = slave_encoder_destroy,
};
static const struct drm_encoder_helper_funcs slave_encoder_helper_funcs = {
.dpms = drm_i2c_encoder_dpms,
- .mode_fixup = drm_i2c_encoder_mode_fixup,
+ .mode_fixup = slave_encoder_fixup,
.prepare = slave_encoder_prepare,
.commit = drm_i2c_encoder_commit,
.mode_set = drm_i2c_encoder_mode_set,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb9dd674670c..f1a857ec1021 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -45,7 +45,6 @@
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);
@@ -615,13 +614,7 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
- write_lock(&bdev->vm_lock);
- if (likely(bo->vm_node != NULL)) {
- rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
- drm_mm_put_block(bo->vm_node);
- bo->vm_node = NULL;
- }
- write_unlock(&bdev->vm_lock);
+ drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
ttm_mem_io_lock(man, false);
ttm_mem_io_free_vm(bo);
ttm_mem_io_unlock(man);
@@ -1129,6 +1122,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->resv = &bo->ttm_resv;
reservation_object_init(bo->resv);
atomic_inc(&bo->glob->bo_count);
+ drm_vma_node_reset(&bo->vma_node);
ret = ttm_bo_check_placement(bo, placement);
@@ -1139,7 +1133,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
if (likely(!ret) &&
(bo->type == ttm_bo_type_device ||
bo->type == ttm_bo_type_sg))
- ret = ttm_bo_setup_vm(bo);
+ ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+ bo->mem.num_pages);
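+
+ /* The vma_manager call replaces the open-coded ttm_bo_setup_vm()
+ * removed below: address-space allocation and offset bookkeeping
+ * now live in one helper. */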
locked = ww_mutex_trylock(&bo->resv->lock);
WARN_ON(!locked);
@@ -1424,10 +1419,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
TTM_DEBUG("Swap list was clean\n");
spin_unlock(&glob->lru_lock);
- BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
- write_lock(&bdev->vm_lock);
- drm_mm_takedown(&bdev->addr_space_mm);
- write_unlock(&bdev->vm_lock);
+ drm_vma_offset_manager_destroy(&bdev->vma_manager);
return ret;
}
@@ -1441,7 +1433,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
{
int ret = -EINVAL;
- rwlock_init(&bdev->vm_lock);
bdev->driver = driver;
memset(bdev->man, 0, sizeof(bdev->man));
@@ -1454,9 +1445,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
if (unlikely(ret != 0))
goto out_no_sys;
- bdev->addr_space_rb = RB_ROOT;
- drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
-
+ drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
+ 0x10000000);
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = NULL;
@@ -1498,12 +1488,8 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- loff_t offset = (loff_t) bo->addr_space_offset;
- loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
- if (!bdev->dev_mapping)
- return;
- unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+ drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
ttm_mem_io_free_vm(bo);
}
@@ -1520,78 +1506,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
-static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- struct rb_node **cur = &bdev->addr_space_rb.rb_node;
- struct rb_node *parent = NULL;
- struct ttm_buffer_object *cur_bo;
- unsigned long offset = bo->vm_node->start;
- unsigned long cur_offset;
-
- while (*cur) {
- parent = *cur;
- cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
- cur_offset = cur_bo->vm_node->start;
- if (offset < cur_offset)
- cur = &parent->rb_left;
- else if (offset > cur_offset)
- cur = &parent->rb_right;
- else
- BUG();
- }
-
- rb_link_node(&bo->vm_rb, parent, cur);
- rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
-}
-
-/**
- * ttm_bo_setup_vm:
- *
- * @bo: the buffer to allocate address space for
- *
- * Allocate address space in the drm device so that applications
- * can mmap the buffer and access the contents. This only
- * applies to ttm_bo_type_device objects as others are not
- * placed in the drm device address space.
- */
-
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- int ret;
-
-retry_pre_get:
- ret = drm_mm_pre_get(&bdev->addr_space_mm);
- if (unlikely(ret != 0))
- return ret;
-
- write_lock(&bdev->vm_lock);
- bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
- bo->mem.num_pages, 0, 0);
-
- if (unlikely(bo->vm_node == NULL)) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
- bo->mem.num_pages, 0);
-
- if (unlikely(bo->vm_node == NULL)) {
- write_unlock(&bdev->vm_lock);
- goto retry_pre_get;
- }
-
- ttm_bo_vm_insert_rb(bo);
- write_unlock(&bdev->vm_lock);
- bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
-
- return 0;
-out_unlock:
- write_unlock(&bdev->vm_lock);
- return ret;
-}
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool lazy, bool interruptible, bool no_wait)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index e4367f91472a..c58eba33bd5f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -61,28 +61,25 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
lpfn = placement->lpfn;
if (!lpfn)
lpfn = man->size;
- do {
- ret = drm_mm_pre_get(mm);
- if (unlikely(ret))
- return ret;
- spin_lock(&rman->lock);
- node = drm_mm_search_free_in_range(mm,
- mem->num_pages, mem->page_alignment,
- placement->fpfn, lpfn, 1);
- if (unlikely(node == NULL)) {
- spin_unlock(&rman->lock);
- return 0;
- }
- node = drm_mm_get_block_atomic_range(node, mem->num_pages,
- mem->page_alignment,
- placement->fpfn,
- lpfn);
- spin_unlock(&rman->lock);
- } while (node == NULL);
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ spin_lock(&rman->lock);
+ ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+ mem->page_alignment,
+ placement->fpfn, lpfn,
+ DRM_MM_SEARCH_BEST);
+ spin_unlock(&rman->lock);
+
+ if (unlikely(ret)) {
+ kfree(node);
+ } else {
+ mem->mm_node = node;
+ mem->start = node->start;
+ }
- mem->mm_node = node;
- mem->start = node->start;
return 0;
}
@@ -93,8 +90,10 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
if (mem->mm_node) {
spin_lock(&rman->lock);
- drm_mm_put_block(mem->mm_node);
+ drm_mm_remove_node(mem->mm_node);
spin_unlock(&rman->lock);
+
+ kfree(mem->mm_node);
mem->mm_node = NULL;
}
}
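
[note] The range-manager conversion above shows the new embedded-node drm_mm API: instead of drm_mm_pre_get() plus search/get_block in a retry loop, the caller allocates the node itself (outside the spinlock, so no atomic pre-allocation dance is needed) and drm_mm_insert_node_in_range() searches and inserts in one call. A sketch of the pattern, names taken from the hunk:

        node = kzalloc(sizeof(*node), GFP_KERNEL); /* caller owns the node */
        if (!node)
                return -ENOMEM;

        spin_lock(&rman->lock);
        /* one call searches [fpfn, lpfn) for a hole and inserts;
         * DRM_MM_SEARCH_BEST takes the smallest fitting hole,
         * DRM_MM_SEARCH_DEFAULT (see the via_mm.c hunk below) the first */
        ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
                                          mem->page_alignment,
                                          placement->fpfn, lpfn,
                                          DRM_MM_SEARCH_BEST);
        spin_unlock(&rman->lock);

        if (ret)
                kfree(node); /* -ENOSPC: caller sees mem->mm_node == NULL */
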
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 319cf4127c5b..7cc904d3a4d1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -30,6 +30,7 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
@@ -450,7 +451,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
INIT_LIST_HEAD(&fbo->io_reserve_lru);
- fbo->vm_node = NULL;
+ drm_vma_node_reset(&fbo->vma_node);
atomic_set(&fbo->cpu_writers, 0);
spin_lock(&bdev->fence_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3df9f16b041c..8c0e2c020215 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -33,6 +33,7 @@
#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
@@ -40,37 +41,6 @@
#define TTM_BO_VM_NUM_PREFAULT 16
-static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
- unsigned long page_start,
- unsigned long num_pages)
-{
- struct rb_node *cur = bdev->addr_space_rb.rb_node;
- unsigned long cur_offset;
- struct ttm_buffer_object *bo;
- struct ttm_buffer_object *best_bo = NULL;
-
- while (likely(cur != NULL)) {
- bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
- cur_offset = bo->vm_node->start;
- if (page_start >= cur_offset) {
- cur = cur->rb_right;
- best_bo = bo;
- if (page_start == cur_offset)
- break;
- } else
- cur = cur->rb_left;
- }
-
- if (unlikely(best_bo == NULL))
- return NULL;
-
- if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
- (page_start + num_pages)))
- return NULL;
-
- return best_bo;
-}
-
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -146,9 +116,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
- bo->vm_node->start - vma->vm_pgoff;
+ drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
page_last = vma_pages(vma) +
- bo->vm_node->start - vma->vm_pgoff;
+ drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
if (unlikely(page_offset >= bo->num_pages)) {
retval = VM_FAULT_SIGBUS;
@@ -249,6 +219,30 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
.close = ttm_bo_vm_close
};
+static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
+ unsigned long offset,
+ unsigned long pages)
+{
+ struct drm_vma_offset_node *node;
+ struct ttm_buffer_object *bo = NULL;
+
+ drm_vma_offset_lock_lookup(&bdev->vma_manager);
+
+ node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
+ if (likely(node)) {
+ bo = container_of(node, struct ttm_buffer_object, vma_node);
+ if (!kref_get_unless_zero(&bo->kref))
+ bo = NULL;
+ }
+
+ drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+
+ if (!bo)
+ pr_err("Could not find buffer object to map\n");
+
+ return bo;
+}
+
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev)
{
@@ -256,17 +250,9 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_buffer_object *bo;
int ret;
- read_lock(&bdev->vm_lock);
- bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
- vma_pages(vma));
- if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
- bo = NULL;
- read_unlock(&bdev->vm_lock);
-
- if (unlikely(bo == NULL)) {
- pr_err("Could not find buffer object to map\n");
+ bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
+ if (unlikely(!bo))
return -EINVAL;
- }
driver = bo->bdev->driver;
if (unlikely(!driver->verify_access)) {
@@ -324,12 +310,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
bool no_wait = false;
bool dummy;
- read_lock(&bdev->vm_lock);
- bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
- if (likely(bo != NULL))
- ttm_bo_reference(bo);
- read_unlock(&bdev->vm_lock);
-
+ bo = ttm_bo_vm_lookup(bdev, dev_offset, 1);
if (unlikely(bo == NULL))
return -EFAULT;
@@ -343,7 +324,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
if (unlikely(ret != 0))
goto out_unref;
- kmap_offset = dev_offset - bo->vm_node->start;
+ kmap_offset = dev_offset - drm_vma_node_start(&bo->vma_node);
if (unlikely(kmap_offset >= bo->num_pages)) {
ret = -EFBIG;
goto out_unref;
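
[note] The new ttm_bo_vm_lookup() above folds the two open-coded lookups (mmap and ttm_bo_io) into one helper and hardens them against a teardown race: the search runs under the manager's lookup lock, and kref_get_unless_zero() refuses an object whose last reference is already gone, so a dying BO is simply reported as not found. The pattern, annotated, with names as in the hunk:

        drm_vma_offset_lock_lookup(&bdev->vma_manager);
        /* finds a node covering [offset, offset + pages), if any */
        node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
        if (node) {
                bo = container_of(node, struct ttm_buffer_object, vma_node);
                /* the node may still be reachable while the BO is being
                 * destroyed; only take it if the refcount is non-zero */
                if (!kref_get_unless_zero(&bo->kref))
                        bo = NULL;
        }
        drm_vma_offset_unlock_lookup(&bdev->vma_manager);
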
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index c0770dbba74a..bb0af58c769a 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -84,7 +84,7 @@ static struct drm_driver driver = {
.dumb_create = udl_dumb_create,
.dumb_map_offset = udl_gem_mmap,
- .dumb_destroy = udl_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.fops = &udl_driver_fops,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
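
[note] udl (and tegra and imx-drm further down) previously implemented .dumb_destroy as a one-line wrapper around drm_gem_handle_delete(); this series adds that wrapper to the core as drm_gem_dumb_destroy() so the per-driver copies can go. Judging by the callbacks removed below, the shared helper amounts to the same one-liner:

        int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
                                 uint32_t handle)
        {
                return drm_gem_handle_delete(file, handle);
        }
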
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index cc6d90f28c71..56aec9409fa3 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -114,8 +114,6 @@ int udl_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args);
int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
-int udl_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle);
int udl_gem_init_object(struct drm_gem_object *obj);
void udl_gem_free_object(struct drm_gem_object *gem_obj);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index ef034fa3e6f5..b5e3b8038253 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -66,12 +66,6 @@ int udl_dumb_create(struct drm_file *file,
args->size, &args->handle);
}
-int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
@@ -223,8 +217,7 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->pages)
udl_gem_put_pages(obj);
- if (gem_obj->map_list.map)
- drm_gem_free_mmap_offset(gem_obj);
+ drm_gem_free_mmap_offset(gem_obj);
}
/* the dumb interface doesn't work with the GEM straight MMAP
@@ -247,13 +240,11 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
ret = udl_gem_get_pages(gobj, GFP_KERNEL);
if (ret)
goto out;
- if (!gobj->base.map_list.map) {
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
- }
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
- *offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;
+ *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
out:
drm_gem_object_unreference(&gobj->base);
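
[note] Two mechanical consequences of dropping gem->map_list show up in udl_gem.c: drm_gem_create_mmap_offset() and drm_gem_free_mmap_offset() are now safe to call unconditionally (the vma-manager no-ops on an already-allocated or never-allocated node, which is why the "is there already an offset?" checks disappear), and the mmap offset is read through an accessor instead of the hash key. A sketch against a generic GEM object:

        ret = drm_gem_create_mmap_offset(obj);  /* idempotent now */
        if (ret)
                return ret;

        /* byte offset userspace passes to mmap(); equivalent to
         * (u64)drm_vma_node_start(&obj->vma_node) << PAGE_SHIFT */
        *offset = drm_vma_node_offset_addr(&obj->vma_node);

        drm_gem_free_mmap_offset(obj);          /* no-op if never allocated */
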
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 0ce2d7195256..f5ae57406f34 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -41,8 +41,8 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
if (total_len > 5) {
- DRM_INFO("vendor descriptor length:%x data:%*ph\n",
- total_len, 11, desc);
+ DRM_INFO("vendor descriptor length:%x data:%11ph\n",
+ total_len, desc);
if ((desc[0] != total_len) || /* descriptor length */
(desc[1] != 0x5f) || /* vendor descriptor type */
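
[note] The udl_main.c change swaps one printk hex-dump extension for another: %*ph takes the byte count as a preceding int argument, while %11ph encodes the count in the specifier itself, so the extra length argument can be dropped. Both print the buffer as hex bytes; a sketch with a hypothetical buffer:

        u8 desc[11];

        pr_info("data:%*ph\n", (int)sizeof(desc), desc); /* count as argument */
        pr_info("data:%11ph\n", desc);                   /* count in specifier */
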
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 13558f5a2422..652f9b43ec9d 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -720,7 +720,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
return ret;
}
-struct drm_ioctl_desc via_ioctls[] = {
+const struct drm_ioctl_desc via_ioctls[] = {
DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 893a65090c36..a811ef2b505f 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -114,7 +114,7 @@ enum via_family {
#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
#define VIA_WRITE8(reg, val) DRM_WRITE8(VIA_BASE, reg, val)
-extern struct drm_ioctl_desc via_ioctls[];
+extern const struct drm_ioctl_desc via_ioctls[];
extern int via_max_ioctl;
extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
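
[note] The via change is part of a sweep (also vmwgfx, tegra and imx-drm below) constifying driver ioctl tables, which the DRM core now permits by accepting const struct drm_ioctl_desc * throughout; the tables can then live in .rodata. Definition and extern declaration must gain const together, as here:

        const struct drm_ioctl_desc via_ioctls[] = { /* ... as above ... */ };
        extern const struct drm_ioctl_desc via_ioctls[];
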
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 0ab93ff09873..7e3ad87c366c 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -140,11 +140,11 @@ int via_mem_alloc(struct drm_device *dev, void *data,
if (mem->type == VIA_MEM_AGP)
retval = drm_mm_insert_node(&dev_priv->agp_mm,
&item->mm_node,
- tmpSize, 0);
+ tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
else
retval = drm_mm_insert_node(&dev_priv->vram_mm,
&item->mm_node,
- tmpSize, 0);
+ tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
if (retval)
goto fail_alloc;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 78e21649d48a..50861504b5d9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -124,7 +124,7 @@
* Ioctl definitions.
*/
-static struct drm_ioctl_desc vmw_ioctls[] = {
+static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
@@ -782,7 +782,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
&& (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
- struct drm_ioctl_desc *ioctl =
+ const struct drm_ioctl_desc *ioctl =
&vmw_ioctls[nr - DRM_COMMAND_BASE];
if (unlikely(ioctl->cmd_drv != cmd)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 7953d1f90b63..0e67cf41065d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -500,7 +500,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
goto out_no_dmabuf;
rep->handle = handle;
- rep->map_handle = dma_buf->base.addr_space_offset;
+ rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
@@ -834,7 +834,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
if (ret != 0)
return -EINVAL;
- *offset = out_buf->base.addr_space_offset;
+ *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
vmw_dmabuf_unreference(&out_buf);
return 0;
}
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
index e184b00faacd..b128b90a94f6 100644
--- a/drivers/gpu/host1x/drm/drm.c
+++ b/drivers/gpu/host1x/drm/drm.c
@@ -487,7 +487,7 @@ static int tegra_submit(struct drm_device *drm, void *data,
}
#endif
-static struct drm_ioctl_desc tegra_drm_ioctls[] = {
+static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
@@ -633,7 +633,7 @@ struct drm_driver tegra_drm_driver = {
.gem_vm_ops = &tegra_bo_vm_ops,
.dumb_create = tegra_bo_dumb_create,
.dumb_map_offset = tegra_bo_dumb_map_offset,
- .dumb_destroy = tegra_bo_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = tegra_drm_ioctls,
.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c
index c5e9a9b494c2..3c35622c9f15 100644
--- a/drivers/gpu/host1x/drm/gem.c
+++ b/drivers/gpu/host1x/drm/gem.c
@@ -108,7 +108,7 @@ static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
{
- return (unsigned int)bo->gem.map_list.hash.key << PAGE_SHIFT;
+ return (unsigned int)drm_vma_node_offset_addr(&bo->gem.vma_node);
}
struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
@@ -182,8 +182,7 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
{
struct tegra_bo *bo = to_tegra_bo(gem);
- if (gem->map_list.map)
- drm_gem_free_mmap_offset(gem);
+ drm_gem_free_mmap_offset(gem);
drm_gem_object_release(gem);
tegra_bo_destroy(gem->dev, bo);
@@ -262,9 +261,3 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
}
-
-int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
- unsigned int handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
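
[note] One detail worth flagging in the tegra hunk above: tegra_bo_get_mmap_offset() narrows the offset to unsigned int, presumably safe on this platform given the small offset space, but drm_vma_node_offset_addr() itself returns a 64-bit value. The full-width form, as the other drivers in this diff use it:

        u64 offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
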
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/host1x/drm/gem.h
index 34de2b486eb7..2e93b0379da8 100644
--- a/drivers/gpu/host1x/drm/gem.h
+++ b/drivers/gpu/host1x/drm/gem.h
@@ -49,8 +49,6 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args);
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
uint32_t handle, uint64_t *offset);
-int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
- unsigned int handle);
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 9854a1daf606..a8900496b980 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -783,7 +783,7 @@ int imx_drm_remove_connector(struct imx_drm_connector *imx_drm_connector)
}
EXPORT_SYMBOL_GPL(imx_drm_remove_connector);
-static struct drm_ioctl_desc imx_drm_ioctls[] = {
+static const struct drm_ioctl_desc imx_drm_ioctls[] = {
/* none so far */
};
@@ -797,7 +797,7 @@ static struct drm_driver imx_drm_driver = {
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_cma_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = imx_drm_enable_vblank,