author    | Chris Wilson <chris@chris-wilson.co.uk> | 2010-10-19 10:36:51 +0100
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2010-10-20 10:51:51 +0100
commit    | 69dc4987cbe5fe70ae1c2a08906d431d53cdd242 (patch)
tree      | e8864d862538096b70a7d12eb1f9763f6faa8c39 /drivers/gpu
parent    | 87acb0a550694ff1a7725ea3a73b80d8ccf56180 (diff)
download  | linux-69dc4987cbe5fe70ae1c2a08906d431d53cdd242.tar.bz2
drm/i915: Track objects in global active list (as well as per-ring)
To handle retirements, we need per-ring tracking of active objects.
To handle evictions, we need global tracking of active objects.
As we enable more rings, rebuilding the global list from the individual
per-ring lists quickly grows tiresome and overly complicated. Tracking the
active objects in two lists is the lesser of two evils.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
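In outline, the patch gives each object two list links: ring_list, its place on a ring's active_list (walked by retirement in request order), and mm_list, its place on the new global dev_priv->mm.active_list (walked by eviction). Activating an object then costs two list_move_tail() calls instead of one. The following is a minimal, self-contained C sketch of that bookkeeping, written for this page rather than taken from the patch: the list helpers are simplified stand-ins for the kernel's <linux/list.h>, and main() is a made-up driver; only the struct/field names mirror the diff below.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void init_list_head(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail_stub(struct list_head *e, struct list_head *head)
{
        e->prev = head->prev;
        e->next = head;
        head->prev->next = e;
        head->prev = e;
}

/* Unlink the entry from whatever list it is on, then append it to head. */
static void list_move_tail_stub(struct list_head *e, struct list_head *head)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
        list_add_tail_stub(e, head);
}

struct ring {
        const char *name;
        struct list_head active_list;   /* per-ring list: retirement order */
};

struct object {
        struct list_head ring_list;     /* place on ring->active_list */
        struct list_head mm_list;       /* place on the global active list */
        unsigned int last_rendering_seqno;
};

/* Mirrors i915_gem_object_move_to_active(): one list_move_tail per list. */
static void move_to_active(struct object *obj, struct ring *ring,
                           struct list_head *global_active,
                           unsigned int seqno)
{
        list_move_tail_stub(&obj->mm_list, global_active);
        list_move_tail_stub(&obj->ring_list, &ring->active_list);
        obj->last_rendering_seqno = seqno;
}

int main(void)
{
        struct list_head active;        /* plays dev_priv->mm.active_list */
        struct ring render = { .name = "render" };
        struct ring bsd = { .name = "bsd" };
        struct object a, b;
        struct list_head *p;

        init_list_head(&active);
        init_list_head(&render.active_list);
        init_list_head(&bsd.active_list);
        init_list_head(&a.ring_list); init_list_head(&a.mm_list);
        init_list_head(&b.ring_list); init_list_head(&b.mm_list);

        move_to_active(&a, &render, &active, 1);
        move_to_active(&b, &bsd, &active, 2);

        /* Eviction-style walk: one pass over the single global list. */
        for (p = active.next; p != &active; p = p->next) {
                const struct object *o = (const struct object *)
                        ((const char *)p - offsetof(struct object, mm_list));
                printf("active object, seqno %u\n", o->last_rendering_seqno);
        }
        return 0;
}

Because both rings feed the one mm_list in submission order, the eviction path can drop its two-iterator merge loop (i915_gem_next_active_object, deleted in the i915_gem_evict.c hunks below) and simply walk dev_priv->mm.active_list.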
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/i915/i915_debugfs.c   | 23
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h       | 14
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c       | 47
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_evict.c | 59
-rw-r--r-- | drivers/gpu/drm/i915/i915_irq.c       | 11
5 files changed, 58 insertions(+), 96 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d598070fb279..f9e3295f0457 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -41,8 +41,7 @@
 #if defined(CONFIG_DEBUG_FS)
 
 enum {
-        RENDER_LIST,
-        BSD_LIST,
+        ACTIVE_LIST,
         FLUSHING_LIST,
         INACTIVE_LIST,
         PINNED_LIST,
@@ -125,6 +124,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                 seq_printf(m, " (fence: %d)", obj->fence_reg);
         if (obj->gtt_space != NULL)
                 seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+        if (obj->ring != NULL)
+                seq_printf(m, " (%s)", obj->ring->name);
 }
 
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
@@ -143,13 +144,9 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
                 return ret;
 
         switch (list) {
-        case RENDER_LIST:
-                seq_printf(m, "Render:\n");
-                head = &dev_priv->render_ring.active_list;
-                break;
-        case BSD_LIST:
-                seq_printf(m, "BSD:\n");
-                head = &dev_priv->bsd_ring.active_list;
+        case ACTIVE_LIST:
+                seq_printf(m, "Active:\n");
+                head = &dev_priv->mm.active_list;
                 break;
         case INACTIVE_LIST:
                 seq_printf(m, "Inactive:\n");
@@ -173,7 +170,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
         }
 
         total_obj_size = total_gtt_size = count = 0;
-        list_for_each_entry(obj_priv, head, list) {
+        list_for_each_entry(obj_priv, head, mm_list) {
                 seq_printf(m, "   ");
                 describe_obj(m, obj_priv);
                 seq_printf(m, "\n");
@@ -460,8 +457,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
         if (ret)
                 return ret;
 
-        list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
-                            list) {
+        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                 obj = &obj_priv->base;
                 if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
                         seq_printf(m, "--- gtt_offset = 0x%08x\n",
@@ -1020,8 +1016,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 static struct drm_info_list i915_debugfs_list[] = {
         {"i915_capabilities", i915_capabilities, 0, 0},
         {"i915_gem_objects", i915_gem_object_info, 0},
-        {"i915_gem_render_active", i915_gem_object_list_info, 0, (void *) RENDER_LIST},
-        {"i915_gem_bsd_active", i915_gem_object_list_info, 0, (void *) BSD_LIST},
+        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
         {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
         {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
         {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 84e33aeececd..817d8be6ff49 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -548,6 +548,17 @@ typedef struct drm_i915_private {
                 struct list_head shrink_list;
 
                 /**
+                 * List of objects currently involved in rendering.
+                 *
+                 * Includes buffers having the contents of their GPU caches
+                 * flushed, not necessarily primitives. last_rendering_seqno
+                 * represents when the rendering involved will be completed.
+                 *
+                 * A reference is held on the buffer while on this list.
+                 */
+                struct list_head active_list;
+
+                /**
                  * List of objects which are not in the ringbuffer but which
                  * still have a write_domain which needs to be flushed before
                  * unbinding.
@@ -714,7 +725,8 @@ struct drm_i915_gem_object {
         struct drm_mm_node *gtt_space;
 
         /** This object's place on the active/flushing/inactive lists */
-        struct list_head list;
+        struct list_head ring_list;
+        struct list_head mm_list;
         /** This object's place on GPU write list */
         struct list_head gpu_write_list;
         /** This object's place on eviction list */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 56153685d145..6e85496f9164 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1139,7 +1139,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 
         /* Maintain LRU order of "inactive" objects */
         if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
-                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+                list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
 
         drm_gem_object_unreference(obj);
 unlock:
@@ -1271,7 +1271,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         }
 
         if (i915_gem_object_is_inactive(obj_priv))
-                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+                list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
 
         pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
                 page_offset;
@@ -1565,6 +1565,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj,
                                struct intel_ring_buffer *ring)
 {
         struct drm_device *dev = obj->dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
         uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
 
@@ -1578,7 +1579,8 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj,
         }
 
         /* Move from whatever list we were on to the tail of execution. */
-        list_move_tail(&obj_priv->list, &ring->active_list);
+        list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
+        list_move_tail(&obj_priv->ring_list, &ring->active_list);
         obj_priv->last_rendering_seqno = seqno;
 }
 
@@ -1590,7 +1592,8 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
         BUG_ON(!obj_priv->active);
-        list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+        list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
+        list_del_init(&obj_priv->ring_list);
         obj_priv->last_rendering_seqno = 0;
 }
 
@@ -1629,9 +1632,10 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
         if (obj_priv->pin_count != 0)
-                list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list);
+                list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
         else
-                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+                list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+        list_del_init(&obj_priv->ring_list);
 
         BUG_ON(!list_empty(&obj_priv->gpu_write_list));
 
@@ -1780,7 +1784,7 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 
                 obj_priv = list_first_entry(&ring->active_list,
                                             struct drm_i915_gem_object,
-                                            list);
+                                            ring_list);
 
                 obj_priv->base.write_domain = 0;
                 list_del_init(&obj_priv->gpu_write_list);
@@ -1804,7 +1808,7 @@ void i915_gem_reset(struct drm_device *dev)
         while (!list_empty(&dev_priv->mm.flushing_list)) {
                 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
                                             struct drm_i915_gem_object,
-                                            list);
+                                            mm_list);
 
                 obj_priv->base.write_domain = 0;
                 list_del_init(&obj_priv->gpu_write_list);
@@ -1816,7 +1820,7 @@ void i915_gem_reset(struct drm_device *dev)
          */
         list_for_each_entry(obj_priv,
                             &dev_priv->mm.inactive_list,
-                            list)
+                            mm_list)
         {
                 obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
         }
@@ -1876,7 +1880,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 
                 obj_priv = list_first_entry(&ring->active_list,
                                             struct drm_i915_gem_object,
-                                            list);
+                                            ring_list);
 
                 if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
                         break;
@@ -1912,7 +1916,7 @@ i915_gem_retire_requests(struct drm_device *dev)
          */
                 list_for_each_entry_safe(obj_priv, tmp,
                                          &dev_priv->mm.deferred_free_list,
-                                         list)
+                                         mm_list)
                         i915_gem_free_object_tail(&obj_priv->base);
         }
@@ -2145,7 +2149,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
         BUG_ON(obj_priv->pages_refcount);
 
         i915_gem_info_remove_gtt(dev_priv, obj->size);
-        list_del_init(&obj_priv->list);
+        list_del_init(&obj_priv->mm_list);
 
         drm_mm_put_block(obj_priv->gtt_space);
         obj_priv->gtt_space = NULL;
@@ -2700,7 +2704,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
         }
 
         /* keep track of bounds object by adding it to the inactive list */
-        list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+        list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
         i915_gem_info_add_gtt(dev_priv, obj->size);
 
         /* Assert that the object is not currently in any GPU domain. As it
@@ -4022,7 +4026,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
         if (obj_priv->pin_count == 1) {
                 i915_gem_info_add_pin(dev_priv, obj->size);
                 if (!obj_priv->active)
-                        list_move_tail(&obj_priv->list,
+                        list_move_tail(&obj_priv->mm_list,
                                        &dev_priv->mm.pinned_list);
         }
 
@@ -4048,7 +4052,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
          */
         if (obj_priv->pin_count == 0) {
                 if (!obj_priv->active)
-                        list_move_tail(&obj_priv->list,
+                        list_move_tail(&obj_priv->mm_list,
                                        &dev_priv->mm.inactive_list);
                 i915_gem_info_remove_pin(dev_priv, obj->size);
         }
@@ -4280,7 +4284,8 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
         obj->agp_type = AGP_USER_MEMORY;
         obj->base.driver_private = NULL;
         obj->fence_reg = I915_FENCE_REG_NONE;
-        INIT_LIST_HEAD(&obj->list);
+        INIT_LIST_HEAD(&obj->mm_list);
+        INIT_LIST_HEAD(&obj->ring_list);
         INIT_LIST_HEAD(&obj->gpu_write_list);
         obj->madv = I915_MADV_WILLNEED;
 
@@ -4303,7 +4308,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
 
         ret = i915_gem_object_unbind(obj);
         if (ret == -ERESTARTSYS) {
-                list_move(&obj_priv->list,
+                list_move(&obj_priv->mm_list,
                           &dev_priv->mm.deferred_free_list);
                 return;
         }
@@ -4511,6 +4516,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                 return ret;
         }
 
+        BUG_ON(!list_empty(&dev_priv->mm.active_list));
         BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
         BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
@@ -4564,6 +4570,7 @@ i915_gem_load(struct drm_device *dev)
         int i;
         drm_i915_private_t *dev_priv = dev->dev_private;
 
+        INIT_LIST_HEAD(&dev_priv->mm.active_list);
         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
         INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
@@ -4859,7 +4866,7 @@ i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
         if (mutex_trylock(&dev->struct_mutex)) {
                 list_for_each_entry(obj_priv,
                                     &dev_priv->mm.inactive_list,
-                                    list)
+                                    mm_list)
                         cnt++;
                 mutex_unlock(&dev->struct_mutex);
         }
@@ -4885,7 +4892,7 @@ rescan:
 
         list_for_each_entry_safe(obj_priv, next_obj,
                                  &dev_priv->mm.inactive_list,
-                                 list) {
+                                 mm_list) {
                 if (i915_gem_object_is_purgeable(obj_priv)) {
                         i915_gem_object_unbind(&obj_priv->base);
                         if (--nr_to_scan <= 0)
@@ -4914,7 +4921,7 @@ rescan:
 
         list_for_each_entry_safe(obj_priv, next_obj,
                                  &dev_priv->mm.inactive_list,
-                                 list) {
+                                 mm_list) {
                 if (nr_to_scan > 0) {
                         i915_gem_object_unbind(&obj_priv->base);
                         nr_to_scan--;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index d2733a1e2bcc..70db2f1ee369 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -31,49 +31,6 @@
 #include "i915_drv.h"
 #include "i915_drm.h"
 
-static struct drm_i915_gem_object *
-i915_gem_next_active_object(struct drm_device *dev,
-                            struct list_head **render_iter,
-                            struct list_head **bsd_iter)
-{
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
-
-        if (*render_iter != &dev_priv->render_ring.active_list)
-                render_obj = list_entry(*render_iter,
-                                        struct drm_i915_gem_object,
-                                        list);
-
-        if (HAS_BSD(dev)) {
-                if (*bsd_iter != &dev_priv->bsd_ring.active_list)
-                        bsd_obj = list_entry(*bsd_iter,
-                                             struct drm_i915_gem_object,
-                                             list);
-
-                if (render_obj == NULL) {
-                        *bsd_iter = (*bsd_iter)->next;
-                        return bsd_obj;
-                }
-
-                if (bsd_obj == NULL) {
-                        *render_iter = (*render_iter)->next;
-                        return render_obj;
-                }
-
-                /* XXX can we handle seqno wrapping? */
-                if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
-                        *render_iter = (*render_iter)->next;
-                        return render_obj;
-                } else {
-                        *bsd_iter = (*bsd_iter)->next;
-                        return bsd_obj;
-                }
-        } else {
-                *render_iter = (*render_iter)->next;
-                return render_obj;
-        }
-}
-
 static bool
 mark_free(struct drm_i915_gem_object *obj_priv,
           struct list_head *unwind)
@@ -83,18 +40,12 @@ mark_free(struct drm_i915_gem_object *obj_priv,
         return drm_mm_scan_add_block(obj_priv->gtt_space);
 }
 
-#define i915_for_each_active_object(OBJ, R, B) \
-        *(R) = dev_priv->render_ring.active_list.next; \
-        *(B) = dev_priv->bsd_ring.active_list.next; \
-        while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
-
 int
 i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct list_head eviction_list, unwind_list;
         struct drm_i915_gem_object *obj_priv;
-        struct list_head *render_iter, *bsd_iter;
         int ret = 0;
 
         i915_gem_retire_requests(dev);
@@ -131,13 +82,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
         drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
 
         /* First see if there is a large enough contiguous idle region... */
-        list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+        list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
                 if (mark_free(obj_priv, &unwind_list))
                         goto found;
         }
 
         /* Now merge in the soon-to-be-expired objects... */
-        i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                 /* Does the object require an outstanding flush? */
                 if (obj_priv->base.write_domain || obj_priv->pin_count)
                         continue;
@@ -147,14 +98,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
         }
 
         /* Finally add anything with a pending flush (in order of retirement) */
-        list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+        list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
                 if (obj_priv->pin_count)
                         continue;
 
                 if (mark_free(obj_priv, &unwind_list))
                         goto found;
         }
 
-        i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                 if (!obj_priv->base.write_domain || obj_priv->pin_count)
                         continue;
@@ -251,7 +202,7 @@ i915_gem_evict_inactive(struct drm_device *dev)
 
                 obj = &list_first_entry(&dev_priv->mm.inactive_list,
                                         struct drm_i915_gem_object,
-                                        list)->base;
+                                        mm_list)->base;
 
                 ret = i915_gem_object_unbind(obj);
                 if (ret != 0) {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1e30c250140b..f94cd7ffd74d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -608,9 +608,7 @@ static void i915_capture_error_state(struct drm_device *dev)
         batchbuffer[0] = NULL;
         batchbuffer[1] = NULL;
         count = 0;
-        list_for_each_entry(obj_priv,
-                        &dev_priv->render_ring.active_list, list) {
-
+        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                 struct drm_gem_object *obj = &obj_priv->base;
 
                 if (batchbuffer[0] == NULL &&
@@ -627,7 +625,7 @@ static void i915_capture_error_state(struct drm_device *dev)
         }
         /* Scan the other lists for completeness for those bizarre errors. */
         if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-                list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+                list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
                         struct drm_gem_object *obj = &obj_priv->base;
 
                         if (batchbuffer[0] == NULL &&
@@ -645,7 +643,7 @@ static void i915_capture_error_state(struct drm_device *dev)
                 }
         }
         if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-                list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+                list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
                         struct drm_gem_object *obj = &obj_priv->base;
 
                         if (batchbuffer[0] == NULL &&
@@ -686,8 +684,7 @@ static void i915_capture_error_state(struct drm_device *dev)
         if (error->active_bo) {
                 int i = 0;
 
-                list_for_each_entry(obj_priv,
-                                &dev_priv->render_ring.active_list, list) {
+                list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                         struct drm_gem_object *obj = &obj_priv->base;
 
                         error->active_bo[i].size = obj->size;