author	Chris Wilson <chris@chris-wilson.co.uk>	2015-10-01 12:18:29 +0100
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2015-10-07 16:05:41 +0200
commit	5763ff04dc4ebdd13d069d44513b10805ebebd8c (patch)
tree	8dec95710f402ff7c46e9cd5322fc0289b0ac289 /drivers
parent	ce8daef3580ae38fea9599e2193e4c368357c4c6 (diff)
download	linux-5763ff04dc4ebdd13d069d44513b10805ebebd8c.tar.bz2
drm/i915: Avoid GPU stalls from kswapd
Exclude active GPU pages from the purview of the background shrinker (kswapd), as these cause uncontrollable GPU stalls. Given that the shrinker is rerun until the freelists are satisfied, we should have opportunity in subsequent passes to recover the pages once idle. If the machine does run out of memory entirely, we have the forced idling in the oom-notifier as a means of releasing all the pages we can before an oom is prematurely executed.

Note that this relies upon an up-front retire_requests to keep the inactive list in shape, which was added in a previous patch, mostly as execlist ctx pinning band-aids.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
[danvet: Add note about retire_requests.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
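As a rough, self-contained sketch of the flag semantics described above (not driver code: the fake_obj struct, the shrink() helper and the page counts are invented for illustration), the idea is that background reclaim simply omits I915_SHRINK_ACTIVE, so objects still busy on the GPU are skipped, while the last-resort path such as i915_gem_shrink_all() passes the flag and reclaims everything:

/*
 * Toy model of the I915_SHRINK_ACTIVE filter; fake_obj and shrink()
 * are hypothetical stand-ins, not the driver's real structures.
 */
#include <stdbool.h>
#include <stdio.h>

#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND   0x2
#define I915_SHRINK_BOUND     0x4
#define I915_SHRINK_ACTIVE    0x8

struct fake_obj {
	const char *name;
	bool active;		/* still referenced by the GPU */
	unsigned long pages;	/* backing pages it could give up */
};

static unsigned long shrink(const struct fake_obj *objs, int n, unsigned flags)
{
	unsigned long freed = 0;

	for (int i = 0; i < n; i++) {
		/* the same test the patch adds to the shrink loop */
		if ((flags & I915_SHRINK_ACTIVE) == 0 && objs[i].active)
			continue;
		freed += objs[i].pages;
	}
	return freed;
}

int main(void)
{
	const struct fake_obj objs[] = {
		{ "scanout", true,  512 },
		{ "texture", false, 256 },
	};

	/* kswapd-style call: active pages left alone -> 256 */
	printf("%lu\n", shrink(objs, 2, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND));

	/* oom-style call (cf. i915_gem_shrink_all): everything -> 768 */
	printf("%lu\n", shrink(objs, 2, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND |
					I915_SHRINK_ACTIVE));
	return 0;
}

The real i915_gem_shrink() applies the same per-object test while walking the unbound and bound lists under struct_mutex; the sketch only mirrors the bitmask check and the two calling styles.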
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	1
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_shrinker.c	9
2 files changed, 8 insertions, 2 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 803105224e0d..b39b5cc0c096 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3212,6 +3212,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
 #define I915_SHRINK_PURGEABLE 0x1
 #define I915_SHRINK_UNBOUND 0x2
 #define I915_SHRINK_BOUND 0x4
+#define I915_SHRINK_ACTIVE 0x8
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 03cf3ccc4152..f7df54a8ee2b 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -126,6 +126,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			    obj->madv != I915_MADV_DONTNEED)
 				continue;
 
+			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
+				continue;
+
 			drm_gem_object_reference(&obj->base);
 
 			/* For the unbound phase, this should be a no-op! */
@@ -164,7 +167,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
 	return i915_gem_shrink(dev_priv, -1UL,
-			       I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
+			       I915_SHRINK_BOUND |
+			       I915_SHRINK_UNBOUND |
+			       I915_SHRINK_ACTIVE);
 }
 
 static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
@@ -217,7 +222,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 			count += obj->base.size >> PAGE_SHIFT;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (obj->pages_pin_count == num_vma_bound(obj))
+		if (!obj->active && obj->pages_pin_count == num_vma_bound(obj))
 			count += obj->base.size >> PAGE_SHIFT;
 	}