author    | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-07-24 13:55:11 +0200
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-07-27 10:24:39 +0200
commit    | 41a36b739ae378043ecc3984bec3dba00353fa34 (patch)
tree      | f3ef23583999a0183b7ba590c7bef5bd0521cdbc /drivers/gpu/drm
parent    | 042794b1f4234de36ce2d6edae071054c6985a03 (diff)
download  | linux-41a36b739ae378043ecc3984bec3dba00353fa34.tar.bz2
drm/i915: Extract i915_gem_fence.c
No code changes; this just moves all the fence-related code into a
separate file (and avoids a bunch of forward declarations while at
it).
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
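
The pattern is worth spelling out: helpers that used to be static in i915_gem.c (and so needed forward declarations wherever a caller preceded a definition) move wholesale into the new file, and the shared header gains a clearly labelled block of prototypes. Below is a minimal sketch of the same refactor with hypothetical names (widget.h / widget_fence.c stand in for i915_drv.h / i915_gem_fence.c; this is not i915 code):

/* widget.h -- shared header; this block plays the role of the
 * i915_gem_fence.c prototype section the commit adds to i915_drv.h. */
#ifndef WIDGET_H
#define WIDGET_H

struct widget {
        int fence_reg;          /* -1 when no fence register is held */
};

/* widget_fence.c */
int widget_get_fence(struct widget *w);
void widget_put_fence(struct widget *w);

#endif

/* widget_fence.c -- the definitions live together, helpers defined
 * before their callers, so no forward declarations are required. */
#include "widget.h"

static int find_free_reg(void)
{
        return 0;               /* pretend register 0 is always free */
}

int widget_get_fence(struct widget *w)
{
        w->fence_reg = find_free_reg();
        return 0;
}

void widget_put_fence(struct widget *w)
{
        w->fence_reg = -1;
}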
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r-- | drivers/gpu/drm/i915/Makefile         |   1
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h       |  16
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c       | 401
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_fence.c | 422
4 files changed, 432 insertions(+), 408 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index bf91482e14a3..41fb8a9c5bef 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -25,6 +25,7 @@ i915-y += i915_cmd_parser.o \
 	  i915_gem_dmabuf.o \
 	  i915_gem_evict.o \
 	  i915_gem_execbuffer.o \
+	  i915_gem_fence.o \
 	  i915_gem_gtt.o \
 	  i915_gem.o \
 	  i915_gem_render_state.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b94ada96b348..d575029af541 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2868,11 +2868,6 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-
-bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
-void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring);
@@ -2970,8 +2965,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				struct drm_gem_object *gem_obj, int flags);
 
-void i915_gem_restore_fences(struct drm_device *dev);
-
 unsigned long
 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
 			      const struct i915_ggtt_view *view);
@@ -3066,6 +3059,15 @@ i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
 	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
 }
 
+/* i915_gem_fence.c */
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
+bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
+
+void i915_gem_restore_fences(struct drm_device *dev);
+
 /* i915_gem_context.c */
 int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 322bbefafbc3..5d685789b1f9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,11 +46,6 @@ static void i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
 static void
 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
 
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj);
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
-					 struct drm_i915_fence_reg *fence,
-					 bool enable);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
 				  enum i915_cache_level level)
@@ -66,18 +61,6 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 	return obj->pin_display;
 }
 
-static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
-{
-	if (obj->tiling_mode)
-		i915_gem_release_mmap(obj);
-
-	/* As we do not have an associated fence register, we will force
-	 * a tiling change if we ever need to acquire one.
-	 */
-	obj->fence_dirty = false;
-	obj->fence_reg = I915_FENCE_REG_NONE;
-}
-
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
 				  size_t size)
@@ -2793,27 +2776,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	}
 }
 
-void i915_gem_restore_fences(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
-
-	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-
-		/*
-		 * Commit delayed tiling changes if we have an object still
-		 * attached to the fence, otherwise just clear the fence.
-		 */
-		if (reg->obj) {
-			i915_gem_object_update_fence(reg->obj, reg,
-						     reg->obj->tiling_mode);
-		} else {
-			i915_gem_write_fence(dev, i, NULL);
-		}
-	}
-}
-
 void i915_gem_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3340,343 +3302,6 @@ int i915_gpu_idle(struct drm_device *dev)
 	return 0;
 }
 
-static void i965_write_fence_reg(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int fence_reg;
-	int fence_pitch_shift;
-
-	if (INTEL_INFO(dev)->gen >= 6) {
-		fence_reg = FENCE_REG_SANDYBRIDGE_0;
-		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
-	} else {
-		fence_reg = FENCE_REG_965_0;
-		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
-	}
-
-	fence_reg += reg * 8;
-
-	/* To w/a incoherency with non-atomic 64-bit register updates,
-	 * we split the 64-bit update into two 32-bit writes. In order
-	 * for a partial fence not to be evaluated between writes, we
-	 * precede the update with write to turn off the fence register,
-	 * and only enable the fence as the last step.
-	 *
-	 * For extra levels of paranoia, we make sure each step lands
-	 * before applying the next step.
-	 */
-	I915_WRITE(fence_reg, 0);
-	POSTING_READ(fence_reg);
-
-	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
-		uint64_t val;
-
-		/* Adjust fence size to match tiled area */
-		if (obj->tiling_mode != I915_TILING_NONE) {
-			uint32_t row_size = obj->stride *
-				(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
-			size = (size / row_size) * row_size;
-		}
-
-		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
-				 0xfffff000) << 32;
-		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
-		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
-		val |= I965_FENCE_REG_VALID;
-
-		I915_WRITE(fence_reg + 4, val >> 32);
-		POSTING_READ(fence_reg + 4);
-
-		I915_WRITE(fence_reg + 0, val);
-		POSTING_READ(fence_reg);
-	} else {
-		I915_WRITE(fence_reg + 4, 0);
-		POSTING_READ(fence_reg + 4);
-	}
-}
-
-static void i915_write_fence_reg(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 val;
-
-	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
-		int pitch_val;
-		int tile_width;
-
-		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
-		     (size & -size) != size ||
-		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
-
-		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
-			tile_width = 128;
-		else
-			tile_width = 512;
-
-		/* Note: pitch better be a power of two tile widths */
-		pitch_val = obj->stride / tile_width;
-		pitch_val = ffs(pitch_val) - 1;
-
-		val = i915_gem_obj_ggtt_offset(obj);
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-		val |= I915_FENCE_SIZE_BITS(size);
-		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
-		val |= I830_FENCE_REG_VALID;
-	} else
-		val = 0;
-
-	if (reg < 8)
-		reg = FENCE_REG_830_0 + reg * 4;
-	else
-		reg = FENCE_REG_945_8 + (reg - 8) * 4;
-
-	I915_WRITE(reg, val);
-	POSTING_READ(reg);
-}
-
-static void i830_write_fence_reg(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t val;
-
-	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
-		uint32_t pitch_val;
-
-		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
-		     (size & -size) != size ||
-		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-		     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
-		     i915_gem_obj_ggtt_offset(obj), size);
-
-		pitch_val = obj->stride / 128;
-		pitch_val = ffs(pitch_val) - 1;
-
-		val = i915_gem_obj_ggtt_offset(obj);
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-		val |= I830_FENCE_SIZE_BITS(size);
-		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
-		val |= I830_FENCE_REG_VALID;
-	} else
-		val = 0;
-
-	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
-	POSTING_READ(FENCE_REG_830_0 + reg * 4);
-}
-
-inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
-{
-	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
-}
-
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/* Ensure that all CPU reads are completed before installing a fence
-	 * and all writes before removing the fence.
-	 */
-	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
-		mb();
-
-	WARN(obj && (!obj->stride || !obj->tiling_mode),
-	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
-	     obj->stride, obj->tiling_mode);
-
-	if (IS_GEN2(dev))
-		i830_write_fence_reg(dev, reg, obj);
-	else if (IS_GEN3(dev))
-		i915_write_fence_reg(dev, reg, obj);
-	else if (INTEL_INFO(dev)->gen >= 4)
-		i965_write_fence_reg(dev, reg, obj);
-
-	/* And similarly be paranoid that no direct access to this region
-	 * is reordered to before the fence is installed.
-	 */
-	if (i915_gem_object_needs_mb(obj))
-		mb();
-}
-
-static inline int fence_number(struct drm_i915_private *dev_priv,
-			       struct drm_i915_fence_reg *fence)
-{
-	return fence - dev_priv->fence_regs;
-}
-
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
-					 struct drm_i915_fence_reg *fence,
-					 bool enable)
-{
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	int reg = fence_number(dev_priv, fence);
-
-	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
-
-	if (enable) {
-		obj->fence_reg = reg;
-		fence->obj = obj;
-		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
-	} else {
-		obj->fence_reg = I915_FENCE_REG_NONE;
-		fence->obj = NULL;
-		list_del_init(&fence->lru_list);
-	}
-	obj->fence_dirty = false;
-}
-
-static int
-i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
-{
-	if (obj->last_fenced_req) {
-		int ret = i915_wait_request(obj->last_fenced_req);
-		if (ret)
-			return ret;
-
-		i915_gem_request_assign(&obj->last_fenced_req, NULL);
-	}
-
-	return 0;
-}
-
-int
-i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	struct drm_i915_fence_reg *fence;
-	int ret;
-
-	ret = i915_gem_object_wait_fence(obj);
-	if (ret)
-		return ret;
-
-	if (obj->fence_reg == I915_FENCE_REG_NONE)
-		return 0;
-
-	fence = &dev_priv->fence_regs[obj->fence_reg];
-
-	if (WARN_ON(fence->pin_count))
-		return -EBUSY;
-
-	i915_gem_object_fence_lost(obj);
-	i915_gem_object_update_fence(obj, fence, false);
-
-	return 0;
-}
-
-static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_fence_reg *reg, *avail;
-	int i;
-
-	/* First try to find a free reg */
-	avail = NULL;
-	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
-		reg = &dev_priv->fence_regs[i];
-		if (!reg->obj)
-			return reg;
-
-		if (!reg->pin_count)
-			avail = reg;
-	}
-
-	if (avail == NULL)
-		goto deadlock;
-
-	/* None available, try to steal one or wait for a user to finish */
-	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
-		if (reg->pin_count)
-			continue;
-
-		return reg;
-	}
-
-deadlock:
-	/* Wait for completion of pending flips which consume fences */
-	if (intel_has_pending_fb_unpin(dev))
-		return ERR_PTR(-EAGAIN);
-
-	return ERR_PTR(-EDEADLK);
-}
-
-/**
- * i915_gem_object_get_fence - set up fencing for an object
- * @obj: object to map through a fence reg
- *
- * When mapping objects through the GTT, userspace wants to be able to write
- * to them without having to worry about swizzling if the object is tiled.
- * This function walks the fence regs looking for a free one for @obj,
- * stealing one if it can't find any.
- *
- * It then sets up the reg based on the object's properties: address, pitch
- * and tiling format.
- *
- * For an untiled surface, this removes any existing fence.
- */
-int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	bool enable = obj->tiling_mode != I915_TILING_NONE;
-	struct drm_i915_fence_reg *reg;
-	int ret;
-
-	/* Have we updated the tiling parameters upon the object and so
-	 * will need to serialise the write to the associated fence register?
-	 */
-	if (obj->fence_dirty) {
-		ret = i915_gem_object_wait_fence(obj);
-		if (ret)
-			return ret;
-	}
-
-	/* Just update our place in the LRU if our fence is getting reused. */
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		reg = &dev_priv->fence_regs[obj->fence_reg];
-		if (!obj->fence_dirty) {
-			list_move_tail(&reg->lru_list,
-				       &dev_priv->mm.fence_list);
-			return 0;
-		}
-	} else if (enable) {
-		if (WARN_ON(!obj->map_and_fenceable))
-			return -EINVAL;
-
-		reg = i915_find_fence_reg(dev);
-		if (IS_ERR(reg))
-			return PTR_ERR(reg);
-
-		if (reg->obj) {
-			struct drm_i915_gem_object *old = reg->obj;
-
-			ret = i915_gem_object_wait_fence(old);
-			if (ret)
-				return ret;
-
-			i915_gem_object_fence_lost(old);
-		}
-	} else
-		return 0;
-
-	i915_gem_object_update_fence(obj, reg, enable);
-
-	return 0;
-}
-
 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 				     unsigned long cache_level)
 {
@@ -4476,32 +4101,6 @@ i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
 	--vma->pin_count;
 }
 
-bool
-i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
-{
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
-
-		WARN_ON(!ggtt_vma ||
-			dev_priv->fence_regs[obj->fence_reg].pin_count >
-			ggtt_vma->pin_count);
-		dev_priv->fence_regs[obj->fence_reg].pin_count++;
-		return true;
-	} else
-		return false;
-}
-
-void
-i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
-{
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
-		dev_priv->fence_regs[obj->fence_reg].pin_count--;
-	}
-}
-
 int
 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		    struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
new file mode 100644
index 000000000000..d3284ee04794
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int fence_reg;
+	int fence_pitch_shift;
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		fence_reg = FENCE_REG_SANDYBRIDGE_0;
+		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+	} else {
+		fence_reg = FENCE_REG_965_0;
+		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+	}
+
+	fence_reg += reg * 8;
+
+	/* To w/a incoherency with non-atomic 64-bit register updates,
+	 * we split the 64-bit update into two 32-bit writes. In order
+	 * for a partial fence not to be evaluated between writes, we
+	 * precede the update with write to turn off the fence register,
+	 * and only enable the fence as the last step.
+	 *
+	 * For extra levels of paranoia, we make sure each step lands
+	 * before applying the next step.
+	 */
+	I915_WRITE(fence_reg, 0);
+	POSTING_READ(fence_reg);
+
+	if (obj) {
+		u32 size = i915_gem_obj_ggtt_size(obj);
+		uint64_t val;
+
+		/* Adjust fence size to match tiled area */
+		if (obj->tiling_mode != I915_TILING_NONE) {
+			uint32_t row_size = obj->stride *
+				(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+			size = (size / row_size) * row_size;
+		}
+
+		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
+				 0xfffff000) << 32;
+		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
+		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+		val |= I965_FENCE_REG_VALID;
+
+		I915_WRITE(fence_reg + 4, val >> 32);
+		POSTING_READ(fence_reg + 4);
+
+		I915_WRITE(fence_reg + 0, val);
+		POSTING_READ(fence_reg);
+	} else {
+		I915_WRITE(fence_reg + 4, 0);
+		POSTING_READ(fence_reg + 4);
+	}
+}
+
+static void i915_write_fence_reg(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	if (obj) {
+		u32 size = i915_gem_obj_ggtt_size(obj);
+		int pitch_val;
+		int tile_width;
+
+		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
+		     (size & -size) != size ||
+		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
+
+		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+			tile_width = 128;
+		else
+			tile_width = 512;
+
+		/* Note: pitch better be a power of two tile widths */
+		pitch_val = obj->stride / tile_width;
+		pitch_val = ffs(pitch_val) - 1;
+
+		val = i915_gem_obj_ggtt_offset(obj);
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+		val |= I915_FENCE_SIZE_BITS(size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
+	} else
+		val = 0;
+
+	if (reg < 8)
+		reg = FENCE_REG_830_0 + reg * 4;
+	else
+		reg = FENCE_REG_945_8 + (reg - 8) * 4;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+}
+
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t val;
+
+	if (obj) {
+		u32 size = i915_gem_obj_ggtt_size(obj);
+		uint32_t pitch_val;
+
+		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
+		     (size & -size) != size ||
+		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+		     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+		     i915_gem_obj_ggtt_offset(obj), size);
+
+		pitch_val = obj->stride / 128;
+		pitch_val = ffs(pitch_val) - 1;
+
+		val = i915_gem_obj_ggtt_offset(obj);
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+		val |= I830_FENCE_SIZE_BITS(size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
+	} else
+		val = 0;
+
+	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+	POSTING_READ(FENCE_REG_830_0 + reg * 4);
+}
+
+inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
+{
+	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+}
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Ensure that all CPU reads are completed before installing a fence
+	 * and all writes before removing the fence.
+	 */
+	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
+		mb();
+
+	WARN(obj && (!obj->stride || !obj->tiling_mode),
+	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+	     obj->stride, obj->tiling_mode);
+
+	if (IS_GEN2(dev))
+		i830_write_fence_reg(dev, reg, obj);
+	else if (IS_GEN3(dev))
+		i915_write_fence_reg(dev, reg, obj);
+	else if (INTEL_INFO(dev)->gen >= 4)
+		i965_write_fence_reg(dev, reg, obj);
+
+	/* And similarly be paranoid that no direct access to this region
+	 * is reordered to before the fence is installed.
+	 */
+	if (i915_gem_object_needs_mb(obj))
+		mb();
+}
+
+static inline int fence_number(struct drm_i915_private *dev_priv,
+			       struct drm_i915_fence_reg *fence)
+{
+	return fence - dev_priv->fence_regs;
+}
+
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+					 struct drm_i915_fence_reg *fence,
+					 bool enable)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int reg = fence_number(dev_priv, fence);
+
+	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+	if (enable) {
+		obj->fence_reg = reg;
+		fence->obj = obj;
+		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+	} else {
+		obj->fence_reg = I915_FENCE_REG_NONE;
+		fence->obj = NULL;
+		list_del_init(&fence->lru_list);
+	}
+	obj->fence_dirty = false;
+}
+
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+	if (obj->tiling_mode)
+		i915_gem_release_mmap(obj);
+
+	/* As we do not have an associated fence register, we will force
+	 * a tiling change if we ever need to acquire one.
+	 */
+	obj->fence_dirty = false;
+	obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
+static int
+i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->last_fenced_req) {
+		int ret = i915_wait_request(obj->last_fenced_req);
+		if (ret)
+			return ret;
+
+		i915_gem_request_assign(&obj->last_fenced_req, NULL);
+	}
+
+	return 0;
+}
+
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_fence_reg *fence;
+	int ret;
+
+	ret = i915_gem_object_wait_fence(obj);
+	if (ret)
+		return ret;
+
+	if (obj->fence_reg == I915_FENCE_REG_NONE)
+		return 0;
+
+	fence = &dev_priv->fence_regs[obj->fence_reg];
+
+	if (WARN_ON(fence->pin_count))
+		return -EBUSY;
+
+	i915_gem_object_fence_lost(obj);
+	i915_gem_object_update_fence(obj, fence, false);
+
+	return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_fence_reg *reg, *avail;
+	int i;
+
+	/* First try to find a free reg */
+	avail = NULL;
+	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+		reg = &dev_priv->fence_regs[i];
+		if (!reg->obj)
+			return reg;
+
+		if (!reg->pin_count)
+			avail = reg;
+	}
+
+	if (avail == NULL)
+		goto deadlock;
+
+	/* None available, try to steal one or wait for a user to finish */
+	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+		if (reg->pin_count)
+			continue;
+
+		return reg;
+	}
+
+deadlock:
+	/* Wait for completion of pending flips which consume fences */
+	if (intel_has_pending_fb_unpin(dev))
+		return ERR_PTR(-EAGAIN);
+
+	return ERR_PTR(-EDEADLK);
+}
+
+/**
+ * i915_gem_object_get_fence - set up fencing for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
+ */
+int
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool enable = obj->tiling_mode != I915_TILING_NONE;
+	struct drm_i915_fence_reg *reg;
+	int ret;
+
+	/* Have we updated the tiling parameters upon the object and so
+	 * will need to serialise the write to the associated fence register?
+	 */
+	if (obj->fence_dirty) {
+		ret = i915_gem_object_wait_fence(obj);
+		if (ret)
+			return ret;
+	}
+
+	/* Just update our place in the LRU if our fence is getting reused. */
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		reg = &dev_priv->fence_regs[obj->fence_reg];
+		if (!obj->fence_dirty) {
+			list_move_tail(&reg->lru_list,
+				       &dev_priv->mm.fence_list);
+			return 0;
+		}
+	} else if (enable) {
+		if (WARN_ON(!obj->map_and_fenceable))
+			return -EINVAL;
+
+		reg = i915_find_fence_reg(dev);
+		if (IS_ERR(reg))
+			return PTR_ERR(reg);
+
+		if (reg->obj) {
+			struct drm_i915_gem_object *old = reg->obj;
+
+			ret = i915_gem_object_wait_fence(old);
+			if (ret)
+				return ret;
+
+			i915_gem_object_fence_lost(old);
+		}
+	} else
+		return 0;
+
+	i915_gem_object_update_fence(obj, reg, enable);
+
+	return 0;
+}
+
+bool
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
+
+		WARN_ON(!ggtt_vma ||
+			dev_priv->fence_regs[obj->fence_reg].pin_count >
+			ggtt_vma->pin_count);
+		dev_priv->fence_regs[obj->fence_reg].pin_count++;
+		return true;
+	} else
+		return false;
+}
+
+void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
+		dev_priv->fence_regs[obj->fence_reg].pin_count--;
+	}
+}
+
+void i915_gem_restore_fences(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < dev_priv->num_fence_regs; i++) {
+		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+		/*
+		 * Commit delayed tiling changes if we have an object still
+		 * attached to the fence, otherwise just clear the fence.
+		 */
+		if (reg->obj) {
+			i915_gem_object_update_fence(reg->obj, reg,
+						     reg->obj->tiling_mode);
+		} else {
+			i915_gem_write_fence(dev, i, NULL);
+		}
+	}
+}
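
Because the exported entry points keep their names and semantics, callers elsewhere in the driver are untouched by this commit. For orientation, the sketch below shows roughly how the moved API is meant to be used — a condensed, hypothetical caller (use_object_through_gtt is made up for illustration; error handling abbreviated):

/* Map a tiled object through the GTT under a fence, use it, release it. */
static int use_object_through_gtt(struct drm_i915_gem_object *obj)
{
	int ret;

	/* Find, reuse or steal a fence register for the object; for an
	 * untiled object this instead drops any fence it still holds. */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		return ret;

	/* Pin the fence so i915_find_fence_reg() cannot steal it while
	 * the GTT mapping is in use; returns false if no fence is held. */
	if (i915_gem_object_pin_fence(obj)) {
		/* ... access the object through its GTT mapping ... */
		i915_gem_object_unpin_fence(obj);
	}

	return 0;
}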