Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.h')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.h | 85 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------
1 file changed, 61 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index aec4f73574f4..2a116a91420b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -58,6 +58,7 @@
 
 struct drm_i915_file_private;
 struct drm_i915_fence_reg;
+struct i915_vma;
 
 typedef u32 gen6_pte_t;
 typedef u64 gen8_pte_t;
@@ -65,7 +66,7 @@ typedef u64 gen8_pde_t;
 typedef u64 gen8_ppgtt_pdpe_t;
 typedef u64 gen8_ppgtt_pml4e_t;
 
-#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
+#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
 
 /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
@@ -254,6 +255,26 @@ struct i915_pml4 {
 	struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
 };
 
+struct i915_vma_ops {
+	/* Map an object into an address space with the given cache flags. */
+	int (*bind_vma)(struct i915_vma *vma,
+			enum i915_cache_level cache_level,
+			u32 flags);
+	/*
+	 * Unmap an object from an address space. This usually consists of
+	 * setting the valid PTE entries to a reserved scratch page.
+	 */
+	void (*unbind_vma)(struct i915_vma *vma);
+
+	int (*set_pages)(struct i915_vma *vma);
+	void (*clear_pages)(struct i915_vma *vma);
+};
+
+struct pagestash {
+	spinlock_t lock;
+	struct pagevec pvec;
+};
+
 struct i915_address_space {
 	struct drm_mm mm;
 	struct drm_i915_private *i915;
@@ -267,12 +288,13 @@ struct i915_address_space {
 	 * assign blame.
 	 */
 	struct drm_i915_file_private *file;
-	struct list_head global_link;
 	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
 	u64 reserved;		/* size addr space reserved */
 
 	bool closed;
 
+	struct mutex mutex; /* protects vma and our lists */
+
 	struct i915_page_dma scratch_page;
 	struct i915_page_table *scratch_pt;
 	struct i915_page_directory *scratch_pd;
@@ -308,8 +330,13 @@ struct i915_address_space {
 	 */
 	struct list_head unbound_list;
 
-	struct pagevec free_pages;
-	bool pt_kmap_wc;
+	struct pagestash free_pages;
+
+	/* Some systems require uncached updates of the page directories */
+	bool pt_kmap_wc:1;
+
+	/* Some systems support read-only mappings for GGTT and/or PPGTT */
+	bool has_read_only:1;
 
 	/* FIXME: Need a more generic return type */
 	gen6_pte_t (*pte_encode)(dma_addr_t addr,
@@ -331,15 +358,8 @@ struct i915_address_space {
 			       enum i915_cache_level cache_level,
 			       u32 flags);
 	void (*cleanup)(struct i915_address_space *vm);
-	/** Unmap an object from an address space. This usually consists of
-	 * setting the valid PTE entries to a reserved scratch page. */
-	void (*unbind_vma)(struct i915_vma *vma);
-	/* Map an object into an address space with the given cache flags. */
-	int (*bind_vma)(struct i915_vma *vma,
-			enum i915_cache_level cache_level,
-			u32 flags);
-	int (*set_pages)(struct i915_vma *vma);
-	void (*clear_pages)(struct i915_vma *vma);
+
+	struct i915_vma_ops vma_ops;
 
 	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
 	I915_SELFTEST_DECLARE(bool scrub_64K);
@@ -367,7 +387,7 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
  * the spec.
  */
 struct i915_ggtt {
-	struct i915_address_space base;
+	struct i915_address_space vm;
 
 	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
 	struct resource gmadr;          /* GMADR resource */
@@ -385,9 +405,9 @@ struct i915_ggtt {
 };
 
 struct i915_hw_ppgtt {
-	struct i915_address_space base;
+	struct i915_address_space vm;
 	struct kref ref;
-	struct drm_mm_node node;
+
 	unsigned long pd_dirty_rings;
 	union {
 		struct i915_pml4 pml4;		/* GEN8+ & 48b PPGTT */
@@ -395,13 +415,28 @@ struct i915_hw_ppgtt {
 		struct i915_page_directory pd;		/* GEN6-7 */
 	};
 
+	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+};
+
+struct gen6_hw_ppgtt {
+	struct i915_hw_ppgtt base;
+
+	struct i915_vma *vma;
 	gen6_pte_t __iomem *pd_addr;
+	gen6_pte_t scratch_pte;
 
-	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
-			 struct i915_request *rq);
-	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+	unsigned int pin_count;
+	bool scan_for_unused_pt;
 };
 
+#define __to_gen6_ppgtt(base) container_of(base, struct gen6_hw_ppgtt, base)
+
+static inline struct gen6_hw_ppgtt *to_gen6_ppgtt(struct i915_hw_ppgtt *base)
+{
+	BUILD_BUG_ON(offsetof(struct gen6_hw_ppgtt, base));
+	return __to_gen6_ppgtt(base);
+}
+
 /*
  * gen6_for_each_pde() iterates over every pde from start until start+length.
  * If start and start+length are not perfectly divisible, the macro will round
@@ -440,8 +475,8 @@ static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
 	const u64 mask = ~((1ULL << pde_shift) - 1);
 	u64 end;
 
-	WARN_ON(length == 0);
-	WARN_ON(offset_in_page(addr|length));
+	GEM_BUG_ON(length == 0);
+	GEM_BUG_ON(offset_in_page(addr | length));
 
 	end = addr + length;
 
@@ -543,7 +578,7 @@ static inline struct i915_ggtt *
 i915_vm_to_ggtt(struct i915_address_space *vm)
 {
 	GEM_BUG_ON(!i915_is_ggtt(vm));
-	return container_of(vm, struct i915_ggtt, base);
+	return container_of(vm, struct i915_ggtt, vm);
 }
 
 #define INTEL_MAX_PPAT_ENTRIES 8
@@ -591,8 +626,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
 int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
 void i915_ppgtt_release(struct kref *kref);
 struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
-					struct drm_i915_file_private *fpriv,
-					const char *name);
+					struct drm_i915_file_private *fpriv);
 void i915_ppgtt_close(struct i915_address_space *vm);
 static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
 {
@@ -605,6 +639,9 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
 	kref_put(&ppgtt->ref, i915_ppgtt_release);
 }
 
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
+
 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
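
Note how the patch pulls the four per-VMA hooks (bind_vma, unbind_vma, set_pages, clear_pages) out of i915_address_space into the new i915_vma_ops table. A minimal sketch of a caller after this change; example_bind() is hypothetical and not part of the patch, it only shows the indirection through the embedded vma_ops:

/* Hypothetical caller illustrating the i915_vma_ops indirection. */
static int example_bind(struct i915_address_space *vm,
			struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags)
{
	int err;

	/* Populate the vma's backing pages before PTEs can be written. */
	err = vm->vma_ops.set_pages(vma);
	if (err)
		return err;

	/* Write the PTEs; on failure, undo the page setup. */
	err = vm->vma_ops.bind_vma(vma, cache_level, flags);
	if (err)
		vm->vma_ops.clear_pages(vma);

	return err;
}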
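struct pagestash wraps what used to be a bare struct pagevec free-page cache with its own spinlock, so the cache can be touched without holding the new vm->mutex across the whole operation. A sketch of the kind of helpers this implies, assuming the <linux/pagevec.h> API of that era; stash_init() and stash_pop_page() here are illustrative, the patch itself only introduces the type:

/* Illustrative helpers (not in the patch) for struct pagestash. */
static void stash_init(struct pagestash *stash)
{
	spin_lock_init(&stash->lock);
	pagevec_init(&stash->pvec);
}

static struct page *stash_pop_page(struct pagestash *stash)
{
	struct page *page = NULL;

	spin_lock(&stash->lock);
	if (pagevec_count(&stash->pvec))
		page = stash->pvec.pages[--stash->pvec.nr];
	spin_unlock(&stash->lock);

	return page;
}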
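The new gen6_hw_ppgtt subclass keeps gen6-only state (vma, pd_addr, scratch_pte, pin_count, scan_for_unused_pt) out of the common i915_hw_ppgtt. to_gen6_ppgtt() downcasts via container_of(), and BUILD_BUG_ON(offsetof(struct gen6_hw_ppgtt, base)) proves at compile time that base is the first member, so the conversion is a zero-cost pointer cast. A hypothetical user of the downcast:

/* Hypothetical gen6-only path receiving the generic ppgtt pointer. */
static void example_flag_rescan(struct i915_hw_ppgtt *hw)
{
	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(hw);

	/* Only valid if hw really is embedded in a gen6_hw_ppgtt. */
	ppgtt->scan_for_unused_pt = true;
}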
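Finally, the gen6_ppgtt_pin()/gen6_ppgtt_unpin() declarations pair with the new pin_count field: the gen6 page directory only needs to stay resident while the ppgtt is actually in use. A sketch of the expected calling pattern; example_use() is hypothetical:

/* Hypothetical caller showing balanced pin/unpin around use. */
static int example_use(struct i915_hw_ppgtt *ppgtt)
{
	int err;

	err = gen6_ppgtt_pin(ppgtt);	/* first pin makes the pd resident */
	if (err)
		return err;

	/* ... submit work that walks this ppgtt ... */

	gen6_ppgtt_unpin(ppgtt);	/* last unpin allows eviction again */
	return 0;
}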