author		Sean Paul <seanpaul@chromium.org>	2018-09-27 02:54:54 -0400
committer	Sean Paul <seanpaul@chromium.org>	2018-09-27 02:54:54 -0400
commit		7b76d0588477d4b6097a9048b42835a45caf5c48 (patch)
tree		fa4e0bcd49f8d17f26795224290c8f8460aa4116 /include/drm
parent		a74c0aa524050e5fd6c275a153b1f37283f6e37c (diff)
parent		bf78296ab1cb215d0609ac6cff4e43e941e51265 (diff)
Merge drm/drm-next into drm-misc-next
Backmerging 4.19-rc5 to pick up sun4i fix

Signed-off-by: Sean Paul <seanpaul@chromium.org>
Diffstat (limited to 'include/drm')
-rw-r--r--	include/drm/drm_drv.h		|  2
-rw-r--r--	include/drm/gpu_scheduler.h	| 42
-rw-r--r--	include/drm/i915_pciids.h	|  1
-rw-r--r--	include/drm/ttm/ttm_bo_api.h	| 16
-rw-r--r--	include/drm/ttm/ttm_bo_driver.h	| 28
5 files changed, 75 insertions(+), 14 deletions(-)
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 8830e3de3a86..3199ef70c007 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -674,7 +674,7 @@ static inline bool drm_core_check_feature(struct drm_device *dev, u32 feature)
static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
{
return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
- dev->mode_config.funcs->atomic_commit != NULL;
+ (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL);
}
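
The drm_drv.h hunk guards drm_drv_uses_atomic_modeset() against devices whose driver never installed dev->mode_config.funcs. A minimal sketch of why the check matters; the caller below is hypothetical, not part of the patch:

#include <drm/drm_drv.h>

/* Hypothetical caller: before this change, querying a device whose
 * driver has no mode_config.funcs set up (e.g. a render-only driver,
 * or one still early in probe) dereferenced a NULL pointer.  With the
 * added check the helper simply reports false for such devices. */
static bool wants_atomic(struct drm_device *dev)
{
	return drm_drv_uses_atomic_modeset(dev);
}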
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 21c648b0b2a1..daec50f887b3 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -50,7 +50,10 @@ enum drm_sched_priority {
*
* @list: used to append this struct to the list of entities in the
* runqueue.
- * @rq: runqueue to which this entity belongs.
+ * @rq: runqueue on which this entity is currently scheduled.
+ * @rq_list: a list of run queues on which jobs from this entity can
+ * be scheduled
+ * @num_rq_list: number of run queues in the rq_list
* @rq_lock: lock to modify the runqueue to which this entity belongs.
* @job_queue: the list of jobs of this entity.
* @fence_seq: a linearly increasing seqno incremented with each
@@ -67,6 +70,7 @@ enum drm_sched_priority {
* @fini_status: contains the exit status in case the process was signalled.
* @last_scheduled: points to the finished fence of the last scheduled job.
* @last_user: last group leader pushing a job into the entity.
+ * @stopped: Marks the entity as removed from the rq and destined for termination.
*
* Entities will emit jobs in order to their corresponding hardware
* ring, and the scheduler will alternate between entities based on
@@ -75,6 +79,8 @@ enum drm_sched_priority {
struct drm_sched_entity {
struct list_head list;
struct drm_sched_rq *rq;
+ struct drm_sched_rq **rq_list;
+ unsigned int num_rq_list;
spinlock_t rq_lock;
struct spsc_queue job_queue;
@@ -87,6 +93,7 @@ struct drm_sched_entity {
atomic_t *guilty;
struct dma_fence *last_scheduled;
struct task_struct *last_user;
+ bool stopped;
};
/**
@@ -257,6 +264,7 @@ struct drm_sched_backend_ops {
* @job_list_lock: lock to protect the ring_mirror_list.
* @hang_limit: once the number of hangs caused by a job crosses this limit,
* the job is marked guilty and is no longer considered for scheduling.
+ * @num_jobs: the number of jobs currently queued in the scheduler
*
* One scheduler is implemented for each hardware ring.
*/
@@ -274,6 +282,7 @@ struct drm_gpu_scheduler {
struct list_head ring_mirror_list;
spinlock_t job_list_lock;
int hang_limit;
+ atomic_t num_jobs;
};
int drm_sched_init(struct drm_gpu_scheduler *sched,
@@ -281,6 +290,21 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
uint32_t hw_submission, unsigned hang_limit, long timeout,
const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
+int drm_sched_job_init(struct drm_sched_job *job,
+ struct drm_sched_entity *entity,
+ void *owner);
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
+ struct drm_sched_job *job);
+void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
+bool drm_sched_dependency_optimized(struct dma_fence* fence,
+ struct drm_sched_entity *entity);
+void drm_sched_job_kickout(struct drm_sched_job *s_job);
+
+void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity);
+void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity);
int drm_sched_entity_init(struct drm_sched_entity *entity,
struct drm_sched_rq **rq_list,
@@ -289,23 +313,17 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
+void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
+struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
struct drm_sched_entity *entity);
-void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
- struct drm_sched_rq *rq);
+void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
+ enum drm_sched_priority priority);
+bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
struct drm_sched_fence *drm_sched_fence_create(
struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
-int drm_sched_job_init(struct drm_sched_job *job,
- struct drm_sched_entity *entity,
- void *owner);
-void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
- struct drm_sched_job *job);
-void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
-bool drm_sched_dependency_optimized(struct dma_fence* fence,
- struct drm_sched_entity *entity);
-void drm_sched_job_kickout(struct drm_sched_job *s_job);
#endif
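
Taken together, the gpu_scheduler.h changes let one entity feed several run queues (rq_list/num_rq_list) and regroup the entry points by object. A sketch of the resulting driver-side flow, assuming the four-argument drm_sched_entity_init() shown in the context above (entity, rq_list, num_rq_list, guilty) and the scheduler's per-priority sched_rq array; all variable and parameter names, including "owner", are illustrative:

#include <linux/kernel.h>
#include <drm/gpu_scheduler.h>

static int submit_one_job(struct drm_gpu_scheduler *sched0,
			  struct drm_gpu_scheduler *sched1,
			  struct drm_sched_entity *entity,
			  struct drm_sched_job *job, void *owner)
{
	/* One run queue per hardware ring; drm_sched_entity_select_rq()
	 * lets the scheduler rebalance the entity between them. */
	struct drm_sched_rq *rqs[] = {
		&sched0->sched_rq[DRM_SCHED_PRIORITY_NORMAL],
		&sched1->sched_rq[DRM_SCHED_PRIORITY_NORMAL],
	};
	int r;

	r = drm_sched_entity_init(entity, rqs, ARRAY_SIZE(rqs), NULL);
	if (r)
		return r;

	r = drm_sched_job_init(job, entity, owner);
	if (r)
		return r;
	/* ... driver-specific setup of the job's commands/fences ... */
	drm_sched_entity_push_job(job, entity);
	return 0;
}

Note how drm_sched_entity_set_rq() is gone from the API: with a whole rq_list attached at init time, callers change scheduling behaviour through drm_sched_entity_set_priority() instead.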
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index fbf5cfc9b352..fd965ffbb92e 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -386,6 +386,7 @@
INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
+ INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */
/* CFL H */
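
The new 0x3E98 ID slots into the Coffee Lake S GT2 block of this header; drivers pick it up automatically because they instantiate the whole family macro rather than individual IDs. A sketch of the usual consumption pattern; the table name and device-info variable are illustrative:

#include <linux/pci.h>
#include <drm/i915_pciids.h>

static const struct intel_device_info cfl_gt2_info;	/* driver-defined */

/* The macro expands to one pci_device_id entry per INTEL_VGA_DEVICE()
 * line it contains, so 0x3E98 is matched once the macro gains it. */
static const struct pci_device_id pciidlist[] = {
	INTEL_CFL_S_GT2_IDS(&cfl_gt2_info),
	{ 0 }
};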
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index a01ba2032f0e..8c19470785e2 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -51,6 +51,8 @@ struct ttm_placement;
struct ttm_place;
+struct ttm_lru_bulk_move;
+
/**
* struct ttm_bus_placement
*
@@ -405,12 +407,24 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
* ttm_bo_move_to_lru_tail
*
* @bo: The buffer object.
+ * @bulk: optional bulk move structure to remember BO positions
*
* Move this BO to the tail of all lru lists used to lookup and reserve an
* object. This function must be called with struct ttm_bo_global::lru_lock
* held, and is used to make a BO less likely to be considered for eviction.
*/
-void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
+void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
+ struct ttm_lru_bulk_move *bulk);
+
+/**
+ * ttm_bo_bulk_move_lru_tail
+ *
+ * @bulk: bulk move structure
+ *
+ * Bulk move BOs to the LRU tail; only valid when the driver ensures that
+ * BO order never changes. Should be called with ttm_bo_global::lru_lock held.
+ */
+void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
/**
* ttm_bo_lock_delayed_workqueue
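
The two ttm_bo_api.h prototypes combine into a two-phase pattern: record each BO's new LRU position in a ttm_lru_bulk_move, then splice the remembered ranges to the list tails in one step. A sketch under the locking rule stated above; the per-VM bookkeeping struct and list are hypothetical driver state, not part of TTM:

#include <linux/list.h>
#include <linux/string.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

struct vm_bo {				/* hypothetical driver bookkeeping */
	struct ttm_buffer_object *bo;
	struct list_head node;
};

static void move_vm_bos_to_lru_tail(struct ttm_bo_global *glob,
				    struct list_head *vm_bos)
{
	struct ttm_lru_bulk_move bulk;
	struct vm_bo *vbo;

	memset(&bulk, 0, sizeof(bulk));

	spin_lock(&glob->lru_lock);
	list_for_each_entry(vbo, vm_bos, node)
		ttm_bo_move_to_lru_tail(vbo->bo, &bulk);  /* record range */
	ttm_bo_bulk_move_lru_tail(&bulk);		  /* splice ranges */
	spin_unlock(&glob->lru_lock);
}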
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 3234cc322e70..e4fee8e02559 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -491,6 +491,34 @@ struct ttm_bo_device {
};
/**
+ * struct ttm_lru_bulk_move_pos
+ *
+ * @first: first BO in the bulk move range
+ * @last: last BO in the bulk move range
+ *
+ * Positions for a lru bulk move.
+ */
+struct ttm_lru_bulk_move_pos {
+ struct ttm_buffer_object *first;
+ struct ttm_buffer_object *last;
+};
+
+/**
+ * struct ttm_lru_bulk_move
+ *
+ * @tt: first/last lru entry for BOs in the TT domain
+ * @vram: first/last lru entry for BOs in the VRAM domain
+ * @swap: first/last lru entry for BOs on the swap list
+ *
+ * Helper structure for bulk moves on the LRU list.
+ */
+struct ttm_lru_bulk_move {
+ struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
+ struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
+ struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY];
+};
+
+/**
* ttm_flag_masked
*
* @old: Pointer to the result and original value.
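
The ttm_lru_bulk_move definition makes the cost model explicit: one first/last pair per domain and priority means a bulk move is a fixed number of list splices rather than one move per BO, which only works because the relative order inside each range is preserved (hence the "BO order never changes" requirement in ttm_bo_api.h). A sketch of how a driver might embed it; the struct is hypothetical, loosely modelled on per-VM usage:

#include <linux/list.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical per-VM bookkeeping: all BOs owned by one VM are moved
 * as a block, so the first/last ranges recorded in lru_bulk_move stay
 * contiguous on the LRU lists. */
struct my_vm {
	struct list_head	 bos;		/* vm_bo entries, see above */
	struct ttm_lru_bulk_move lru_bulk_move;	/* remembered positions */
};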