@@ -104,19 +104,12 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 	 * is initialized itself.
 	 */
 	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
+	if (num_sched_list) {
+		entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
+		entity->rq = &sched_list[0]->rq;
+	}
 	RCU_INIT_POINTER(entity->last_scheduled, NULL);
 	RB_CLEAR_NODE(&entity->rb_tree_node);
-
-	if (num_sched_list && !sched_list[0]->rq) {
-		/* Since every entry covered by num_sched_list
-		 * should be non-NULL and therefore we warn drivers
-		 * not to do this and to fix their DRM calling order.
-		 */
-		pr_warn("%s: called with uninitialized scheduler\n", __func__);
-	} else if (num_sched_list) {
-		entity->rq = sched_list[0]->rq;
-	}
-
 	init_completion(&entity->entity_idle);
 
 	/* We start in an idle state. */
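The simplified init path above leans on the run queue being embedded directly in struct drm_gpu_scheduler: &sched_list[0]->rq is a valid pointer as soon as the scheduler object itself exists, so the old "called with uninitialized scheduler" warning has nothing left to catch. A minimal sketch of the layout this assumes (only the rq member matters here; the other fields are illustrative, not the upstream definitions):

struct drm_sched_rq {
	struct list_head	entities;	/* entities served by this run queue */
};

struct drm_gpu_scheduler {
	atomic_t		*score;		/* load metric, bumped on job push */
	struct drm_sched_rq	rq;		/* single run queue, embedded by value */
};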
@@ -302,7 +295,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 	if (!entity->rq)
 		return 0;
 
-	sched = entity->rq->sched;
+	sched = container_of(entity->rq, typeof(*sched), rq);
 	/**
 	 * The client will not queue more IBs during this fini, consume existing
 	 * queued IBs or discard them on SIGKILL
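With the rq embedded by value, it no longer needs a back-pointer to its scheduler: every former entity->rq->sched lookup in this file becomes container_of(entity->rq, typeof(*sched), rq), i.e. the owning scheduler is recovered from the member's offset (typeof(*sched) just avoids repeating the struct name). A self-contained userspace sketch of the same pattern, using stand-in types rather than the DRM ones:

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rq { int dummy; };

struct scheduler {
	const char *name;
	struct rq rq;			/* embedded run queue, as in the patch */
};

int main(void)
{
	struct scheduler s = { .name = "gfx" };
	struct rq *rq = &s.rq;		/* what an entity would keep in entity->rq */

	/* Recover the owning scheduler from a pointer to its embedded member. */
	struct scheduler *sched = container_of(rq, struct scheduler, rq);

	printf("%s\n", sched->name);	/* prints "gfx" */
	return 0;
}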
@@ -394,9 +387,11 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
 {
 	struct drm_sched_entity *entity =
 		container_of(cb, struct drm_sched_entity, cb);
+	struct drm_gpu_scheduler *sched =
+		container_of(entity->rq, typeof(*sched), rq);
 
 	drm_sched_entity_clear_dep(f, cb);
-	drm_sched_wakeup(entity->rq->sched);
+	drm_sched_wakeup(sched);
 }
 
 /**
@@ -422,7 +417,8 @@ EXPORT_SYMBOL(drm_sched_entity_set_priority);
  */
 static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 {
-	struct drm_gpu_scheduler *sched = entity->rq->sched;
+	struct drm_gpu_scheduler *sched =
+		container_of(entity->rq, typeof(*sched), rq);
 	struct dma_fence *fence = entity->dependency;
 	struct drm_sched_fence *s_fence;
 
@@ -561,7 +557,7 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
 
 	spin_lock(&entity->lock);
 	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
-	rq = sched ? sched->rq : NULL;
+	rq = sched ? &sched->rq : NULL;
 	if (rq != entity->rq) {
 		drm_sched_rq_remove_entity(entity->rq, entity);
 		entity->rq = rq;
@@ -584,10 +580,12 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 {
 	struct drm_sched_entity *entity = sched_job->entity;
+	struct drm_gpu_scheduler *sched =
+		container_of(entity->rq, typeof(*sched), rq);
 	bool first;
 
 	trace_drm_sched_job(sched_job, entity);
-	atomic_inc(entity->rq->sched->score);
+	atomic_inc(sched->score);
 	WRITE_ONCE(entity->last_user, current->group_leader);
 
 	/*
@@ -598,8 +596,6 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 
 	/* first job wakes up scheduler */
 	if (first) {
-		struct drm_gpu_scheduler *sched;
-
 		sched = drm_sched_rq_add_entity(entity);
 		if (sched)
 			drm_sched_wakeup(sched);
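With both push_job hunks applied, the function resolves the scheduler once at the top and reuses it for the score bump and the first-job wakeup; the inner local goes away and drm_sched_rq_add_entity() simply reassigns the outer variable. Roughly, with the unchanged middle of the function elided:

void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	struct drm_gpu_scheduler *sched =
		container_of(entity->rq, typeof(*sched), rq);
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);

	/* ... unchanged: queue the job and compute 'first' ... */

	/* first job wakes up scheduler */
	if (first) {
		sched = drm_sched_rq_add_entity(entity);
		if (sched)
			drm_sched_wakeup(sched);
	}
}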