@@ -732,14 +732,8 @@ int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_sched_entity *entity,
 		       u32 credits, void *owner)
 {
-	if (!entity->rq) {
-		/* This will most likely be followed by missing frames
-		 * or worse--a blank screen--leave a trail in the
-		 * logs, so this can be debugged easier.
-		 */
-		drm_err(job->sched, "%s: entity has no rq!\n", __func__);
+	if (!entity->rq)
 		return -ENOENT;
-	}
 
 	if (unlikely(!credits)) {
 		pr_err("*ERROR* %s: credits cannot be 0!\n", __func__);
@@ -784,7 +778,7 @@ void drm_sched_job_arm(struct drm_sched_job *job)
 	sched = entity->rq->sched;
 
 	job->sched = sched;
-	job->s_priority = entity->priority;
+	job->s_priority = entity->rq - sched->sched_rq;
 	job->id = atomic64_inc_return(&sched->job_id_count);
 
 	drm_sched_fence_init(job->s_fence, job->entity);
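
With the run queues restored as a fixed array embedded in struct drm_gpu_scheduler, entity->rq points into sched->sched_rq[], so subtracting the array base recovers the priority index by plain pointer arithmetic. A minimal userspace sketch of that idiom, with toy types standing in for the kernel structs:

#include <stdio.h>

#define PRIORITY_COUNT 4

struct rq { int dummy; };

struct scheduler {
	/* one run queue per priority level, embedded by value */
	struct rq sched_rq[PRIORITY_COUNT];
};

int main(void)
{
	struct scheduler sched = { 0 };
	/* an entity holds a pointer to one element of the array */
	struct rq *entity_rq = &sched.sched_rq[2];

	/* element minus array base yields the index, i.e. the priority */
	long priority = entity_rq - sched.sched_rq;

	printf("priority = %ld\n", priority); /* prints 2 */
	return 0;
}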
@@ -999,11 +993,10 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 	int i;
 
 	/* Kernel run queue has higher priority than normal run queue */
-	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
-		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
-			drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
-			drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
-
+	for (i = DRM_SCHED_PRIORITY_KERNEL; i < DRM_SCHED_PRIORITY_COUNT; i++) {
+		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
+			drm_sched_rq_select_entity_fifo(sched, &sched->sched_rq[i]) :
+			drm_sched_rq_select_entity_rr(sched, &sched->sched_rq[i]);
 		if (entity)
 			break;
 	}
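
The selection loop now walks the embedded array and takes the first run queue that yields an entity; since DRM_SCHED_PRIORITY_KERNEL sits at the lowest index (as the other hunks in this change imply), scanning upward services the kernel queue first, matching the comment above. A self-contained sketch of that first-match scan, again with toy types in place of the kernel's:

#include <stdio.h>

#define PRIORITY_COUNT 4 /* kernel, high, normal, low */

/* toy run queue: just remembers the name of a runnable entity, if any */
struct rq { const char *runnable; };

static const char *select_entity(const struct rq rqs[PRIORITY_COUNT])
{
	/* index 0 is the highest priority, so the first hit wins */
	for (int i = 0; i < PRIORITY_COUNT; i++)
		if (rqs[i].runnable)
			return rqs[i].runnable;
	return NULL;
}

int main(void)
{
	struct rq rqs[PRIORITY_COUNT] = {
		{ NULL }, { "high-prio job" }, { "normal job" }, { NULL },
	};

	printf("selected: %s\n", select_entity(rqs)); /* "high-prio job" */
	return 0;
}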
@@ -1183,9 +1176,7 @@ static int drm_sched_main(void *param)
  *
  * @sched: scheduler instance
  * @ops: backend operations for this scheduler
- * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
  * @credit_limit: the number of credits this scheduler can hold from all jobs
- * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
  * @hang_limit: number of times to allow a job to hang before dropping it
  * @timeout: timeout value in jiffies for the scheduler
  * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
@@ -1198,12 +1189,11 @@ static int drm_sched_main(void *param)
  */
 int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const struct drm_sched_backend_ops *ops,
-		   u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
+		   u32 credit_limit, unsigned int hang_limit,
 		   long timeout, struct workqueue_struct *timeout_wq,
 		   atomic_t *score, const char *name, struct device *dev)
 {
 	int i, ret;
-
 	sched->ops = ops;
 	sched->credit_limit = credit_limit;
 	sched->name = name;
@@ -1213,35 +1203,8 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	sched->score = score ? score : &sched->_score;
 	sched->dev = dev;
 
-	if (num_rqs > DRM_SCHED_PRIORITY_COUNT) {
-		/* This is a gross violation--tell drivers what the problem is.
-		 */
-		drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
-			__func__);
-		return -EINVAL;
-	} else if (sched->sched_rq) {
-		/* Not an error, but warn anyway so drivers can
-		 * fine-tune their DRM calling order, and return all
-		 * is good.
-		 */
-		drm_warn(sched, "%s: scheduler already initialized!\n", __func__);
-		return 0;
-	}
-
-	sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
-					GFP_KERNEL | __GFP_ZERO);
-	if (!sched->sched_rq) {
-		drm_err(sched, "%s: out of memory for sched_rq\n", __func__);
-		return -ENOMEM;
-	}
-	sched->num_rqs = num_rqs;
-	ret = -ENOMEM;
-	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
-		sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
-		if (!sched->sched_rq[i])
-			goto Out_unroll;
-		drm_sched_rq_init(sched, sched->sched_rq[i]);
-	}
+	for (i = DRM_SCHED_PRIORITY_KERNEL; i < DRM_SCHED_PRIORITY_COUNT; i++)
+		drm_sched_rq_init(sched, &sched->sched_rq[i]);
 
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
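
The init hunk can drop all of the kmalloc_array()/kzalloc() bookkeeping because this revert assumes struct drm_gpu_scheduler embeds the run queues by value rather than holding a heap-allocated pointer array. A side-by-side sketch of the two layouts (abbreviated, not the full kernel struct):

#define PRIORITY_COUNT 4

struct rq { int dummy; };

/* layout used by the removed code: a kmalloc_array() of num_rqs
 * pointers, so init can fail with -ENOMEM and fini must kfree() */
struct scheduler_dynamic {
	struct rq **sched_rq;
	unsigned int num_rqs;
};

/* layout the restored code relies on: queues embedded by value, so
 * initialization cannot fail and teardown has nothing to free */
struct scheduler_embedded {
	struct rq sched_rq[PRIORITY_COUNT];
};

That is also why the Out_unroll error-unwinding label and the -ENOMEM paths disappear from drm_sched_init() in the next hunk.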
@@ -1252,24 +1215,18 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	atomic_set(&sched->_score, 0);
 	atomic64_set(&sched->job_id_count, 0);
 
-	/* Each scheduler will run on a seperate kernel thread */
-	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
-	if (IS_ERR(sched->thread)) {
-		ret = PTR_ERR(sched->thread);
-		sched->thread = NULL;
-		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
-		goto Out_unroll;
-	}
+
+	/* Each scheduler will run on a separate kernel thread */
+	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
+	if (IS_ERR(sched->thread)) {
+		ret = PTR_ERR(sched->thread);
+		sched->thread = NULL;
+		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
+		return ret;
+	}
 
 	sched->ready = true;
 	return 0;
-Out_unroll:
-	for (--i; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
-		kfree(sched->sched_rq[i]);
-	kfree(sched->sched_rq);
-	sched->sched_rq = NULL;
-	drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
-	return ret;
 }
 EXPORT_SYMBOL(drm_sched_init);
 
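kthread_run() reports failure through an error-encoded pointer rather than NULL, which is why the restored code tests IS_ERR() and decodes the errno with PTR_ERR() before clearing the field. A userspace mimic of that convention (the kernel's real macros live in include/linux/err.h):

#include <stdio.h>

/* mimic of the kernel's ERR_PTR scheme: small negative errno values
 * are stored in the top page of the pointer address space */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *thread = ERR_PTR(-12); /* pretend kthread_run() hit -ENOMEM */

	if (IS_ERR(thread)) {
		printf("kthread_run failed: %ld\n", PTR_ERR(thread));
		thread = NULL; /* don't leave an ERR_PTR in the struct */
	}
	return 0;
}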
@@ -1288,8 +1245,8 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 	if (sched->thread)
 		kthread_stop(sched->thread);
 
-	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
-		struct drm_sched_rq *rq = sched->sched_rq[i];
+	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_KERNEL; i--) {
+		struct drm_sched_rq *rq = &sched->sched_rq[i];
 
 		spin_lock(&rq->lock);
 		list_for_each_entry(s_entity, &rq->entities, list)
@@ -1300,7 +1257,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 		 */
 		s_entity->stopped = true;
 		spin_unlock(&rq->lock);
-		kfree(sched->sched_rq[i]);
+
 	}
 
 	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
@@ -1310,8 +1267,6 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 	cancel_delayed_work_sync(&sched->work_tdr);
 
 	sched->ready = false;
-	kfree(sched->sched_rq);
-	sched->sched_rq = NULL;
 }
 EXPORT_SYMBOL(drm_sched_fini);
 
@@ -1337,8 +1292,10 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
 	 */
 	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
 		atomic_inc(&bad->karma);
-		for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
-			struct drm_sched_rq *rq = sched->sched_rq[i];
+
+		for (i = DRM_SCHED_PRIORITY_KERNEL; i < DRM_SCHED_PRIORITY_COUNT;
+		     i++) {
+			struct drm_sched_rq *rq = &sched->sched_rq[i];
 
 			spin_lock(&rq->lock);
 			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {