From 96c7c2f4d5bd94b15fe63448c087f01607b56f4a Mon Sep 17 00:00:00 2001
From: Danilo Krummrich
Date: Tue, 18 Apr 2023 12:04:53 +0200
Subject: drm/scheduler: set entity to NULL in drm_sched_entity_pop_job()

It has already happened a few times that patches slipped through which
accessed an entity through a job that had already been removed from the
entity's queue. Since jobs and entities might have different lifecycles,
this can potentially cause UAF bugs.

In order to make it obvious that a job's entity pointer shouldn't be
accessed after drm_sched_entity_pop_job() was called successfully, set
the job's entity pointer to NULL once the job is removed from the
entity's queue.

Moreover, debugging a NULL pointer dereference is far easier than
debugging memory corrupted through a UAF.

Signed-off-by: Danilo Krummrich
Link: https://lore.kernel.org/r/20230418100453.4433-1-dakr@redhat.com
Reviewed-by: Luben Tuikov
Signed-off-by: Luben Tuikov
---
 drivers/gpu/drm/scheduler/sched_main.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'drivers/gpu/drm/scheduler/sched_main.c')

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index f6801eb21820..c3582c4fc299 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -42,6 +42,10 @@
  * the hardware.
  *
  * The jobs in a entity are always scheduled in the order that they were pushed.
+ *
+ * Note that once a job was taken from the entities queue and pushed to the
+ * hardware, i.e. the pending queue, the entity must not be referenced anymore
+ * through the jobs entity pointer.
  */

 #include
-- cgit v1.2.3

From 539f9ee4b52a8bec95ff064e22dd2fb1e258e818 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Mon, 17 Apr 2023 13:36:02 +0200
Subject: drm/scheduler: properly forward fence errors
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When a hw fence is signaled with an error, properly forward it to the
finished fence.
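As a driver-side illustration (a minimal sketch, not part of this patch;
my_job, my_complete_job() and the hw_fence member are hypothetical names),
all a driver has to do now is record the error on its hardware fence before
signaling it; drm_sched_job_done_cb() picks it up from fence->error and
forwards it to the finished fence:

	#include <linux/dma-fence.h>

	/* Driver completion path, e.g. called from an IRQ handler. */
	static void my_complete_job(struct my_job *job, int hw_status)
	{
		struct dma_fence *hw_fence = job->hw_fence;

		/* Record the failure before signaling; the scheduler then
		 * propagates hw_fence->error into the job's finished fence. */
		if (hw_status)
			dma_fence_set_error(hw_fence, hw_status);

		dma_fence_signal(hw_fence);
	}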
Signed-off-by: Christian König
Reviewed-by: Luben Tuikov
Link: https://patchwork.freedesktop.org/patch/msgid/20230420115752.31470-1-christian.koenig@amd.com
---
 drivers/gpu/drm/scheduler/sched_entity.c |  4 +---
 drivers/gpu/drm/scheduler/sched_fence.c  |  4 +++-
 drivers/gpu/drm/scheduler/sched_main.c   | 18 ++++++++----------
 include/drm/gpu_scheduler.h              |  2 +-
 4 files changed, 13 insertions(+), 15 deletions(-)

(limited to 'drivers/gpu/drm/scheduler/sched_main.c')

diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 3e2a31d8190e..1795cd7e42ed 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -144,7 +144,7 @@ static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
 {
 	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

-	drm_sched_fence_finished(job->s_fence);
+	drm_sched_fence_finished(job->s_fence, -ESRCH);
 	WARN_ON(job->s_fence->parent);
 	job->sched->ops->free_job(job);
 }
@@ -195,8 +195,6 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
 	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
 		struct drm_sched_fence *s_fence = job->s_fence;

-		dma_fence_set_error(&s_fence->finished, -ESRCH);
-
 		dma_fence_get(&s_fence->finished);
 		if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
 						    drm_sched_entity_kill_jobs_cb))
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index fe9c6468e440..ef120475e7c6 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -53,8 +53,10 @@ void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
 	dma_fence_signal(&fence->scheduled);
 }

-void drm_sched_fence_finished(struct drm_sched_fence *fence)
+void drm_sched_fence_finished(struct drm_sched_fence *fence, int result)
 {
+	if (result)
+		dma_fence_set_error(&fence->finished, result);
 	dma_fence_signal(&fence->finished);
 }

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index c3582c4fc299..8d248ce233c8 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -262,7 +262,7 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
  *
  * Finish the job's fence and wake up the worker thread.
  */
-static void drm_sched_job_done(struct drm_sched_job *s_job)
+static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
 {
 	struct drm_sched_fence *s_fence = s_job->s_fence;
 	struct drm_gpu_scheduler *sched = s_fence->sched;
@@ -273,7 +273,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job)
 	trace_drm_sched_process_job(s_fence);

 	dma_fence_get(&s_fence->finished);
-	drm_sched_fence_finished(s_fence);
+	drm_sched_fence_finished(s_fence, result);
 	dma_fence_put(&s_fence->finished);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
@@ -287,7 +287,7 @@ static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

-	drm_sched_job_done(s_job);
+	drm_sched_job_done(s_job, f->error);
 }

 /**
@@ -537,12 +537,12 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 			r = dma_fence_add_callback(fence, &s_job->cb,
 						   drm_sched_job_done_cb);
 			if (r == -ENOENT)
-				drm_sched_job_done(s_job);
+				drm_sched_job_done(s_job, fence->error);
 			else if (r)
 				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
 					  r);
 		} else
-			drm_sched_job_done(s_job);
+			drm_sched_job_done(s_job, 0);
 	}

 	if (full_recovery) {
@@ -1059,15 +1059,13 @@ static int drm_sched_main(void *param)
 			r = dma_fence_add_callback(fence, &sched_job->cb,
 						   drm_sched_job_done_cb);
 			if (r == -ENOENT)
-				drm_sched_job_done(sched_job);
+				drm_sched_job_done(sched_job, fence->error);
 			else if (r)
 				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
 					  r);
 		} else {
-			if (IS_ERR(fence))
-				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
-
-			drm_sched_job_done(sched_job);
+			drm_sched_job_done(sched_job, IS_ERR(fence) ?
+					   PTR_ERR(fence) : 0);
 		}

 		wake_up(&sched->job_scheduled);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index c0ec6719282a..3b4800e0b24b 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -598,7 +598,7 @@ void drm_sched_fence_init(struct drm_sched_fence *fence,
 void drm_sched_fence_free(struct drm_sched_fence *fence);

 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
-void drm_sched_fence_finished(struct drm_sched_fence *fence);
+void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);

 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
-- cgit v1.2.3

From 03877d621db082610c9b7602c6e8cd6ebcb75a8f Mon Sep 17 00:00:00 2001
From: Christian König
Date: Thu, 27 Apr 2023 14:05:43 +0200
Subject: drm/scheduler: mark jobs without fence as canceled
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When no hw fence is provided for a job, that means the job wasn't
executed. Mark such jobs as canceled by completing them with -ECANCELED.
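Together with the previous patch, the return value of the run_job() callback
now maps directly onto the finished fence's error. A minimal sketch of the
contract from the driver's side (my_run_job(), to_my_job() and my_hw_submit()
are hypothetical names, and error handling is simplified):

	/* run_job() contract: an ERR_PTR return is forwarded via PTR_ERR(),
	 * a returned fence carries its error in fence->error, and a job that
	 * never got a hardware fence is completed with -ECANCELED during
	 * recovery. */
	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
	{
		struct my_job *job = to_my_job(sched_job);
		int ret;

		ret = my_hw_submit(job);
		if (ret)
			return ERR_PTR(ret);

		return dma_fence_get(job->hw_fence);
	}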
Signed-off-by: Christian König
Reviewed-by: Luben Tuikov
Link: https://patchwork.freedesktop.org/patch/msgid/20230427122726.1290170-1-christian.koenig@amd.com
---
 drivers/gpu/drm/scheduler/sched_main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/scheduler/sched_main.c')

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 8d248ce233c8..3e2d453296dd 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -542,7 +542,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
 					  r);
 		} else
-			drm_sched_job_done(s_job, 0);
+			drm_sched_job_done(s_job, -ECANCELED);
 	}

 	if (full_recovery) {
-- cgit v1.2.3

From e072700869dd96405a9c3752d3741a79bca6e2e2 Mon Sep 17 00:00:00 2001
From: Luben Tuikov
Date: Wed, 17 May 2023 19:35:49 -0400
Subject: drm/sched: Rename to drm_sched_can_queue()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Rename drm_sched_ready() to drm_sched_can_queue(). "ready" can mean many
things and is thus meaningless in this context. Instead, use a name which
precisely conveys what is being checked.

Cc: Christian König
Cc: Alex Deucher
Signed-off-by: Luben Tuikov
Reviewed-by: Alex Deucher
Link: https://lore.kernel.org/r/20230517233550.377847-1-luben.tuikov@amd.com
---
 drivers/gpu/drm/scheduler/sched_main.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

(limited to 'drivers/gpu/drm/scheduler/sched_main.c')

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 172e63c87bfc..8739322c3032 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -848,13 +848,12 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
 EXPORT_SYMBOL(drm_sched_job_cleanup);

 /**
- * drm_sched_ready - is the scheduler ready
- *
+ * drm_sched_can_queue -- Can we queue more to the hardware?
  * @sched: scheduler instance
  *
  * Return true if we can push more jobs to the hw, otherwise false.
  */
-static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
+static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
 {
 	return atomic_read(&sched->hw_rq_count) <
 		sched->hw_submission_limit;
@@ -868,7 +867,7 @@ static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
  */
 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
 {
-	if (drm_sched_ready(sched))
+	if (drm_sched_can_queue(sched))
 		wake_up_interruptible(&sched->wake_up_worker);
 }

@@ -885,7 +884,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 	struct drm_sched_entity *entity;
 	int i;

-	if (!drm_sched_ready(sched))
+	if (!drm_sched_can_queue(sched))
 		return NULL;

 	/* Kernel run queue has higher priority than normal run queue*/
-- cgit v1.2.3

From 3655c5900f4d49881ad09e3893e5f5516b06a9f1 Mon Sep 17 00:00:00 2001
From: Luben Tuikov
Date: Wed, 17 May 2023 19:35:50 -0400
Subject: drm/sched: Rename to drm_sched_wakeup_if_can_queue()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Rename drm_sched_wakeup() to drm_sched_wakeup_if_can_queue() since the
former is misleading, as it wakes up the GPU scheduler _only if_ more jobs
can be queued to the underlying hardware.

This distinction is important to make, since the GPU scheduler thread's
wake condition is also satisfied in other cases, e.g. when there are jobs
to be cleaned.
For instance, a user might want to wake up the scheduler only because there
are more jobs to clean, but whether we can queue more jobs is irrelevant.

v2: Separate "canqueue" to "can_queue". (Alex D.)

Cc: Christian König
Cc: Alex Deucher
Signed-off-by: Luben Tuikov
Link: https://lore.kernel.org/r/20230517233550.377847-2-luben.tuikov@amd.com
Reviewed-by: Alex Deucher
---
 drivers/gpu/drm/scheduler/sched_entity.c | 4 ++--
 drivers/gpu/drm/scheduler/sched_main.c   | 6 +++---
 include/drm/gpu_scheduler.h              | 2 +-
 3 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'drivers/gpu/drm/scheduler/sched_main.c')

diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index cfb433e92005..68e807ae136a 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -342,7 +342,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
 		container_of(cb, struct drm_sched_entity, cb);

 	drm_sched_entity_clear_dep(f, cb);
-	drm_sched_wakeup(entity->rq->sched);
+	drm_sched_wakeup_if_can_queue(entity->rq->sched);
 }

 /**
@@ -565,7 +565,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
 			drm_sched_rq_update_fifo(entity, submit_ts);

-		drm_sched_wakeup(entity->rq->sched);
+		drm_sched_wakeup_if_can_queue(entity->rq->sched);
 	}
 }
 EXPORT_SYMBOL(drm_sched_entity_push_job);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 8739322c3032..b352227a6055 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -860,12 +860,12 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
 }

 /**
- * drm_sched_wakeup - Wake up the scheduler when it is ready
- *
+ * drm_sched_wakeup_if_can_queue - Wake up the scheduler
  * @sched: scheduler instance
  *
+ * Wake up the scheduler if we can queue jobs.
  */
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
+void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
 {
 	if (drm_sched_can_queue(sched))
 		wake_up_interruptible(&sched->wake_up_worker);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 31d1f5166c79..e95b4837e5a3 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -549,7 +549,7 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
 				    unsigned int num_sched_list);

 void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched);
 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
-- cgit v1.2.3
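Looking back at the first patch in this series: the sched_entity.c hunk which
actually clears the pointer is not shown above, since the listing is limited
to sched_main.c. The idea can be sketched as a greatly simplified
drm_sched_entity_pop_job(), with the dependency and fence handling of the
real function omitted:

	struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
	{
		struct drm_sched_job *sched_job;

		sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
		if (!sched_job)
			return NULL;

		/* ... dependency and fence bookkeeping omitted ... */

		spsc_queue_pop(&entity->job_queue);

		/* Jobs and entities have different lifecycles: clear the back
		 * pointer so that a stale access fails loudly as a NULL
		 * dereference instead of silently becoming a use-after-free. */
		sched_job->entity = NULL;

		return sched_job;
	}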