From 2440ff2c9151120c8ae27de6565b11831ee07e08 Mon Sep 17 00:00:00 2001 From: Junwei Zhang Date: Sat, 10 Oct 2015 08:48:42 +0800 Subject: drm/amdgpu: add timer to fence to detect scheduler lockup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change-Id: I67e987db0efdca28faa80b332b75571192130d33 Signed-off-by: Junwei Zhang Reviewed-by: David Zhou Reviewed-by: Christian König --- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 43 ++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c') diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 3697eeeecf82..a413dee7cd19 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -327,19 +327,49 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) struct amd_sched_fence *s_fence = container_of(cb, struct amd_sched_fence, cb); struct amd_gpu_scheduler *sched = s_fence->sched; + unsigned long flags; atomic_dec(&sched->hw_rq_count); amd_sched_fence_signal(s_fence); + if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { + cancel_delayed_work_sync(&s_fence->dwork); + spin_lock_irqsave(&sched->fence_list_lock, flags); + list_del_init(&s_fence->list); + spin_unlock_irqrestore(&sched->fence_list_lock, flags); + } fence_put(&s_fence->base); wake_up_interruptible(&sched->wake_up_worker); } +static void amd_sched_fence_work_func(struct work_struct *work) +{ + struct amd_sched_fence *s_fence = + container_of(work, struct amd_sched_fence, dwork.work); + struct amd_gpu_scheduler *sched = s_fence->sched; + struct amd_sched_fence *entity, *tmp; + unsigned long flags; + + DRM_ERROR("[%s] scheduler is timeout!\n", sched->name); + + /* Clean all pending fences */ + list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) { + DRM_ERROR(" fence no %d\n", entity->base.seqno); + cancel_delayed_work_sync(&entity->dwork); + spin_lock_irqsave(&sched->fence_list_lock, flags); + list_del_init(&entity->list); + spin_unlock_irqrestore(&sched->fence_list_lock, flags); + fence_put(&entity->base); + } +} + static int amd_sched_main(void *param) { struct sched_param sparam = {.sched_priority = 1}; struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param; int r, count; + spin_lock_init(&sched->fence_list_lock); + INIT_LIST_HEAD(&sched->fence_list); sched_setscheduler(current, SCHED_FIFO, &sparam); while (!kthread_should_stop()) { @@ -347,6 +377,7 @@ static int amd_sched_main(void *param) struct amd_sched_fence *s_fence; struct amd_sched_job *sched_job; struct fence *fence; + unsigned long flags; wait_event_interruptible(sched->wake_up_worker, kthread_should_stop() || @@ -357,6 +388,15 @@ static int amd_sched_main(void *param) entity = sched_job->s_entity; s_fence = sched_job->s_fence; + + if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { + INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func); + schedule_delayed_work(&s_fence->dwork, sched->timeout); + spin_lock_irqsave(&sched->fence_list_lock, flags); + list_add_tail(&s_fence->list, &sched->fence_list); + spin_unlock_irqrestore(&sched->fence_list_lock, flags); + } + atomic_inc(&sched->hw_rq_count); fence = sched->ops->run_job(sched_job); if (fence) { @@ -392,11 +432,12 @@ static int amd_sched_main(void *param) */ int amd_sched_init(struct amd_gpu_scheduler *sched, struct amd_sched_backend_ops *ops, - unsigned hw_submission, const char *name) + unsigned 
hw_submission, long timeout, const char *name) { sched->ops = ops; sched->hw_submission_limit = hw_submission; sched->name = name; + sched->timeout = timeout; amd_sched_rq_init(&sched->sched_rq); amd_sched_rq_init(&sched->kernel_rq); -- cgit v1.2.3
From 2fcef6ec87a044221fc3c2f16873f7c02b9ae991 Mon Sep 17 00:00:00 2001 From: Junwei Zhang Date: Tue, 13 Oct 2015 11:14:23 +0800 Subject: drm/amdgpu: fix lockup when cleaning pending fences MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The first timed-out fence takes the scheduler's fence list lock, then cancels the delayed work for all remaining pending fences without waiting for the work items to finish. Change-Id: I9bec826de1aa49d587b0662f3fb4a95333979429 Signed-off-by: Junwei Zhang Reviewed-by: Christian König --- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c') diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index a413dee7cd19..7fa1d7a438e9 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -352,14 +352,14 @@ static void amd_sched_fence_work_func(struct work_struct *work) DRM_ERROR("[%s] scheduler is timeout!\n", sched->name); /* Clean all pending fences */ + spin_lock_irqsave(&sched->fence_list_lock, flags); list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) { DRM_ERROR(" fence no %d\n", entity->base.seqno); - cancel_delayed_work_sync(&entity->dwork); - spin_lock_irqsave(&sched->fence_list_lock, flags); + cancel_delayed_work(&entity->dwork); list_del_init(&entity->list); - spin_unlock_irqrestore(&sched->fence_list_lock, flags); fence_put(&entity->base); } + spin_unlock_irqrestore(&sched->fence_list_lock, flags); } static int amd_sched_main(void *param) -- cgit v1.2.3
From fe537d003f9a97c65848e47b3b9acbb0c5002fd9 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 27 Oct 2015 17:00:17 +0100 Subject: drm/amdgpu: ignore scheduler fences from the same entity MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We are going to submit them before the job anyway. Signed-off-by: Christian König Reviewed-by: Chunming Zhou --- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c') diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 7fa1d7a438e9..8dd7316b1d4f 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -222,6 +222,12 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity) while ((entity->dependency = sched->ops->dependency(sched_job))) { + if (entity->dependency->context == entity->fence_context) { + /* We can ignore fences from ourself */ + fence_put(entity->dependency); + continue; + } + if (fence_add_callback(entity->dependency, &entity->cb, amd_sched_entity_wakeup)) fence_put(entity->dependency); -- cgit v1.2.3
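The watchdog pattern introduced by the two timeout patches above is generic: arm a delayed work item when a job reaches the hardware, disarm it when the job's fence signals, and treat the work function ever running as a lockup. A minimal, self-contained sketch of that pattern (hypothetical watched_job type and names, not the amdgpu code):

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/printk.h>

struct watched_job {
	struct delayed_work dwork;	/* the per-job watchdog */
	struct list_head list;		/* entry in the pending list */
};

static LIST_HEAD(pending_jobs);
static DEFINE_SPINLOCK(pending_lock);

static void job_timed_out(struct work_struct *work)
{
	struct watched_job *job =
		container_of(work, struct watched_job, dwork.work);

	/* The watchdog fired: the hardware never signaled this job. */
	pr_err("job %p timed out\n", job);
}

static void job_submitted(struct watched_job *job, unsigned long timeout)
{
	/* Arm the watchdog as the job is handed to the hardware. */
	INIT_DELAYED_WORK(&job->dwork, job_timed_out);
	schedule_delayed_work(&job->dwork, timeout);

	spin_lock(&pending_lock);
	list_add_tail(&job->list, &pending_jobs);
	spin_unlock(&pending_lock);
}

static void job_completed(struct watched_job *job)
{
	/* Completion in time: disarm the watchdog again. */
	cancel_delayed_work(&job->dwork);

	spin_lock(&pending_lock);
	list_del_init(&job->list);
	spin_unlock(&pending_lock);
}

As in the patches above, the timeout handler can then walk pending_jobs to report every job that was still in flight when the lockup was detected.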
From 32544d021515f767e651648e5702472d48ab9436 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 3 Nov 2015 11:10:03 -0500 Subject: drm/amd/scheduler: don't oops on failure to load MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In two places amdgpu tries to tear down something it hasn't initialised when failing. This is what happens when you enable experimental support on topaz, which then fails in ring init. This patch allows it to fail cleanly. agd: Split out from the original patch since the scheduler is driver independent. Reviewed-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Dave Airlie Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c') diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 8dd7316b1d4f..b3e84d0f4ace 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -468,5 +468,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched, */ void amd_sched_fini(struct amd_gpu_scheduler *sched) { - kthread_stop(sched->thread); + if (sched->thread) + kthread_stop(sched->thread); } -- cgit v1.2.3
From 424839a6a913f2d2f473ce6d5b6465aeddf694db Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 2 Nov 2015 16:25:10 +0100 Subject: drm/amdgpu: fix stopping the scheduler timeout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit cancel_delayed_work_sync is forbidden in interrupt context. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Reviewed-by: Alex Deucher --- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c') diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index b3e84d0f4ace..89619a5a4289 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -338,7 +338,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) atomic_dec(&sched->hw_rq_count); amd_sched_fence_signal(s_fence); if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { - cancel_delayed_work_sync(&s_fence->dwork); + cancel_delayed_work(&s_fence->dwork); spin_lock_irqsave(&sched->fence_list_lock, flags); list_del_init(&s_fence->list); spin_unlock_irqrestore(&sched->fence_list_lock, flags); } -- cgit v1.2.3
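The rule behind this fix applies to any delayed-work user: cancel_delayed_work_sync() can sleep while it waits for an already-running work function to finish, so it must never be called under a spinlock, from interrupt context, or from a fence callback that may run in interrupt context; cancel_delayed_work() only dequeues the work and never sleeps, at the cost of leaving an already-running work function to complete on its own. A short sketch of the two call sites, reusing the hypothetical watched_job from the earlier sketch:

static void disarm_atomic(struct watched_job *job)
{
	/*
	 * Never sleeps, so this is safe under spinlocks and in IRQ
	 * context; but the work function may already be running and
	 * will then finish concurrently.
	 */
	cancel_delayed_work(&job->dwork);
}

static void disarm_and_wait(struct watched_job *job)
{
	/*
	 * May sleep until a running work function returns, so it must
	 * only be called from process context with no spinlocks held.
	 */
	cancel_delayed_work_sync(&job->dwork);
}

This is exactly why the fence-signal path above switched from the _sync variant to plain cancel_delayed_work().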
From f5617f9dde5ae2466560f7cb008c741e2b88adab Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Thu, 5 Nov 2015 11:41:50 +0800 Subject: drm/amd: add kmem cache for sched fence MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change-Id: I45bb8ff10ef05dc3b15e31a77fbcf31117705f11 Signed-off-by: Chunming Zhou Reviewed-by: Christian König --- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 12 ++++++++++++ drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 3 +++ drivers/gpu/drm/amd/scheduler/sched_fence.c | 10 ++++++++-- 3 files changed, 23 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c') diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 89619a5a4289..fe5b3c415f18 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -34,6 +34,9 @@ static struct amd_sched_job * amd_sched_entity_pop_job(struct amd_sched_entity *entity); static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); +struct kmem_cache *sched_fence_slab; +atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); + /* Initialize a given run queue struct */ static void amd_sched_rq_init(struct amd_sched_rq *rq) { @@ -450,6 +453,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched, init_waitqueue_head(&sched->wake_up_worker); init_waitqueue_head(&sched->job_scheduled); atomic_set(&sched->hw_rq_count, 0); + if (atomic_inc_return(&sched_fence_slab_ref) == 1) { + sched_fence_slab = kmem_cache_create( + "amd_sched_fence", sizeof(struct amd_sched_fence), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!sched_fence_slab) + return -ENOMEM; + } /* Each scheduler will run on a seperate kernel thread */ sched->thread = kthread_run(amd_sched_main, sched, sched->name); @@ -470,4 +480,6 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched) { if (sched->thread) kthread_stop(sched->thread); + if (atomic_dec_and_test(&sched_fence_slab_ref)) + kmem_cache_destroy(sched_fence_slab); } diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 929e9aced041..4d05ca6fb057 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -30,6 +30,9 @@ struct amd_gpu_scheduler; struct amd_sched_rq; +extern struct kmem_cache *sched_fence_slab; +extern atomic_t sched_fence_slab_ref; + /** * A scheduler entity is a wrapper around a job queue or a group * of other entities. Entities take turns emitting jobs from their diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index d802638094f4..8d2130b9ff05 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c @@ -32,7 +32,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity struct amd_sched_fence *fence = NULL; unsigned seq; - fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL); + fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); if (fence == NULL) return NULL; fence->owner = owner; @@ -71,11 +71,17 @@ static bool amd_sched_fence_enable_signaling(struct fence *f) return true; } +static void amd_sched_fence_release(struct fence *f) +{ + struct amd_sched_fence *fence = to_amd_sched_fence(f); + kmem_cache_free(sched_fence_slab, fence); +} + const struct fence_ops amd_sched_fence_ops = { .get_driver_name = amd_sched_fence_get_driver_name, .get_timeline_name = amd_sched_fence_get_timeline_name, .enable_signaling = amd_sched_fence_enable_signaling, .signaled = NULL, .wait = fence_default_wait, - .release = NULL, + .release = amd_sched_fence_release, }; -- cgit v1.2.3
From 7034decf6a5b1ff778d83ff9d7ce1f0b404804e4 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Wed, 11 Nov 2015 14:56:00 +0800 Subject: drm/amdgpu: add command submission workflow tracepoint OGL needs these tracepoints to investigate performance issues.
Change-Id: I5e58187d061253f7d665dfce8e4e163ba91d3e2b Signed-off-by: Chunming Zhou --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 51 +++++++++++++++++++++++++ drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 24 ++++++++++-- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 1 + 5 files changed, 76 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index bf32096b8eb7..2ae73d5232dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -888,7 +888,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ttm_eu_fence_buffer_objects(&parser.ticket, &parser.validated, &job->base.s_fence->base); - + trace_amdgpu_cs_ioctl(job); mutex_unlock(&job->job_lock); amdgpu_cs_parser_fini_late(&parser); mutex_unlock(&vm->mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index dcf4a8aca680..67f778f6eedb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -26,6 +26,7 @@ #include #include #include "amdgpu.h" +#include "amdgpu_trace.h" static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) { @@ -45,6 +46,7 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) } job = to_amdgpu_job(sched_job); mutex_lock(&job->job_lock); + trace_amdgpu_sched_run_job(job); r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 26e2d50a6f64..8f9834ab1bd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -48,6 +48,57 @@ TRACE_EVENT(amdgpu_cs, __entry->fences) ); +TRACE_EVENT(amdgpu_cs_ioctl, + TP_PROTO(struct amdgpu_job *job), + TP_ARGS(job), + TP_STRUCT__entry( + __field(struct amdgpu_device *, adev) + __field(struct amd_sched_job *, sched_job) + __field(struct amdgpu_ib *, ib) + __field(struct fence *, fence) + __field(char *, ring_name) + __field(u32, num_ibs) + ), + + TP_fast_assign( + __entry->adev = job->adev; + __entry->sched_job = &job->base; + __entry->ib = job->ibs; + __entry->fence = &job->base.s_fence->base; + __entry->ring_name = job->ibs[0].ring->name; + __entry->num_ibs = job->num_ibs; + ), + TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", + __entry->adev, __entry->sched_job, __entry->ib, + __entry->fence, __entry->ring_name, __entry->num_ibs) +); + +TRACE_EVENT(amdgpu_sched_run_job, + TP_PROTO(struct amdgpu_job *job), + TP_ARGS(job), + TP_STRUCT__entry( + __field(struct amdgpu_device *, adev) + __field(struct amd_sched_job *, sched_job) + __field(struct amdgpu_ib *, ib) + __field(struct fence *, fence) + __field(char *, ring_name) + __field(u32, num_ibs) + ), + + TP_fast_assign( + __entry->adev = job->adev; + __entry->sched_job = &job->base; + __entry->ib = job->ibs; + __entry->fence = &job->base.s_fence->base; + __entry->ring_name = job->ibs[0].ring->name; + __entry->num_ibs = job->num_ibs; + ), + TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", + __entry->adev, __entry->sched_job, __entry->ib, + __entry->fence, __entry->ring_name, __entry->num_ibs) +); + + TRACE_EVENT(amdgpu_vm_grab_id, TP_PROTO(unsigned vmid, int ring), 
TP_ARGS(vmid, ring), diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h index 144f50acc971..c89dc777768f 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h @@ -16,6 +16,8 @@ TRACE_EVENT(amd_sched_job, TP_ARGS(sched_job), TP_STRUCT__entry( __field(struct amd_sched_entity *, entity) + __field(struct amd_sched_job *, sched_job) + __field(struct fence *, fence) __field(const char *, name) __field(u32, job_count) __field(int, hw_job_count) @@ -23,16 +25,32 @@ TRACE_EVENT(amd_sched_job, TP_fast_assign( __entry->entity = sched_job->s_entity; + __entry->sched_job = sched_job; + __entry->fence = &sched_job->s_fence->base; __entry->name = sched_job->sched->name; __entry->job_count = kfifo_len( &sched_job->s_entity->job_queue) / sizeof(sched_job); __entry->hw_job_count = atomic_read( &sched_job->sched->hw_rq_count); ), - TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d", - __entry->entity, __entry->name, __entry->job_count, - __entry->hw_job_count) + TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d", - __entry->entity, __entry->sched_job, __entry->fence, __entry->name, + __entry->job_count, __entry->hw_job_count) ); + +TRACE_EVENT(amd_sched_process_job, + TP_PROTO(struct amd_sched_fence *fence), + TP_ARGS(fence), + TP_STRUCT__entry( + __field(struct fence *, fence) + ), + + TP_fast_assign( + __entry->fence = &fence->base; + ), + TP_printk("fence=%p signaled", __entry->fence) +); + #endif /* This part must be outside protection */ diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index fe5b3c415f18..b8925fea577c 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -346,6 +346,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) list_del_init(&s_fence->list); spin_unlock_irqrestore(&sched->fence_list_lock, flags); } + trace_amd_sched_process_job(s_fence); fence_put(&s_fence->base); wake_up_interruptible(&sched->wake_up_worker); } -- cgit v1.2.3
From 4a562283376197722b295d27633134401bbc80f5 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 6 Nov 2015 14:09:21 +0100 Subject: drm/amdgpu: cleanup scheduler fence get/put dance MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The code was correct, but taking two references when ownership simply moves along is awkward and pure overhead. Signed-off-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 1 - drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 1 - 2 files changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index 67f778f6eedb..8ef9e4415fcc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -64,7 +64,6 @@ err: job->free_job(job); mutex_unlock(&job->job_lock); - fence_put(&job->base.s_fence->base); kfree(job); return fence ? &fence->base : NULL; } diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index b8925fea577c..ccb7c1554f5e 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -285,7 +285,6 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job) if (!fence) return -ENOMEM; - fence_get(&fence->base); sched_job->s_fence = fence; wait_event(entity->sched->job_scheduled, -- cgit v1.2.3
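The cleanup above encodes a general refcounting idiom: when ownership of an object moves linearly from one stage to the next, the reference can move with it, and a get/put pair around the hand-off is pure overhead. A minimal kref sketch of the two styles (hypothetical obj type and names, not the amdgpu code):

#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref ref;	/* creation yields the first reference */
};

static void obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct obj, ref));
}

static struct obj *obj_create(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o)
		kref_init(&o->ref);	/* refcount = 1 */
	return o;
}

static void consume(struct obj *o)
{
	/* ... use o ... */
	kref_put(&o->ref, obj_release);	/* consumer drops the reference */
}

/* Awkward: guard the hand-off with an extra get/put pair. */
static void hand_off_with_extra_ref(struct obj *o)
{
	kref_get(&o->ref);		/* second reference, "just in case" */
	consume(o);
	kref_put(&o->ref, obj_release);	/* and drop it again */
}

/* Linear: the creation reference simply travels with the object. */
static void hand_off_linear(struct obj *o)
{
	consume(o);	/* o must not be touched after this call */
}

In the patch above, the reference created with the scheduler fence is handed along with the job instead of being duplicated at push time and dropped again when the job is freed.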
From e284022163716ecf11c37fd1057c35d689ef2c11 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 5 Nov 2015 19:49:48 +0100 Subject: drm/amdgpu: fix incorrect mutex usage v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before this patch the scheduler fence was created when we pushed the job into the queue, so we could only get the fence after pushing it. The mutex was necessary to prevent the thread pushing the jobs to the hardware from running faster than the thread pushing the jobs into the queue. Otherwise the thread pushing jobs into the queue would have accessed possibly freed memory when it tried to get a reference to the fence. So what you get in the end is thread A: mutex_lock(&job->lock); ... Kick off thread B. ... mutex_unlock(&job->lock); And thread B: mutex_lock(&job->lock); .... mutex_unlock(&job->lock); kfree(job); I'm actually not sure if I'm still up to date on this, but this usage pattern used to be disallowed with mutexes. See here as well https://lwn.net/Articles/575460/. v2: remove unrelated changes, fix missing owner v3: rebased, add more commit message Signed-off-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 43 +++++++++++++++------------ drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 27 +++++++---------- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 10 +------ drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 3 +- 5 files changed, 37 insertions(+), 48 deletions(-) (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 7b02e3455172..0f187027c753 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1225,7 +1225,7 @@ struct amdgpu_job { struct amdgpu_device *adev; struct amdgpu_ib *ibs; uint32_t num_ibs; - struct mutex job_lock; + void *owner; struct amdgpu_user_fence uf; int (*free_job)(struct amdgpu_job *job); }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 2ae73d5232dd..44cf977ae4f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -845,8 +845,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) goto out; if (amdgpu_enable_scheduler && parser.num_ibs) { - struct amdgpu_job *job; struct amdgpu_ring * ring = parser.ibs->ring; + struct amd_sched_fence *fence; + struct amdgpu_job *job; job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); if (!job) { @@ -859,37 +860,41 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) job->adev = parser.adev; job->ibs = parser.ibs; job->num_ibs = parser.num_ibs; - job->base.owner = parser.filp; - mutex_init(&job->job_lock); + job->owner = parser.filp; + job->free_job = amdgpu_cs_free_job; + if (job->ibs[job->num_ibs - 1].user) { job->uf = parser.uf; job->ibs[job->num_ibs - 1].user = &job->uf; parser.uf.bo = 
NULL; } - parser.ibs = NULL; - parser.num_ibs = 0; - - job->free_job = amdgpu_cs_free_job; - mutex_lock(&job->job_lock); - r = amd_sched_entity_push_job(&job->base); - if (r) { - mutex_unlock(&job->job_lock); + fence = amd_sched_fence_create(job->base.s_entity, + parser.filp); + if (!fence) { + r = -ENOMEM; amdgpu_cs_free_job(job); kfree(job); goto out; } - cs->out.handle = - amdgpu_ctx_add_fence(parser.ctx, ring, - &job->base.s_fence->base); + job->base.s_fence = fence; + fence_get(&fence->base); + + cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring, + &fence->base); job->ibs[job->num_ibs - 1].sequence = cs->out.handle; - list_sort(NULL, &parser.validated, cmp_size_smaller_first); - ttm_eu_fence_buffer_objects(&parser.ticket, - &parser.validated, - &job->base.s_fence->base); + parser.ibs = NULL; + parser.num_ibs = 0; + trace_amdgpu_cs_ioctl(job); - mutex_unlock(&job->job_lock); + amd_sched_entity_push_job(&job->base); + + list_sort(NULL, &parser.validated, cmp_size_smaller_first); + ttm_eu_fence_buffer_objects(&parser.ticket, &parser.validated, + &fence->base); + fence_put(&fence->base); + amdgpu_cs_parser_fini_late(&parser); mutex_unlock(&vm->mutex); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index 8ef9e4415fcc..438c05254695 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -45,12 +45,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) return NULL; } job = to_amdgpu_job(sched_job); - mutex_lock(&job->job_lock); trace_amdgpu_sched_run_job(job); - r = amdgpu_ib_schedule(job->adev, - job->num_ibs, - job->ibs, - job->base.owner); + r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner); if (r) { DRM_ERROR("Error scheduling IBs (%d)\n", r); goto err; @@ -63,7 +59,6 @@ err: if (job->free_job) job->free_job(job); - mutex_unlock(&job->job_lock); kfree(job); return fence ? &fence->base : NULL; } @@ -89,21 +84,19 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, return -ENOMEM; job->base.sched = &ring->sched; job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; + job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); + if (!job->base.s_fence) { + kfree(job); + return -ENOMEM; + } + *f = fence_get(&job->base.s_fence->base); + job->adev = adev; job->ibs = ibs; job->num_ibs = num_ibs; - job->base.owner = owner; - mutex_init(&job->job_lock); + job->owner = owner; job->free_job = free_job; - mutex_lock(&job->job_lock); - r = amd_sched_entity_push_job(&job->base); - if (r) { - mutex_unlock(&job->job_lock); - kfree(job); - return r; - } - *f = fence_get(&job->base.s_fence->base); - mutex_unlock(&job->job_lock); + amd_sched_entity_push_job(&job->base); } else { r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); if (r) diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index ccb7c1554f5e..ea30d6ad4c13 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -276,21 +276,13 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job) * * Returns 0 for success, negative error code otherwise. 
*/ -int amd_sched_entity_push_job(struct amd_sched_job *sched_job) +void amd_sched_entity_push_job(struct amd_sched_job *sched_job) { struct amd_sched_entity *entity = sched_job->s_entity; - struct amd_sched_fence *fence = amd_sched_fence_create( - entity, sched_job->owner); - - if (!fence) - return -ENOMEM; - - sched_job->s_fence = fence; wait_event(entity->sched->job_scheduled, amd_sched_entity_in(sched_job)); trace_amd_sched_job(sched_job); - return 0; } /** diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 4d05ca6fb057..939692b14f4b 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -79,7 +79,6 @@ struct amd_sched_job { struct amd_gpu_scheduler *sched; struct amd_sched_entity *s_entity; struct amd_sched_fence *s_fence; - void *owner; }; extern const struct fence_ops amd_sched_fence_ops; @@ -131,7 +130,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, uint32_t jobs); void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity); -int amd_sched_entity_push_job(struct amd_sched_job *sched_job); +void amd_sched_entity_push_job(struct amd_sched_job *sched_job); struct amd_sched_fence *amd_sched_fence_create( struct amd_sched_entity *s_entity, void *owner); -- cgit v1.2.3 From 393a0bd4376a8ecdde079344681a014ec3eb1291 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 5 Nov 2015 12:57:10 +0100 Subject: drm/amdgpu: optimize scheduler fence handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We only need to wait for jobs to be scheduled when the dependency is from the same scheduler. Signed-off-by: Christian König Reviewed-by: Chunming Zhou --- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 51 ++++++++++++++++++++------- drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 5 ++- drivers/gpu/drm/amd/scheduler/sched_fence.c | 13 +++++++ 3 files changed, 55 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c') diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index ea30d6ad4c13..b7cd108212c2 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -211,6 +211,41 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb) amd_sched_wakeup(entity->sched); } +static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) +{ + struct amd_gpu_scheduler *sched = entity->sched; + struct fence * fence = entity->dependency; + struct amd_sched_fence *s_fence; + + if (fence->context == entity->fence_context) { + /* We can ignore fences from ourself */ + fence_put(entity->dependency); + return false; + } + + s_fence = to_amd_sched_fence(fence); + if (s_fence && s_fence->sched == sched) { + /* Fence is from the same scheduler */ + if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) { + /* Ignore it when it is already scheduled */ + fence_put(entity->dependency); + return false; + } + + /* Wait for fence to be scheduled */ + entity->cb.func = amd_sched_entity_wakeup; + list_add_tail(&entity->cb.node, &s_fence->scheduled_cb); + return true; + } + + if (!fence_add_callback(entity->dependency, &entity->cb, + amd_sched_entity_wakeup)) + return true; + + fence_put(entity->dependency); + return false; +} + static struct amd_sched_job * amd_sched_entity_pop_job(struct amd_sched_entity *entity) { @@ -223,20 
+258,9 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity) if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job))) return NULL; - while ((entity->dependency = sched->ops->dependency(sched_job))) { - - if (entity->dependency->context == entity->fence_context) { - /* We can ignore fences from ourself */ - fence_put(entity->dependency); - continue; - } - - if (fence_add_callback(entity->dependency, &entity->cb, - amd_sched_entity_wakeup)) - fence_put(entity->dependency); - else + while ((entity->dependency = sched->ops->dependency(sched_job))) + if (amd_sched_entity_add_dependency_cb(entity)) return NULL; - } return sched_job; } @@ -400,6 +424,7 @@ static int amd_sched_main(void *param) atomic_inc(&sched->hw_rq_count); fence = sched->ops->run_job(sched_job); + amd_sched_fence_scheduled(s_fence); if (fence) { r = fence_add_callback(fence, &s_fence->cb, amd_sched_process_job); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 939692b14f4b..a0f0ae53aacd 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -27,6 +27,8 @@ #include #include +#define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS + struct amd_gpu_scheduler; struct amd_sched_rq; @@ -68,6 +70,7 @@ struct amd_sched_rq { struct amd_sched_fence { struct fence base; struct fence_cb cb; + struct list_head scheduled_cb; struct amd_gpu_scheduler *sched; spinlock_t lock; void *owner; @@ -134,7 +137,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job); struct amd_sched_fence *amd_sched_fence_create( struct amd_sched_entity *s_entity, void *owner); +void amd_sched_fence_scheduled(struct amd_sched_fence *fence); void amd_sched_fence_signal(struct amd_sched_fence *fence); - #endif diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index 8d2130b9ff05..87c78eecea64 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c @@ -35,6 +35,8 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); if (fence == NULL) return NULL; + + INIT_LIST_HEAD(&fence->scheduled_cb); fence->owner = owner; fence->sched = s_entity->sched; spin_lock_init(&fence->lock); @@ -55,6 +57,17 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence) FENCE_TRACE(&fence->base, "was already signaled\n"); } +void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence) +{ + struct fence_cb *cur, *tmp; + + set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags); + list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) { + list_del_init(&cur->node); + cur->func(&s_fence->base, cur); + } +} + static const char *amd_sched_fence_get_driver_name(struct fence *fence) { return "amd_sched"; -- cgit v1.2.3 From 3d65193635e122d0783b97cb2202b7f21601037a Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 12 Nov 2015 21:10:35 +0100 Subject: drm/amdgpu: move dependency handling out of atomic section v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This way the driver isn't limited in the dependency handling callback. 
v2: remove extra check in amd_sched_entity_pop_job() Signed-off-by: Christian König Reviewed-by: Chunming Zhou --- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 71 +++++++++++++++++---------- 1 file changed, 44 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c') diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index b7cd108212c2..651129f2ec1d 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -30,8 +30,7 @@ #define CREATE_TRACE_POINTS #include "gpu_sched_trace.h" -static struct amd_sched_job * -amd_sched_entity_pop_job(struct amd_sched_entity *entity); +static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); struct kmem_cache *sched_fence_slab; @@ -64,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq, } /** - * Select next job from a specified run queue with round robin policy. - * Return NULL if nothing available. + * Select an entity which could provide a job to run + * + * @rq The run queue to check. + * + * Try to find a ready entity, returns NULL if none found. */ -static struct amd_sched_job * -amd_sched_rq_select_job(struct amd_sched_rq *rq) +static struct amd_sched_entity * +amd_sched_rq_select_entity(struct amd_sched_rq *rq) { struct amd_sched_entity *entity; - struct amd_sched_job *sched_job; spin_lock(&rq->lock); entity = rq->current_entity; if (entity) { list_for_each_entry_continue(entity, &rq->entities, list) { - sched_job = amd_sched_entity_pop_job(entity); - if (sched_job) { + if (amd_sched_entity_is_ready(entity)) { rq->current_entity = entity; spin_unlock(&rq->lock); - return sched_job; + return entity; } } } list_for_each_entry(entity, &rq->entities, list) { - sched_job = amd_sched_entity_pop_job(entity); - if (sched_job) { + if (amd_sched_entity_is_ready(entity)) { rq->current_entity = entity; spin_unlock(&rq->lock); - return sched_job; + return entity; } if (entity == rq->current_entity) @@ -176,6 +175,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity) return false; } +/** + * Check if entity is ready + * + * @entity The pointer to a valid scheduler entity + * + * Return true if entity could provide a job. 
+ */ +static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity) +{ + if (kfifo_is_empty(&entity->job_queue)) + return false; + + if (ACCESS_ONCE(entity->dependency)) + return false; + + return true; +} + /** * Destroy a context entity * @@ -252,9 +269,6 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity) struct amd_gpu_scheduler *sched = entity->sched; struct amd_sched_job *sched_job; - if (ACCESS_ONCE(entity->dependency)) - return NULL; - if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job))) return NULL; @@ -328,22 +342,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched) } /** - * Select next to run + * Select next entity to process */ -static struct amd_sched_job * -amd_sched_select_job(struct amd_gpu_scheduler *sched) +static struct amd_sched_entity * +amd_sched_select_entity(struct amd_gpu_scheduler *sched) { - struct amd_sched_job *sched_job; + struct amd_sched_entity *entity; if (!amd_sched_ready(sched)) return NULL; /* Kernel run queue has higher priority than normal run queue*/ - sched_job = amd_sched_rq_select_job(&sched->kernel_rq); - if (sched_job == NULL) - sched_job = amd_sched_rq_select_job(&sched->sched_rq); + entity = amd_sched_rq_select_entity(&sched->kernel_rq); + if (entity == NULL) + entity = amd_sched_rq_select_entity(&sched->sched_rq); - return sched_job; + return entity; } static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) @@ -405,13 +419,16 @@ static int amd_sched_main(void *param) unsigned long flags; wait_event_interruptible(sched->wake_up_worker, - kthread_should_stop() || - (sched_job = amd_sched_select_job(sched))); + (entity = amd_sched_select_entity(sched)) || + kthread_should_stop()); + if (!entity) + continue; + + sched_job = amd_sched_entity_pop_job(entity); if (!sched_job) continue; - entity = sched_job->s_entity; s_fence = sched_job->s_fence; if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { -- cgit v1.2.3
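Taken together, the last two patches leave the dependency handling with three distinct cases, decided outside any atomic section. A condensed, annotated restatement of that logic using the names from the patches above (simplified; error paths elided):

static bool add_dependency_cb(struct amd_sched_entity *entity)
{
	struct fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	/* Case 1: our own fence; jobs from one entity are submitted
	 * in order anyway, so there is nothing to wait for. */
	if (fence->context == entity->fence_context) {
		fence_put(fence);
		return false;
	}

	/* Case 2: a fence from the same scheduler; it is enough to
	 * wait until that job is scheduled, because the ring keeps
	 * hardware execution ordered. */
	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == entity->sched) {
		if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
			fence_put(fence);	/* already scheduled */
			return false;
		}
		entity->cb.func = amd_sched_entity_wakeup;
		list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
		return true;
	}

	/* Case 3: a foreign fence; wait for it to actually signal. */
	if (!fence_add_callback(fence, &entity->cb, amd_sched_entity_wakeup))
		return true;

	fence_put(fence);	/* already signaled, no wait needed */
	return false;
}

Once the main loop hands the job to run_job(), amd_sched_fence_scheduled() sets AMD_SCHED_FENCE_SCHEDULED_BIT and then fires the scheduled_cb list, which wakes any entity parked in case 2.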