From e4a58a28b50f30e72292b6659d94410cbf7355ad Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 5 Nov 2015 17:00:25 +0100 Subject: drm/amdgpu: fix leaking the IBs on error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixing a memory leak when the scheduler is enabled. Signed-off-by: Christian König Reviewed-by: Junwei Zhang Reviewed-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index dfc4d02c7a38..ecc82dfe83f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -499,16 +499,12 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); kfree(parser->chunks); - if (!amdgpu_enable_scheduler) - { - if (parser->ibs) - for (i = 0; i < parser->num_ibs; i++) - amdgpu_ib_free(parser->adev, &parser->ibs[i]); - kfree(parser->ibs); - if (parser->uf.bo) - drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); - } - + if (parser->ibs) + for (i = 0; i < parser->num_ibs; i++) + amdgpu_ib_free(parser->adev, &parser->ibs[i]); + kfree(parser->ibs); + if (parser->uf.bo) + drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); kfree(parser); } @@ -888,11 +884,14 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) job->base.owner = parser->filp; mutex_init(&job->job_lock); if (job->ibs[job->num_ibs - 1].user) { - memcpy(&job->uf, &parser->uf, - sizeof(struct amdgpu_user_fence)); + job->uf = parser->uf; job->ibs[job->num_ibs - 1].user = &job->uf; + parser->uf.bo = NULL; } + parser->ibs = NULL; + parser->num_ibs = 0; + job->free_job = amdgpu_cs_free_job; mutex_lock(&job->job_lock); r = amd_sched_entity_push_job(&job->base); @@ -905,7 +904,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) cs->out.handle = amdgpu_ctx_add_fence(parser->ctx, ring, &job->base.s_fence->base); - parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle; + job->ibs[job->num_ibs - 1].sequence = cs->out.handle; list_sort(NULL, &parser->validated, cmp_size_smaller_first); ttm_eu_fence_buffer_objects(&parser->ticket, -- cgit v1.2.3 From 7e52a81c2f0326a85d3ebc005829bcd604731c6d Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 4 Nov 2015 15:44:39 +0100 Subject: drm/amdgpu: cleanup amdgpu_cs_parser handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need any more to allocate that structure dynamically, just put it on the stack. This is a start to cleanup some of the scheduler fallouts. 
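As a rough user-space illustration of the change (made-up names, not the amdgpu code itself): a context that only lives for the duration of one call does not need a create()/free() pair; a zero-initialized local gives the same lifetime without the extra allocation and its extra error path.

    #include <stdio.h>

    struct parser_ctx {
            int num_items;
            const char *owner;
    };

    static int do_submit(const struct parser_ctx *p)
    {
            return p->num_items > 0 ? 0 : -1;       /* stand-in for the real work */
    }

    int main(void)
    {
            /* Before: p = calloc(1, sizeof(*p)); ... free(p);  -- one more
             * allocation that can fail and one more thing to leak.
             * After: the context lives on the caller's stack, zeroed up
             * front (the kernel patch spells this "= {}"). */
            struct parser_ctx parser = { 0 };

            parser.num_items = 1;
            parser.owner = "filp";

            printf("submit: %d\n", do_submit(&parser));
            return 0;
    }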
Signed-off-by: Christian König Reviewed-by: Junwei Zhang Reviewed-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 5 -- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 88 +++++++++++++--------------------- 2 files changed, 33 insertions(+), 60 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index b801b6702812..7b02e3455172 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -2256,11 +2256,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_card_posted(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); -struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, - struct drm_file *filp, - struct amdgpu_ctx *ctx, - struct amdgpu_ib *ibs, - uint32_t num_ibs); int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index ecc82dfe83f8..bf32096b8eb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -127,30 +127,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, return 0; } -struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, - struct drm_file *filp, - struct amdgpu_ctx *ctx, - struct amdgpu_ib *ibs, - uint32_t num_ibs) -{ - struct amdgpu_cs_parser *parser; - int i; - - parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL); - if (!parser) - return NULL; - - parser->adev = adev; - parser->filp = filp; - parser->ctx = ctx; - parser->ibs = ibs; - parser->num_ibs = num_ibs; - for (i = 0; i < num_ibs; i++) - ibs[i].ctx = ctx; - - return parser; -} - int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) { union drm_amdgpu_cs *cs = data; @@ -490,6 +466,7 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) { unsigned i; + if (parser->ctx) amdgpu_ctx_put(parser->ctx); if (parser->bo_list) @@ -505,7 +482,6 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) kfree(parser->ibs); if (parser->uf.bo) drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); - kfree(parser); } /** @@ -824,36 +800,36 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) union drm_amdgpu_cs *cs = data; struct amdgpu_fpriv *fpriv = filp->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; - struct amdgpu_cs_parser *parser; + struct amdgpu_cs_parser parser = {}; bool reserved_buffers = false; int i, r; if (!adev->accel_working) return -EBUSY; - parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0); - if (!parser) - return -ENOMEM; - r = amdgpu_cs_parser_init(parser, data); + parser.adev = adev; + parser.filp = filp; + + r = amdgpu_cs_parser_init(&parser, data); if (r) { DRM_ERROR("Failed to initialize parser !\n"); - amdgpu_cs_parser_fini(parser, r, false); + amdgpu_cs_parser_fini(&parser, r, false); r = amdgpu_cs_handle_lockup(adev, r); return r; } mutex_lock(&vm->mutex); - r = amdgpu_cs_parser_relocs(parser); + r = amdgpu_cs_parser_relocs(&parser); if (r == -ENOMEM) DRM_ERROR("Not enough memory for command submission!\n"); else if (r && r != -ERESTARTSYS) DRM_ERROR("Failed to process the buffer list %d!\n", 
r); else if (!r) { reserved_buffers = true; - r = amdgpu_cs_ib_fill(adev, parser); + r = amdgpu_cs_ib_fill(adev, &parser); } if (!r) { - r = amdgpu_cs_dependencies(adev, parser); + r = amdgpu_cs_dependencies(adev, &parser); if (r) DRM_ERROR("Failed in the dependencies handling %d!\n", r); } @@ -861,36 +837,38 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r) goto out; - for (i = 0; i < parser->num_ibs; i++) - trace_amdgpu_cs(parser, i); + for (i = 0; i < parser.num_ibs; i++) + trace_amdgpu_cs(&parser, i); - r = amdgpu_cs_ib_vm_chunk(adev, parser); + r = amdgpu_cs_ib_vm_chunk(adev, &parser); if (r) goto out; - if (amdgpu_enable_scheduler && parser->num_ibs) { + if (amdgpu_enable_scheduler && parser.num_ibs) { struct amdgpu_job *job; - struct amdgpu_ring * ring = parser->ibs->ring; + struct amdgpu_ring * ring = parser.ibs->ring; + job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); if (!job) { r = -ENOMEM; goto out; } + job->base.sched = &ring->sched; - job->base.s_entity = &parser->ctx->rings[ring->idx].entity; - job->adev = parser->adev; - job->ibs = parser->ibs; - job->num_ibs = parser->num_ibs; - job->base.owner = parser->filp; + job->base.s_entity = &parser.ctx->rings[ring->idx].entity; + job->adev = parser.adev; + job->ibs = parser.ibs; + job->num_ibs = parser.num_ibs; + job->base.owner = parser.filp; mutex_init(&job->job_lock); if (job->ibs[job->num_ibs - 1].user) { - job->uf = parser->uf; + job->uf = parser.uf; job->ibs[job->num_ibs - 1].user = &job->uf; - parser->uf.bo = NULL; + parser.uf.bo = NULL; } - parser->ibs = NULL; - parser->num_ibs = 0; + parser.ibs = NULL; + parser.num_ibs = 0; job->free_job = amdgpu_cs_free_job; mutex_lock(&job->job_lock); @@ -902,24 +880,24 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) goto out; } cs->out.handle = - amdgpu_ctx_add_fence(parser->ctx, ring, + amdgpu_ctx_add_fence(parser.ctx, ring, &job->base.s_fence->base); job->ibs[job->num_ibs - 1].sequence = cs->out.handle; - list_sort(NULL, &parser->validated, cmp_size_smaller_first); - ttm_eu_fence_buffer_objects(&parser->ticket, - &parser->validated, + list_sort(NULL, &parser.validated, cmp_size_smaller_first); + ttm_eu_fence_buffer_objects(&parser.ticket, + &parser.validated, &job->base.s_fence->base); mutex_unlock(&job->job_lock); - amdgpu_cs_parser_fini_late(parser); + amdgpu_cs_parser_fini_late(&parser); mutex_unlock(&vm->mutex); return 0; } - cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence; + cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence; out: - amdgpu_cs_parser_fini(parser, r, reserved_buffers); + amdgpu_cs_parser_fini(&parser, r, reserved_buffers); mutex_unlock(&vm->mutex); r = amdgpu_cs_handle_lockup(adev, r); return r; -- cgit v1.2.3 From 7034decf6a5b1ff778d83ff9d7ce1f0b404804e4 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Wed, 11 Nov 2015 14:56:00 +0800 Subject: drm/amdgpu: add command submission workflow tracepoint OGL needs these tracepoints to investigate performance issue. 
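For reference, a sketch of how these events can be consumed from user space. The tracefs path and the event names below are assumptions (they depend on where tracefs is mounted and on this patch being in the running kernel), so adjust as needed:

    #include <stdio.h>

    /* Assumed tracefs mount point; newer kernels also expose /sys/kernel/tracing.
     * Enabling events needs root. */
    #define TRACEFS "/sys/kernel/debug/tracing"

    static int write_str(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror(path);
                    return -1;
            }
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            /* enable the two amdgpu events added by this patch and start tracing */
            write_str(TRACEFS "/events/amdgpu/amdgpu_cs_ioctl/enable", "1");
            write_str(TRACEFS "/events/amdgpu/amdgpu_sched_run_job/enable", "1");
            write_str(TRACEFS "/tracing_on", "1");

            /* run the GL workload of interest, then read the per-job
             * cs_ioctl -> run_job timestamps from trace_pipe */
            printf("cat %s/trace_pipe\n", TRACEFS);
            return 0;
    }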
Change-Id: I5e58187d061253f7d665dfce8e4e163ba91d3e2b Signed-off-by: Chunming Zhou --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 51 +++++++++++++++++++++++++ drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 24 ++++++++++-- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 1 + 5 files changed, 76 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index bf32096b8eb7..2ae73d5232dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -888,7 +888,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ttm_eu_fence_buffer_objects(&parser.ticket, &parser.validated, &job->base.s_fence->base); - + trace_amdgpu_cs_ioctl(job); mutex_unlock(&job->job_lock); amdgpu_cs_parser_fini_late(&parser); mutex_unlock(&vm->mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index dcf4a8aca680..67f778f6eedb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -26,6 +26,7 @@ #include #include #include "amdgpu.h" +#include "amdgpu_trace.h" static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) { @@ -45,6 +46,7 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) } job = to_amdgpu_job(sched_job); mutex_lock(&job->job_lock); + trace_amdgpu_sched_run_job(job); r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 26e2d50a6f64..8f9834ab1bd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -48,6 +48,57 @@ TRACE_EVENT(amdgpu_cs, __entry->fences) ); +TRACE_EVENT(amdgpu_cs_ioctl, + TP_PROTO(struct amdgpu_job *job), + TP_ARGS(job), + TP_STRUCT__entry( + __field(struct amdgpu_device *, adev) + __field(struct amd_sched_job *, sched_job) + __field(struct amdgpu_ib *, ib) + __field(struct fence *, fence) + __field(char *, ring_name) + __field(u32, num_ibs) + ), + + TP_fast_assign( + __entry->adev = job->adev; + __entry->sched_job = &job->base; + __entry->ib = job->ibs; + __entry->fence = &job->base.s_fence->base; + __entry->ring_name = job->ibs[0].ring->name; + __entry->num_ibs = job->num_ibs; + ), + TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", + __entry->adev, __entry->sched_job, __entry->ib, + __entry->fence, __entry->ring_name, __entry->num_ibs) +); + +TRACE_EVENT(amdgpu_sched_run_job, + TP_PROTO(struct amdgpu_job *job), + TP_ARGS(job), + TP_STRUCT__entry( + __field(struct amdgpu_device *, adev) + __field(struct amd_sched_job *, sched_job) + __field(struct amdgpu_ib *, ib) + __field(struct fence *, fence) + __field(char *, ring_name) + __field(u32, num_ibs) + ), + + TP_fast_assign( + __entry->adev = job->adev; + __entry->sched_job = &job->base; + __entry->ib = job->ibs; + __entry->fence = &job->base.s_fence->base; + __entry->ring_name = job->ibs[0].ring->name; + __entry->num_ibs = job->num_ibs; + ), + TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", + __entry->adev, __entry->sched_job, __entry->ib, + __entry->fence, __entry->ring_name, __entry->num_ibs) +); + + TRACE_EVENT(amdgpu_vm_grab_id, TP_PROTO(unsigned vmid, int ring), 
TP_ARGS(vmid, ring), diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h index 144f50acc971..c89dc777768f 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h @@ -16,6 +16,8 @@ TRACE_EVENT(amd_sched_job, TP_ARGS(sched_job), TP_STRUCT__entry( __field(struct amd_sched_entity *, entity) + __field(struct amd_sched_job *, sched_job) + __field(struct fence *, fence) __field(const char *, name) __field(u32, job_count) __field(int, hw_job_count) @@ -23,16 +25,32 @@ TRACE_EVENT(amd_sched_job, TP_fast_assign( __entry->entity = sched_job->s_entity; + __entry->sched_job = sched_job; + __entry->fence = &sched_job->s_fence->base; __entry->name = sched_job->sched->name; __entry->job_count = kfifo_len( &sched_job->s_entity->job_queue) / sizeof(sched_job); __entry->hw_job_count = atomic_read( &sched_job->sched->hw_rq_count); ), - TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d", - __entry->entity, __entry->name, __entry->job_count, - __entry->hw_job_count) + TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d", + __entry->entity, __entry->sched_job, __entry->fence, __entry->name, + __entry->job_count, __entry->hw_job_count) ); + +TRACE_EVENT(amd_sched_process_job, + TP_PROTO(struct amd_sched_fence *fence), + TP_ARGS(fence), + TP_STRUCT__entry( + __field(struct fence *, fence) + ), + + TP_fast_assign( + __entry->fence = &fence->base; + ), + TP_printk("fence=%p signaled", __entry->fence) +); + #endif /* This part must be outside protection */ diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index fe5b3c415f18..b8925fea577c 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -346,6 +346,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) list_del_init(&s_fence->list); spin_unlock_irqrestore(&sched->fence_list_lock, flags); } + trace_amd_sched_process_job(s_fence); fence_put(&s_fence->base); wake_up_interruptible(&sched->wake_up_worker); } -- cgit v1.2.3 From e284022163716ecf11c37fd1057c35d689ef2c11 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 5 Nov 2015 19:49:48 +0100 Subject: drm/amdgpu: fix incorrect mutex usage v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before this patch the scheduler fence was created when we push the job into the queue, so we could only get the fence after pushing it. The mutex now was necessary to prevent the thread pushing the jobs to the hardware from running faster than the thread pushing the jobs into the queue. Otherwise the thread pushing jobs into the queue would have accessed possible freed up memory when it tries to get a reference to the fence. So what you get in the end is thread A: mutex_lock(&job->lock); ... Kick of thread B. ... mutex_unlock(&job->lock); And thread B: mutex_lock(&job->lock); .... mutex_unlock(&job->lock); kfree(job); I'm actually not sure if I'm still up to date on this, but this usage pattern used to be not allowed with mutexes. See here as well https://lwn.net/Articles/575460/. 
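The same shape is easy to write down in user space (pthread sketch with a made-up struct; not a claim about any particular libc): whether it is safe hinges entirely on whether unlock may still touch the mutex after another thread has taken it, which is exactly the uncertainty above.

    #include <pthread.h>
    #include <stdlib.h>

    struct job {
            pthread_mutex_t lock;
            int payload;
    };

    /* Thread B: take the lock, drop it, then free the object holding it. */
    static void *thread_b(void *arg)
    {
            struct job *job = arg;

            pthread_mutex_lock(&job->lock);
            pthread_mutex_unlock(&job->lock);
            free(job);      /* if A's unlock below still touches job->lock,
                             * this becomes a use-after-free */
            return NULL;
    }

    int main(void)
    {
            struct job *job = malloc(sizeof(*job));
            pthread_t b;

            if (!job)
                    return 1;
            pthread_mutex_init(&job->lock, NULL);
            pthread_mutex_lock(&job->lock);                 /* thread A */
            pthread_create(&b, NULL, thread_b, job);        /* kick off thread B */
            pthread_mutex_unlock(&job->lock);               /* may still reference job->lock
                                                             * after B already owns it */
            pthread_join(b, NULL);
            return 0;
    }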
v2: remove unrelated changes, fix missing owner v3: rebased, add more commit message Signed-off-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 43 +++++++++++++++------------ drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 27 +++++++---------- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 10 +------ drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 3 +- 5 files changed, 37 insertions(+), 48 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 7b02e3455172..0f187027c753 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1225,7 +1225,7 @@ struct amdgpu_job { struct amdgpu_device *adev; struct amdgpu_ib *ibs; uint32_t num_ibs; - struct mutex job_lock; + void *owner; struct amdgpu_user_fence uf; int (*free_job)(struct amdgpu_job *job); }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 2ae73d5232dd..44cf977ae4f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -845,8 +845,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) goto out; if (amdgpu_enable_scheduler && parser.num_ibs) { - struct amdgpu_job *job; struct amdgpu_ring * ring = parser.ibs->ring; + struct amd_sched_fence *fence; + struct amdgpu_job *job; job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); if (!job) { @@ -859,37 +860,41 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) job->adev = parser.adev; job->ibs = parser.ibs; job->num_ibs = parser.num_ibs; - job->base.owner = parser.filp; - mutex_init(&job->job_lock); + job->owner = parser.filp; + job->free_job = amdgpu_cs_free_job; + if (job->ibs[job->num_ibs - 1].user) { job->uf = parser.uf; job->ibs[job->num_ibs - 1].user = &job->uf; parser.uf.bo = NULL; } - parser.ibs = NULL; - parser.num_ibs = 0; - - job->free_job = amdgpu_cs_free_job; - mutex_lock(&job->job_lock); - r = amd_sched_entity_push_job(&job->base); - if (r) { - mutex_unlock(&job->job_lock); + fence = amd_sched_fence_create(job->base.s_entity, + parser.filp); + if (!fence) { + r = -ENOMEM; amdgpu_cs_free_job(job); kfree(job); goto out; } - cs->out.handle = - amdgpu_ctx_add_fence(parser.ctx, ring, - &job->base.s_fence->base); + job->base.s_fence = fence; + fence_get(&fence->base); + + cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring, + &fence->base); job->ibs[job->num_ibs - 1].sequence = cs->out.handle; - list_sort(NULL, &parser.validated, cmp_size_smaller_first); - ttm_eu_fence_buffer_objects(&parser.ticket, - &parser.validated, - &job->base.s_fence->base); + parser.ibs = NULL; + parser.num_ibs = 0; + trace_amdgpu_cs_ioctl(job); - mutex_unlock(&job->job_lock); + amd_sched_entity_push_job(&job->base); + + list_sort(NULL, &parser.validated, cmp_size_smaller_first); + ttm_eu_fence_buffer_objects(&parser.ticket, &parser.validated, + &fence->base); + fence_put(&fence->base); + amdgpu_cs_parser_fini_late(&parser); mutex_unlock(&vm->mutex); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index 8ef9e4415fcc..438c05254695 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -45,12 +45,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) return NULL; } job = to_amdgpu_job(sched_job); - 
mutex_lock(&job->job_lock); trace_amdgpu_sched_run_job(job); - r = amdgpu_ib_schedule(job->adev, - job->num_ibs, - job->ibs, - job->base.owner); + r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner); if (r) { DRM_ERROR("Error scheduling IBs (%d)\n", r); goto err; @@ -63,7 +59,6 @@ err: if (job->free_job) job->free_job(job); - mutex_unlock(&job->job_lock); kfree(job); return fence ? &fence->base : NULL; } @@ -89,21 +84,19 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, return -ENOMEM; job->base.sched = &ring->sched; job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; + job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); + if (!job->base.s_fence) { + kfree(job); + return -ENOMEM; + } + *f = fence_get(&job->base.s_fence->base); + job->adev = adev; job->ibs = ibs; job->num_ibs = num_ibs; - job->base.owner = owner; - mutex_init(&job->job_lock); + job->owner = owner; job->free_job = free_job; - mutex_lock(&job->job_lock); - r = amd_sched_entity_push_job(&job->base); - if (r) { - mutex_unlock(&job->job_lock); - kfree(job); - return r; - } - *f = fence_get(&job->base.s_fence->base); - mutex_unlock(&job->job_lock); + amd_sched_entity_push_job(&job->base); } else { r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); if (r) diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index ccb7c1554f5e..ea30d6ad4c13 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -276,21 +276,13 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job) * * Returns 0 for success, negative error code otherwise. */ -int amd_sched_entity_push_job(struct amd_sched_job *sched_job) +void amd_sched_entity_push_job(struct amd_sched_job *sched_job) { struct amd_sched_entity *entity = sched_job->s_entity; - struct amd_sched_fence *fence = amd_sched_fence_create( - entity, sched_job->owner); - - if (!fence) - return -ENOMEM; - - sched_job->s_fence = fence; wait_event(entity->sched->job_scheduled, amd_sched_entity_in(sched_job)); trace_amd_sched_job(sched_job); - return 0; } /** diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 4d05ca6fb057..939692b14f4b 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -79,7 +79,6 @@ struct amd_sched_job { struct amd_gpu_scheduler *sched; struct amd_sched_entity *s_entity; struct amd_sched_fence *s_fence; - void *owner; }; extern const struct fence_ops amd_sched_fence_ops; @@ -131,7 +130,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, uint32_t jobs); void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity); -int amd_sched_entity_push_job(struct amd_sched_job *sched_job); +void amd_sched_entity_push_job(struct amd_sched_job *sched_job); struct amd_sched_fence *amd_sched_fence_create( struct amd_sched_entity *s_entity, void *owner); -- cgit v1.2.3 From 5d82730af746abca2aa74e00de6370d338df7e95 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 13 Nov 2015 13:04:50 +0100 Subject: drm/amdgpu: fix handling order in scheduler CS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to clear parser.ibs and num_ibs before amd_sched_fence_create, otherwise the IB could be freed twice if fence creates fails. 
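In other words: transfer ownership of the IBs to the job before the first step that can fail, so only one of the two cleanup paths can ever free them. A generic sketch of that rule (made-up names, not the driver code):

    #include <stdlib.h>

    struct parser { int *ibs; };
    struct job    { int *ibs; };

    static void parser_cleanup(struct parser *p) { free(p->ibs); }  /* runs on every exit */
    static void job_cleanup(struct job *j)       { free(j->ibs); }  /* runs once a job exists */

    static int submit(struct parser *p, int fence_create_fails)
    {
            struct job *job = calloc(1, sizeof(*job));

            if (!job)
                    return -1;

            /* Hand the buffers over FIRST: from here on the job is the only owner. */
            job->ibs = p->ibs;
            p->ibs = NULL;

            if (fence_create_fails) {               /* the fallible step */
                    job_cleanup(job);
                    free(job);
                    return -1;                      /* parser_cleanup() later frees NULL: harmless */
            }

            /* ... push the job ... */
            job_cleanup(job);
            free(job);
            return 0;
    }

    int main(void)
    {
            struct parser p = { .ibs = malloc(16) };

            submit(&p, 1);                          /* simulate fence creation failing */
            parser_cleanup(&p);                     /* would double-free if ownership moved too late */
            return 0;
    }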
Signed-off-by: Christian König Reviewed-by: Chunming Zhou --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 44cf977ae4f6..6096effd6a56 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -858,11 +858,14 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) job->base.sched = &ring->sched; job->base.s_entity = &parser.ctx->rings[ring->idx].entity; job->adev = parser.adev; - job->ibs = parser.ibs; - job->num_ibs = parser.num_ibs; job->owner = parser.filp; job->free_job = amdgpu_cs_free_job; + job->ibs = parser.ibs; + job->num_ibs = parser.num_ibs; + parser.ibs = NULL; + parser.num_ibs = 0; + if (job->ibs[job->num_ibs - 1].user) { job->uf = parser.uf; job->ibs[job->num_ibs - 1].user = &job->uf; @@ -884,9 +887,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) &fence->base); job->ibs[job->num_ibs - 1].sequence = cs->out.handle; - parser.ibs = NULL; - parser.num_ibs = 0; - trace_amdgpu_cs_ioctl(job); amd_sched_entity_push_job(&job->base); -- cgit v1.2.3 From 984810fc45389c545719fbb4219e8a12b27032a4 Mon Sep 17 00:00:00 2001 From: Christian König Date: Sat, 14 Nov 2015 21:05:35 +0100 Subject: drm/amdgpu: cleanup scheduler command submission MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Unify the two code path again, cause they do pretty much the same thing. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Reviewed-by: Junwei Zhang --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 63 +++++++++++++--------------------- 2 files changed, 25 insertions(+), 39 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 0f187027c753..0ce5c1e45ec8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1210,6 +1210,7 @@ struct amdgpu_cs_parser { /* relocations */ struct amdgpu_bo_list_entry *vm_bos; struct list_head validated; + struct fence *fence; struct amdgpu_ib *ibs; uint32_t num_ibs; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 6096effd6a56..3afcf0237c25 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -439,8 +439,18 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a, return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; } -static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff) +/** + * cs_parser_fini() - clean parser states + * @parser: parser structure holding parsing context. + * @error: error number + * + * If error is set than unvalidate buffer, otherwise just free memory + * used by parsing context. + **/ +static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) { + unsigned i; + if (!error) { /* Sort the buffer list from the smallest to largest buffer, * which affects the order of buffers in the LRU list. 
@@ -455,17 +465,13 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err list_sort(NULL, &parser->validated, cmp_size_smaller_first); ttm_eu_fence_buffer_objects(&parser->ticket, - &parser->validated, - &parser->ibs[parser->num_ibs-1].fence->base); + &parser->validated, + parser->fence); } else if (backoff) { ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); } -} - -static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) -{ - unsigned i; + fence_put(parser->fence); if (parser->ctx) amdgpu_ctx_put(parser->ctx); @@ -484,20 +490,6 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); } -/** - * cs_parser_fini() - clean parser states - * @parser: parser structure holding parsing context. - * @error: error number - * - * If error is set than unvalidate buffer, otherwise just free memory - * used by parsing context. - **/ -static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) -{ - amdgpu_cs_parser_fini_early(parser, error, backoff); - amdgpu_cs_parser_fini_late(parser); -} - static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, struct amdgpu_vm *vm) { @@ -582,15 +574,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, } r = amdgpu_bo_vm_update_pte(parser, vm); - if (r) { - goto out; - } - amdgpu_cs_sync_rings(parser); - if (!amdgpu_enable_scheduler) - r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs, - parser->filp); + if (!r) + amdgpu_cs_sync_rings(parser); -out: return r; } @@ -881,7 +867,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) goto out; } job->base.s_fence = fence; - fence_get(&fence->base); + parser.fence = fence_get(&fence->base); cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring, &fence->base); @@ -890,17 +876,16 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) trace_amdgpu_cs_ioctl(job); amd_sched_entity_push_job(&job->base); - list_sort(NULL, &parser.validated, cmp_size_smaller_first); - ttm_eu_fence_buffer_objects(&parser.ticket, &parser.validated, - &fence->base); - fence_put(&fence->base); + } else { + struct amdgpu_fence *fence; - amdgpu_cs_parser_fini_late(&parser); - mutex_unlock(&vm->mutex); - return 0; + r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs, + parser.filp); + fence = parser.ibs[parser.num_ibs - 1].fence; + parser.fence = fence_get(&fence->base); + cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence; } - cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence; out: amdgpu_cs_parser_fini(&parser, r, reserved_buffers); mutex_unlock(&vm->mutex); -- cgit v1.2.3 From e98c1b0de6fe73f488df62d83d83f377b1b6e2b8 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Fri, 13 Nov 2015 15:22:04 +0800 Subject: drm/amdgpu: remove vm->mutex MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Chunming Zhou Reviewed-by: Christian König --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 -- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 ---- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 14 ++------------ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 -- 4 files changed, 2 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 50672bb19798..251b14736de9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -929,8 +929,6 @@ struct amdgpu_vm_id { }; struct amdgpu_vm { - struct mutex mutex; - struct rb_root va; /* protecting invalidated */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 3afcf0237c25..1d44d508d4d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -784,8 +784,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct amdgpu_device *adev = dev->dev_private; union drm_amdgpu_cs *cs = data; - struct amdgpu_fpriv *fpriv = filp->driver_priv; - struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_cs_parser parser = {}; bool reserved_buffers = false; int i, r; @@ -803,7 +801,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) r = amdgpu_cs_handle_lockup(adev, r); return r; } - mutex_lock(&vm->mutex); r = amdgpu_cs_parser_relocs(&parser); if (r == -ENOMEM) DRM_ERROR("Not enough memory for command submission!\n"); @@ -888,7 +885,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) out: amdgpu_cs_parser_fini(&parser, r, reserved_buffers); - mutex_unlock(&vm->mutex); r = amdgpu_cs_handle_lockup(adev, r); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 00c5b580f56c..fc32fc01a64b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -115,12 +115,9 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_va *bo_va; int r; - mutex_lock(&vm->mutex); r = amdgpu_bo_reserve(rbo, false); - if (r) { - mutex_unlock(&vm->mutex); + if (r) return r; - } bo_va = amdgpu_vm_bo_find(vm, rbo); if (!bo_va) { @@ -129,7 +126,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri ++bo_va->ref_count; } amdgpu_bo_unreserve(rbo); - mutex_unlock(&vm->mutex); return 0; } @@ -142,10 +138,8 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_va *bo_va; int r; - mutex_lock(&vm->mutex); r = amdgpu_bo_reserve(rbo, true); if (r) { - mutex_unlock(&vm->mutex); dev_err(adev->dev, "leaking bo va because " "we fail to reserve bo (%d)\n", r); return; @@ -157,7 +151,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, } } amdgpu_bo_unreserve(rbo); - mutex_unlock(&vm->mutex); } static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r) @@ -553,7 +546,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, gobj = drm_gem_object_lookup(dev, filp, args->handle); if (gobj == NULL) return -ENOENT; - mutex_lock(&fpriv->vm.mutex); rbo = gem_to_amdgpu_bo(gobj); INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&duplicates); @@ -568,7 +560,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, } r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); if (r) { - mutex_unlock(&fpriv->vm.mutex); drm_gem_object_unreference_unlocked(gobj); return r; } @@ -577,7 +568,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, if (!bo_va) { ttm_eu_backoff_reservation(&ticket, &list); drm_gem_object_unreference_unlocked(gobj); - mutex_unlock(&fpriv->vm.mutex); return -ENOENT; } @@ -602,7 +592,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, ttm_eu_backoff_reservation(&ticket, &list); if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) amdgpu_gem_va_update_vm(adev, bo_va, 
args->operation); - mutex_unlock(&fpriv->vm.mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d6904ef742f0..ae037e5b6ad0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1241,7 +1241,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) vm->ids[i].id = 0; vm->ids[i].flushed_updates = NULL; } - mutex_init(&vm->mutex); vm->va = RB_ROOT; spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->invalidated); @@ -1325,7 +1324,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) fence_put(vm->ids[i].flushed_updates); } - mutex_destroy(&vm->mutex); } /** -- cgit v1.2.3 From f3f1769283b8dbf047c678da95b72194ac2477a1 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 3 Dec 2015 19:55:52 +0100 Subject: drm/amdgpu: take a BO reference for the user fence MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need for a GEM reference here. Reviewed-by: Michel Dänzer Signed-off-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 1d44d508d4d4..4f352ec9dec4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -222,6 +222,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) } p->uf.bo = gem_to_amdgpu_bo(gobj); + amdgpu_bo_ref(p->uf.bo); + drm_gem_object_unreference_unlocked(gobj); p->uf.offset = fence_data->offset; } else { ret = -EINVAL; @@ -487,7 +489,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo amdgpu_ib_free(parser->adev, &parser->ibs[i]); kfree(parser->ibs); if (parser->uf.bo) - drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); + amdgpu_bo_unref(&parser->uf.bo); } static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, @@ -776,7 +778,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job) amdgpu_ib_free(job->adev, &job->ibs[i]); kfree(job->ibs); if (job->uf.bo) - drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base); + amdgpu_bo_unref(&job->uf.bo); return 0; } -- cgit v1.2.3 From 91acbeb68ab10c0c0f65f30b5b7fddbde4c97dd2 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 14 Dec 2015 16:42:31 +0100 Subject: drm/amdgpu: fix user fence handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes a random corruption under memory pressure. We need to fence the BO for the user fence as well, otherwise it might be swapped out and the GPU could write the fence value to an undesired location. 
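The invariant being restored, in sketch form (made-up types, not the TTM API): every buffer the GPU may still write for this submission, including the user fence BO, has to be on the validated list so the job's fence gets attached to it and the memory manager keeps it resident until that fence signals.

    #include <stdio.h>

    struct fence  { int seqno; };
    struct buffer { const char *name; const struct fence *busy_until; };

    /* stand-in for ttm_eu_fence_buffer_objects(): mark every listed buffer
     * as busy until the submission's fence signals */
    static void fence_validated(struct buffer *list, unsigned n, const struct fence *f)
    {
            for (unsigned i = 0; i < n; i++)
                    list[i].busy_until = f;
    }

    int main(void)
    {
            struct fence job_fence = { .seqno = 42 };
            struct buffer validated[] = {
                    { "command buffer", NULL },
                    { "user fence bo",  NULL },     /* missing from the list before this fix */
            };

            fence_validated(validated, 2, &job_fence);

            for (unsigned i = 0; i < 2; i++)
                    printf("%s stays resident until seqno %d\n",
                           validated[i].name, validated[i].busy_until->seqno);
            return 0;
    }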
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 63 ++++++++++++++++++++++------------ 2 files changed, 44 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 5a5f04d0902d..048cfe073dae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1264,7 +1264,8 @@ struct amdgpu_cs_parser { struct ww_acquire_ctx ticket; /* user fence */ - struct amdgpu_user_fence uf; + struct amdgpu_user_fence uf; + struct amdgpu_bo_list_entry uf_entry; }; struct amdgpu_job { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 4f352ec9dec4..25a3e2485cc2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -127,6 +127,37 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, return 0; } +static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, + struct drm_amdgpu_cs_chunk_fence *fence_data) +{ + struct drm_gem_object *gobj; + uint32_t handle; + + handle = fence_data->handle; + gobj = drm_gem_object_lookup(p->adev->ddev, p->filp, + fence_data->handle); + if (gobj == NULL) + return -EINVAL; + + p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); + p->uf.offset = fence_data->offset; + + if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) { + drm_gem_object_unreference_unlocked(gobj); + return -EINVAL; + } + + p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo); + p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT; + p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT; + p->uf_entry.priority = 0; + p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; + p->uf_entry.tv.shared = true; + + drm_gem_object_unreference_unlocked(gobj); + return 0; +} + int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) { union drm_amdgpu_cs *cs = data; @@ -207,28 +238,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) case AMDGPU_CHUNK_ID_FENCE: size = sizeof(struct drm_amdgpu_cs_chunk_fence); - if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) { - uint32_t handle; - struct drm_gem_object *gobj; - struct drm_amdgpu_cs_chunk_fence *fence_data; - - fence_data = (void *)p->chunks[i].kdata; - handle = fence_data->handle; - gobj = drm_gem_object_lookup(p->adev->ddev, - p->filp, handle); - if (gobj == NULL) { - ret = -EINVAL; - goto free_partial_kdata; - } - - p->uf.bo = gem_to_amdgpu_bo(gobj); - amdgpu_bo_ref(p->uf.bo); - drm_gem_object_unreference_unlocked(gobj); - p->uf.offset = fence_data->offset; - } else { + if (p->chunks[i].length_dw * sizeof(uint32_t) < size) { ret = -EINVAL; goto free_partial_kdata; } + + ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata); + if (ret) + goto free_partial_kdata; + break; case AMDGPU_CHUNK_ID_DEPENDENCIES: @@ -391,6 +409,9 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm, &p->validated); + if (p->uf.bo) + list_add(&p->uf_entry.tv.head, &p->validated); + if (need_mmap_lock) down_read(¤t->mm->mmap_sem); @@ -488,8 +509,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo for (i = 0; i < parser->num_ibs; i++) amdgpu_ib_free(parser->adev, &parser->ibs[i]); kfree(parser->ibs); - if (parser->uf.bo) - amdgpu_bo_unref(&parser->uf.bo); + 
amdgpu_bo_unref(&parser->uf.bo); + amdgpu_bo_unref(&parser->uf_entry.robj); } static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, -- cgit v1.2.3 From 3c0eea6c35d932c4d25070868067dc9cd9ceab91 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 11 Dec 2015 14:39:05 +0100 Subject: drm/amdgpu: put VM page tables directly into duplicates list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit They share the reservation object with the page directory anyway. Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Chunming Zhou --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 12 +++++++----- 4 files changed, 12 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c3996e0e2e7e..dc3dab539e4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -982,7 +982,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct list_head *head); + struct list_head *validated, + struct list_head *duplicates); int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync); void amdgpu_vm_flush(struct amdgpu_ring *ring, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 1d44d508d4d4..9591c13781bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -386,13 +386,13 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) amdgpu_cs_buckets_get_list(&buckets, &p->validated); } + INIT_LIST_HEAD(&duplicates); p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm, - &p->validated); + &p->validated, &duplicates); if (need_mmap_lock) down_read(¤t->mm->mmap_sem); - INIT_LIST_HEAD(&duplicates); r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates); if (unlikely(r != 0)) goto error_reserve; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index fc32fc01a64b..7fe7f8afa5ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -460,7 +460,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, tv.shared = true; list_add(&tv.head, &list); - vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list); + vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list, &duplicates); if (!vm_bos) return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index fce4c6d952c8..f6c1d6f0bf37 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -78,14 +78,16 @@ static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev) * amdgpu_vm_get_bos - add the vm BOs to a validation list * * @vm: vm providing the BOs - * @head: head of validation list + * @validated: head of validation list + * @duplicates: head of duplicates list * * Add the page directory to the list of BOs to * validate for command submission (cayman+). 
*/ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct list_head *head) + struct amdgpu_vm *vm, + struct list_head *validated, + struct list_head *duplicates) { struct amdgpu_bo_list_entry *list; unsigned i, idx; @@ -103,7 +105,7 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, list[0].priority = 0; list[0].tv.bo = &vm->page_directory->tbo; list[0].tv.shared = true; - list_add(&list[0].tv.head, head); + list_add(&list[0].tv.head, validated); for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { if (!vm->page_tables[i].bo) @@ -115,7 +117,7 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, list[idx].priority = 0; list[idx].tv.bo = &list[idx].robj->tbo; list[idx].tv.shared = true; - list_add(&list[idx++].tv.head, head); + list_add(&list[idx++].tv.head, duplicates); } return list; -- cgit v1.2.3 From 56467ebfb254836dc30eb45d4ac8a46a400bfad6 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 11 Dec 2015 15:16:32 +0100 Subject: drm/amdgpu: split VM PD and PT handling during CS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This way we avoid the extra allocation for the page directory entry. Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Chunming Zhou --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 10 +++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 9 +++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 16 +++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 45 ++++++++++++++++++++------------- 4 files changed, 51 insertions(+), 29 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index dc3dab539e4c..40850afa763f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -980,10 +980,11 @@ struct amdgpu_vm_manager { void amdgpu_vm_manager_fini(struct amdgpu_device *adev); int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); -struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct list_head *validated, - struct list_head *duplicates); +void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, + struct list_head *validated, + struct amdgpu_bo_list_entry *entry); +struct amdgpu_bo_list_entry *amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, + struct list_head *duplicates); int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync); void amdgpu_vm_flush(struct amdgpu_ring *ring, @@ -1253,6 +1254,7 @@ struct amdgpu_cs_parser { unsigned nchunks; struct amdgpu_cs_chunk *chunks; /* relocations */ + struct amdgpu_bo_list_entry vm_pd; struct amdgpu_bo_list_entry *vm_bos; struct list_head validated; struct fence *fence; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 9591c13781bd..3fb21ecd29e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -387,8 +387,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) } INIT_LIST_HEAD(&duplicates); - p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm, - &p->validated, &duplicates); + amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); if (need_mmap_lock) down_read(¤t->mm->mmap_sem); @@ -397,6 +396,12 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) if (unlikely(r != 
0)) goto error_reserve; + p->vm_bos = amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates); + if (!p->vm_bos) { + r = -ENOMEM; + goto error_validate; + } + r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated); if (r) goto error_validate; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7fe7f8afa5ff..ea0fe94e4b54 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -448,6 +448,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, { struct ttm_validate_buffer tv, *entry; struct amdgpu_bo_list_entry *vm_bos; + struct amdgpu_bo_list_entry vm_pd; struct ww_acquire_ctx ticket; struct list_head list, duplicates; unsigned domain; @@ -460,14 +461,18 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, tv.shared = true; list_add(&tv.head, &list); - vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list, &duplicates); - if (!vm_bos) - return; + amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd); /* Provide duplicates to avoid -EALREADY */ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); if (r) - goto error_free; + goto error_print; + + vm_bos = amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates); + if (!vm_bos) { + r = -ENOMEM; + goto error_unreserve; + } list_for_each_entry(entry, &list, head) { domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); @@ -489,10 +494,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, error_unreserve: ttm_eu_backoff_reservation(&ticket, &list); - -error_free: drm_free_large(vm_bos); +error_print: if (r && r != -ERESTARTSYS) DRM_ERROR("Couldn't update BO_VA (%d)\n", r); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f6c1d6f0bf37..592be6438a6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -75,39 +75,50 @@ static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev) } /** - * amdgpu_vm_get_bos - add the vm BOs to a validation list + * amdgpu_vm_get_pd_bo - add the VM PD to a validation list * * @vm: vm providing the BOs * @validated: head of validation list + * @entry: entry to add + * + * Add the page directory to the list of BOs to + * validate for command submission. + */ +void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, + struct list_head *validated, + struct amdgpu_bo_list_entry *entry) +{ + entry->robj = vm->page_directory; + entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; + entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; + entry->priority = 0; + entry->tv.bo = &vm->page_directory->tbo; + entry->tv.shared = true; + list_add(&entry->tv.head, validated); +} + +/** + * amdgpu_vm_get_bos - add the vm BOs to a validation list + * + * @vm: vm providing the BOs * @duplicates: head of duplicates list * * Add the page directory to the list of BOs to * validate for command submission (cayman+). 
*/ -struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct list_head *validated, - struct list_head *duplicates) +struct amdgpu_bo_list_entry *amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, + struct list_head *duplicates) { struct amdgpu_bo_list_entry *list; unsigned i, idx; - list = drm_malloc_ab(vm->max_pde_used + 2, + list = drm_malloc_ab(vm->max_pde_used + 1, sizeof(struct amdgpu_bo_list_entry)); - if (!list) { + if (!list) return NULL; - } /* add the vm page table to the list */ - list[0].robj = vm->page_directory; - list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; - list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; - list[0].priority = 0; - list[0].tv.bo = &vm->page_directory->tbo; - list[0].tv.shared = true; - list_add(&list[0].tv.head, validated); - - for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { + for (i = 0, idx = 0; i <= vm->max_pde_used; i++) { if (!vm->page_tables[i].bo) continue; -- cgit v1.2.3 From ee1782c3f27fec5462363af48f27811b049155ab Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 11 Dec 2015 21:01:23 +0100 Subject: drm/amdgpu: keep the PTs validation list in the VM v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This avoids allocating it on the fly. v2: fix grammar in comment Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Chunming Zhou --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 8 ++---- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 7 +---- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 9 +----- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 50 +++++++++++++++------------------ 4 files changed, 27 insertions(+), 47 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 40850afa763f..d4e9272b60e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -917,8 +917,8 @@ struct amdgpu_ring { #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 struct amdgpu_vm_pt { - struct amdgpu_bo *bo; - uint64_t addr; + struct amdgpu_bo_list_entry entry; + uint64_t addr; }; struct amdgpu_vm_id { @@ -983,8 +983,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry); -struct amdgpu_bo_list_entry *amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, - struct list_head *duplicates); +void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates); int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync); void amdgpu_vm_flush(struct amdgpu_ring *ring, @@ -1255,7 +1254,6 @@ struct amdgpu_cs_parser { struct amdgpu_cs_chunk *chunks; /* relocations */ struct amdgpu_bo_list_entry vm_pd; - struct amdgpu_bo_list_entry *vm_bos; struct list_head validated; struct fence *fence; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 3fb21ecd29e0..6ce595ff1aff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -396,11 +396,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) if (unlikely(r != 0)) goto error_reserve; - p->vm_bos = amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates); - if (!p->vm_bos) { - r = -ENOMEM; - goto error_validate; - } + amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates); r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated); if (r) @@ -483,7 +479,6 @@ static 
void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo if (parser->bo_list) amdgpu_bo_list_put(parser->bo_list); - drm_free_large(parser->vm_bos); for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); kfree(parser->chunks); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index ea0fe94e4b54..8c5687e4a6d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -447,7 +447,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, uint32_t operation) { struct ttm_validate_buffer tv, *entry; - struct amdgpu_bo_list_entry *vm_bos; struct amdgpu_bo_list_entry vm_pd; struct ww_acquire_ctx ticket; struct list_head list, duplicates; @@ -468,12 +467,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, if (r) goto error_print; - vm_bos = amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates); - if (!vm_bos) { - r = -ENOMEM; - goto error_unreserve; - } - + amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates); list_for_each_entry(entry, &list, head) { domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); /* if anything is swapped out don't swap it in here, @@ -494,7 +488,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, error_unreserve: ttm_eu_backoff_reservation(&ticket, &list); - drm_free_large(vm_bos); error_print: if (r && r != -ERESTARTSYS) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 592be6438a6c..e0fa9d9ff5c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -98,40 +98,27 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, } /** - * amdgpu_vm_get_bos - add the vm BOs to a validation list + * amdgpu_vm_get_bos - add the vm BOs to a duplicates list * * @vm: vm providing the BOs * @duplicates: head of duplicates list * - * Add the page directory to the list of BOs to - * validate for command submission (cayman+). + * Add the page directory to the BO duplicates list + * for command submission. 
*/ -struct amdgpu_bo_list_entry *amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, - struct list_head *duplicates) +void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates) { - struct amdgpu_bo_list_entry *list; - unsigned i, idx; - - list = drm_malloc_ab(vm->max_pde_used + 1, - sizeof(struct amdgpu_bo_list_entry)); - if (!list) - return NULL; + unsigned i; /* add the vm page table to the list */ - for (i = 0, idx = 0; i <= vm->max_pde_used; i++) { - if (!vm->page_tables[i].bo) + for (i = 0; i <= vm->max_pde_used; ++i) { + struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; + + if (!entry->robj) continue; - list[idx].robj = vm->page_tables[i].bo; - list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; - list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; - list[idx].priority = 0; - list[idx].tv.bo = &list[idx].robj->tbo; - list[idx].tv.shared = true; - list_add(&list[idx++].tv.head, duplicates); + list_add(&entry->tv.head, duplicates); } - - return list; } /** @@ -474,7 +461,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, /* walk over the address space and update the page directory */ for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { - struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo; + struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj; uint64_t pde, pt; if (bo == NULL) @@ -651,7 +638,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, /* walk over the address space and update the page tables */ for (addr = start; addr < end; ) { uint64_t pt_idx = addr >> amdgpu_vm_block_size; - struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo; + struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj; unsigned nptes; uint64_t pte; int r; @@ -1083,9 +1070,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, /* walk over the address space and allocate the page tables */ for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { struct reservation_object *resv = vm->page_directory->tbo.resv; + struct amdgpu_bo_list_entry *entry; struct amdgpu_bo *pt; - if (vm->page_tables[pt_idx].bo) + entry = &vm->page_tables[pt_idx].entry; + if (entry->robj) continue; r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, @@ -1102,8 +1091,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, goto error_free; } + entry->robj = pt; + entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; + entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; + entry->priority = 0; + entry->tv.bo = &entry->robj->tbo; + entry->tv.shared = true; vm->page_tables[pt_idx].addr = 0; - vm->page_tables[pt_idx].bo = pt; } return 0; @@ -1334,7 +1328,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) } for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) - amdgpu_bo_unref(&vm->page_tables[i].bo); + amdgpu_bo_unref(&vm->page_tables[i].entry.robj); kfree(vm->page_tables); amdgpu_bo_unref(&vm->page_directory); -- cgit v1.2.3 From eceb8a1562db6fc3e1c21cd54a4be0189aa1d0e3 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 11 Jan 2016 15:35:21 +0100 Subject: drm/amdgpu: move VM page tables to the LRU end on CS v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes it less likely to run into an ENOMEM because VM page tables are evicted last. 
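A toy model of the effect (plain array standing in for the TTM LRU, made-up contents): eviction scans from the head, so whatever is moved to the tail right after command submission is the last thing to be kicked out.

    #include <stdio.h>
    #include <string.h>

    #define N 4

    /* index 0 = LRU head (evicted first), index N-1 = tail (evicted last) */
    static const char *lru[N] = { "app bo A", "page table", "app bo B", "app bo C" };

    static void move_to_tail(unsigned idx)
    {
            const char *e = lru[idx];

            memmove(&lru[idx], &lru[idx + 1], (N - idx - 1) * sizeof(*lru));
            lru[N - 1] = e;
    }

    int main(void)
    {
            move_to_tail(1);        /* what the CS path now does for the PT BOs */

            printf("eviction order under memory pressure:\n");
            for (unsigned i = 0; i < N; i++)
                    printf("  %u: %s\n", i + 1, lru[i]);
            /* the page table now goes out last, so it rarely has to be
             * re-validated and the ENOMEM window shrinks */
            return 0;
    }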
v2: move the BOs in the LRU tail after validation Signed-off-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 7 ++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 27 +++++++++++++++++++++++++++ 3 files changed, 35 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 003959f99251..313b0cc8d676 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -987,6 +987,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry); void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates); +void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, + struct amdgpu_vm *vm); int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync); void amdgpu_vm_flush(struct amdgpu_ring *ring, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index ce0254d4dcd7..1fffc338f69d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -428,8 +428,10 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates); error_validate: - if (r) + if (r) { + amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm); ttm_eu_backoff_reservation(&p->ticket, &p->validated); + } error_reserve: if (need_mmap_lock) @@ -473,8 +475,11 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a, **/ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) { + struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; unsigned i; + amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm); + if (!error) { /* Sort the buffer list from the smallest to largest buffer, * which affects the order of buffers in the LRU list. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8f7688e598a1..aefc668e6b5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -119,6 +119,33 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates) list_add(&entry->tv.head, duplicates); } + +} + +/** + * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail + * + * @adev: amdgpu device instance + * @vm: vm providing the BOs + * + * Move the PT BOs to the tail of the LRU. + */ +void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, + struct amdgpu_vm *vm) +{ + struct ttm_bo_global *glob = adev->mman.bdev.glob; + unsigned i; + + spin_lock(&glob->lru_lock); + for (i = 0; i <= vm->max_pde_used; ++i) { + struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; + + if (!entry->robj) + continue; + + ttm_bo_move_to_lru_tail(&entry->robj->tbo); + } + spin_unlock(&glob->lru_lock); } /** -- cgit v1.2.3 From d8e0cae645504a787b05abfb91afee8cd7fa1701 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 11 Jan 2016 15:35:22 +0100 Subject: drm/amdgpu: validate duplicates first MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Most VM BOs end up in the duplicates list, validate it first make -ENOMEM less likely. 
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 1fffc338f69d..6f89f8e034d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -421,11 +421,11 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates); - r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated); + r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates); if (r) goto error_validate; - r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates); + r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated); error_validate: if (r) { -- cgit v1.2.3