Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 95
1 file changed, 73 insertions, 22 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index c48b4fce5e57..5c8d3022fb87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -109,6 +109,29 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
 }
 
 /**
+ * amdgpu_sync_add_later - add the fence to the hash
+ *
+ * @sync: sync object to add the fence to
+ * @f: fence to add
+ *
+ * Tries to add the fence to an existing hash entry. Returns true when an entry
+ * was found, false otherwise.
+ */
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
+{
+        struct amdgpu_sync_entry *e;
+
+        hash_for_each_possible(sync->fences, e, node, f->context) {
+                if (unlikely(e->fence->context != f->context))
+                        continue;
+
+                amdgpu_sync_keep_later(&e->fence, f);
+                return true;
+        }
+        return false;
+}
+
+/**
  * amdgpu_sync_fence - remember to sync to this fence
  *
  * @sync: sync object to add fence to
@@ -127,13 +150,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
             amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
                 amdgpu_sync_keep_later(&sync->last_vm_update, f);
 
-        hash_for_each_possible(sync->fences, e, node, f->context) {
-                if (unlikely(e->fence->context != f->context))
-                        continue;
-
-                amdgpu_sync_keep_later(&e->fence, f);
+        if (amdgpu_sync_add_later(sync, f))
                 return 0;
-        }
 
         e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
         if (!e)
@@ -204,45 +222,78 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
         return r;
 }
 
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+/**
+ * amdgpu_sync_peek_fence - get the next fence not signaled yet
+ *
+ * @sync: the sync object
+ * @ring: optional ring to use for test
+ *
+ * Returns the next fence not signaled yet without removing it from the sync
+ * object.
+ */
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+                                     struct amdgpu_ring *ring)
 {
         struct amdgpu_sync_entry *e;
         struct hlist_node *tmp;
-        struct fence *f;
         int i;
 
         hash_for_each_safe(sync->fences, i, tmp, e, node) {
+                struct fence *f = e->fence;
+                struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
-                f = e->fence;
+                if (ring && s_fence) {
+                        /* For fences from the same ring it is sufficient
+                         * when they are scheduled.
+                         */
+                        if (s_fence->sched == &ring->sched) {
+                                if (fence_is_signaled(&s_fence->scheduled))
+                                        continue;
 
-                hash_del(&e->node);
-                kmem_cache_free(amdgpu_sync_slab, e);
+                                return &s_fence->scheduled;
+                        }
+                }
 
-                if (!fence_is_signaled(f))
-                        return f;
+                if (fence_is_signaled(f)) {
+                        hash_del(&e->node);
+                        fence_put(f);
+                        kmem_cache_free(amdgpu_sync_slab, e);
+                        continue;
+                }
 
-                fence_put(f);
+                return f;
         }
+
         return NULL;
 }
 
-int amdgpu_sync_wait(struct amdgpu_sync *sync)
+/**
+ * amdgpu_sync_get_fence - get the next fence from the sync object
+ *
+ * @sync: sync object to use
+ *
+ * Get and removes the next fence from the sync object not signaled yet.
+ */
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 {
         struct amdgpu_sync_entry *e;
         struct hlist_node *tmp;
-        int i, r;
+        struct fence *f;
+        int i;
 
         hash_for_each_safe(sync->fences, i, tmp, e, node) {
-                r = fence_wait(e->fence, false);
-                if (r)
-                        return r;
+
+                f = e->fence;
 
                 hash_del(&e->node);
-                fence_put(e->fence);
                 kmem_cache_free(amdgpu_sync_slab, e);
-        }
 
-        return 0;
+                if (!fence_is_signaled(f))
+                        return f;
+
+                fence_put(f);
+        }
+        return NULL;
 }
 
 /**
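
As a reading aid (not part of the patch): the rework splits dependency handling into a non-destructive amdgpu_sync_peek_fence(), which leaves the fence in the sync object, and amdgpu_sync_get_fence(), which removes the entry and hands its fence reference to the caller. Below is a minimal sketch of how callers might use the pair; the helper names example_sync_is_idle() and example_sync_wait_all() are hypothetical, and only the functions visible in this patch plus the generic fence_wait()/fence_put() helpers are assumed.

/* Hypothetical caller, illustration only: a sync object is "idle" with
 * respect to @ring when peeking finds no unsignaled fence (fences from
 * @ring itself only need to be scheduled, see amdgpu_sync_peek_fence()).
 */
static bool example_sync_is_idle(struct amdgpu_sync *sync,
                                 struct amdgpu_ring *ring)
{
        return !amdgpu_sync_peek_fence(sync, ring);
}

/* Hypothetical caller, illustration only: rebuild the behaviour of the
 * removed amdgpu_sync_wait() on top of amdgpu_sync_get_fence(). The
 * returned fence reference is owned by the caller and must be dropped.
 */
static int example_sync_wait_all(struct amdgpu_sync *sync)
{
        struct fence *f;
        int r;

        while ((f = amdgpu_sync_get_fence(sync))) {
                r = fence_wait(f, false);
                fence_put(f);
                if (r)
                        return r;
        }
        return 0;
}

Splitting peek from get lets a scheduler poll its dependencies repeatedly without consuming them, while callers that previously relied on amdgpu_sync_wait() can reconstruct the same waiting loop from amdgpu_sync_get_fence(), as sketched above.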