Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 149
1 file changed, 89 insertions, 60 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 38f739fb727b..bd67f4cb8e6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -50,8 +50,10 @@ struct amdgpu_mn {
 	struct hlist_node	node;
 
 	/* objects protected by lock */
-	struct mutex		lock;
-	struct rb_root		objects;
+	struct rw_semaphore	lock;
+	struct rb_root_cached	objects;
+	struct mutex		read_lock;
+	atomic_t		recursion;
 };
 
 struct amdgpu_mn_node {
@@ -74,17 +76,17 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 	struct amdgpu_bo *bo, *next_bo;
 
 	mutex_lock(&adev->mn_lock);
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->lock);
 	hash_del(&rmn->node);
-	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
-					     it.rb) {
+	rbtree_postorder_for_each_entry_safe(node, next_node,
+					     &rmn->objects.rb_root, it.rb) {
 		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
 			bo->mn = NULL;
 			list_del_init(&bo->mn_list);
 		}
 		kfree(node);
 	}
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
 	kfree(rmn);
@@ -106,6 +108,53 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
 	schedule_work(&rmn->work);
 }
 
+
+/**
+ * amdgpu_mn_lock - take the write side lock for this mn
+ */
+void amdgpu_mn_lock(struct amdgpu_mn *mn)
+{
+	if (mn)
+		down_write(&mn->lock);
+}
+
+/**
+ * amdgpu_mn_unlock - drop the write side lock for this mn
+ */
+void amdgpu_mn_unlock(struct amdgpu_mn *mn)
+{
+	if (mn)
+		up_write(&mn->lock);
+}
+
+/**
+ * amdgpu_mn_read_lock - take the rmn read lock
+ *
+ * @rmn: our notifier
+ *
+ * Take the rmn read side lock.
+ */
+static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
+{
+	mutex_lock(&rmn->read_lock);
+	if (atomic_inc_return(&rmn->recursion) == 1)
+		down_read_non_owner(&rmn->lock);
+	mutex_unlock(&rmn->read_lock);
+}
+
+/**
+ * amdgpu_mn_read_unlock - drop the rmn read lock
+ *
+ * @rmn: our notifier
+ *
+ * Drop the rmn read side lock.
+ */
+static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
+{
+	if (atomic_dec_return(&rmn->recursion) == 0)
+		up_read_non_owner(&rmn->lock);
+}
+
 /**
  * amdgpu_mn_invalidate_node - unmap all BOs of a node
  *
@@ -126,54 +175,13 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
 		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
 			continue;
 
-		r = amdgpu_bo_reserve(bo, true);
-		if (r) {
-			DRM_ERROR("(%ld) failed to reserve user bo\n", r);
-			continue;
-		}
-
 		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
 			true, false, MAX_SCHEDULE_TIMEOUT);
 		if (r <= 0)
 			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
 
-		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-		if (r)
-			DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
-		amdgpu_bo_unreserve(bo);
-	}
-}
-
-/**
- * amdgpu_mn_invalidate_page - callback to notify about mm change
- *
- * @mn: our notifier
- * @mn: the mm this callback is about
- * @address: address of invalidate page
- *
- * Invalidation of a single page. Blocks for all BOs mapping it
- * and unmap them by move them into system domain again.
- */
-static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
-				      struct mm_struct *mm,
-				      unsigned long address)
-{
-	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
-	struct interval_tree_node *it;
-
-	mutex_lock(&rmn->lock);
-
-	it = interval_tree_iter_first(&rmn->objects, address, address);
-	if (it) {
-		struct amdgpu_mn_node *node;
-
-		node = container_of(it, struct amdgpu_mn_node, it);
-		amdgpu_mn_invalidate_node(node, address, address);
+		amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);
 	}
-
-	mutex_unlock(&rmn->lock);
 }
 
 /**
@@ -198,7 +206,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	mutex_lock(&rmn->lock);
+	amdgpu_mn_read_lock(rmn);
 
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
@@ -209,14 +217,32 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 
 		amdgpu_mn_invalidate_node(node, start, end);
 	}
+}
 
-	mutex_unlock(&rmn->lock);
+/**
+ * amdgpu_mn_invalidate_range_end - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mn: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ *
+ * Release the lock again to allow new command submissions.
+ */
+static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
+					   struct mm_struct *mm,
+					   unsigned long start,
+					   unsigned long end)
+{
+	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+
+	amdgpu_mn_read_unlock(rmn);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
 	.release = amdgpu_mn_release,
-	.invalidate_page = amdgpu_mn_invalidate_page,
 	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
+	.invalidate_range_end = amdgpu_mn_invalidate_range_end,
 };
 
 /**
@@ -226,7 +252,7 @@ static const struct mmu_notifier_ops amdgpu_mn_ops = {
  *
  * Creates a notifier context for current->mm.
  */
-static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
+struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 {
 	struct mm_struct *mm = current->mm;
 	struct amdgpu_mn *rmn;
@@ -251,8 +277,10 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	rmn->adev = adev;
 	rmn->mm = mm;
 	rmn->mn.ops = &amdgpu_mn_ops;
-	mutex_init(&rmn->lock);
-	rmn->objects = RB_ROOT;
+	init_rwsem(&rmn->lock);
+	rmn->objects = RB_ROOT_CACHED;
+	mutex_init(&rmn->read_lock);
+	atomic_set(&rmn->recursion, 0);
 
 	r = __mmu_notifier_register(&rmn->mn, mm);
 	if (r)
@@ -298,7 +326,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	INIT_LIST_HEAD(&bos);
 
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->lock);
 
 	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
 		kfree(node);
@@ -312,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	if (!node) {
 		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
 		if (!node) {
-			mutex_unlock(&rmn->lock);
+			up_write(&rmn->lock);
 			return -ENOMEM;
 		}
 	}
@@ -327,7 +355,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	interval_tree_insert(&node->it, &rmn->objects);
 
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->lock);
 
 	return 0;
 }
@@ -353,13 +381,13 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		return;
 	}
 
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->lock);
 
 	/* save the next list entry for later */
 	head = bo->mn_list.next;
 
 	bo->mn = NULL;
-	list_del(&bo->mn_list);
+	list_del_init(&bo->mn_list);
 
 	if (list_empty(head)) {
 		struct amdgpu_mn_node *node;
@@ -368,6 +396,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		kfree(node);
 	}
 
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 }
+
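
The interesting part of this patch is the read-side locking it introduces: invalidate_range_start() takes rmn->lock for reading via amdgpu_mn_read_lock(), invalidate_range_end() drops it via amdgpu_mn_read_unlock(), and because notifier callbacks can nest, a recursion counter ensures the rw_semaphore is acquired only on the first start and released only on the last end (the _non_owner variants are used because the release does not happen in the usual owner-tracked lock/unlock pairing). Below is a minimal userspace analogue of that pattern built on pthreads; the notifier_lock type and function names are purely illustrative and are not part of this patch or of amdgpu.

/* Build with: cc -pthread sketch.c
 * Userspace sketch of the recursion pattern from this patch: only the
 * first nested "start" takes the shared lock, only the last nested
 * "end" releases it, so overlapping invalidations never deadlock on
 * the write side. Illustrative names only; not amdgpu code. */
#include <pthread.h>
#include <stdio.h>

struct notifier_lock {
	pthread_rwlock_t lock;      /* plays the role of the rw_semaphore */
	pthread_mutex_t read_lock;  /* serializes the recursion check */
	int recursion;              /* atomic_t in the kernel patch */
};

static void notifier_read_lock(struct notifier_lock *n)
{
	pthread_mutex_lock(&n->read_lock);
	if (++n->recursion == 1)            /* first nested start: take it */
		pthread_rwlock_rdlock(&n->lock);
	pthread_mutex_unlock(&n->read_lock);
}

static void notifier_read_unlock(struct notifier_lock *n)
{
	pthread_mutex_lock(&n->read_lock);
	if (--n->recursion == 0)            /* last nested end: drop it */
		pthread_rwlock_unlock(&n->lock);
	pthread_mutex_unlock(&n->read_lock);
}

int main(void)
{
	static struct notifier_lock n = {
		.lock = PTHREAD_RWLOCK_INITIALIZER,
		.read_lock = PTHREAD_MUTEX_INITIALIZER,
		.recursion = 0,
	};

	notifier_read_lock(&n);   /* outer invalidate_range_start */
	notifier_read_lock(&n);   /* nested start: counter only */
	notifier_read_unlock(&n); /* nested end */
	notifier_read_unlock(&n); /* outer end: shared lock released here */
	printf("recursion back to %d\n", n.recursion);
	return 0;
}

One difference from the patch itself: amdgpu_mn_read_unlock() skips the mutex because atomic_dec_return() already provides the required ordering; the sketch takes the mutex on both paths since a plain int gives no such guarantee.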