Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo.c | 238
1 file changed, 114 insertions(+), 124 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index fc6217dfe401..17478f38dea3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -140,15 +140,15 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	struct ttm_bo_device *bdev = bo->bdev;
 	size_t acc_size = bo->acc_size;
 
-	BUG_ON(atomic_read(&bo->list_kref.refcount));
-	BUG_ON(atomic_read(&bo->kref.refcount));
+	BUG_ON(kref_read(&bo->list_kref));
+	BUG_ON(kref_read(&bo->kref));
 	BUG_ON(atomic_read(&bo->cpu_writers));
 	BUG_ON(bo->mem.mm_node != NULL);
 	BUG_ON(!list_empty(&bo->lru));
 	BUG_ON(!list_empty(&bo->ddestroy));
 	ttm_tt_destroy(bo->ttm);
 	atomic_dec(&bo->glob->bo_count);
-	fence_put(bo->moving);
+	dma_fence_put(bo->moving);
 	if (bo->resv == &bo->ttm_resv)
 		reservation_object_fini(&bo->ttm_resv);
 	mutex_destroy(&bo->wu_mutex);
@@ -163,6 +163,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 
@@ -170,88 +171,58 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 
 		BUG_ON(!list_empty(&bo->lru));
 
-		list_add(&bo->lru, bdev->driver->lru_tail(bo));
+		man = &bdev->man[bo->mem.mem_type];
+		list_add_tail(&bo->lru, &man->lru[bo->priority]);
 		kref_get(&bo->list_kref);
 
 		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
-			list_add(&bo->swap, bdev->driver->swap_lru_tail(bo));
+			list_add_tail(&bo->swap,
+				      &bo->glob->swap_lru[bo->priority]);
 			kref_get(&bo->list_kref);
 		}
 	}
 }
 EXPORT_SYMBOL(ttm_bo_add_to_lru);
 
-int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+static void ttm_bo_ref_bug(struct kref *list_kref)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
-	int put_count = 0;
-
-	if (bdev->driver->lru_removal)
-		bdev->driver->lru_removal(bo);
+	BUG();
+}
 
+void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+{
 	if (!list_empty(&bo->swap)) {
 		list_del_init(&bo->swap);
-		++put_count;
+		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 	}
 	if (!list_empty(&bo->lru)) {
 		list_del_init(&bo->lru);
-		++put_count;
+		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 	}
 
-	return put_count;
-}
-
-static void ttm_bo_ref_bug(struct kref *list_kref)
-{
-	BUG();
-}
-
-void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
-			 bool never_free)
-{
-	kref_sub(&bo->list_kref, count,
-		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
+	/*
+	 * TODO: Add a driver hook to delete from
+	 * driver-specific LRU's here.
+	 */
 }
 
 void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
 {
-	int put_count;
-
 	spin_lock(&bo->glob->lru_lock);
-	put_count = ttm_bo_del_from_lru(bo);
+	ttm_bo_del_from_lru(bo);
 	spin_unlock(&bo->glob->lru_lock);
-	ttm_bo_list_ref_sub(bo, put_count, true);
 }
 EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
 
 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
-	int put_count = 0;
-
 	lockdep_assert_held(&bo->resv->lock.base);
 
-	if (bdev->driver->lru_removal)
-		bdev->driver->lru_removal(bo);
-
-	put_count = ttm_bo_del_from_lru(bo);
-	ttm_bo_list_ref_sub(bo, put_count, true);
+	ttm_bo_del_from_lru(bo);
 	ttm_bo_add_to_lru(bo);
 }
 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 
-struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo)
-{
-	return bo->bdev->man[bo->mem.mem_type].lru.prev;
-}
-EXPORT_SYMBOL(ttm_bo_default_lru_tail);
-
-struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo)
-{
-	return bo->glob->swap_lru.prev;
-}
-EXPORT_SYMBOL(ttm_bo_default_swap_lru_tail);
-
 /*
  * Call bo->mutex locked.
  */
@@ -342,7 +313,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	if (bo->mem.mem_type == TTM_PL_SYSTEM) {
 		if (bdev->driver->move_notify)
-			bdev->driver->move_notify(bo, mem);
+			bdev->driver->move_notify(bo, evict, mem);
 		bo->mem = *mem;
 		mem->mm_node = NULL;
 		goto moved;
@@ -350,7 +321,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	}
 
 	if (bdev->driver->move_notify)
-		bdev->driver->move_notify(bo, mem);
+		bdev->driver->move_notify(bo, evict, mem);
 
 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
@@ -366,7 +337,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 		struct ttm_mem_reg tmp_mem = *mem;
 		*mem = bo->mem;
 		bo->mem = tmp_mem;
-		bdev->driver->move_notify(bo, mem);
+		bdev->driver->move_notify(bo, false, mem);
 		bo->mem = *mem;
 		*mem = tmp_mem;
 	}
@@ -414,7 +385,7 @@ out_err:
 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 {
 	if (bo->bdev->driver->move_notify)
-		bo->bdev->driver->move_notify(bo, NULL);
+		bo->bdev->driver->move_notify(bo, false, NULL);
 
 	ttm_tt_destroy(bo->ttm);
 	bo->ttm = NULL;
@@ -426,20 +397,20 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 {
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int i;
 
 	fobj = reservation_object_get_list(bo->resv);
 	fence = reservation_object_get_excl(bo->resv);
 	if (fence && !fence->ops->signaled)
-		fence_enable_sw_signaling(fence);
+		dma_fence_enable_sw_signaling(fence);
 
 	for (i = 0; fobj && i < fobj->shared_count; ++i) {
 		fence = rcu_dereference_protected(fobj->shared[i],
 					reservation_object_held(bo->resv));
 
 		if (!fence->ops->signaled)
-			fence_enable_sw_signaling(fence);
+			dma_fence_enable_sw_signaling(fence);
 	}
 }
 
@@ -447,7 +418,6 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
-	int put_count;
 	int ret;
 
 	spin_lock(&glob->lru_lock);
@@ -455,13 +425,10 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 
 	if (!ret) {
 		if (!ttm_bo_wait(bo, false, true)) {
-			put_count = ttm_bo_del_from_lru(bo);
-
+			ttm_bo_del_from_lru(bo);
 			spin_unlock(&glob->lru_lock);
 			ttm_bo_cleanup_memtype_use(bo);
 
-			ttm_bo_list_ref_sub(bo, put_count, true);
-
 			return;
 		} else
 			ttm_bo_flush_all_fences(bo);
@@ -504,7 +471,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 				       bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bo->glob;
-	int put_count;
 	int ret;
 
 	ret = ttm_bo_wait(bo, false, true);
@@ -554,15 +520,13 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		return ret;
 	}
 
-	put_count = ttm_bo_del_from_lru(bo);
+	ttm_bo_del_from_lru(bo);
 	list_del_init(&bo->ddestroy);
-	++put_count;
+	kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
 	spin_unlock(&glob->lru_lock);
 	ttm_bo_cleanup_memtype_use(bo);
 
-	ttm_bo_list_ref_sub(bo, put_count, true);
-
 	return 0;
 }
 
@@ -717,6 +681,20 @@ out:
 	return ret;
 }
 
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+			      const struct ttm_place *place)
+{
+	/* Don't evict this BO if it's outside of the
+	 * requested placement range
+	 */
+	if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
+	    (place->lpfn && place->lpfn <= bo->mem.start))
+		return false;
+
+	return true;
+}
+EXPORT_SYMBOL(ttm_bo_eviction_valuable);
+
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 				uint32_t mem_type,
 				const struct ttm_place *place,
@@ -726,26 +704,28 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_buffer_object *bo;
-	int ret = -EBUSY, put_count;
+	int ret = -EBUSY;
+	unsigned i;
 
 	spin_lock(&glob->lru_lock);
-	list_for_each_entry(bo, &man->lru, lru) {
-		ret = __ttm_bo_reserve(bo, false, true, NULL);
-		if (!ret) {
-			if (place && (place->fpfn || place->lpfn)) {
-				/* Don't evict this BO if it's outside of the
-				 * requested placement range
-				 */
-				if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
-				    (place->lpfn && place->lpfn <= bo->mem.start)) {
-					__ttm_bo_unreserve(bo);
-					ret = -EBUSY;
-					continue;
-				}
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		list_for_each_entry(bo, &man->lru[i], lru) {
+			ret = __ttm_bo_reserve(bo, false, true, NULL);
+			if (ret)
+				continue;
+
+			if (place && !bdev->driver->eviction_valuable(bo,
+								      place)) {
+				__ttm_bo_unreserve(bo);
+				ret = -EBUSY;
+				continue;
 			}
 
 			break;
 		}
+
+		if (!ret)
+			break;
 	}
 
 	if (ret) {
@@ -762,13 +742,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 		return ret;
 	}
 
-	put_count = ttm_bo_del_from_lru(bo);
+	ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
 	BUG_ON(ret != 0);
 
-	ttm_bo_list_ref_sub(bo, put_count, true);
-
 	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
 	ttm_bo_unreserve(bo);
 
@@ -792,11 +770,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 				 struct ttm_mem_type_manager *man,
 				 struct ttm_mem_reg *mem)
 {
-	struct fence *fence;
+	struct dma_fence *fence;
 	int ret;
 
 	spin_lock(&man->move_lock);
-	fence = fence_get(man->move);
+	fence = dma_fence_get(man->move);
 	spin_unlock(&man->move_lock);
 
 	if (fence) {
@@ -806,7 +784,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 		if (unlikely(ret))
 			return ret;
 
-		fence_put(bo->moving);
+		dma_fence_put(bo->moving);
 		bo->moving = fence;
 	}
 
@@ -1188,6 +1166,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	}
 	atomic_inc(&bo->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
+	bo->priority = 0;
 
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1282,46 +1261,39 @@ EXPORT_SYMBOL(ttm_bo_create);
 
 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
-				   unsigned mem_type, bool allow_errors)
+				   unsigned mem_type)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_bo_global *glob = bdev->glob;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int ret;
+	unsigned i;
 
 	/*
 	 * Can't use standard list traversal since we're unlocking.
 	 */
 
 	spin_lock(&glob->lru_lock);
-	while (!list_empty(&man->lru)) {
-		spin_unlock(&glob->lru_lock);
-		ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
-		if (ret) {
-			if (allow_errors) {
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		while (!list_empty(&man->lru[i])) {
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
+			if (ret)
 				return ret;
-			} else {
-				pr_err("Cleanup eviction failed\n");
-			}
+			spin_lock(&glob->lru_lock);
 		}
-		spin_lock(&glob->lru_lock);
 	}
 	spin_unlock(&glob->lru_lock);
 
 	spin_lock(&man->move_lock);
-	fence = fence_get(man->move);
+	fence = dma_fence_get(man->move);
 	spin_unlock(&man->move_lock);
 
 	if (fence) {
-		ret = fence_wait(fence, false);
-		fence_put(fence);
-		if (ret) {
-			if (allow_errors) {
-				return ret;
-			} else {
-				pr_err("Cleanup eviction failed\n");
-			}
-		}
+		ret = dma_fence_wait(fence, false);
+		dma_fence_put(fence);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
@@ -1343,14 +1315,18 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 		       mem_type);
 		return ret;
 	}
-	fence_put(man->move);
+	dma_fence_put(man->move);
 
 	man->use_type = false;
 	man->has_type = false;
 
 	ret = 0;
 	if (mem_type > 0) {
-		ttm_bo_force_list_clean(bdev, mem_type, false);
+		ret = ttm_bo_force_list_clean(bdev, mem_type);
+		if (ret) {
+			pr_err("Cleanup eviction failed\n");
+			return ret;
+		}
 
 		ret = (*man->func->takedown)(man);
 	}
@@ -1373,7 +1349,7 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 		return 0;
 	}
 
-	return ttm_bo_force_list_clean(bdev, mem_type, true);
+	return ttm_bo_force_list_clean(bdev, mem_type);
 }
 EXPORT_SYMBOL(ttm_bo_evict_mm);
 
@@ -1382,6 +1358,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 {
 	int ret = -EINVAL;
 	struct ttm_mem_type_manager *man;
+	unsigned i;
 
 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
 	man = &bdev->man[type];
@@ -1407,7 +1384,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	man->use_type = true;
 	man->size = p_size;
 
-	INIT_LIST_HEAD(&man->lru);
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+		INIT_LIST_HEAD(&man->lru[i]);
 	man->move = NULL;
 
 	return 0;
@@ -1439,6 +1417,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
 		container_of(ref, struct ttm_bo_global_ref, ref);
 	struct ttm_bo_global *glob = ref->object;
 	int ret;
+	unsigned i;
 
 	mutex_init(&glob->device_list_mutex);
 	spin_lock_init(&glob->lru_lock);
@@ -1450,7 +1429,8 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
 		goto out_no_drp;
 	}
 
-	INIT_LIST_HEAD(&glob->swap_lru);
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+		INIT_LIST_HEAD(&glob->swap_lru[i]);
 	INIT_LIST_HEAD(&glob->device_list);
 
 	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
@@ -1509,8 +1489,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 	if (list_empty(&bdev->ddestroy))
 		TTM_DEBUG("Delayed destroy list was clean\n");
 
-	if (list_empty(&bdev->man[0].lru))
-		TTM_DEBUG("Swap list was clean\n");
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+		if (list_empty(&bdev->man[0].lru[0]))
+			TTM_DEBUG("Swap list %d was clean\n", i);
 	spin_unlock(&glob->lru_lock);
 
 	drm_vma_offset_manager_destroy(&bdev->vma_manager);
@@ -1602,7 +1583,14 @@ EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 int ttm_bo_wait(struct ttm_buffer_object *bo,
 		bool interruptible, bool no_wait)
 {
-	long timeout = no_wait ? 0 : 15 * HZ;
+	long timeout = 15 * HZ;
+
+	if (no_wait) {
+		if (reservation_object_test_signaled_rcu(bo->resv, true))
+			return 0;
+		else
+			return -EBUSY;
+	}
 
 	timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
 						      interruptible, timeout);
@@ -1653,12 +1641,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	    container_of(shrink, struct ttm_bo_global, shrink);
 	struct ttm_buffer_object *bo;
 	int ret = -EBUSY;
-	int put_count;
-	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
+	unsigned i;
 
 	spin_lock(&glob->lru_lock);
-	list_for_each_entry(bo, &glob->swap_lru, swap) {
-		ret = __ttm_bo_reserve(bo, false, true, NULL);
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
+			ret = __ttm_bo_reserve(bo, false, true, NULL);
+			if (!ret)
+				break;
+		}
 		if (!ret)
 			break;
 	}
@@ -1676,16 +1667,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 		return ret;
 	}
 
-	put_count = ttm_bo_del_from_lru(bo);
+	ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
-	ttm_bo_list_ref_sub(bo, put_count, true);
-
 	/**
 	 * Move to system cached
 	 */
 
-	if ((bo->mem.placement & swap_placement) != swap_placement) {
+	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
+	    bo->ttm->caching_state != tt_cached) {
 		struct ttm_mem_reg evict_mem;
 
 		evict_mem = bo->mem;
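
The new ttm_bo_eviction_valuable() helper above takes over the fpfn/lpfn range check that ttm_mem_evict_first() used to perform inline, and ttm_mem_evict_first() now asks the driver through bdev->driver->eviction_valuable() whether evicting a candidate BO would actually help satisfy the requested placement. The sketch below is a self-contained userspace illustration of that overlap test only; struct place and struct mem are hypothetical, simplified stand-ins for struct ttm_place and struct ttm_mem_reg, and eviction_valuable() here mirrors the default logic from the hunk rather than being kernel code.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for struct ttm_place and struct ttm_mem_reg. */
struct place {
	uint32_t fpfn;	/* first acceptable page of the requested range */
	uint32_t lpfn;	/* one past the last acceptable page, 0 = no upper limit */
};

struct mem {
	uint64_t start;	/* first page currently occupied by the BO */
	uint64_t size;	/* extent of the BO, in the same page units */
};

/*
 * Mirrors the default check added as ttm_bo_eviction_valuable():
 * evicting the BO is only useful if its pages overlap the
 * requested [fpfn, lpfn) window.
 */
static bool eviction_valuable(const struct mem *m, const struct place *p)
{
	if (p->fpfn >= (m->start + m->size) ||
	    (p->lpfn && p->lpfn <= m->start))
		return false;

	return true;
}

int main(void)
{
	struct mem bo = { .start = 100, .size = 16 };	/* occupies pages 100..115 */

	/* Requested range lies entirely above the BO: eviction gains nothing. */
	assert(!eviction_valuable(&bo, &(struct place){ .fpfn = 200, .lpfn = 0 }));
	/* Requested range lies entirely below the BO: same. */
	assert(!eviction_valuable(&bo, &(struct place){ .fpfn = 0, .lpfn = 50 }));
	/* Overlapping range: evicting this BO frees useful space. */
	assert(eviction_valuable(&bo, &(struct place){ .fpfn = 90, .lpfn = 110 }));
	return 0;
}

Routing the decision through a driver hook, with ttm_bo_eviction_valuable() exported as the default, presumably lets a driver veto evictions that the generic range test would otherwise allow. That fits the rest of this diff: ttm_mem_evict_first() now walks the new per-priority LRU arrays (TTM_MAX_BO_PRIORITY lists per memory type, plus the per-priority swap LRUs) and skips any reserved BO whose eviction the driver does not consider valuable.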