Diffstat (limited to 'drivers/gpu/drm/drm_gem_vram_helper.c')
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c | 300
1 file changed, 143 insertions, 157 deletions
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 50cad0e4a92e..02ca22e90290 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
+#include <linux/dma-buf-map.h>
 #include <linux/module.h>
 
 #include <drm/drm_debugfs.h>
@@ -15,7 +16,6 @@
 #include <drm/drm_plane.h>
 #include <drm/drm_prime.h>
 #include <drm/drm_simple_kms_helper.h>
-#include <drm/ttm/ttm_page_alloc.h>
 
 static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
 
@@ -113,8 +113,8 @@ static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
 	 * up; only release the GEM object.
 	 */
 
-	WARN_ON(gbo->kmap_use_count);
-	WARN_ON(gbo->kmap.virtual);
+	WARN_ON(gbo->vmap_use_count);
+	WARN_ON(dma_buf_map_is_set(&gbo->map));
 
 	drm_gem_object_release(&gbo->bo.base);
 }
@@ -140,22 +140,19 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
 	unsigned int c = 0;
 
 	if (pl_flag & DRM_GEM_VRAM_PL_FLAG_TOPDOWN)
-		pl_flag = TTM_PL_FLAG_TOPDOWN;
+		invariant_flags = TTM_PL_FLAG_TOPDOWN;
 
 	gbo->placement.placement = gbo->placements;
 	gbo->placement.busy_placement = gbo->placements;
 
 	if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) {
 		gbo->placements[c].mem_type = TTM_PL_VRAM;
-		gbo->placements[c++].flags = TTM_PL_FLAG_WC |
-					     TTM_PL_FLAG_UNCACHED |
-					     invariant_flags;
+		gbo->placements[c++].flags = invariant_flags;
 	}
 
 	if (pl_flag & DRM_GEM_VRAM_PL_FLAG_SYSTEM || !c) {
 		gbo->placements[c].mem_type = TTM_PL_SYSTEM;
-		gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
-					     invariant_flags;
+		gbo->placements[c++].flags = invariant_flags;
 	}
 
 	gbo->placement.num_placement = c;
@@ -167,58 +164,18 @@
 	}
 }
 
-/*
- * Note that on error, drm_gem_vram_init will free the buffer object.
- */
-
-static int drm_gem_vram_init(struct drm_device *dev,
-			     struct drm_gem_vram_object *gbo,
-			     size_t size, unsigned long pg_align)
-{
-	struct drm_vram_mm *vmm = dev->vram_mm;
-	struct ttm_bo_device *bdev;
-	int ret;
-	size_t acc_size;
-
-	if (WARN_ONCE(!vmm, "VRAM MM not initialized")) {
-		kfree(gbo);
-		return -EINVAL;
-	}
-	bdev = &vmm->bdev;
-
-	gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
-
-	ret = drm_gem_object_init(dev, &gbo->bo.base, size);
-	if (ret) {
-		kfree(gbo);
-		return ret;
-	}
-
-	acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
-
-	gbo->bo.bdev = bdev;
-	drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
-			       DRM_GEM_VRAM_PL_FLAG_SYSTEM);
-
-	ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
-			  &gbo->placement, pg_align, false, acc_size,
-			  NULL, NULL, ttm_buffer_object_destroy);
-	if (ret)
-		/*
-		 * A failing ttm_bo_init will call ttm_buffer_object_destroy
-		 * to release gbo->bo.base and kfree gbo.
-		 */
-		return ret;
-
-	return 0;
-}
-
 /**
  * drm_gem_vram_create() - Creates a VRAM-backed GEM object
  * @dev: the DRM device
  * @size: the buffer size in bytes
  * @pg_align: the buffer's alignment in multiples of the page size
  *
+ * GEM objects are allocated by calling struct drm_driver.gem_create_object,
+ * if set. Otherwise kzalloc() will be used. Drivers can set their own GEM
+ * object functions in struct drm_driver.gem_create_object. If no functions
+ * are set, the new GEM object will use the default functions from GEM VRAM
+ * helpers.
+ *
  * Returns:
  * A new instance of &struct drm_gem_vram_object on success, or
  * an ERR_PTR()-encoded error code otherwise.
@@ -228,11 +185,17 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
 						unsigned long pg_align)
 {
 	struct drm_gem_vram_object *gbo;
+	struct drm_gem_object *gem;
+	struct drm_vram_mm *vmm = dev->vram_mm;
+	struct ttm_bo_device *bdev;
 	int ret;
+	size_t acc_size;
+
+	if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
+		return ERR_PTR(-EINVAL);
 
 	if (dev->driver->gem_create_object) {
-		struct drm_gem_object *gem =
-			dev->driver->gem_create_object(dev, size);
+		gem = dev->driver->gem_create_object(dev, size);
 		if (!gem)
 			return ERR_PTR(-ENOMEM);
 		gbo = drm_gem_vram_of_gem(gem);
@@ -240,10 +203,32 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
 		gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
 		if (!gbo)
 			return ERR_PTR(-ENOMEM);
+		gem = &gbo->bo.base;
 	}
 
-	ret = drm_gem_vram_init(dev, gbo, size, pg_align);
-	if (ret < 0)
+	if (!gem->funcs)
+		gem->funcs = &drm_gem_vram_object_funcs;
+
+	ret = drm_gem_object_init(dev, gem, size);
+	if (ret) {
+		kfree(gbo);
+		return ERR_PTR(ret);
+	}
+
+	bdev = &vmm->bdev;
+	acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
+
+	gbo->bo.bdev = bdev;
+	drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
+
+	/*
+	 * A failing ttm_bo_init will call ttm_buffer_object_destroy
+	 * to release gbo->bo.base and kfree gbo.
+	 */
+	ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
+			  &gbo->placement, pg_align, false, acc_size,
+			  NULL, NULL, ttm_buffer_object_destroy);
+	if (ret)
 		return ERR_PTR(ret);
 
 	return gbo;
@@ -301,7 +286,7 @@ static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
  */
 s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
 {
-	if (WARN_ON_ONCE(!gbo->pin_count))
+	if (WARN_ON_ONCE(!gbo->bo.pin_count))
 		return (s64)-ENODEV;
 	return drm_gem_vram_pg_offset(gbo) << PAGE_SHIFT;
 }
@@ -310,24 +295,21 @@ EXPORT_SYMBOL(drm_gem_vram_offset);
 static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
 				   unsigned long pl_flag)
 {
-	int i, ret;
 	struct ttm_operation_ctx ctx = { false, false };
+	int ret;
 
-	if (gbo->pin_count)
+	if (gbo->bo.pin_count)
 		goto out;
 
 	if (pl_flag)
 		drm_gem_vram_placement(gbo, pl_flag);
 
-	for (i = 0; i < gbo->placement.num_placement; ++i)
-		gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-
 	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
 	if (ret < 0)
 		return ret;
 
 out:
-	++gbo->pin_count;
+	ttm_bo_pin(&gbo->bo);
 
 	return 0;
 }
@@ -369,26 +351,9 @@ int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
 }
 EXPORT_SYMBOL(drm_gem_vram_pin);
 
-static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
+static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
 {
-	int i, ret;
-	struct ttm_operation_ctx ctx = { false, false };
-
-	if (WARN_ON_ONCE(!gbo->pin_count))
-		return 0;
-
-	--gbo->pin_count;
-	if (gbo->pin_count)
-		return 0;
-
-	for (i = 0; i < gbo->placement.num_placement ; ++i)
-		gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-
-	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
-	if (ret < 0)
-		return ret;
-
-	return 0;
+	ttm_bo_unpin(&gbo->bo);
 }
 
 /**
@@ -406,46 +371,45 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
 	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
 	if (ret)
 		return ret;
-	ret = drm_gem_vram_unpin_locked(gbo);
+
+	drm_gem_vram_unpin_locked(gbo);
 	ttm_bo_unreserve(&gbo->bo);
 
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(drm_gem_vram_unpin);
 
-static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
-				      bool map, bool *is_iomem)
+static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
+				    struct dma_buf_map *map)
 {
 	int ret;
-	struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
 
-	if (gbo->kmap_use_count > 0)
-		goto out;
-
-	if (kmap->virtual || !map)
+	if (gbo->vmap_use_count > 0)
 		goto out;
 
-	ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
+	ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
 	if (ret)
-		return ERR_PTR(ret);
+		return ret;
 
 out:
-	if (!kmap->virtual) {
-		if (is_iomem)
-			*is_iomem = false;
-		return NULL; /* not mapped; don't increment ref */
-	}
-	++gbo->kmap_use_count;
-	if (is_iomem)
-		return ttm_kmap_obj_virtual(kmap, is_iomem);
-	return kmap->virtual;
+	++gbo->vmap_use_count;
+	*map = gbo->map;
+
+	return 0;
 }
 
-static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
+static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo,
+				       struct dma_buf_map *map)
 {
-	if (WARN_ON_ONCE(!gbo->kmap_use_count))
+	struct drm_device *dev = gbo->bo.base.dev;
+
+	if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count))
 		return;
-	if (--gbo->kmap_use_count > 0)
+
+	if (drm_WARN_ON_ONCE(dev, !dma_buf_map_is_equal(&gbo->map, map)))
+		return; /* BUG: map not mapped from this BO */
+
+	if (--gbo->vmap_use_count > 0)
 		return;
 
 	/*
@@ -459,7 +423,9 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
 /**
  * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
  *                       space
- * @gbo:	The GEM VRAM object to map
+ * @gbo: The GEM VRAM object to map
+ * @map: Returns the kernel virtual address of the VRAM GEM object's backing
+ *       store.
  *
  * The vmap function pins a GEM VRAM object to its current location, either
  * system or video memory, and maps its buffer into kernel address space.
@@ -468,48 +434,44 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
  * unmap and unpin the GEM VRAM object.
  *
  * Returns:
- * The buffer's virtual address on success, or
- * an ERR_PTR()-encoded error code otherwise.
+ * 0 on success, or a negative error code otherwise.
  */
-void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo)
+int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map)
 {
 	int ret;
-	void *base;
 
 	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
 	if (ret)
-		return ERR_PTR(ret);
+		return ret;
 
 	ret = drm_gem_vram_pin_locked(gbo, 0);
 	if (ret)
 		goto err_ttm_bo_unreserve;
-	base = drm_gem_vram_kmap_locked(gbo, true, NULL);
-	if (IS_ERR(base)) {
-		ret = PTR_ERR(base);
+	ret = drm_gem_vram_kmap_locked(gbo, map);
+	if (ret)
 		goto err_drm_gem_vram_unpin_locked;
-	}
 
 	ttm_bo_unreserve(&gbo->bo);
 
-	return base;
+	return 0;
 
 err_drm_gem_vram_unpin_locked:
 	drm_gem_vram_unpin_locked(gbo);
err_ttm_bo_unreserve:
 	ttm_bo_unreserve(&gbo->bo);
-	return ERR_PTR(ret);
+	return ret;
 }
 EXPORT_SYMBOL(drm_gem_vram_vmap);
 
 /**
  * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
- * @gbo:	The GEM VRAM object to unmap
- * @vaddr:	The mapping's base address as returned by drm_gem_vram_vmap()
+ * @gbo: The GEM VRAM object to unmap
+ * @map: Kernel virtual address where the VRAM GEM object was mapped
  *
  * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
  * the documentation for drm_gem_vram_vmap() for more information.
  */
-void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr)
+void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map)
 {
 	int ret;
 
@@ -517,7 +479,7 @@ void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr)
 	if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
 		return;
 
-	drm_gem_vram_kunmap_locked(gbo);
+	drm_gem_vram_kunmap_locked(gbo, map);
 	drm_gem_vram_unpin_locked(gbo);
 
 	ttm_bo_unreserve(&gbo->bo);
@@ -608,15 +570,30 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
 					       bool evict,
 					       struct ttm_resource *new_mem)
 {
-	struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
+	struct ttm_buffer_object *bo = &gbo->bo;
+	struct drm_device *dev = bo->base.dev;
 
-	if (WARN_ON_ONCE(gbo->kmap_use_count))
+	if (drm_WARN_ON_ONCE(dev, gbo->vmap_use_count))
 		return;
 
-	if (!kmap->virtual)
-		return;
-	ttm_bo_kunmap(kmap);
-	kmap->virtual = NULL;
+	ttm_bo_vunmap(bo, &gbo->map);
+}
+
+static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
+				       bool evict,
+				       struct ttm_operation_ctx *ctx,
+				       struct ttm_resource *new_mem)
+{
+	int ret;
+
+	drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
+	ret = ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem);
+	if (ret) {
+		swap(*new_mem, gbo->bo.mem);
+		drm_gem_vram_bo_driver_move_notify(gbo, false, new_mem);
+		swap(*new_mem, gbo->bo.mem);
+	}
+	return ret;
 }
 
 /*
@@ -640,7 +617,7 @@ static void drm_gem_vram_object_free(struct drm_gem_object *gem)
  */
 
 /**
- * drm_gem_vram_driver_create_dumb() - \
+ * drm_gem_vram_driver_dumb_create() - \
 	Implements &struct drm_driver.dumb_create
  * @file:	the DRM file
 * @dev:	the DRM device
@@ -865,37 +842,33 @@ static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
 }
 
 /**
- * drm_gem_vram_object_vmap() - \
-	Implements &struct drm_gem_object_funcs.vmap
- * @gem:	The GEM object to map
+ * drm_gem_vram_object_vmap() -
+ *	Implements &struct drm_gem_object_funcs.vmap
+ * @gem: The GEM object to map
+ * @map: Returns the kernel virtual address of the VRAM GEM object's backing
+ *       store.
 *
 * Returns:
- * The buffers virtual address on success, or
- * NULL otherwise.
+ * 0 on success, or a negative error code otherwise.
 */
-static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem)
+static int drm_gem_vram_object_vmap(struct drm_gem_object *gem, struct dma_buf_map *map)
 {
 	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
-	void *base;
 
-	base = drm_gem_vram_vmap(gbo);
-	if (IS_ERR(base))
-		return NULL;
-	return base;
+	return drm_gem_vram_vmap(gbo, map);
 }
 
 /**
- * drm_gem_vram_object_vunmap() - \
-	Implements &struct drm_gem_object_funcs.vunmap
- * @gem:	The GEM object to unmap
- * @vaddr:	The mapping's base address
+ * drm_gem_vram_object_vunmap() -
+ *	Implements &struct drm_gem_object_funcs.vunmap
+ * @gem: The GEM object to unmap
+ * @map: Kernel virtual address where the VRAM GEM object was mapped
 */
-static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
-				       void *vaddr)
+static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, struct dma_buf_map *map)
 {
 	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
 
-	drm_gem_vram_vunmap(gbo, vaddr);
+	drm_gem_vram_vunmap(gbo, map);
 }
 
 /*
@@ -941,7 +914,7 @@ static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
 	if (!tt)
 		return NULL;
 
-	ret = ttm_tt_init(tt, bo, page_flags);
+	ret = ttm_tt_init(tt, bo, page_flags, ttm_cached);
 	if (ret < 0)
 		goto err_ttm_tt_init;
 
@@ -966,9 +939,7 @@
 	drm_gem_vram_bo_driver_evict_flags(gbo, placement);
 }
 
-static void bo_driver_move_notify(struct ttm_buffer_object *bo,
-				  bool evict,
-				  struct ttm_resource *new_mem)
+static void bo_driver_delete_mem_notify(struct ttm_buffer_object *bo)
 {
 	struct drm_gem_vram_object *gbo;
 
@@ -978,7 +949,20 @@ static void bo_driver_move_notify(struct ttm_buffer_object *bo,
 
 	gbo = drm_gem_vram_of_bo(bo);
 
-	drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
+	drm_gem_vram_bo_driver_move_notify(gbo, false, NULL);
+}
+
+static int bo_driver_move(struct ttm_buffer_object *bo,
+			  bool evict,
+			  struct ttm_operation_ctx *ctx,
+			  struct ttm_resource *new_mem,
+			  struct ttm_place *hop)
+{
+	struct drm_gem_vram_object *gbo;
+
+	gbo = drm_gem_vram_of_bo(bo);
+
+	return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
 }
 
 static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -992,6 +976,7 @@
 	case TTM_PL_VRAM:
 		mem->bus.offset = (mem->start << PAGE_SHIFT) + vmm->vram_base;
 		mem->bus.is_iomem = true;
+		mem->bus.caching = ttm_write_combined;
 		break;
 	default:
 		return -EINVAL;
@@ -1005,7 +990,8 @@ static struct ttm_bo_driver bo_driver = {
 	.ttm_tt_destroy = bo_driver_ttm_tt_destroy,
 	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = bo_driver_evict_flags,
-	.move_notify = bo_driver_move_notify,
+	.move = bo_driver_move,
+	.delete_mem_notify = bo_driver_delete_mem_notify,
 	.io_mem_reserve = bo_driver_io_mem_reserve,
 };
 
@@ -1050,10 +1036,10 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
 	vmm->vram_base = vram_base;
 	vmm->vram_size = vram_size;
 
-	ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
+	ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, dev->dev,
 				 dev->anon_inode->i_mapping,
 				 dev->vma_offset_manager,
-				 true);
+				 false, true);
 	if (ret)
 		return ret;
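
For orientation, here is a minimal usage sketch of the converted interface; it is not part of the patch. It assumes a caller that wants to fill a VRAM-backed BO from system memory: after this change, drm_gem_vram_vmap() fills a caller-provided struct dma_buf_map and returns an error code instead of a void pointer, and drm_gem_vram_vunmap() is handed the same mapping descriptor back. The helper name upload_to_vram_bo() and its error-handling flow are illustrative assumptions, not code from this file.

#include <linux/dma-buf-map.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/string.h>

#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>

/* Hypothetical helper: create a VRAM-backed BO and copy @size bytes into it. */
static int upload_to_vram_bo(struct drm_device *dev, const void *data, size_t size)
{
	struct drm_gem_vram_object *gbo;
	struct dma_buf_map map;
	int ret;

	gbo = drm_gem_vram_create(dev, size, 0);
	if (IS_ERR(gbo))
		return PTR_ERR(gbo);

	/* Pins the BO and fills @map; errors are plain negative codes now. */
	ret = drm_gem_vram_vmap(gbo, &map);
	if (ret)
		goto out_put;

	/* struct dma_buf_map records whether the BO was mapped in I/O or system memory. */
	if (map.is_iomem)
		memcpy_toio(map.vaddr_iomem, data, size);
	else
		memcpy(map.vaddr, data, size);

	/* Unmapping takes the same mapping descriptor that vmap returned. */
	drm_gem_vram_vunmap(gbo, &map);

out_put:
	drm_gem_object_put(&gbo->bo.base);
	return ret;
}

The is_iomem flag in the mapping descriptor takes over the role of the old is_iomem out-parameter of the removed kmap path: callers no longer have to guess whether the returned address points into I/O or system memory.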