author     Thomas Zimmermann <tzimmermann@suse.de>    2022-11-24 09:28:05 +0100
committer  Thomas Zimmermann <tzimmermann@suse.de>    2022-11-24 09:28:05 +0100
commit     1e5b3968a57d7894d5f86a2ecb58fa057cb6f7b2 (patch)
tree       a1beadf663bf4a318cccbfba8b4679b5fd0991c7 /drivers/gpu/drm/i915/gem
parent     6fb6c979ca628583d4d0c59a0f8ff977e581ecc0 (diff)
parent     d47f9580839eb6fe568e38b2084d94887fbf5ce0 (diff)
Merge drm/drm-next into drm-misc-next
Backmerging to get v6.1-rc6 into drm-misc-next.
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Diffstat (limited to 'drivers/gpu/drm/i915/gem')
20 files changed, 89 insertions(+), 107 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 01402f3c58f6..7f2831efc798 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -546,7 +546,7 @@ set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
 	}
 
 	if (intel_engine_uses_guc(master)) {
-		DRM_DEBUG("bonding extension not supported with GuC submission");
+		drm_dbg(&i915->drm, "bonding extension not supported with GuC submission");
 		return -ENODEV;
 	}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 9322ac29008b..fd556a076d05 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -240,7 +240,6 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *sgt;
-	unsigned int sg_page_sizes;
 
 	assert_object_held(obj);
 
@@ -264,8 +263,7 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 	    (!HAS_LLC(i915) && !IS_DG1(i915)))
 		wbinvd_on_all_cpus();
 
-	sg_page_sizes = i915_sg_dma_sizes(sgt->sgl);
-	__i915_gem_object_set_pages(obj, sgt, sg_page_sizes);
+	__i915_gem_object_set_pages(obj, sgt);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 1160723c9d2d..29e9e8d5b6fe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -30,6 +30,7 @@
 #include "i915_gem_context.h"
 #include "i915_gem_evict.h"
 #include "i915_gem_ioctls.h"
+#include "i915_reg.h"
 #include "i915_trace.h"
 #include "i915_user_extensions.h"
 
@@ -53,13 +54,13 @@ enum {
 #define DBG_FORCE_RELOC 0 /* choose one of the above! */
 };
 
-/* __EXEC_OBJECT_NO_RESERVE is BIT(31), defined in i915_vma.h */
-#define __EXEC_OBJECT_HAS_PIN		BIT(30)
-#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
-#define __EXEC_OBJECT_USERPTR_INIT	BIT(28)
-#define __EXEC_OBJECT_NEEDS_MAP		BIT(27)
-#define __EXEC_OBJECT_NEEDS_BIAS	BIT(26)
-#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 26) /* all of the above + */
+/* __EXEC_OBJECT_ flags > BIT(29) defined in i915_vma.h */
+#define __EXEC_OBJECT_HAS_PIN		BIT(29)
+#define __EXEC_OBJECT_HAS_FENCE		BIT(28)
+#define __EXEC_OBJECT_USERPTR_INIT	BIT(27)
+#define __EXEC_OBJECT_NEEDS_MAP		BIT(26)
+#define __EXEC_OBJECT_NEEDS_BIAS	BIT(25)
+#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 25) /* all of the above + */
 #define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
 
 #define __EXEC_HAS_RELOC	BIT(31)
@@ -2101,7 +2102,8 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 					 eb->composite_fence ?
 					 eb->composite_fence :
 					 &eb->requests[j]->fence,
-					 flags | __EXEC_OBJECT_NO_RESERVE);
+					 flags | __EXEC_OBJECT_NO_RESERVE |
+					 __EXEC_OBJECT_NO_REQUEST_AWAIT);
 		}
 	}
 
@@ -2148,7 +2150,8 @@ err_skip:
 	return err;
 }
 
-static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+static int i915_gem_check_execbuffer(struct drm_i915_private *i915,
+				     struct drm_i915_gem_execbuffer2 *exec)
 {
 	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
 		return -EINVAL;
@@ -2161,7 +2164,7 @@ static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 	}
 
 	if (exec->DR4 == 0xffffffff) {
-		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
+		drm_dbg(&i915->drm, "UXA submitting garbage DR4, fixing up\n");
 		exec->DR4 = 0;
 	}
 	if (exec->DR1 || exec->DR4)
@@ -2799,7 +2802,8 @@ add_timeline_fence_array(struct i915_execbuffer *eb,
 
 		syncobj = drm_syncobj_find(eb->file, user_fence.handle);
 		if (!syncobj) {
-			DRM_DEBUG("Invalid syncobj handle provided\n");
+			drm_dbg(&eb->i915->drm,
+				"Invalid syncobj handle provided\n");
 			return -ENOENT;
 		}
 
@@ -2807,7 +2811,8 @@ add_timeline_fence_array(struct i915_execbuffer *eb,
 
 		if (!fence && user_fence.flags &&
 		    !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
-			DRM_DEBUG("Syncobj handle has no fence\n");
+			drm_dbg(&eb->i915->drm,
+				"Syncobj handle has no fence\n");
 			drm_syncobj_put(syncobj);
 			return -EINVAL;
 		}
@@ -2816,7 +2821,9 @@ add_timeline_fence_array(struct i915_execbuffer *eb,
 			err = dma_fence_chain_find_seqno(&fence, point);
 
 		if (err && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
-			DRM_DEBUG("Syncobj handle missing requested point %llu\n", point);
+			drm_dbg(&eb->i915->drm,
+				"Syncobj handle missing requested point %llu\n",
+				point);
 			dma_fence_put(fence);
 			drm_syncobj_put(syncobj);
 			return err;
@@ -2842,7 +2849,8 @@ add_timeline_fence_array(struct i915_execbuffer *eb,
 		 * 0) would break the timeline.
 		 */
 		if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
-			DRM_DEBUG("Trying to wait & signal the same timeline point.\n");
+			drm_dbg(&eb->i915->drm,
+				"Trying to wait & signal the same timeline point.\n");
 			dma_fence_put(fence);
 			drm_syncobj_put(syncobj);
 			return -EINVAL;
@@ -2913,14 +2921,16 @@ static int add_fence_array(struct i915_execbuffer *eb)
 
 		syncobj = drm_syncobj_find(eb->file, user_fence.handle);
 		if (!syncobj) {
-			DRM_DEBUG("Invalid syncobj handle provided\n");
+			drm_dbg(&eb->i915->drm,
+				"Invalid syncobj handle provided\n");
 			return -ENOENT;
 		}
 
 		if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
 			fence = drm_syncobj_fence_get(syncobj);
 			if (!fence) {
-				DRM_DEBUG("Syncobj handle has no fence\n");
+				drm_dbg(&eb->i915->drm,
+					"Syncobj handle has no fence\n");
 				drm_syncobj_put(syncobj);
 				return -EINVAL;
 			}
@@ -3515,7 +3525,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	err = i915_gem_check_execbuffer(args);
+	err = i915_gem_check_execbuffer(i915, args);
 	if (err)
 		return err;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 629acb403a2c..f66bcefc09ec 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -35,7 +35,6 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *st;
 	struct scatterlist *sg;
-	unsigned int sg_page_sizes;
 	unsigned int npages;
 	int max_order = MAX_ORDER;
 	unsigned int max_segment;
@@ -64,7 +63,6 @@ create_st:
 
 	sg = st->sgl;
 	st->nents = 0;
-	sg_page_sizes = 0;
 
 	do {
 		int order = min(fls(npages) - 1, max_order);
@@ -83,7 +81,6 @@ create_st:
 		} while (1);
 
 		sg_set_page(sg, page, PAGE_SIZE << order, 0);
-		sg_page_sizes |= PAGE_SIZE << order;
 		st->nents++;
 
 		npages -= 1 << order;
@@ -105,7 +102,7 @@ create_st:
 		goto err;
 	}
 
-	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
+	__i915_gem_object_set_pages(obj, st);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index e63329bc8065..c29efdef8313 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -330,7 +330,7 @@ retry:
 	if (ret)
 		goto err_rpm;
 
-	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
+	ret = intel_gt_reset_lock_interruptible(ggtt->vm.gt, &srcu);
 	if (ret)
 		goto err_pages;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 6b9ecff42bb5..3db53769864c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -403,8 +403,7 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
 				unsigned long n);
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-				 struct sg_table *pages,
-				 unsigned int sg_page_sizes);
+				 struct sg_table *pages);
 
 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 16f845663ff2..05a27723ebb8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -16,8 +16,7 @@
 #include "i915_gem_mman.h"
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-				 struct sg_table *pages,
-				 unsigned int sg_page_sizes)
+				 struct sg_table *pages)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
@@ -45,8 +44,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 
 	obj->mm.pages = pages;
 
-	GEM_BUG_ON(!sg_page_sizes);
-	obj->mm.page_sizes.phys = sg_page_sizes;
+	obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
+	GEM_BUG_ON(!obj->mm.page_sizes.phys);
 
 	/*
 	 * Calculate the supported page-sizes which fit into the given
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 0d0e46dae559..68453572275b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -79,7 +79,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
 	/* We're no longer struct page backed */
 	obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
-	__i915_gem_object_set_pages(obj, st, sg->length);
+	__i915_gem_object_set_pages(obj, st);
 
 	return 0;
 
@@ -209,11 +209,8 @@ static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
 	return 0;
 
 err_xfer:
-	if (!IS_ERR_OR_NULL(pages)) {
-		unsigned int sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
-
-		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
-	}
+	if (!IS_ERR_OR_NULL(pages))
+		__i915_gem_object_set_pages(obj, pages);
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 2f7804492cd5..9c759df700ca 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -247,7 +247,7 @@ rebuild_st:
 	if (i915_gem_object_can_bypass_llc(obj))
 		obj->cache_dirty = true;
 
-	__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
+	__i915_gem_object_set_pages(obj, st);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 0c70711818ed..bc9521078807 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -628,7 +628,7 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
 		sg_dma_len(pages->sgl),
 		POISON_INUSE);
 
-	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);
+	__i915_gem_object_set_pages(obj, pages);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 25129af70f70..1e50fb0d6bfc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -603,6 +603,10 @@ static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
 
 	WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
 
+	err = ttm_bo_wait(bo, true, false);
+	if (err)
+		return err;
+
 	err = i915_ttm_move_notify(bo);
 	if (err)
 		return err;
@@ -815,8 +819,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
 
 		GEM_BUG_ON(obj->mm.rsgt);
 		obj->mm.rsgt = rsgt;
-		__i915_gem_object_set_pages(obj, &rsgt->table,
-					    i915_sg_dma_sizes(rsgt->table.sgl));
+		__i915_gem_object_set_pages(obj, &rsgt->table);
 	}
 
 	GEM_BUG_ON(bo->ttm && ((obj->base.size >> PAGE_SHIFT) < bo->ttm->num_pages));
@@ -1031,9 +1034,6 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
 	vm_fault_t ret;
 	int idx;
 
-	if (i915_ttm_is_ghost_object(bo))
-		return VM_FAULT_SIGBUS;
-
 	/* Sanity check that we allow writing into this object */
 	if (unlikely(i915_gem_object_is_readonly(obj) &&
 		     area->vm_flags & VM_WRITE))
@@ -1048,9 +1048,6 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 	}
 
-	if (i915_ttm_cpu_maps_iomem(bo->resource))
-		wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
-
 	if (!i915_ttm_resource_mappable(bo->resource)) {
 		int err = -ENODEV;
 		int i;
@@ -1078,6 +1075,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
 		}
 	}
 
+	if (i915_ttm_cpu_maps_iomem(bo->resource))
+		wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+
 	if (drm_dev_enter(dev, &idx)) {
 		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 					       TTM_BO_VM_NUM_PREFAULT);
@@ -1098,6 +1098,8 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
 		spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
 		list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list);
 		spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+
+		GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource));
 	}
 
 	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
@@ -1180,6 +1182,8 @@ static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
 		}
 	}
 
+	GEM_WARN_ON(obj->userfault_count);
+
 	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
 
 	if (wakeref)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 1b1a22716722..9348b1804d53 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -131,7 +131,6 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
 	unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev);
 	struct sg_table *st;
-	unsigned int sg_page_sizes;
 	struct page **pvec;
 	int ret;
 
@@ -170,8 +169,7 @@ alloc_table:
 	if (i915_gem_object_can_bypass_llc(obj))
 		obj->cache_dirty = true;
 
-	sg_page_sizes = i915_sg_dma_sizes(st->sgl);
-	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
+	__i915_gem_object_set_pages(obj, st);
 
 	return 0;
 
@@ -427,9 +425,10 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
 {
 	VMA_ITERATOR(vmi, mm, addr);
 	struct vm_area_struct *vma;
+	unsigned long end = addr + len;
 
 	mmap_read_lock(mm);
-	for_each_vma_range(vmi, vma, addr + len) {
+	for_each_vma_range(vmi, vma, end) {
 		/* Check for holes, note that we also update the addr below */
 		if (vma->vm_start > addr)
 			break;
@@ -441,7 +440,7 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
 	}
 	mmap_read_unlock(mm);
 
-	if (vma)
+	if (vma || addr < end)
 		return -EFAULT;
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index f963b8e1e37b..cbd9b624a788 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -68,7 +68,7 @@ static int huge_get_pages(struct drm_i915_gem_object *obj)
 	if (i915_gem_gtt_prepare_pages(obj, pages))
 		goto err;
 
-	__i915_gem_object_set_pages(obj, pages, PAGE_SIZE);
+	__i915_gem_object_set_pages(obj, pages);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 0cb99e75b0bc..beaf27e09e8a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -136,7 +136,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
 		goto err;
 
 	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
-	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
+	__i915_gem_object_set_pages(obj, st);
 
 	return 0;
 
@@ -210,7 +210,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
 	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
 	struct sg_table *st;
 	struct scatterlist *sg;
-	unsigned int sg_page_sizes;
 	u64 rem;
 
 	st = kmalloc(sizeof(*st), GFP);
@@ -226,7 +225,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
 	rem = obj->base.size;
 	sg = st->sgl;
 	st->nents = 0;
-	sg_page_sizes = 0;
 	do {
 		unsigned int page_size = get_largest_page_size(i915, rem);
 		unsigned int len = min(page_size * div_u64(rem, page_size),
@@ -239,8 +237,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
 		sg_dma_len(sg) = len;
 		sg_dma_address(sg) = page_size;
 
-		sg_page_sizes |= len;
-
 		st->nents++;
 
 		rem -= len;
@@ -254,7 +250,7 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
 
 	i915_sg_trim(st);
 
-	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
+	__i915_gem_object_set_pages(obj, st);
 
 	return 0;
 }
@@ -286,7 +282,7 @@ static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
 	sg_dma_len(sg) = obj->base.size;
 	sg_dma_address(sg) = page_size;
 
-	__i915_gem_object_set_pages(obj, st, sg->length);
+	__i915_gem_object_set_pages(obj, st);
 
 	return 0;
 #undef GFP
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 9a6a6b5b722b..692a16914ca0 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -13,6 +13,7 @@
 #include "gt/intel_gt_regs.h"
 
 #include "gem/i915_gem_lmem.h"
+#include "gem/selftests/igt_gem_utils.h"
 #include "selftests/igt_flush_test.h"
 #include "selftests/mock_drm.h"
 #include "selftests/i915_random.h"
@@ -457,21 +458,6 @@ static int verify_buffer(const struct tiled_blits *t,
 	return ret;
 }
 
-static int move_to_active(struct i915_vma *vma,
-			  struct i915_request *rq,
-			  unsigned int flags)
-{
-	int err;
-
-	i915_vma_lock(vma);
-	err = i915_request_await_object(rq, vma->obj, false);
-	if (err == 0)
-		err = i915_vma_move_to_active(vma, rq, flags);
-	i915_vma_unlock(vma);
-
-	return err;
-}
-
 static int pin_buffer(struct i915_vma *vma, u64 addr)
 {
 	int err;
@@ -525,11 +511,11 @@ tiled_blit(struct tiled_blits *t,
 		goto err_bb;
 	}
 
-	err = move_to_active(t->batch, rq, 0);
+	err = igt_vma_move_to_active_unlocked(t->batch, rq, 0);
 	if (!err)
-		err = move_to_active(src->vma, rq, 0);
+		err = igt_vma_move_to_active_unlocked(src->vma, rq, 0);
 	if (!err)
-		err = move_to_active(dst->vma, rq, 0);
+		err = igt_vma_move_to_active_unlocked(dst->vma, rq, 0);
 	if (!err)
 		err = rq->engine->emit_bb_start(rq,
 						t->batch->node.start,
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index a666d7e610f5..c228fe4aba50 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -239,9 +239,7 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
 	}
 	intel_ring_advance(rq, cs);
 
-	err = i915_request_await_object(rq, vma->obj, true);
-	if (err == 0)
-		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 
 out_rq:
 	i915_request_add(rq);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index d8864444432b..a0ff51d71d07 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -984,15 +984,11 @@ retry:
 		goto err_batch;
 	}
 
-	err = i915_request_await_object(rq, batch->obj, false);
-	if (err == 0)
-		err = i915_vma_move_to_active(batch, rq, 0);
+	err = i915_vma_move_to_active(batch, rq, 0);
 	if (err)
 		goto skip_request;
 
-	err = i915_request_await_object(rq, vma->obj, true);
-	if (err == 0)
-		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	if (err)
 		goto skip_request;
 
@@ -1553,9 +1549,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	}
 
 	i915_vma_lock(vma);
-	err = i915_request_await_object(rq, vma->obj, false);
-	if (err == 0)
-		err = i915_vma_move_to_active(vma, rq, 0);
+	err = i915_vma_move_to_active(vma, rq, 0);
 	i915_vma_unlock(vma);
 	if (err)
 		goto skip_request;
@@ -1689,9 +1683,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 	}
 
 	i915_vma_lock(vma);
-	err = i915_request_await_object(rq, vma->obj, true);
-	if (err == 0)
-		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 	if (err)
 		goto skip_request;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 1cae24349a96..3f658d5717d8 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -17,6 +17,7 @@
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_migrate.h"
+#include "i915_reg.h"
 #include "i915_ttm_buddy_manager.h"
 
 #include "huge_gem_object.h"
@@ -565,10 +566,8 @@ retry:
 			goto err_unpin;
 		}
 
-		err = i915_request_await_object(rq, vma->obj, true);
-		if (err == 0)
-			err = i915_vma_move_to_active(vma, rq,
-						      EXEC_OBJECT_WRITE);
+		err = i915_vma_move_to_active(vma, rq,
+					      EXEC_OBJECT_WRITE);
 
 		i915_request_add(rq);
 
 err_unpin:
@@ -1608,9 +1607,7 @@ retry:
 		goto out_unpin;
 	}
 
-	err = i915_request_await_object(rq, vma->obj, false);
-	if (err == 0)
-		err = i915_vma_move_to_active(vma, rq, 0);
+	err = i915_vma_move_to_active(vma, rq, 0);
 
 	err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
 	i915_request_get(rq);
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 3c55e77b0f1b..374b10ac430e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -131,17 +131,13 @@ int igt_gpu_fill_dw(struct intel_context *ce,
 	}
 
 	i915_vma_lock(batch);
-	err = i915_request_await_object(rq, batch->obj, false);
-	if (err == 0)
-		err = i915_vma_move_to_active(batch, rq, 0);
+	err = i915_vma_move_to_active(batch, rq, 0);
 	i915_vma_unlock(batch);
 	if (err)
 		goto skip_request;
 
 	i915_vma_lock(vma);
-	err = i915_request_await_object(rq, vma->obj, true);
-	if (err == 0)
-		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 	if (err)
 		goto skip_request;
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
index 4221cf84d175..1379fbc14431 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
@@ -9,6 +9,8 @@
 
 #include <linux/types.h>
 
+#include "i915_vma.h"
+
 struct i915_request;
 struct i915_gem_context;
 struct i915_vma;
@@ -29,4 +31,16 @@ int igt_gpu_fill_dw(struct intel_context *ce,
 		    struct i915_vma *vma, u64 offset,
 		    unsigned long count, u32 val);
 
+static inline int __must_check
+igt_vma_move_to_active_unlocked(struct i915_vma *vma, struct i915_request *rq,
+				unsigned int flags)
+{
+	int err;
+
+	i915_vma_lock(vma);
+	err = _i915_vma_move_to_active(vma, rq, &rq->fence, flags);
+	i915_vma_unlock(vma);
+	return err;
+}
+
+#endif /* __IGT_GEM_UTILS_H__ */
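
Note on the change that recurs across most files above: __i915_gem_object_set_pages() no longer takes a sg_page_sizes argument; per the i915_gem_pages.c hunk it now derives obj->mm.page_sizes.phys itself via i915_sg_dma_sizes(pages->sgl). The caller-side sketch below is illustrative only, not code from this merge; fake_get_pages() and fake_build_sg_table() are hypothetical names standing in for any backend's get_pages path.

	/*
	 * Illustrative sketch of a backend get_pages() callback after this
	 * backmerge. fake_get_pages()/fake_build_sg_table() are hypothetical;
	 * only the __i915_gem_object_set_pages() call reflects the new
	 * in-tree signature.
	 */
	static int fake_get_pages(struct drm_i915_gem_object *obj)
	{
		struct sg_table *st;

		/* hypothetical helper: allocate and populate the object's sg_table */
		st = fake_build_sg_table(obj);
		if (IS_ERR(st))
			return PTR_ERR(st);

		/*
		 * Previously callers passed a precomputed page-size mask:
		 *	__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
		 * Now the mask is computed inside __i915_gem_object_set_pages(),
		 * so the caller only hands over the scatter/gather table.
		 */
		__i915_gem_object_set_pages(obj, st);

		return 0;
	}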