author    Ben Skeggs <bskeggs@redhat.com>  2018-05-08 20:39:47 +1000
committer Ben Skeggs <bskeggs@redhat.com>  2018-05-18 15:01:26 +1000
commit    0db912af8f5ad4fa4dc08a9c8e411a10953c5403
tree      10f8e13d89ac5a9de5bd3002d994cb10b2bcf41a
parent    19ca10d82e33bcfe92412c461fc3534ec1e14747
drm/nouveau/gem: attach fences to VMAs to track GPU usage
An upcoming patch will use these to fix issues related to the deferred unmapping of GEM objects.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
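For illustration only, here is a minimal sketch of how a later consumer of this state might use the fence attached below: wait on vma->fence before tearing the mapping down, so the unmap cannot race with GPU work that still references it. The function name example_vma_put_deferred and the blocking-wait strategy are assumptions for this sketch, not taken from the patch; only nouveau_fence_unref(), nouveau_vma_del(), the vma->fence member, and the embedded fence->base dma_fence appear in this commit.

/* Hypothetical sketch, not part of this commit.  Assumes the usual
 * nouveau headers (nouveau_drv.h, nouveau_fence.h, nouveau_vmm.h). */
static void
example_vma_put_deferred(struct nouveau_vma *vma)
{
	/* Block until the last fence that used this mapping has signalled. */
	if (vma->fence)
		dma_fence_wait(&vma->fence->base, false);

	/* Drop the fence reference taken in validate_fini_no_ticket()
	 * and delete the mapping. */
	nouveau_fence_unref(&vma->fence);
	nouveau_vma_del(&vma);
}

Whether the real fix blocks on the fence or defers the unmap until it signals is a design choice left to the upcoming patch.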
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_gem.c')
 drivers/gpu/drm/nouveau/nouveau_gem.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 707e02c80f18..2016d9eb338e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -99,6 +99,7 @@ struct nouveau_gem_object_unmap {
 static void
 nouveau_gem_object_delete(struct nouveau_vma *vma)
 {
+	nouveau_fence_unref(&vma->fence);
 	nouveau_vma_del(&vma);
 }
 
@@ -344,9 +345,20 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
 		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
 		b = &pbbo[nvbo->pbbo_index];
 
-		if (likely(fence))
+		if (likely(fence)) {
+			struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+			struct nouveau_vma *vma;
+
 			nouveau_bo_fence(nvbo, fence, !!b->write_domains);
 
+			if (drm->client.vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+				vma = (void *)(unsigned long)b->user_priv;
+				nouveau_fence_unref(&vma->fence);
+				dma_fence_get(&fence->base);
+				vma->fence = fence;
+			}
+		}
+
 		if (unlikely(nvbo->validate_mapped)) {
 			ttm_bo_kunmap(&nvbo->kmap);
 			nvbo->validate_mapped = false;