Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c  150
1 file changed, 102 insertions(+), 48 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index c2fb98a94bc3..db6c4e281d75 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -19,8 +19,6 @@
#include "msm_gpu.h"
#include "msm_mmu.h"
-static void update_lru(struct drm_gem_object *obj);
-
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -63,6 +61,49 @@ static void sync_for_cpu(struct msm_gem_object *msm_obj)
dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
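+/* Move a backed object to the tail of the LRU matching its pin count
+ * and madvise state.  Caller must hold the LRU lock.
+ */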
+static void update_lru_active(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ GEM_WARN_ON(!msm_obj->pages);
+
+ if (msm_obj->pin_count) {
+ drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
+ } else if (msm_obj->madv == MSM_MADV_WILLNEED) {
+ drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
+ } else {
+ GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
+
+ drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
+ }
+}
+
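+/* Re-sort the object onto the correct LRU (unbacked vs. backed).  Both
+ * the obj lock and the LRU lock must be held.
+ */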
+static void update_lru_locked(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ msm_gem_assert_locked(&msm_obj->base);
+
+ if (!msm_obj->pages) {
+ GEM_WARN_ON(msm_obj->pin_count);
+
+ drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
+ } else {
+ update_lru_active(obj);
+ }
+}
+
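+/* Convenience wrapper that takes the LRU lock around update_lru_locked(). */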
+static void update_lru(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+
+ mutex_lock(&priv->lru.lock);
+ update_lru_locked(obj);
+ mutex_unlock(&priv->lru.lock);
+}
+
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
@@ -180,6 +221,7 @@ static void put_pages(struct drm_gem_object *obj)
static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
+ struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **p;
@@ -190,10 +232,13 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
}
p = get_pages(obj);
- if (!IS_ERR(p)) {
- to_msm_bo(obj)->pin_count++;
- update_lru(obj);
- }
+ if (IS_ERR(p))
+ return p;
+
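+ /* pin_count is protected by the LRU lock rather than the obj lock,
+ * so paths that cannot take the obj lock (see msm_gem_unpin_active())
+ * can still drop the pin safely.
+ */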
+ mutex_lock(&priv->lru.lock);
+ msm_obj->pin_count++;
+ update_lru_locked(obj);
+ mutex_unlock(&priv->lru.lock);
return p;
}
@@ -309,12 +354,10 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
msm_gem_assert_locked(obj);
- vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ vma = msm_gem_vma_new(aspace);
if (!vma)
return ERR_PTR(-ENOMEM);
- vma->aspace = aspace;
-
list_add_tail(&vma->list, &msm_obj->vmas);
return vma;
@@ -361,9 +404,9 @@ put_iova_spaces(struct drm_gem_object *obj, bool close)
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace) {
- msm_gem_purge_vma(vma->aspace, vma);
+ msm_gem_vma_purge(vma);
if (close)
- msm_gem_close_vma(vma->aspace, vma);
+ msm_gem_vma_close(vma);
}
}
}
@@ -399,7 +442,7 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
if (IS_ERR(vma))
return vma;
- ret = msm_gem_init_vma(aspace, vma, obj->size,
+ ret = msm_gem_vma_init(vma, obj->size,
range_start, range_end);
if (ret) {
del_vma(vma);
@@ -437,7 +480,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
if (IS_ERR(pages))
return PTR_ERR(pages);
- ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
+ ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
if (ret)
msm_gem_unpin_locked(obj);
@@ -446,14 +489,34 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
+ struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
+ mutex_lock(&priv->lru.lock);
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
+ update_lru_locked(obj);
+ mutex_unlock(&priv->lru.lock);
+}
- update_lru(obj);
+/* Special unpin path for use in the fence-signaling path, avoiding the
+ * need to hold the obj lock by only depending on things that are
+ * protected by the LRU lock.  In particular, we know that we already
+ * have backing and that the object's dma_resv holds the fence for the
+ * current submit/job, which prevents us racing against page eviction.
+ */
+void msm_gem_unpin_active(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ mutex_lock(&priv->lru.lock);
+ msm_obj->pin_count--;
+ GEM_WARN_ON(msm_obj->pin_count < 0);
+ update_lru_active(obj);
+ mutex_unlock(&priv->lru.lock);
}
struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
@@ -539,8 +602,8 @@ static int clear_iova(struct drm_gem_object *obj,
if (msm_gem_vma_inuse(vma))
return -EBUSY;
- msm_gem_purge_vma(vma->aspace, vma);
- msm_gem_close_vma(vma->aspace, vma);
+ msm_gem_vma_purge(vma);
+ msm_gem_vma_close(vma);
del_vma(vma);
return 0;
@@ -589,7 +652,7 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj,
msm_gem_lock(obj);
vma = lookup_vma(obj, aspace);
if (!GEM_WARN_ON(!vma)) {
- msm_gem_unpin_vma(vma);
+ msm_gem_vma_unpin(vma);
msm_gem_unpin_locked(obj);
}
msm_gem_unlock(obj);
@@ -628,6 +691,7 @@ fail:
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
int ret = 0;
msm_gem_assert_locked(obj);
@@ -641,6 +705,10 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
return ERR_PTR(-EBUSY);
}
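+ /* Pin the backing pages for as long as the vmap exists; the pin is
+ * dropped in msm_gem_put_vaddr_locked() and in the fail path below.
+ */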
+ pages = msm_gem_pin_pages_locked(obj);
+ if (IS_ERR(pages))
+ return ERR_CAST(pages);
+
/* increment vmap_count *before* vmap() call, so shrinker can
* check vmap_count (is_vunmapable()) outside of msm_obj lock.
* This guarantees that we won't try to msm_gem_vunmap() this
@@ -650,25 +718,19 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
msm_obj->vmap_count++;
if (!msm_obj->vaddr) {
- struct page **pages = get_pages(obj);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto fail;
- }
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
if (msm_obj->vaddr == NULL) {
ret = -ENOMEM;
goto fail;
}
-
- update_lru(obj);
}
return msm_obj->vaddr;
fail:
msm_obj->vmap_count--;
+ msm_gem_unpin_locked(obj);
return ERR_PTR(ret);
}
@@ -707,6 +769,7 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
GEM_WARN_ON(msm_obj->vmap_count < 1);
msm_obj->vmap_count--;
+ msm_gem_unpin_locked(obj);
}
void msm_gem_put_vaddr(struct drm_gem_object *obj)
@@ -721,10 +784,13 @@ void msm_gem_put_vaddr(struct drm_gem_object *obj)
*/
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
+ struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_lock(obj);
+ mutex_lock(&priv->lru.lock);
+
if (msm_obj->madv != __MSM_MADV_PURGED)
msm_obj->madv = madv;
@@ -733,7 +799,9 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
/* If the obj is inactive, we might need to move it
* between inactive lists
*/
- update_lru(obj);
+ update_lru_locked(obj);
+
+ mutex_unlock(&priv->lru.lock);
msm_gem_unlock(obj);
@@ -743,6 +811,7 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
void msm_gem_purge(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
@@ -759,7 +828,10 @@ void msm_gem_purge(struct drm_gem_object *obj)
put_iova_vmas(obj);
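+ /* madv is protected by the LRU lock, same as pin_count: */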
+ mutex_lock(&priv->lru.lock);
+ /* A one-way transition: */
msm_obj->madv = __MSM_MADV_PURGED;
+ mutex_unlock(&priv->lru.lock);
drm_gem_free_mmap_offset(obj);
@@ -806,29 +878,6 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
msm_obj->vaddr = NULL;
}
-static void update_lru(struct drm_gem_object *obj)
-{
- struct msm_drm_private *priv = obj->dev->dev_private;
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
- msm_gem_assert_locked(&msm_obj->base);
-
- if (!msm_obj->pages) {
- GEM_WARN_ON(msm_obj->pin_count);
- GEM_WARN_ON(msm_obj->vmap_count);
-
- drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
- } else if (msm_obj->pin_count || msm_obj->vmap_count) {
- drm_gem_lru_move_tail(&priv->lru.pinned, obj);
- } else if (msm_obj->madv == MSM_MADV_WILLNEED) {
- drm_gem_lru_move_tail(&priv->lru.willneed, obj);
- } else {
- GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
-
- drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
- }
-}
-
bool msm_gem_active(struct drm_gem_object *obj)
{
msm_gem_assert_locked(obj);
@@ -846,6 +895,11 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
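+ /* An immediate deadline asks the fence signaler (eg. GPU scheduler /
+ * devfreq) to boost clocks, since the CPU is about to block waiting.
+ */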
+ if (op & MSM_PREP_BOOST) {
+ dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
+ ktime_get());
+ }
+
ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
true, remain);
if (ret == 0)
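
A minimal sketch of the locking pattern this patch establishes, mirroring
the msm_gem_madvise() hunk above (illustrative only, not part of the
patch; example_set_madv() is a hypothetical name):

static void example_set_madv(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);              /* outer: obj (dma_resv) lock */
	mutex_lock(&priv->lru.lock);    /* inner: LRU lock, protects pin_count/madv */

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;
	update_lru_locked(obj);         /* LRU lock already held */

	mutex_unlock(&priv->lru.lock);
	msm_gem_unlock(obj);
}

Paths that cannot take the obj lock, such as msm_gem_unpin_active() in the
fence-signaling path, take only the inner LRU lock.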