Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
 drivers/gpu/drm/msm/msm_gem.c | 22 +++++++---------------
 1 file changed, 7 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 7f92231785a0..8ddbd2e001d4 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -129,7 +129,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
/* For non-cached buffers, ensure the new pages are clean
* because display controller, GPU, etc. are not coherent:
*/
- if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ if (msm_obj->flags & MSM_BO_WC)
sync_for_device(msm_obj);
update_inactive(msm_obj);
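sync_for_device() itself is outside this hunk; as a sketch, assuming the DMA-API sgtable helpers, it amounts to a streaming map of the freshly allocated pages so that dirty CPU cache lines are cleaned before the non-coherent GPU or display controller reads them:

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	/* clean CPU caches over the whole sg_table before device access */
	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

With MSM_BO_UNCACHED no longer a distinct cache mode (see the switch at the end of this diff), only write-combined buffers take this path.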
@@ -160,7 +160,7 @@ static void put_pages(struct drm_gem_object *obj)
* pages are clean because display controller,
* GPU, etc. are not coherent:
*/
- if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ if (msm_obj->flags & MSM_BO_WC)
sync_for_cpu(msm_obj);
sg_free_table(msm_obj->sgt);
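sync_for_cpu() is the inverse hand-off; again not shown in the hunk, a minimal sketch assuming the same sgtable helpers:

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	/* hand the pages back to the CPU before they are released */
	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}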
@@ -213,7 +213,7 @@ void msm_gem_put_pages(struct drm_gem_object *obj)
static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
- if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ if (msm_obj->flags & MSM_BO_WC)
return pgprot_writecombine(prot);
return prot;
}
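With uncached mappings gone, msm_gem_pgprot() reduces to a single question: write-combined or cacheable. A caller-side sketch of how the helper is consumed when a kernel mapping of the buffer is created (the vmap() call is an assumption, not part of this hunk):

	/* kernel-side vmap of the BO, honouring its cache mode */
	msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, VM_MAP,
			msm_gem_pgprot(msm_obj, PAGE_KERNEL));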
@@ -259,7 +259,8 @@ static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
+
out_unlock:
msm_gem_unlock(obj);
out:
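The fault path now inserts a raw pfn with vmf_insert_pfn() instead of wrapping it in a pfn_t for vmf_insert_mixed(); this pairs with the switch to VM_PFNMAP in msm_gem_object_mmap() further down. A condensed sketch of the handler around this hunk, with locking and error handling elided (locals assumed, not shown here):

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	vm_fault_t ret;

	/* make sure pages are attached (simplified: real code takes the
	 * object lock and checks for errors) */
	pages = get_pages(obj);

	/* page index relative to the start of the VMA, not vmf->pgoff
	 * (which carries the fake mmap offset) */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	pfn = page_to_pfn(pages[pgoff]);

	/* raw pfn insert: the VMA is VM_PFNMAP, so no struct page refcounting */
	ret = vmf_insert_pfn(vma, vmf->address, pfn);

	/* ... drop the object lock ... */
	return ret;
}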
@@ -1004,7 +1005,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
#endif
/* don't call directly! Use drm_gem_object_put() */
-void msm_gem_free_object(struct drm_gem_object *obj)
+static void msm_gem_free_object(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
@@ -1020,8 +1021,6 @@ void msm_gem_free_object(struct drm_gem_object *obj)
list_del(&msm_obj->mm_list);
mutex_unlock(&priv->mm_lock);
- msm_gem_lock(obj);
-
/* object should not be on active list: */
GEM_WARN_ON(is_active(msm_obj));
@@ -1037,17 +1036,11 @@ void msm_gem_free_object(struct drm_gem_object *obj)
put_iova_vmas(obj);
- /* dma_buf_detach() grabs resv lock, so we need to unlock
- * prior to drm_prime_gem_destroy
- */
- msm_gem_unlock(obj);
-
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
msm_gem_vunmap(obj);
put_pages(obj);
put_iova_vmas(obj);
- msm_gem_unlock(obj);
}
drm_gem_object_release(obj);
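Making msm_gem_free_object() static means the only way to reach it is through the GEM object's function table, which is exactly what the "don't call directly! Use drm_gem_object_put()" comment already demanded. A sketch of that hookup (the exact field set is an assumption; only .free is implied by this hunk):

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,	/* reached via drm_gem_object_put() */
	.mmap = msm_gem_object_mmap,
	.vm_ops = &vm_ops,		/* fault handler sketched after the mmap hunk below */
};

Dropping msm_gem_lock()/msm_gem_unlock() from this path also retires the old caveat about releasing the reservation lock before drm_prime_gem_destroy(): the lock is simply never taken here any more.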
@@ -1059,7 +1052,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
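VM_PFNMAP replaces VM_IO | VM_MIXEDMAP because the fault handler above inserts raw pfns; the core MM must not treat the mapping as holding ordinary refcounted pages. The vm_operations_struct that ties the two together would look roughly like this (the open/close helpers are assumed to be the stock drm_gem ones):

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};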
@@ -1114,7 +1107,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
struct msm_gem_object *msm_obj;
switch (flags & MSM_BO_CACHE_MASK) {
- case MSM_BO_UNCACHED:
case MSM_BO_CACHED:
case MSM_BO_WC:
break;
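After this hunk only the two remaining cache modes pass validation. The default branch is outside the diff, but assuming it still rejects unknown modes, the resulting switch reads roughly as:

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		/* not visible in this diff: assumed to log and reject
		 * anything else, including the old MSM_BO_UNCACHED */
		return -EINVAL;
	}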