summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorHari Kanigeri <h-kanigeri2@ti.com>2010-08-16 15:59:03 -0500
committerRicardo Perez Olivares <x0081762@ti.com>2010-08-17 14:03:06 -0500
commit43ab2b654badf0cb30f010f918471a358e24cdc7 (patch)
tree364dadcfc9fbdc873f4bd7919191c975cd20a526
parent85bcd03e6c1ee04954286d3e22cf42f8eec3a42d (diff)
omap: iommu-dmm fixes
This fixes the following: 1. pgd and pte entries weren't getting flushed out, leading to MMU faults. 2. Cache invalidate was passing the wrong size parameter to memory_regain_ownership, causing the cache invalidate function to fail. Signed-off-by: Hari Kanigeri <h-kanigeri2@ti.com>
-rw-r--r--arch/arm/plat-omap/iodmm.c20
-rw-r--r--arch/arm/plat-omap/iommu.c12
-rw-r--r--arch/arm/plat-omap/iovmm.c35
3 files changed, 50 insertions, 17 deletions
diff --git a/arch/arm/plat-omap/iodmm.c b/arch/arm/plat-omap/iodmm.c
index 9463a6dbb5cf..315f1570be44 100644
--- a/arch/arm/plat-omap/iodmm.c
+++ b/arch/arm/plat-omap/iodmm.c
@@ -21,7 +21,7 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
-
+
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -394,7 +394,7 @@ int proc_end_dma(struct iodmm_struct *obj, void *pva, u32 ul_size,
goto err_out;
}
- if (memory_regain_ownership(dev, map_obj, (u32) pva, ul_size, dir)) {
+ if (memory_regain_ownership(dev, map_obj, va_align, ul_size, dir)) {
pr_err("%s: InValid address parameters %p %x\n",
__func__, pva, ul_size);
status = -EFAULT;
@@ -456,6 +456,7 @@ int user_to_device_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
u32 pa;
unsigned int pages;
struct iotlb_entry tlb_entry;
+ struct page *mapped_page;
if (!size || !usr_pgs)
return -EINVAL;
@@ -480,27 +481,28 @@ int user_to_device_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
for (pg_i = 0; pg_i < pages; pg_i++) {
pg_num = get_user_pages(current, mm, uva, 1,
- w, 1, usr_pgs, NULL);
+ w, 1, &mapped_page, NULL);
if (pg_num > 0) {
- if (page_count(*usr_pgs) < 1) {
+ if (page_count(mapped_page) < 1) {
pr_err("Bad page count after doing"
"get_user_pages on"
"user buffer\n");
break;
}
tlb_entry.pgsz = MMU_CAM_PGSZ_4K;
- tlb_entry.prsvd = MMU_CAM_P;
+ tlb_entry.prsvd = 0;
tlb_entry.valid = MMU_CAM_V;
- tlb_entry.elsz = MMU_RAM_ELSZ_8;
+ tlb_entry.elsz = MMU_RAM_ELSZ_32;
tlb_entry.endian = MMU_RAM_ENDIAN_LITTLE;
- tlb_entry.mixed = 0;
+ tlb_entry.mixed = MMU_RAM_MIXED;
tlb_entry.da = da;
- pa = page_to_phys(*usr_pgs);
+ pa = page_to_phys(mapped_page);
tlb_entry.pa = (u32)pa;
iopgtable_store_entry(mmu, &tlb_entry);
+ if (usr_pgs)
+ usr_pgs[pg_i] = mapped_page;
da += PAGE_SIZE;
uva += PAGE_SIZE;
- usr_pgs++;
} else {
pr_err("get_user_pages FAILED,"
"MPU addr = 0x%x,"
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index 3b5b1decf200..987bd513fe98 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -469,7 +469,7 @@ static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
return ERR_PTR(-ENOMEM);
*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
- flush_iopgd_range(iopgd, iopgd);
+ flush_iopgd_range(iopgd, iopgd + 1);
dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
} else {
@@ -498,7 +498,7 @@ static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
}
*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
- flush_iopgd_range(iopgd, iopgd);
+ flush_iopgd_range(iopgd, iopgd + 1);
return 0;
}
@@ -515,7 +515,7 @@ static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
for (i = 0; i < 16; i++)
*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
- flush_iopgd_range(iopgd, iopgd + 15);
+ flush_iopgd_range(iopgd, iopgd + 16);
return 0;
}
@@ -528,7 +528,7 @@ static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
return PTR_ERR(iopte);
*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
- flush_iopte_range(iopte, iopte);
+ flush_iopte_range(iopte, iopte + 1);
dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
__func__, da, pa, iopte, *iopte);
@@ -553,7 +553,7 @@ static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
for (i = 0; i < 16; i++)
*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
- flush_iopte_range(iopte, iopte + 15);
+ flush_iopte_range(iopte, iopte + 16);
return 0;
}
@@ -724,7 +724,7 @@ void iopgtable_clear_entry_all(struct iommu *obj)
iopte_free(iopte_offset(iopgd, 0));
*iopgd = 0;
- flush_iopgd_range(iopgd, iopgd);
+ flush_iopgd_range(iopgd, iopgd + 1);
}
flush_iotlb_all(obj);
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index d1557df2df47..256b36a5ec9e 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -98,6 +98,24 @@ err_out:
return -ENOMEM;
}
+static int omap_delete_vmm_pool(struct iodmm_struct *obj, int pool_id)
+{
+ struct iovmm_pool *pool;
+ struct iovmm_device *iovmm_obj = obj->iovmm;
+ struct list_head *_pool, *_next_pool;
+
+ list_for_each_safe(_pool, _next_pool, &iovmm_obj->mmap_pool) {
+ pool = list_entry(_pool, struct iovmm_pool, list);
+ if (pool->pool_id == pool_id) {
+ gen_pool_destroy(pool->genpool);
+ list_del(&pool->list);
+ kfree(pool);
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
static int omap_iovmm_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long args)
{
@@ -207,7 +225,7 @@ static int omap_iovmm_ioctl(struct inode *inode, struct file *filp,
u32 da;
int size;
int status;
-
+
size = copy_from_user(&da, (void __user *)args, sizeof(u32));
if (size) {
ret = -EINVAL;
@@ -289,8 +307,21 @@ static int omap_iovmm_ioctl(struct inode *inode, struct file *filp,
ret = status;
break;
}
- case IOVMM_IOCDATOPA:
case IOVMM_IOCDELETEPOOL:
+ {
+ int pool_id;
+ int size;
+
+ size = copy_from_user(&pool_id, (void __user *)args,
+ sizeof(int));
+ if (size) {
+ ret = -EINVAL;
+ goto err_user_buf;
+ }
+ ret = omap_delete_vmm_pool(obj, pool_id);
+ break;
+ }
+ case IOVMM_IOCDATOPA:
default:
return -ENOTTY;
}