Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/mach-omap2/ipu_dev.c                    |   9
-rw-r--r--  arch/arm/mach-omap2/ipu_drv.c                    |  61
-rw-r--r--  arch/arm/mach-omap2/remoteproc44xx.c             |   9
-rw-r--r--  arch/arm/plat-omap/dmm_user.c                    | 187
-rw-r--r--  arch/arm/plat-omap/include/plat/dmm_user.h       |  37
-rw-r--r--  arch/arm/plat-omap/include/plat/ipu_dev.h        |   2
-rw-r--r--  arch/arm/plat-omap/include/syslink/notifydefs.h  |   2
-rw-r--r--  arch/arm/plat-omap/iodmm.c                       | 657
-rw-r--r--  arch/arm/plat-omap/iommu.c                       |  14
-rw-r--r--  arch/arm/plat-omap/remoteproc.c                  |   8
10 files changed, 556 insertions(+), 430 deletions(-)
diff --git a/arch/arm/mach-omap2/ipu_dev.c b/arch/arm/mach-omap2/ipu_dev.c
index e10f25f36afb..8d3386f1f86c 100644
--- a/arch/arm/mach-omap2/ipu_dev.c
+++ b/arch/arm/mach-omap2/ipu_dev.c
@@ -201,8 +201,6 @@ inline int ipu_pm_module_set_bandwidth(unsigned rsrc,
}
EXPORT_SYMBOL(ipu_pm_module_set_bandwidth);
-static struct omap_device *od_iva;
-
/* FIXME: not in use now
* static struct omap_ipupm_mod_ops omap_ipu_ops = {
* .start = NULL,
@@ -330,6 +328,7 @@ static int __init omap_ipussdev_init(void)
{
int status = -ENODEV;
int i;
+ int first = 1;
struct omap_hwmod *oh;
struct omap_device *od;
char *oh_name;
@@ -368,7 +367,11 @@ static int __init omap_ipussdev_init(void)
WARN(status, "Could not build omap_device for %s %s\n",
pdev_name, oh_name);
if (!status) {
- od_iva = od;
+ /* Save the id of the first registered dev */
+ if (first) {
+ ipu_pm_first_dev = od->pdev.id;
+ first = 0;
+ }
omap_ipupm_data[i].pdev = &od->pdev;
omap_ipupm_data[i].dev = &od->pdev.dev;
}
diff --git a/arch/arm/mach-omap2/ipu_drv.c b/arch/arm/mach-omap2/ipu_drv.c
index 85796c0a140c..88a78ba7be29 100644
--- a/arch/arm/mach-omap2/ipu_drv.c
+++ b/arch/arm/mach-omap2/ipu_drv.c
@@ -47,6 +47,7 @@
static struct class *omap_ipu_pm_class;
static dev_t omap_ipu_pm_dev;
+int ipu_pm_first_dev;
static struct proc_dir_entry *ipu_pm_proc_entry;
/* we could iterate over something much more
@@ -189,12 +190,72 @@ static int __devinit ipu_pm_probe(struct platform_device *pdev)
return 0;
}
+static int ipu_pm_drv_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int retval = 0;
+
+ if (pdev->id == ipu_pm_first_dev) {
+ pr_debug("%s.%d ASKED TO SUSPEND", pdev->name, pdev->id);
+ /* save any local context,
+ * BIOS timers could be saved locally or on Ducati
+ */
+
+ /* call our notification function */
+ retval = ipu_pm_notifications(PM_SUSPEND, NULL);
+
+		/* FIXME: Currently sending SUSPEND is enough to send
+		 * Ducati to hibernate; save_ctx can be called at this
+		 * point to save the context and reset the remote procs.
+		 * Currently the save-ctx process can be called with
+		 * whichever proc_id; this may change when Tesla
+		 * support is added.
+		 */
+ /* sysm3 is handling hibernation of ducati currently */
+ ipu_pm_save_ctx(SYS_M3);
+
+ /* return result, should be zero if all Ducati clients
+ * returned zero else fail code
+ */
+ }
+
+ return retval;
+}
+
+static int ipu_pm_drv_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int retval = 0;
+
+ if (pdev->id == ipu_pm_first_dev) {
+ pr_debug("%s.%d ASKED TO RESUME", pdev->name, pdev->id);
+ /* restore any local context,
+ * BIOS timers could be restored locally or on Ducati
+ */
+
+ /* call our notification function */
+ retval = ipu_pm_notifications(PM_RESUME, NULL);
+
+ /* return result, should be zero if all Ducati clients
+ * returned zero else fail code
+ */
+ }
+
+ return retval;
+}
+
+static const struct dev_pm_ops ipu_pm_ops = {
+ .suspend = ipu_pm_drv_suspend,
+ .resume = ipu_pm_drv_resume,
+};
+
static struct platform_driver ipu_pm_driver = {
.probe = ipu_pm_probe,
/*.remove = ipu_pm_remove, */
.driver = {
.name = IPU_DRIVER_NAME,
.owner = THIS_MODULE,
+ .pm = &ipu_pm_ops,
},
};
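Note on the pattern above: a single platform_driver is bound to several platform_devices (one per IPU power-management resource), and the PM core invokes the dev_pm_ops callbacks once per bound device. Electing the first-registered device via ipu_pm_first_dev makes the processor-wide suspend/resume work run exactly once per cycle. A minimal, self-contained sketch of the same idiom, with hypothetical names:

/* Illustrative only: one driver, N devices, shared work done once. */
#include <linux/platform_device.h>
#include <linux/pm.h>

static int demo_first_dev;	/* id elected at probe time */

static int demo_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	if (pdev->id != demo_first_dev)
		return 0;	/* peer devices have no device-wide work */

	/* processor-wide work goes here: notify clients, save the
	 * remote-processor context, etc. */
	return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
	.suspend	= demo_suspend,
	/* .resume mirrors this, restoring context once */
};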
diff --git a/arch/arm/mach-omap2/remoteproc44xx.c b/arch/arm/mach-omap2/remoteproc44xx.c
index 4189456d018d..d25becba39f5 100644
--- a/arch/arm/mach-omap2/remoteproc44xx.c
+++ b/arch/arm/mach-omap2/remoteproc44xx.c
@@ -170,13 +170,22 @@ static struct omap_rproc_platform_data omap4_rproc_data[] = {
.name = "ducati-proc0",
.ops = &omap4_ducati0_ops,
.oh_name = "ipu_c0",
+#ifdef CONFIG_SYSLINK_DUCATI_PM
+ .timer_id = 3,
+#else
.timer_id = -1,
+#endif
},
{
.name = "ducati-proc1",
.ops = &omap4_ducati1_ops,
.oh_name = "ipu_c1",
+#ifdef CONFIG_SYSLINK_DUCATI_PM
+ .timer_id = 4,
+#else
.timer_id = -1,
+#endif
},
};
diff --git a/arch/arm/plat-omap/dmm_user.c b/arch/arm/plat-omap/dmm_user.c
index 3afa28db3d2d..6137ebf22354 100644
--- a/arch/arm/plat-omap/dmm_user.c
+++ b/arch/arm/plat-omap/dmm_user.c
@@ -30,12 +30,10 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
-#include <linux/eventfd.h>
#include <plat/iommu.h>
#include <plat/iovmm.h>
#include <plat/dmm_user.h>
-#include "iopgtable.h"
#define OMAP_DMM_NAME "iovmm-omap"
@@ -59,161 +57,46 @@ static int omap_dmm_ioctl(struct inode *inode, struct file *filp,
switch (cmd) {
case DMM_IOCSETTLBENT:
- {
- struct iotlb_entry e;
- int size;
- size = copy_from_user(&e, (void __user *)args,
- sizeof(struct iotlb_entry));
- if (size) {
- ret = -EINVAL;
- goto err_user_buf;
- }
- load_iotlb_entry(obj->iovmm->iommu, &e);
+ /* FIXME: re-visit this check to perform
+ proper permission checks */
+ /* if (!capable(CAP_SYS_ADMIN))
+ return -EPERM; */
+ ret = program_tlb_entry(obj, (const void __user *)args);
break;
- }
case DMM_IOCCREATEPOOL:
- {
- struct iovmm_pool_info pool_info;
- int size;
-
- size = copy_from_user(&pool_info, (void __user *)args,
- sizeof(struct iovmm_pool_info));
- if (size) {
- ret = -EINVAL;
- goto err_user_buf;
- }
- omap_create_dmm_pool(obj, pool_info.pool_id, pool_info.size,
- pool_info.da_begin);
+ /* FIXME: re-visit this check to perform
+ proper permission checks */
+ /* if (!capable(CAP_SYS_ADMIN))
+ return -EPERM; */
+ ret = omap_create_dmm_pool(obj, (const void __user *)args);
break;
- }
- case DMM_IOCDELETEPOOL:
- {
- int pool_id;
- int size;
-
- size = copy_from_user(&pool_id, (void __user *)args,
- sizeof(int));
- if (size) {
- ret = -EINVAL;
- goto err_user_buf;
- }
- ret = omap_delete_dmm_pool(obj, pool_id);
- break;
- }
case DMM_IOCMEMMAP:
- {
- struct dmm_map_info map_info;
- int size;
- int status;
-
- size = copy_from_user(&map_info, (void __user *)args,
- sizeof(struct dmm_map_info));
-
- status = dmm_user(obj, map_info.mem_pool_id,
- map_info.da, map_info.mpu_addr,
- map_info.size, map_info.flags);
- ret = copy_to_user((void __user *)args, &map_info,
- sizeof(struct dmm_map_info));
+ ret = dmm_user(obj, (void __user *)args);
break;
- }
case DMM_IOCMEMUNMAP:
- {
- u32 da;
- int size;
- int status = 0;
-
- size = copy_from_user(&da, (void __user *)args, sizeof(u32));
- if (size) {
- ret = -EINVAL;
- goto err_user_buf;
- }
- status = user_un_map(obj, da);
- ret = status;
+ ret = user_un_map(obj, (const void __user *)args);
break;
- }
case IOMMU_IOCEVENTREG:
- {
- int fd;
- int size;
- struct iommu_event_ntfy *fd_reg;
-
- size = copy_from_user(&fd, (void __user *)args, sizeof(int));
- if (size) {
- ret = -EINVAL;
- goto err_user_buf;
- }
-
- fd_reg = kzalloc(sizeof(struct iommu_event_ntfy), GFP_KERNEL);
- fd_reg->fd = fd;
- fd_reg->evt_ctx = eventfd_ctx_fdget(fd);
- INIT_LIST_HEAD(&fd_reg->list);
- spin_lock_irq(&obj->iovmm->iommu->event_lock);
- list_add_tail(&fd_reg->list, &obj->iovmm->iommu->event_list);
- spin_unlock_irq(&obj->iovmm->iommu->event_lock);
+ ret = register_mmufault(obj, (const void __user *)args);
break;
- }
case IOMMU_IOCEVENTUNREG:
- {
- int fd;
- int size;
- struct iommu_event_ntfy *fd_reg, *temp_reg;
-
- size = copy_from_user(&fd, (void __user *)args, sizeof(int));
- if (size) {
- ret = -EINVAL;
- goto err_user_buf;
- }
- /* Free DMM mapped memory resources */
- spin_lock_irq(&obj->iovmm->iommu->event_lock);
- list_for_each_entry_safe(fd_reg, temp_reg,
- &obj->iovmm->iommu->event_list, list) {
- if (fd_reg->fd == fd) {
- list_del(&fd_reg->list);
- kfree(fd_reg);
- }
- }
- spin_unlock_irq(&obj->iovmm->iommu->event_lock);
+		ret = unregister_mmufault(obj, (const void __user *)args);
break;
- }
case DMM_IOCMEMFLUSH:
- {
- int size;
- int status;
- struct dmm_dma_info dma_info;
- size = copy_from_user(&dma_info, (void __user *)args,
- sizeof(struct dmm_dma_info));
- if (size) {
- ret = -EINVAL;
- goto err_user_buf;
- }
- status = proc_begin_dma(obj, dma_info.pva, dma_info.ul_size,
- dma_info.dir);
- ret = status;
+ ret = proc_begin_dma(obj, (void __user *)args);
break;
- }
case DMM_IOCMEMINV:
- {
- int size;
- int status;
- struct dmm_dma_info dma_info;
- size = copy_from_user(&dma_info, (void __user *)args,
- sizeof(struct dmm_dma_info));
- if (size) {
- ret = -EINVAL;
- goto err_user_buf;
- }
- status = proc_end_dma(obj, dma_info.pva, dma_info.ul_size,
- dma_info.dir);
- ret = status;
+ ret = proc_end_dma(obj, (void __user *)args);
+ break;
+ /* This ioctl can be deprecated */
+ case DMM_IOCDELETEPOOL:
break;
- }
case DMM_IOCDATOPA:
default:
return -ENOTTY;
}
-err_user_buf:
- return ret;
+ return ret;
}
static int omap_dmm_open(struct inode *inode, struct file *filp)
@@ -222,6 +105,7 @@ static int omap_dmm_open(struct inode *inode, struct file *filp)
struct iovmm_device *obj;
obj = container_of(inode->i_cdev, struct iovmm_device, cdev);
+ obj->refcount++;
iodmm = kzalloc(sizeof(struct iodmm_struct), GFP_KERNEL);
INIT_LIST_HEAD(&iodmm->map_list);
@@ -240,25 +124,41 @@ static int omap_dmm_release(struct inode *inode, struct file *filp)
if (!filp->private_data) {
status = -EIO;
- goto err;
+ goto err_out;
}
obj = filp->private_data;
+
flush_signals(current);
status = mutex_lock_interruptible(&obj->iovmm->dmm_map_lock);
if (status == 0) {
- iommu_notify_event(obj->iovmm->iommu, IOMMU_CLOSE, NULL);
+ /*
+ * Report to remote Processor of the cleanup of these
+ * resources before cleaning in order to avoid MMU fault
+ * type of behavior
+ */
+ if (!list_empty(&obj->map_list)) {
+ iommu_notify_event(obj->iovmm->iommu, IOMMU_CLOSE,
+ NULL);
+ }
mutex_unlock(&obj->iovmm->dmm_map_lock);
} else {
pr_err("%s mutex_lock_interruptible returned 0x%x\n",
__func__, status);
}
+
user_remove_resources(obj);
iommu_put(obj->iovmm->iommu);
+
+ /* Delete all the DMM pools after the reference count goes to zero */
+ if (--obj->iovmm->refcount == 0)
+ omap_delete_dmm_pools(obj);
+
kfree(obj);
+
filp->private_data = NULL;
-err:
+err_out:
return status;
}
@@ -316,6 +216,7 @@ static int __devinit omap_dmm_probe(struct platform_device *pdev)
mutex_init(&obj->dmm_map_lock);
platform_set_drvdata(pdev, obj);
return 0;
+
clean_cdev:
cdev_del(&obj->cdev);
err_cdev:
@@ -326,14 +227,12 @@ static int __devexit omap_dmm_remove(struct platform_device *pdev)
{
struct iovmm_device *obj = platform_get_drvdata(pdev);
int major = MAJOR(omap_dmm_dev);
+
device_destroy(omap_dmm_class, MKDEV(major, obj->minor));
cdev_del(&obj->cdev);
platform_set_drvdata(pdev, NULL);
- iopgtable_clear_entry_all(obj->iommu);
- iommu_put(obj->iommu);
- free_pages((unsigned long)obj->iommu->iopgd,
- get_order(IOPGD_TABLE_SIZE));
kfree(obj);
+
return 0;
}
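For reference, the reworked ioctl interface moves all argument marshalling into the handlers, so a user-space caller now passes a single struct (or scalar) per command. A user-space sketch of the map/unmap round trip; the DMM_IOC* values and the visibility of struct dmm_map_info to user space come from headers not shown in this diff, so treat them as assumptions:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include <plat/dmm_user.h>	/* struct dmm_map_info, DMM_IOC* (assumed visible to user space) */

/* Map a user buffer into the device address space of pool `pool`. */
static int dmm_map(int fd, void *buf, uint32_t len, uint32_t pool,
		   uint32_t *da_out)
{
	struct dmm_map_info mi;

	memset(&mi, 0, sizeof(mi));
	mi.mpu_addr = (uint32_t)buf;	/* user virtual address */
	mi.size     = len;
	mi.pool_id  = pool;
	mi.da       = da_out;		/* kernel writes the device address here */
	mi.flags    = 0;		/* assumed: any value other than
					 * DMM_DA_PHYS/DMM_DA_ANON means a
					 * plain user buffer */

	return ioctl(fd, DMM_IOCMEMMAP, &mi);
}

/* DMM_IOCMEMUNMAP takes the mapped device address by pointer. */
static int dmm_unmap(int fd, uint32_t da)
{
	return ioctl(fd, DMM_IOCMEMUNMAP, &da);
}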
diff --git a/arch/arm/plat-omap/include/plat/dmm_user.h b/arch/arm/plat-omap/include/plat/dmm_user.h
index b02d82d29e4a..c231314810ec 100644
--- a/arch/arm/plat-omap/include/plat/dmm_user.h
+++ b/arch/arm/plat-omap/include/plat/dmm_user.h
@@ -53,12 +53,8 @@ struct iovmm_pool_info {
/* used to cache dma mapping information */
struct device_dma_map_info {
- /* direction of DMA in action, or DMA_NONE */
- enum dma_data_direction dir;
/* number of elements requested by us */
int num_pages;
- /* number of elements returned from dma_map_sg */
- int sg_num;
/* list of buffers used in this DMA action */
struct scatterlist *sg;
};
@@ -68,7 +64,7 @@ struct dmm_map_info {
u32 *da;
u32 num_of_buf;
u32 size;
- u32 mem_pool_id;
+ u32 pool_id;
u32 flags;
};
@@ -100,22 +96,31 @@ struct iovmm_device {
struct iommu *iommu;
const char *name;
/* List of memory pool it manages */
- struct list_head mmap_pool;
+ struct list_head mmap_pool;
struct mutex dmm_map_lock;
int minor;
struct cdev cdev;
+ int refcount;
};
/* user dmm functions */
-int dmm_user(struct iodmm_struct *obj, u32 pool_id, u32 *da,
- u32 va, size_t bytes, u32 flags);
+int dmm_user(struct iodmm_struct *obj, void __user *args);
+
void user_remove_resources(struct iodmm_struct *obj);
-int user_un_map(struct iodmm_struct *obj, u32 map_addr);
-int proc_begin_dma(struct iodmm_struct *obj, void *pva, u32 ul_size,
- enum dma_data_direction dir);
-int proc_end_dma(struct iodmm_struct *obj, void *pva, u32 ul_size,
- enum dma_data_direction dir);
-int omap_create_dmm_pool(struct iodmm_struct *obj, int pool_id, int size,
- int sa);
-int omap_delete_dmm_pool(struct iodmm_struct *obj, int pool_id);
+
+int user_un_map(struct iodmm_struct *obj, const void __user *args);
+
+int proc_begin_dma(struct iodmm_struct *obj, const void __user *args);
+
+int proc_end_dma(struct iodmm_struct *obj, const void __user *args);
+
+int omap_create_dmm_pool(struct iodmm_struct *obj, const void __user *args);
+
+int omap_delete_dmm_pools(struct iodmm_struct *obj);
+
+int program_tlb_entry(struct iodmm_struct *obj, const void __user *args);
+
+int register_mmufault(struct iodmm_struct *obj, const void __user *args);
+
+int unregister_mmufault(struct iodmm_struct *obj, const void __user *args);
#endif
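All of the reworked entry points above share one convention: the ioctl dispatcher hands down the raw `const void __user *` untouched, and each handler copies in (and, where needed, back out) its own argument block. A minimal sketch of that convention, with a hypothetical handler name; the struct fields match dmm_dma_info as used elsewhere in this patch:

/* Hypothetical handler following the new prototypes above. */
static int example_handler(struct iodmm_struct *obj,
			   const void __user *args)
{
	struct dmm_dma_info info;

	if (copy_from_user(&info, args, sizeof(info)))
		return -EFAULT;

	/* ... validate and act on info.pva / info.ul_size / info.dir ... */
	return 0;
}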
diff --git a/arch/arm/plat-omap/include/plat/ipu_dev.h b/arch/arm/plat-omap/include/plat/ipu_dev.h
index 4b24f3bc14f2..c7b7959c38b2 100644
--- a/arch/arm/plat-omap/include/plat/ipu_dev.h
+++ b/arch/arm/plat-omap/include/plat/ipu_dev.h
@@ -90,6 +90,8 @@ struct ipu_pm_dev {
struct cdev cdev;
};
+extern int ipu_pm_first_dev;
+
extern int ipu_pm_module_start(unsigned rsrc);
extern int ipu_pm_module_stop(unsigned rsrc);
extern int ipu_pm_module_set_rate(unsigned rsrc,
diff --git a/arch/arm/plat-omap/include/syslink/notifydefs.h b/arch/arm/plat-omap/include/syslink/notifydefs.h
index b0df5d536168..e04b76382ff8 100644
--- a/arch/arm/plat-omap/include/syslink/notifydefs.h
+++ b/arch/arm/plat-omap/include/syslink/notifydefs.h
@@ -86,6 +86,8 @@ struct notify_object {
/* List of event callbacks registered */
struct list_head event_list[NOTIFY_MAXEVENTS];
/* List of event listeners registered */
+	/* Lock for event_list */
+	struct mutex lock;
};
diff --git a/arch/arm/plat-omap/iodmm.c b/arch/arm/plat-omap/iodmm.c
index 49eb0e40a6d1..a193c70c7c1d 100644
--- a/arch/arm/plat-omap/iodmm.c
+++ b/arch/arm/plat-omap/iodmm.c
@@ -6,6 +6,9 @@
* Authors: Ramesh Gupta <grgupta@ti.com>
* Hari Kanigeri <h-kanigeri2@ti.com>
*
+ * dma_map API usage in this code is inspired from Ohad Ben-Cohen's
+ * implementation in dspbridge code.
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
@@ -28,6 +31,7 @@
#include <linux/pagemap.h>
#include <linux/kernel.h>
#include <linux/genalloc.h>
+#include <linux/eventfd.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
@@ -113,10 +117,48 @@ int temp_user_dma_op(unsigned long start, unsigned long end, int op)
} while (start < end);
up_read(&mm->mmap_sem);
+
return 0;
}
#endif
+static inline struct gen_pool *get_pool_handle(struct iovmm_device *iovmm_obj,
+ int pool_id)
+{
+ struct iovmm_pool *pool;
+
+ list_for_each_entry(pool, &iovmm_obj->mmap_pool, list) {
+ if (pool->pool_id == pool_id)
+ return pool->genpool;
+ }
+ return NULL;
+}
+
+/*
+ * This function walks through the page tables to convert a userland
+ * virtual address to physical address
+ */
+static u32 __user_va2_pa(struct mm_struct *mm, u32 address)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+
+ pgd = pgd_offset(mm, address);
+ if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
+ pmd = pmd_offset(pgd, address);
+ if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
+ ptep = pte_offset_map(pmd, address);
+ if (ptep) {
+ pte = *ptep;
+ if (pte_present(pte))
+ return pte & PAGE_MASK;
+ }
+ }
+ }
+ return 0;
+}
+
/* remember mapping information */
static struct dmm_map_object *add_mapping_info(struct iodmm_struct *obj,
struct gen_pool *gen_pool, u32 va, u32 da, u32 size)
@@ -222,6 +264,13 @@ static int match_containing_map_obj(struct dmm_map_object *map_obj,
return res;
}
+/**
+ * Find the mapping object based on either MPU virtual address or
+ * Device virtual address. Which option to select to search for the mapping
+ * is specified with check_va flag. check_va is set to TRUE if search is
+ * based on MPU virtual address and FALSE if search is based on Device
+ * virtual address
+ */
static struct dmm_map_object *find_containing_mapping(
struct iodmm_struct *obj,
u32 va, u32 da, bool check_va,
@@ -283,13 +332,36 @@ static int find_first_page_in_cache(struct dmm_map_object *map_obj,
/* Cache operation against kernel address instead of users */
static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start,
- ssize_t len, int pg_i)
+ size_t len)
{
struct page *page;
unsigned long offset;
ssize_t rest;
int ret = 0, i = 0;
- struct scatterlist *sg = map_obj->dma_info.sg;
+ unsigned long first_data_page = start >> PAGE_SHIFT;
+ unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
+ /* calculating the number of pages this area spans */
+ unsigned long num_pages = last_data_page - first_data_page + 1;
+ struct scatterlist *sg;
+ int pg_i;
+
+ sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
+ if (!sg) {
+ pr_err("%s: kcalloc failed\n", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ sg_init_table(sg, num_pages);
+
+ /* cleanup a previous sg allocation */
+ /* this may happen if application doesn't signal for e/o DMA */
+ kfree(map_obj->dma_info.sg);
+
+ map_obj->dma_info.sg = sg;
+ map_obj->dma_info.num_pages = num_pages;
+
+ pg_i = find_first_page_in_cache(map_obj, start);
while (len) {
page = get_mapping_page(map_obj, pg_i);
@@ -335,16 +407,22 @@ static int memory_regain_ownership(struct device *dev,
/* calculating the number of pages this area spans */
unsigned long num_pages = last_data_page - first_data_page + 1;
struct device_dma_map_info *dma_info = &map_obj->dma_info;
+ long pg_i;
if (!dma_info->sg)
goto out;
- if (dma_info->dir != dir || dma_info->num_pages != num_pages) {
- pr_err("%s: dma info doesn't match given params\n", __func__);
+ if (num_pages > dma_info->num_pages) {
+ pr_err("%s: dma info params invalid\n", __func__);
return -EINVAL;
}
- dma_unmap_sg(dev, dma_info->sg, num_pages, dma_info->dir);
+ pg_i = find_first_page_in_cache(map_obj, start);
+ if (pg_i == -1) {
+ ret = -EFAULT;
+ goto out;
+ }
+ dma_unmap_sg(dev, (dma_info->sg), num_pages, dir);
pr_debug("%s: dma_map_sg unmapped\n", __func__);
@@ -357,127 +435,111 @@ static int memory_give_ownership(struct device *dev,
struct dmm_map_object *map_obj, unsigned long start,
ssize_t len, enum dma_data_direction dir)
{
- int pg_i, ret, sg_num;
- struct scatterlist *sg;
+ int ret, sg_num;
+ struct device_dma_map_info *dma_info = &map_obj->dma_info;
unsigned long first_data_page = start >> PAGE_SHIFT;
unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
/* calculating the number of pages this area spans */
unsigned long num_pages = last_data_page - first_data_page + 1;
+ long pg_i;
pg_i = find_first_page_in_cache(map_obj, start);
- if (pg_i < 0) {
- pr_err("%s: failed to find first page in cache\n", __func__);
- ret = -EINVAL;
- goto out;
- }
-
- sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
- if (!sg) {
- pr_err("%s: kcalloc failed\n", __func__);
- ret = -ENOMEM;
+ if (pg_i == -1) {
+ ret = -EFAULT;
goto out;
}
- sg_init_table(sg, num_pages);
-
- /* cleanup a previous sg allocation */
- /* this may happen if application doesn't signal for e/o DMA */
- kfree(map_obj->dma_info.sg);
-
- map_obj->dma_info.sg = sg;
- map_obj->dma_info.dir = dir;
- map_obj->dma_info.num_pages = num_pages;
-
- ret = build_dma_sg(map_obj, start, len, pg_i);
- if (ret)
- goto kfree_sg;
-
- sg_num = dma_map_sg(dev, sg, num_pages, dir);
+ sg_num = dma_map_sg(dev, (dma_info->sg), num_pages, dir);
if (sg_num < 1) {
pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num);
ret = -EFAULT;
- goto kfree_sg;
+ goto out;
}
pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num);
- map_obj->dma_info.sg_num = sg_num;
return 0;
-
-kfree_sg:
- kfree(sg);
- map_obj->dma_info.sg = NULL;
out:
return ret;
}
#endif
-int proc_begin_dma(struct iodmm_struct *obj, void *pva, u32 ul_size,
- enum dma_data_direction dir)
+int proc_begin_dma(struct iodmm_struct *obj, const void __user *args)
{
int status = 0;
+ struct dmm_dma_info dma_info;
#ifdef CONFIG_DMM_DMA_API
- u32 va_align;
struct dmm_map_object *map_obj;
- struct device *dev = obj->iovmm->iommu->dev;
- va_align = round_down((u32)pva, PAGE_SIZE);
+ struct device *dev;
+
+ if (copy_from_user(&dma_info, (void __user *)args,
+ sizeof(struct dmm_dma_info)))
+ return -EFAULT;
+ dev = obj->iovmm->iommu->dev;
mutex_lock(&obj->iovmm->dmm_map_lock);
pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
- (u32)va_align,
- ul_size, dir);
+ (u32)dma_info.pva,
+ dma_info.ul_size, dma_info.dir);
	/* find requested memory area in cached mapping information */
- map_obj = find_containing_mapping(obj, (u32) va_align, 0, true,
- ul_size);
+ map_obj = find_containing_mapping(obj, (u32)dma_info.pva, 0, true,
+ dma_info.ul_size);
if (!map_obj) {
pr_err("%s: find_containing_mapping failed\n", __func__);
status = -EFAULT;
goto err_out;
}
- if (memory_give_ownership(dev, map_obj, (u32)pva, ul_size, dir)) {
+ if (memory_give_ownership(dev, map_obj, (u32)dma_info.pva,
+ dma_info.ul_size, dma_info.dir)) {
pr_err("%s: InValid address parameters %x %x\n",
- __func__, va_align, ul_size);
+ __func__, (u32)dma_info.pva, dma_info.ul_size);
status = -EFAULT;
}
err_out:
mutex_unlock(&obj->iovmm->dmm_map_lock);
#else
-
- u32 end = (u32)pva + ul_size;
- status = temp_user_dma_op((u32)pva, end, 3);
+ if (copy_from_user(&dma_info, (void __user *)args,
+ sizeof(struct dmm_dma_info)))
+ return -EFAULT;
+ status = temp_user_dma_op((u32)dma_info.pva,
+ (u32)dma_info.pva + dma_info.ul_size, 3);
#endif
return status;
}
-int proc_end_dma(struct iodmm_struct *obj, void *pva, u32 ul_size,
- enum dma_data_direction dir)
+int proc_end_dma(struct iodmm_struct *obj, const void __user *args)
{
int status = 0;
+ struct dmm_dma_info dma_info;
#ifdef CONFIG_DMM_DMA_API
- u32 va_align;
+ struct device *dev;
struct dmm_map_object *map_obj;
- struct device *dev = obj->iovmm->iommu->dev;
- va_align = round_down((u32)pva, PAGE_SIZE);
+
+ if (copy_from_user(&dma_info, (void __user *)args,
+ sizeof(struct dmm_dma_info)))
+ return -EFAULT;
+ dev = obj->iovmm->iommu->dev;
pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
- (u32)va_align,
- ul_size, dir);
+ (u32)dma_info.pva,
+ dma_info.ul_size, dma_info.dir);
mutex_lock(&obj->iovmm->dmm_map_lock);
	/* find requested memory area in cached mapping information */
- map_obj = find_containing_mapping(obj, (u32) va_align, 0, true,
- ul_size);
+ map_obj = find_containing_mapping(obj, (u32)dma_info.pva, 0, true,
+ dma_info.ul_size);
if (!map_obj) {
pr_err("%s: find_containing_mapping failed\n", __func__);
status = -EFAULT;
goto err_out;
}
- if (memory_regain_ownership(dev, map_obj, (u32)pva, ul_size, dir)) {
+ if (memory_regain_ownership(dev, map_obj, (u32)dma_info.pva,
+ dma_info.ul_size, dma_info.dir)) {
pr_err("%s: InValid address parameters %p %x\n",
- __func__, pva, ul_size);
+ __func__, dma_info.pva, dma_info.ul_size);
status = -EFAULT;
goto err_out;
}
@@ -485,13 +547,117 @@ int proc_end_dma(struct iodmm_struct *obj, void *pva, u32 ul_size,
err_out:
mutex_unlock(&obj->iovmm->dmm_map_lock);
#else
- u32 end = (u32)pva + ul_size;
- status = temp_user_dma_op((u32)pva, end, 1);
+ if (copy_from_user(&dma_info, (void __user *)args,
+ sizeof(struct dmm_dma_info)))
+ return -EFAULT;
+ status = temp_user_dma_op((u32)dma_info.pva,
+ (u32)dma_info.pva + dma_info.ul_size, 1);
#endif
return status;
}
/**
+ * user_to_device_unmap() - unmaps a device virtual buffer.
+ * @mmu:	Pointer to iommu handle.
+ * @da:		device (DSP) virtual address
+ * @size:	size of the mapping to clear
+ *
+ * This function unmaps a user-space buffer from the device virtual
+ * address space.
+ */
+static int user_to_device_unmap(struct iommu *mmu, u32 da, unsigned size)
+{
+ unsigned total = size;
+ unsigned start = da;
+
+ while (total > 0) {
+ size_t bytes;
+ bytes = iopgtable_clear_entry(mmu, start);
+ if (bytes == 0)
+ bytes = PAGE_SIZE;
+ else
+ dev_dbg(mmu->dev, "%s: unmap 0x%x 0x%x\n",
+ __func__, start, bytes);
+ BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+ total -= bytes;
+ start += bytes;
+ }
+ return 0;
+}
+
+static int __user_un_map(struct iodmm_struct *obj, u32 map_addr)
+{
+ int status = 0;
+ u32 va_align;
+ u32 size_align;
+ struct dmm_map_object *map_obj;
+ int i;
+ struct page *pg;
+
+ va_align = round_down(map_addr, PAGE_SIZE);
+
+ mutex_lock(&obj->iovmm->dmm_map_lock);
+ /*
+ * Update DMM structures. Get the size to unmap.
+ * This function returns error if the VA is not mapped
+ */
+	/* find requested memory area in cached mapping information */
+ map_obj = find_containing_mapping(obj, 0, map_addr, false, 0);
+ if (!map_obj)
+ goto err;
+ size_align = map_obj->size;
+ /* Remove mapping from the page tables. */
+ status = user_to_device_unmap(obj->iovmm->iommu, va_align,
+ size_align);
+ if (status)
+ goto err;
+
+ i = size_align/PAGE_SIZE;
+ while (i--) {
+ pg = map_obj->pages[i];
+ if (pg && pfn_valid(page_to_pfn(pg))) {
+ if (page_count(pg) < 1)
+ pr_info("%s UNMAP FAILURE !!!\n", __func__);
+ else {
+ SetPageDirty(pg);
+ page_cache_release(pg);
+ }
+ }
+ }
+ /*
+ * A successful unmap should be followed by removal of map_obj
+ * from dmm_map_list, so that mapped memory resource tracking
+ * remains uptodate
+ */
+ remove_mapping_information(obj, map_obj->da, map_obj->size);
+err:
+ mutex_unlock(&obj->iovmm->dmm_map_lock);
+ return status;
+}
+
+
+/**
+ * user_un_map - Removes a user's mapped address
+ * @obj:	target dmm object
+ * @args:	mapped address that needs to be unmapped
+ *
+ * Removes the user's DMM buffer mapping.
+ **/
+int user_un_map(struct iodmm_struct *obj, const void __user *args)
+{
+ int status = 0;
+ u32 map_addr;
+
+ if (copy_from_user(&map_addr, (void __user *)args, sizeof(u32)))
+ return -EFAULT;
+
+ status = __user_un_map(obj, map_addr);
+ if (status)
+ pr_err("%s:Unmap of buffer 0x%x failedn", __func__, map_addr);
+
+ return status;
+}
+
+/**
* user_to_device_map() - maps user to dsp virtual address
* @mmu: Pointer to iommu handle.
* @uva: Virtual user space address.
@@ -562,19 +728,8 @@ static int user_to_device_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
break;
}
}
- return res;
-}
-
-static inline struct gen_pool *get_pool_handle(struct iovmm_device *iovmm_obj,
- int pool_id)
-{
- struct iovmm_pool *pool;
- list_for_each_entry(pool, &iovmm_obj->mmap_pool, list) {
- if (pool->pool_id == pool_id)
- return pool->genpool;
- }
- return NULL;
+ return res;
}
/**
@@ -597,7 +752,6 @@ static int phys_to_device_map(struct iodmm_struct *obj,
struct dmm_map_object *dmm_obj;
int da;
u32 all_bits;
- u32 num_bytes = bytes;
int err = 0;
u32 pg_size[] = {SZ_16M, SZ_1M, SZ_64K, SZ_4K};
int size_flag[] = {MMU_CAM_PGSZ_16M, MMU_CAM_PGSZ_1M,
@@ -605,7 +759,7 @@ static int phys_to_device_map(struct iodmm_struct *obj,
int i;
struct gen_pool *gen_pool;
- if (!num_bytes) {
+ if (!bytes) {
err = -EINVAL;
goto exit;
}
@@ -620,34 +774,34 @@ static int phys_to_device_map(struct iodmm_struct *obj,
gen_pool = get_pool_handle(obj->iovmm, pool_id);
if (gen_pool) {
da = gen_pool_alloc(gen_pool, bytes);
- *mapped_addr = (da | ((u32)pa & (PAGE_SIZE - 1)));
+ *mapped_addr = (da | (pa & (PAGE_SIZE - 1)));
} else {
err = -EFAULT;
goto exit;
}
}
- dmm_obj = add_mapping_info(obj, gen_pool, pa, *mapped_addr, num_bytes);
+ dmm_obj = add_mapping_info(obj, gen_pool, pa, *mapped_addr, bytes);
if (dmm_obj == NULL) {
err = -ENODEV;
goto err_add_map;
}
- while (num_bytes) {
+ while (bytes) {
/*
* To find the max. page size with which both PA & VA are
* aligned
*/
all_bits = pa | da;
for (i = 0; i < 4; i++) {
- if ((num_bytes >= pg_size[i]) && ((all_bits &
+ if ((bytes >= pg_size[i]) && ((all_bits &
(pg_size[i] - 1)) == 0)) {
iotlb_init_entry(&e, da, pa,
size_flag[i] |
MMU_RAM_ENDIAN_LITTLE |
MMU_RAM_ELSZ_32);
iopgtable_store_entry(obj->iovmm->iommu, &e);
- num_bytes -= pg_size[i];
+ bytes -= pg_size[i];
da += pg_size[i];
pa += pg_size[i];
break;
@@ -663,86 +817,32 @@ exit:
}
/**
- * user_to_device_unmap() - unmaps Device virtual buffer.
- * @mmu: Pointer to iommu handle.
- * @da DSP address
- *
- * This function unmaps a user space buffer into DSP virtual address.
- *
- */
-static int user_to_device_unmap(struct iommu *mmu, u32 da, unsigned size)
-{
- unsigned total = size;
- unsigned start = da;
-
- while (total > 0) {
- size_t bytes;
- bytes = iopgtable_clear_entry(mmu, start);
- if (bytes == 0)
- bytes = PAGE_SIZE;
- else
- dev_dbg(mmu->dev, "%s: unmap 0x%x 0x%x\n",
- __func__, start, bytes);
- BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
- total -= bytes;
- start += bytes;
- }
- return 0;
-}
-
-/*
- * ======== user_va2_pa ========
- * Purpose:
- * This function walks through the page tables to convert a userland
- * virtual address to physical address
- */
-static u32 user_va2_pa(struct mm_struct *mm, u32 address)
-{
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *ptep, pte;
-
- pgd = pgd_offset(mm, address);
- if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
- pmd = pmd_offset(pgd, address);
- if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
- ptep = pte_offset_map(pmd, address);
- if (ptep) {
- pte = *ptep;
- if (pte_present(pte))
- return pte & PAGE_MASK;
- }
- }
- }
- return 0;
-}
-
-/**
* dmm_user - Maps user buffer to Device address
* @obj: target dmm object
- * @pool_id: DMM pool id
- * @da: Mapped Device Address
- * @va: User virtual Address
- * @bytes Size of the buffer to be mapped
- * flags flags on how to interpret user buffer
+ * @args: DMM map information
*
* Maps given user buffer to Device address
**/
-int dmm_user(struct iodmm_struct *obj, u32 pool_id, u32 *da,
- u32 va, size_t bytes, u32 flags)
+int dmm_user(struct iodmm_struct *obj, void __user *args)
{
struct gen_pool *gen_pool;
struct dmm_map_object *dmm_obj;
struct iovmm_device *iovmm_obj = obj->iovmm;
- u32 pa_align, da_align, size_align, tmp_addr;
+ u32 addr_align, da_align, size_align, tmp_addr;
int err = 0;
int i, num_of_pages;
struct page *pg;
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
u32 io_addr;
+ struct dmm_map_info map_info;
struct iotlb_entry e;
+
+ if (copy_from_user(&map_info, (void __user *)args,
+ sizeof(struct dmm_map_info)))
+ return -EFAULT;
+
/*
* Important Note: va is mapped from user application process
* to current process - it must lie completely within the current
@@ -751,8 +851,9 @@ int dmm_user(struct iodmm_struct *obj, u32 pool_id, u32 *da,
down_read(&mm->mmap_sem);
/* Calculate the page-aligned PA, VA and size */
- pa_align = round_down((u32) va, PAGE_SIZE);
- size_align = round_up(bytes + va - pa_align, PAGE_SIZE);
+ addr_align = round_down((u32) map_info.mpu_addr, PAGE_SIZE);
+ size_align = round_up(map_info.size + map_info.mpu_addr - addr_align,
+ PAGE_SIZE);
mutex_lock(&iovmm_obj->dmm_map_lock);
@@ -761,18 +862,19 @@ int dmm_user(struct iodmm_struct *obj, u32 pool_id, u32 *da,
	 * specified with pool_id as -1, so the da is interpreted
	 * as the device address.
*/
- if (flags == DMM_DA_PHYS) {
- err = phys_to_device_map(obj, pool_id, da, pa_align,
- size_align, flags);
- goto err;
+ if (map_info.flags == DMM_DA_PHYS) {
+ err = phys_to_device_map(obj, map_info.pool_id, map_info.da,
+ addr_align, size_align, map_info.flags);
+ goto exit;
}
- vma = find_vma(mm, va);
+ vma = find_vma(mm, map_info.mpu_addr);
if (vma) {
dev_dbg(iovmm_obj->iommu->dev,
"VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
- "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", va,
- bytes, vma->vm_start, vma->vm_end,
+ "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n",
+ map_info.mpu_addr,
+ map_info.size, vma->vm_start, vma->vm_end,
vma->vm_flags);
}
/*
@@ -780,55 +882,57 @@ int dmm_user(struct iodmm_struct *obj, u32 pool_id, u32 *da,
* spread across several VMAs. So loop through and check if the entire
* user buffer is covered
*/
- while ((vma) && (va + bytes > vma->vm_end)) {
+ while ((vma) && (map_info.mpu_addr + map_info.size > vma->vm_end)) {
/* jump to the next VMA region */
vma = find_vma(mm, vma->vm_end + 1);
dev_dbg(iovmm_obj->iommu->dev,
"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
- "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", va,
- bytes, vma->vm_start, vma->vm_end,
+ "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n",
+ map_info.mpu_addr,
+ map_info.size, vma->vm_start, vma->vm_end,
vma->vm_flags);
}
if (!vma) {
pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
- __func__, va, bytes);
+ __func__, map_info.mpu_addr, map_info.size);
err = -EINVAL;
- goto err;
+ goto exit;
}
/*
* If user provided anonymous address, then don't allocate it from
* from genpool
*/
- if (flags == DMM_DA_ANON) {
+ if (map_info.flags == DMM_DA_ANON) {
gen_pool = NULL;
- da_align = round_down(*da, PAGE_SIZE);
+ da_align = round_down((u32)map_info.da, PAGE_SIZE);
} else {
/* search through the list of available pools to
* pool handle
*/
- gen_pool = get_pool_handle(iovmm_obj, pool_id);
+ gen_pool = get_pool_handle(iovmm_obj, map_info.pool_id);
if (gen_pool)
da_align = gen_pool_alloc(gen_pool, size_align);
else {
err = -EFAULT;
- goto err;
+ goto exit;
}
}
/* Mapped address = MSB of VA | LSB of PA */
- tmp_addr = (da_align | ((u32)va & (PAGE_SIZE - 1)));
- dmm_obj = add_mapping_info(obj, gen_pool, pa_align, tmp_addr,
+ tmp_addr = (da_align | ((u32)map_info.mpu_addr & (PAGE_SIZE - 1)));
+ dmm_obj = add_mapping_info(obj, gen_pool, map_info.mpu_addr, tmp_addr,
size_align);
- *da = tmp_addr;
if (!dmm_obj)
- goto err;
+ goto exit;
+
+ *map_info.da = tmp_addr;
/* Mapping the IO buffers */
if (vma->vm_flags & VM_IO) {
num_of_pages = size_align/PAGE_SIZE;
for (i = 0; i < num_of_pages; i++) {
- io_addr = user_va2_pa(current->mm, pa_align);
+ io_addr = __user_va2_pa(current->mm, addr_align);
pg = phys_to_page(io_addr);
iotlb_init_entry(&e, da_align, io_addr,
@@ -837,82 +941,47 @@ int dmm_user(struct iodmm_struct *obj, u32 pool_id, u32 *da,
MMU_RAM_ELSZ_32);
iopgtable_store_entry(obj->iovmm->iommu, &e);
da_align += PAGE_SIZE;
- pa_align += PAGE_SIZE;
+ addr_align += PAGE_SIZE;
dmm_obj->pages[i] = pg;
}
err = 0;
- goto err;
+ goto exit;
}
/* Mapping the Userspace buffer */
- err = user_to_device_map(iovmm_obj->iommu, pa_align,
+ err = user_to_device_map(iovmm_obj->iommu, addr_align,
da_align, size_align, dmm_obj->pages);
- if (err)
- remove_mapping_information(obj, tmp_addr, size_align);
-err:
+ if (err) {
+ /* clean the entries that were mapped */
+ __user_un_map(obj, tmp_addr);
+ goto exit;
+ }
+#ifdef CONFIG_DMM_DMA_API
+ /*
+ * Build the SG list that would be required for dma map and
+ * unmap APIs
+ */
+ err = build_dma_sg(dmm_obj, map_info.mpu_addr, map_info.size);
+ if (!err) {
+ /*
+ * calling dma_map_sg(cache flush) is essential for
+ * dma_unmap_sg to work since the sg->dma_address required
+ * for dma_unmap_sg is built during dma_map_sg call.
+ */
+ err = memory_give_ownership(iovmm_obj->iommu->dev, dmm_obj,
+ map_info.mpu_addr, map_info.size, DMA_BIDIRECTIONAL);
+ }
+#endif
+
+exit:
+	if (copy_to_user((void __user *)args, &map_info,
+				sizeof(struct dmm_map_info)))
+		err = -EFAULT;
mutex_unlock(&iovmm_obj->dmm_map_lock);
up_read(&mm->mmap_sem);
return err;
}
/**
- * user_un_map - Removes User's mapped address
- * @obj: target dmm object
- * @map_addr Mapped address that needs to be unmapped
- *
- * removes user's dmm buffer mapping
- **/
-int user_un_map(struct iodmm_struct *obj, u32 map_addr)
-{
- int status = 0;
- u32 va_align;
- u32 size_align;
- struct dmm_map_object *map_obj;
- int i;
- struct page *pg;
-
- va_align = round_down(map_addr, PAGE_SIZE);
-
- mutex_lock(&obj->iovmm->dmm_map_lock);
- /*
- * Update DMM structures. Get the size to unmap.
- * This function returns error if the VA is not mapped
- */
- /* find requested memory are in cached mapping information */
- map_obj = find_containing_mapping(obj, 0, map_addr, false, 0);
- if (!map_obj)
- goto err;
- size_align = map_obj->size;
- /* Remove mapping from the page tables. */
- status = user_to_device_unmap(obj->iovmm->iommu, va_align,
- size_align);
- if (status)
- goto err;
-
- i = size_align/PAGE_SIZE;
- while (i--) {
- pg = map_obj->pages[i];
- if (pg && pfn_valid(page_to_pfn(pg))) {
- if (page_count(pg) < 1)
- pr_info("%s UNMAP FAILURE !!!\n", __func__);
- else {
- SetPageDirty(pg);
- page_cache_release(pg);
- }
- }
- }
- /*
- * A successful unmap should be followed by removal of map_obj
- * from dmm_map_list, so that mapped memory resource tracking
- * remains uptodate
- */
- remove_mapping_information(obj, map_obj->da, map_obj->size);
-err:
- mutex_unlock(&obj->iovmm->dmm_map_lock);
- return status;
-}
-
-/**
* user_remove_resources - Removes User's dmm resources
* @obj: target dmm object
*
@@ -926,7 +995,7 @@ void user_remove_resources(struct iodmm_struct *obj)
/* Free DMM mapped memory resources */
list_for_each_entry_safe(map_obj, temp_map, &obj->map_list, link) {
- status = user_un_map(obj, map_obj->da);
+ status = __user_un_map(obj, map_obj->da);
if (status) {
pr_err("%s: proc_un_map failed!"
" status = 0x%x\n", __func__, status);
@@ -937,39 +1006,39 @@ void user_remove_resources(struct iodmm_struct *obj)
/**
* omap_create_dmm_pool - Create DMM pool
* @obj: target dmm object
- * @pool_id pool id to assign to the pool
- * @size Size of the pool
- * @sa Starting Address of the Virtual pool
+ * @args pool information
**/
-int omap_create_dmm_pool(struct iodmm_struct *obj, int pool_id, int size,
- int sa)
+int omap_create_dmm_pool(struct iodmm_struct *obj, const void __user *args)
{
struct iovmm_pool *pool;
struct iovmm_device *iovmm = obj->iovmm;
+ struct iovmm_pool_info pool_info;
+
+ if (copy_from_user(&pool_info, args, sizeof(struct iovmm_pool_info)))
+ return -EFAULT;
- pool = kzalloc(sizeof(struct iovmm_pool), GFP_ATOMIC);
+ pool = kzalloc(sizeof(struct iovmm_pool), GFP_KERNEL);
if (!pool)
- goto err_out;
+		return -ENOMEM;
+
+ pool->pool_id = pool_info.pool_id;
+ pool->da_begin = pool_info.da_begin;
+ pool->da_end = pool_info.da_begin + pool_info.size;
- pool->pool_id = pool_id;
- pool->da_begin = sa;
- pool->da_end = sa + size;
 	pool->genpool = gen_pool_create(12, -1);
+	if (!pool->genpool) {
+		kfree(pool);
+		return -ENOMEM;
+	}
- gen_pool_add(pool->genpool, pool->da_begin, size, -1);
+ gen_pool_add(pool->genpool, pool->da_begin, pool_info.size, -1);
+
INIT_LIST_HEAD(&pool->list);
list_add_tail(&pool->list, &iovmm->mmap_pool);
- return 0;
-err_out:
- return -ENOMEM;
+ return 0;
}
/**
- * omap_delete_dmm_pool - Delete DMM pool
+ * omap_delete_dmm_pools - Delete all DMM pools
* @obj: target dmm object
- * @pool_id pool id to delete
**/
-int omap_delete_dmm_pool(struct iodmm_struct *obj, int pool_id)
+int omap_delete_dmm_pools(struct iodmm_struct *obj)
{
struct iovmm_pool *pool;
struct iovmm_device *iovmm_obj = obj->iovmm;
@@ -977,14 +1046,88 @@ int omap_delete_dmm_pool(struct iodmm_struct *obj, int pool_id)
list_for_each_safe(_pool, _next_pool, &iovmm_obj->mmap_pool) {
pool = list_entry(_pool, struct iovmm_pool, list);
- if (pool->pool_id == pool_id) {
- gen_pool_destroy(pool->genpool);
- list_del(&pool->list);
- kfree(pool);
- return 0;
+ gen_pool_destroy(pool->genpool);
+ list_del(&pool->list);
+ kfree(pool);
+ }
+
+ return 0;
+}
+
+/**
+ * register_mmufault - Register for MMU fault notification
+ * @obj: target dmm object
+ * @args: Eventfd information
+ *
+ * Registers for MMU fault event notification.
+ **/
+int register_mmufault(struct iodmm_struct *obj, const void __user *args)
+{
+ int fd;
+ struct iommu_event_ntfy *fd_reg;
+
+ if (copy_from_user(&fd, args, sizeof(int)))
+ return -EFAULT;
+
+	fd_reg = kzalloc(sizeof(struct iommu_event_ntfy), GFP_KERNEL);
+	if (!fd_reg)
+		return -ENOMEM;
+	fd_reg->fd = fd;
+ fd_reg->evt_ctx = eventfd_ctx_fdget(fd);
+ INIT_LIST_HEAD(&fd_reg->list);
+ spin_lock_irq(&obj->iovmm->iommu->event_lock);
+ list_add_tail(&fd_reg->list, &obj->iovmm->iommu->event_list);
+ spin_unlock_irq(&obj->iovmm->iommu->event_lock);
+
+ return 0;
+}
+
+/**
+ * unregister_mmufault - Unregister for MMU fault notification
+ * @obj: target dmm object
+ * @args: Eventfd information
+ *
+ * Unregisters from MMU fault event notification.
+ **/
+int unregister_mmufault(struct iodmm_struct *obj, const void __user *args)
+{
+ int fd;
+ struct iommu_event_ntfy *fd_reg, *temp_reg;
+
+ if (copy_from_user(&fd, (void __user *)args, sizeof(int)))
+ return -EFAULT;
+
+ /* Free DMM mapped memory resources */
+ spin_lock_irq(&obj->iovmm->iommu->event_lock);
+ list_for_each_entry_safe(fd_reg, temp_reg,
+ &obj->iovmm->iommu->event_list, list) {
+ if (fd_reg->fd == fd) {
+ list_del(&fd_reg->list);
+ kfree(fd_reg);
}
}
- return -ENODEV;
+ spin_unlock_irq(&obj->iovmm->iommu->event_lock);
+
+ return 0;
+}
+
+/**
+ * program_tlb_entry - Program the IOMMU TLB entry
+ * @obj: target dmm object
+ * @args: TLB entry information
+ *
+ * This function loads the TLB entry that the user specifies.
+ * This function should be used only during remote processor
+ * boot time.
+ **/
+int program_tlb_entry(struct iodmm_struct *obj, const void __user *args)
+{
+ struct iotlb_entry e;
+ int ret;
+
+ if (copy_from_user(&e, args, sizeof(struct iotlb_entry)))
+ return -EFAULT;
+
+ ret = load_iotlb_entry(obj->iovmm->iommu, &e);
+ return ret;
}
MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index d728af8aec6b..abc177d75072 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -400,10 +400,10 @@ u32 iommu_save_tlb_entries(struct iommu *obj)
for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr_tmp) {
iotlb_cr_to_e(&cr_tmp, e);
+ dev_dbg(obj->dev, "%s: %08x %08x %d %d %d", __func__, e->da,
+ e->pa, e->pgsz, e->prsvd,
+ e->valid);
e++;
-
- dev_dbg(obj->dev, "%s: [%02x] %08x %08x\n", __func__,
- i, cr_tmp.cam, cr_tmp.ram);
}
return 0;
@@ -429,17 +429,17 @@ u32 iommu_restore_tlb_entries(struct iommu *obj)
goto error;
for (i = 0; i < obj->nr_tlb_entries; i++) {
- if (!e->valid) {
+ if (!e->prsvd) {
e++;
continue;
}
+ dev_dbg(obj->dev, "%s: %08x %08x %d %d %d", __func__, e->da,
+ e->pa, e->pgsz, e->prsvd,
+ e->valid);
status = load_iotlb_entry(obj, e);
if (status)
goto error;
e++;
-
- dev_dbg(obj->dev, "%s: [%02x] %08x\n", __func__,
- i, e->pa);
}
return 0;
diff --git a/arch/arm/plat-omap/remoteproc.c b/arch/arm/plat-omap/remoteproc.c
index 6b1e28de8493..34b32becf490 100644
--- a/arch/arm/plat-omap/remoteproc.c
+++ b/arch/arm/plat-omap/remoteproc.c
@@ -42,7 +42,7 @@ static dev_t omap_rproc_dev;
static atomic_t num_of_rprocs;
-void rproc_eventfd_ntfy(struct omap_rproc *obj, int event)
+static void rproc_eventfd_ntfy(struct omap_rproc *obj, int event)
{
struct omap_rproc_ntfy *fd_reg;
@@ -177,7 +177,8 @@ static inline int rproc_get_state(struct omap_rproc *rproc)
return pdata->ops->get_state(rproc->dev);
}
-int rproc_reg_user_event(struct omap_rproc *rproc, const void __user *arg)
+static int rproc_reg_user_event(struct omap_rproc *rproc,
+ const void __user *arg)
{
struct omap_rproc_ntfy *fd_reg;
int state;
@@ -224,7 +225,8 @@ int rproc_reg_user_event(struct omap_rproc *rproc, const void __user *arg)
return 0;
}
-int rproc_unreg_user_event(struct omap_rproc *rproc, const void __user *arg)
+static int rproc_unreg_user_event(struct omap_rproc *rproc,
+ const void __user *arg)
{
struct omap_rproc_ntfy *fd_reg, *temp_reg;
struct omap_rproc_reg_event_args args;