author    Ricardo Perez Olivares <x0081762@ti.com>  2010-07-10 01:44:50 -0500
committer Ricardo Perez Olivares <x0081762@ti.com>  2010-07-10 01:44:50 -0500
commit    50058107a0e9175a5117b65e32096574bb5fdf5b (patch)
tree      a7a12d5bedd50d3cd8e526cdd06522493f161494 /drivers
parent    2da90ce1fc438d09f40ea13a175565d3d57bb272 (diff)
parent    717aa928fc96c14e75030e5ea2e8184228909bde (diff)
Merge branch 'syslink_next' of git://dev.omapzoom.org/pub/scm/tisyslink/kernel-syslink into L24x8
Conflicts:
	drivers/Kconfig
	drivers/Makefile

Signed-off-by: Ricardo Perez Olivares <x0081762@ti.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 5
-rw-r--r--  drivers/dsp/syslink/Kconfig | 67
-rw-r--r--  drivers/dsp/syslink/ipu_pm/ipu_pm.c | 1463
-rw-r--r--  drivers/dsp/syslink/ipu_pm/ipu_pm.h | 317
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/Kbuild | 31
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/gate.c | 69
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/gate_remote.c | 40
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/gatehwspinlock.c | 494
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/gatemp.c | 1846
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/gatemp_ioctl.c | 356
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/gatepeterson.c | 1004
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/heap.c | 115
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/heapbufmp.c | 1555
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/heapbufmp_ioctl.c | 459
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/heapmemmp.c | 1669
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/heapmemmp_ioctl.c | 478
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/ipc.c | 1550
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/ipc_drv.c | 240
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/ipc_ioctl.c | 69
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/listmp.c | 1472
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/listmp_ioctl.c | 564
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/messageq.c | 1618
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/messageq_ioctl.c | 566
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/multiproc.c | 301
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/multiproc_ioctl.c | 171
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/nameserver.c | 1540
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/nameserver_ioctl.c | 593
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/nameserver_remote.c | 48
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/nameserver_remotenotify.c | 824
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/platform.c | 1877
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/platform_mem.c | 325
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/sharedregion.c | 1606
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/sharedregion_ioctl.c | 479
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/sysipc_ioctl.c | 207
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/sysmemmgr.c | 459
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/sysmemmgr_ioctl.c | 227
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/sysmgr.c | 846
-rwxr-xr-x  drivers/dsp/syslink/multicore_ipc/transportshm.c | 1160
-rw-r--r--  drivers/dsp/syslink/multicore_ipc/transportshm_setup.c | 205
-rw-r--r--  drivers/dsp/syslink/notify_ducatidriver/notify_ducati.c | 1330
-rw-r--r--  drivers/dsp/syslink/omap_notify/drv_notify.c | 928
-rw-r--r--  drivers/dsp/syslink/omap_notify/notify.c | 1140
-rw-r--r--  drivers/dsp/syslink/omap_notify/notify_driver.c | 186
-rw-r--r--  drivers/dsp/syslink/omap_notify/plat/omap4_notify_setup.c | 165
-rw-r--r--  drivers/dsp/syslink/procmgr/Kbuild | 10
-rw-r--r--  drivers/dsp/syslink/procmgr/proc4430/Kbuild | 10
-rw-r--r--  drivers/dsp/syslink/procmgr/proc4430/dmm4430.c | 356
-rw-r--r--  drivers/dsp/syslink/procmgr/proc4430/dmm4430.h | 50
-rw-r--r--  drivers/dsp/syslink/procmgr/proc4430/ducatienabler.c | 866
-rw-r--r--  drivers/dsp/syslink/procmgr/proc4430/hw_mmu.c | 661
-rw-r--r--  drivers/dsp/syslink/procmgr/proc4430/proc4430.c | 1085
-rw-r--r--  drivers/dsp/syslink/procmgr/proc4430/proc4430.h | 147
-rw-r--r--  drivers/dsp/syslink/procmgr/proc4430/proc4430_drv.c | 401
-rw-r--r--  drivers/dsp/syslink/procmgr/proc4430/proc4430_drvdefs.h | 169
-rw-r--r--  drivers/dsp/syslink/procmgr/procdefs.h | 203
-rw-r--r--  drivers/dsp/syslink/procmgr/processor.c | 398
-rw-r--r--  drivers/dsp/syslink/procmgr/processor.h | 84
-rw-r--r--  drivers/dsp/syslink/procmgr/procmgr.c | 958
-rw-r--r--  drivers/dsp/syslink/procmgr/procmgr_drv.c | 759
-rw-r--r--  drivers/dsp/syslink/procmgr/procmgr_drvdefs.h | 541
61 files changed, 37364 insertions, 0 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 5934034789fc..260a07c38af8 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -112,4 +112,6 @@ source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
+source "drivers/dsp/syslink/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 89ea12e249c5..76c9d154d63d 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -115,3 +115,8 @@ obj-y += platform/
obj-y += ieee802154/
obj-$(CONFIG_DMM_OMAP) += media/
obj-$(CONFIG_TILER_OMAP) += media/
+obj-$(CONFIG_SYSLINK_DUCATI_PM) += dsp/syslink/multicore_ipc/
+obj-$(CONFIG_MPU_SYSLINK_PLATFORM) += dsp/syslink/multicore_ipc/
+obj-$(CONFIG_MPU_SYSLINK_IPC) += dsp/syslink/multicore_ipc/
+obj-$(CONFIG_SYSLINK_PROC) += dsp/syslink/procmgr/
+obj-$(CONFIG_SYSLINK_PROC4430) += dsp/syslink/procmgr/proc4430/
diff --git a/drivers/dsp/syslink/Kconfig b/drivers/dsp/syslink/Kconfig
new file mode 100644
index 000000000000..08964039a4a3
--- /dev/null
+++ b/drivers/dsp/syslink/Kconfig
@@ -0,0 +1,67 @@
+
+
+menuconfig Sys_Link
+ bool "Sys_Link"
+ default y
+
+if Sys_Link
+
+config SYSLINK_PROC
+ tristate "Syslink ProcMgr"
+ default y
+ select OMAP_IOMMU
+ help
+ Syslink Proc manager
+
+config SYSLINK_PROC4430
+ tristate "Proc 4430"
+ depends on SYSLINK_PROC
+ default y
+ help
+ Ducati Proc implementation
+
+config DUCATI_BASEIMAGE_PHYS_ADDR
+ hex "Physical Address where the Ducati is loaded"
+ depends on SYSLINK_PROC4430
+ default 0x9CF00000
+ help
+ Specify the physical address where the Ducati image will be
+ loaded.
+
+config SYSLINK_DUCATI_PM
+ tristate "DUCATI POWER MANAGEMENT"
+ depends on SYSLINK_PROC && SYSLINK_PROC4430
+ default y
+ help
+ Ducati Power Management Implementation
+
+config MPU_SYSLINK_PLATFORM
+ tristate "Syslink Platform Module"
+ default y
+ help
+ Syslink Platform Module
+
+config MPU_SYSLINK_IPC
+ tristate "Syslink IPC Module"
+ depends on SYSLINK_PROC4430
+ default y
+ select OMAP_MBOX_FWK
+ select OMAP_REMOTE_PROC
+ help
+ Syslink IPC Module (includes Notify)
+
+config SYSLINK_USE_SYSMGR
+ bool "Enable SYS MGR setup"
+ depends on MPU_SYSLINK_IPC && SYSLINK_PROC
+ default y
+ help
+ This is the experimental option to enable SYS manager setup
+
+config OMAP_IOMMU_DEBUG_MODULE
+ bool "IOMMU debugging"
+ default y
+endif
+
+
+
+
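For illustration only, not part of the merge: a hex Kconfig symbol such as DUCATI_BASEIMAGE_PHYS_ADDR above becomes a compile-time constant in autoconf.h, which a loader can consume directly. A minimal sketch, assuming a hypothetical helper name and that the reserved region is suitable for ioremap():

/* Hypothetical consumer of CONFIG_DUCATI_BASEIMAGE_PHYS_ADDR (sketch only):
 * map the reserved Ducati base-image region before copying the firmware. */
#include <linux/io.h>

static void __iomem *ducati_map_baseimage(size_t len)
{
	/* CONFIG_DUCATI_BASEIMAGE_PHYS_ADDR defaults to 0x9CF00000 above */
	return ioremap(CONFIG_DUCATI_BASEIMAGE_PHYS_ADDR, len);
}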
diff --git a/drivers/dsp/syslink/ipu_pm/ipu_pm.c b/drivers/dsp/syslink/ipu_pm/ipu_pm.c
new file mode 100644
index 000000000000..b36e4e721045
--- /dev/null
+++ b/drivers/dsp/syslink/ipu_pm/ipu_pm.c
@@ -0,0 +1,1463 @@
+/*
+ * ipu_pm.c
+ *
+ * IPU Power Management support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <generated/autoconf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <syslink/notify.h>
+#include <syslink/notify_driver.h>
+#include <syslink/notifydefs.h>
+#include <syslink/notify_driverdefs.h>
+#include <syslink/notify_ducatidriver.h>
+
+/* Power Management headers */
+#include <plat/omap_hwmod.h>
+#include <plat/omap_device.h>
+#include <plat/dma.h>
+#include <plat/dmtimer.h>
+#include <plat/clock.h>
+#include <plat/i2c.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/semaphore.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/i2c/twl.h>
+
+/* Module headers */
+#include "ipu_pm.h"
+
+/** ============================================================================
+ * Macros and types
+ * ============================================================================
+ */
+#define A9 3
+#define SYS_M3 2
+#define APP_M3 1
+#define TESLA 0
+
+#define LINE_ID 0
+#define NUM_SELF_PROC 2
+#define PM_VERSION 0x0100
+
+/* FIXME:Values needed for the regulator hack */
+#define VAUX3_CFG_STATE 0x8E
+#define VAUX3_CFG_VOLTAGE 0x8F
+#define VCXIO_CFG_TRANS 0x91
+#define VMMC_CFG_VOLTAGE 0x9B
+#define CAM_2_ENABLE 0xE1
+#define CAM_2_DISABLE 0xE0
+
+/** ============================================================================
+ * Forward declarations of internal functions
+ * ============================================================================
+ */
+
+/* Function to get sdma channels from PRCM */
+static inline int ipu_pm_get_sdma_chan(int proc_id, unsigned rcb_num);
+
+/* Function to get gptimers from PRCM */
+static inline int ipu_pm_get_gptimer(int proc_id, unsigned rcb_num);
+
+/* Function to get i2c buses from PRCM */
+static inline int ipu_pm_get_i2c_bus(int proc_id, unsigned rcb_num);
+
+/* Function to get gpios from PRCM */
+static inline int ipu_pm_get_gpio(int proc_id, unsigned rcb_num);
+
+/* Function to get regulators from PRCM */
+static inline int ipu_pm_get_regulator(int proc_id, unsigned rcb_num);
+
+/* Function to release sdma channels to PRCM */
+static inline int ipu_pm_rel_sdma_chan(int proc_id, unsigned rcb_num);
+
+/* Function to release gptimers to PRCM */
+static inline int ipu_pm_rel_gptimer(int proc_id, unsigned rcb_num);
+
+/* Function to release i2c buses to PRCM */
+static inline int ipu_pm_rel_i2c_bus(int proc_id, unsigned rcb_num);
+
+/* Function to release gpios from PRCM */
+static inline int ipu_pm_rel_gpio(int proc_id, unsigned rcb_num);
+
+/* Function to release regulators to PRCM */
+static inline int ipu_pm_rel_regulator(int proc_id, unsigned rcb_num);
+
+/* Function to get ipu pm object */
+static inline struct ipu_pm_object *ipu_pm_get_handle(int proc_id);
+
+/** ============================================================================
+ * Globals
+ * ============================================================================
+ */
+
+static union message_slicer pm_msg;
+
+static int pm_action_type;
+static int pm_resource_type;
+static int pm_gptimer_num;
+static int pm_gpio_num;
+static int pm_i2c_bus_num;
+static int pm_sdmachan_num;
+static int pm_sdmachan_dummy;
+static int ch, ch_aux;
+static int pm_regulator_num;
+static int return_val;
+static u32 GPTIMER_USE_MASK = 0xFFFF;
+
+static int ipu_timer_list[NUM_IPU_TIMERS] = {
+ GP_TIMER_3,
+ GP_TIMER_4,
+ GP_TIMER_9,
+ GP_TIMER_11};
+
+static struct ipu_pm_object *pm_handle_appm3;
+static struct ipu_pm_object *pm_handle_sysm3;
+
+static struct ipu_pm_module_object ipu_pm_state = {
+ .def_cfg.reserved = 1,
+ .gate_handle = NULL
+};
+
+static struct ipu_pm_params pm_params = {
+ .pm_gpio_counter = 0,
+ .pm_gptimer_counter = 0,
+ .pm_i2c_bus_counter = 0,
+ .pm_sdmachan_counter = 0,
+ .pm_regulator_counter = 0,
+ .shared_addr = NULL,
+ .timeout = 10000,
+ .pm_num_events = NUMBER_PM_EVENTS,
+ .pm_resource_event = PM_RESOURCE,
+ .pm_notification_event = PM_NOTIFICATION,
+ .proc_id = A9,
+ .remote_proc_id = -1,
+ .line_id = 0
+};
+
+/*
+ Function for PM resources Callback
+ *
+ */
+void ipu_pm_callback(u16 proc_id, u16 line_id, u32 event_id,
+ uint *arg, u32 payload)
+{
+ struct rcb_block *rcb_p;
+ struct ipu_pm_object *handle;
+ struct ipu_pm_params *params;
+
+ /* get the handle to proper ipu pm object */
+ handle = ipu_pm_get_handle(proc_id);
+ if (WARN_ON(unlikely(handle == NULL)))
+ return;
+
+ params = handle->params;
+ if (WARN_ON(unlikely(params == NULL)))
+ return;
+
+ /* Get the payload */
+ pm_msg.whole = payload;
+ /* Get pointer to the proper RCB */
+ rcb_p = (struct rcb_block *)
+ &handle->rcb_table->rcb[pm_msg.fields.rcb_num];
+
+ /* Get the type of resource and the actions required */
+ pm_action_type = rcb_p->msg_type;
+ pm_resource_type = rcb_p->sub_type;
+
+ /* Request the resource to PRCM */
+ switch (pm_resource_type) {
+ case SDMA:
+ if (pm_action_type == PM_REQUEST_RESOURCE) {
+ return_val =
+ ipu_pm_get_sdma_chan(proc_id,
+ pm_msg.fields.rcb_num);
+ if (return_val != PM_SUCCESS) {
+ /* Update payload with the failure msg */
+ pm_msg.fields.msg_type = PM_REQUEST_FAIL;
+ pm_msg.fields.parm = return_val;
+ break;
+ }
+ break;
+ }
+ if (pm_action_type == PM_RELEASE_RESOURCE) {
+ return_val =
+ ipu_pm_rel_sdma_chan(proc_id,
+ pm_msg.fields.rcb_num);
+ if (return_val != PM_SUCCESS) {
+ /* Update payload with the failure msg */
+ pm_msg.fields.msg_type = PM_RELEASE_FAIL;
+ pm_msg.fields.parm = return_val;
+ break;
+ }
+ break;
+ }
+ break;
+ case GP_TIMER:
+ if (pm_action_type == PM_REQUEST_RESOURCE) {
+ /* GP Timers 3,4,9 or 11 for Ducati M3 */
+ return_val = ipu_pm_get_gptimer(proc_id,
+ pm_msg.fields.rcb_num);
+ if (return_val != PM_SUCCESS) {
+ /* Update the payload with the failure msg */
+ pm_msg.fields.msg_type = PM_REQUEST_FAIL;
+ pm_msg.fields.parm = return_val;
+ break;
+ }
+ break;
+ }
+ if (pm_action_type == PM_RELEASE_RESOURCE) {
+ return_val =
+ ipu_pm_rel_gptimer(proc_id,
+ pm_msg.fields.rcb_num);
+ if (return_val != PM_SUCCESS) {
+ /* Update the payload with the failure msg */
+ pm_msg.fields.msg_type = PM_RELEASE_FAIL;
+ pm_msg.fields.parm = return_val;
+ break;
+ }
+ break;
+ }
+ break;
+ case GP_IO:
+ if (pm_action_type == PM_REQUEST_RESOURCE) {
+ return_val =
+ ipu_pm_get_gpio(proc_id,
+ pm_msg.fields.rcb_num);
+ if (return_val != PM_SUCCESS) {
+ /* Update the payload with the failure msg */
+ pm_msg.fields.msg_type = PM_REQUEST_FAIL;
+ pm_msg.fields.parm = return_val;
+ break;
+ }
+ break;
+ }
+ if (pm_action_type == PM_RELEASE_RESOURCE) {
+ return_val =
+ ipu_pm_rel_gpio(proc_id,
+ pm_msg.fields.rcb_num);
+ if (return_val != PM_SUCCESS) {
+ /* Update the payload with the failure msg */
+ pm_msg.fields.msg_type = PM_RELEASE_FAIL;
+ pm_msg.fields.parm = return_val;
+ break;
+ }
+ break;
+ }
+ break;
+ case I2C:
+ if (pm_action_type == PM_REQUEST_RESOURCE) {
+ return_val =
+ ipu_pm_get_i2c_bus(proc_id,
+ pm_msg.fields.rcb_num);
+ if (return_val != PM_SUCCESS) {
+ /* i2c bus/clock for Ducati unavailable */
+ /* Update the payload with the failure msg */
+ pm_msg.fields.msg_type = PM_REQUEST_FAIL;
+ pm_msg.fields.parm = return_val;
+ break;
+ }
+ break;
+ }
+ if (pm_action_type == PM_RELEASE_RESOURCE) {
+ return_val =
+ ipu_pm_rel_i2c_bus(proc_id,
+ pm_msg.fields.rcb_num);
+ if (return_val != PM_SUCCESS) {
+ /* i2c bus/clock for Ducati unavailable */
+ /* Update the payload with the failure msg */
+ pm_msg.fields.msg_type = PM_RELEASE_FAIL;
+ pm_msg.fields.parm = return_val;
+ break;
+ }
+ break;
+ }
+ break;
+ case REGULATOR:
+ if (pm_action_type == PM_REQUEST_RESOURCE) {
+ return_val =
+ ipu_pm_get_regulator(proc_id,
+ pm_msg.fields.rcb_num);
+ if (return_val != PM_SUCCESS) {
+ /* Regulator unavailable */
+ /* Update the payload with the failure msg */
+ pm_msg.fields.msg_type = PM_REQUEST_FAIL;
+ pm_msg.fields.parm = return_val;
+ break;
+ }
+ break;
+ }
+ if (pm_action_type == PM_RELEASE_RESOURCE) {
+ return_val =
+ ipu_pm_rel_regulator(proc_id,
+ pm_msg.fields.rcb_num);
+ if (return_val != PM_SUCCESS) {
+ /* Update the payload with the failure msg */
+ pm_msg.fields.msg_type = PM_RELEASE_FAIL;
+ pm_msg.fields.parm = return_val;
+ break;
+ }
+ break;
+ }
+ break;
+ case DUCATI:
+ case IVA_HD:
+ case ISS:
+ default:
+ printk(KERN_ERR "Unsupported resource\n");
+ /* Report error to Remote processor */
+ pm_msg.fields.msg_type = PM_FAILURE;
+ pm_msg.fields.parm = PM_UNSUPPORTED;
+ break;
+ }
+
+ /* Update the payload with the reply msg */
+ pm_msg.fields.reply_flag = true;
+
+ /* Update the payload before send */
+ payload = pm_msg.whole;
+
+ /* send the ACK to DUCATI*/
+ return_val = notify_send_event(
+ params->remote_proc_id,/*DUCATI_PROC*/
+ params->line_id,
+ params->pm_resource_event | \
+ (NOTIFY_SYSTEMKEY << 16),
+ payload,
+ true);
+ if (return_val < 0)
+ printk(KERN_ERR "ERROR SENDING PM EVENT\n");
+}
+EXPORT_SYMBOL(ipu_pm_callback);
+
+/*
+ Function for PM notifications Callback
+ *
+ */
+void ipu_pm_notify_callback(u16 proc_id, u16 line_id, u32 event_id,
+ uint *arg, u32 payload)
+{
+ /**
+ * Post semaphore based on event_type (payload);
+ * IPU has already finished the process for the
+ * notification
+ */
+ /* Get the payload */
+ struct ipu_pm_object *handle;
+ /* get the handle to proper ipu pm object */
+ handle = ipu_pm_get_handle(proc_id);
+ if (WARN_ON(unlikely(handle == NULL)))
+ return;
+
+ pm_msg.whole = payload;
+ switch (pm_msg.fields.msg_subtype) {
+ case PM_SUSPEND:
+ up(&handle->pm_event[PM_SUSPEND].sem_handle);
+ break;
+ case PM_RESUME:
+ up(&handle->pm_event[PM_RESUME].sem_handle);
+ break;
+ case PM_OTHER:
+ up(&handle->pm_event[PM_OTHER].sem_handle);
+ break;
+ }
+}
+EXPORT_SYMBOL(ipu_pm_notify_callback);
+
+/*
+ Function for send PM Notifications
+ *
+ */
+int ipu_pm_notifications(enum pm_event_type event_type)
+{
+ /**
+ * Function called by the Linux driver.
+ * Receives event_type: Suspend, Resume, others...
+ * Sends the event to Ducati,
+ * pends a semaphore based on event_type (payload),
+ * and returns an ACK to the caller.
+ */
+
+ struct ipu_pm_object *handle;
+ struct ipu_pm_params *params;
+ int pm_ack = 0;
+ int i;
+ int proc_id;
+
+ /* get the handle to proper ipu pm object */
+ for (i = 0; i < NUM_SELF_PROC; i++) {
+ proc_id = i + 1;
+ handle = ipu_pm_get_handle(proc_id);
+ if (handle == NULL)
+ continue;
+ params = handle->params;
+ if (params == NULL)
+ continue;
+ switch (event_type) {
+ case PM_SUSPEND:
+ pm_msg.fields.msg_type = PM_NOTIFICATIONS;
+ pm_msg.fields.msg_subtype = PM_SUSPEND;
+ pm_msg.fields.parm = PM_SUCCESS;
+ /* send the request to IPU*/
+ return_val = notify_send_event(
+ params->remote_proc_id,
+ params->line_id,
+ params->pm_notification_event | \
+ (NOTIFY_SYSTEMKEY << 16),
+ (unsigned int)pm_msg.whole,
+ true);
+ if (return_val < 0)
+ printk(KERN_ERR "ERROR SENDING PM EVENT\n");
+ /* wait until event from IPU (ipu_pm_notify_callback)*/
+ return_val = down_timeout
+ (&handle->pm_event[PM_SUSPEND]
+ .sem_handle,
+ msecs_to_jiffies(params->timeout));
+ if (WARN_ON((return_val < 0) ||
+ (pm_msg.fields.parm ==
+ PM_NOTIFICATIONS_FAIL))) {
+ printk(KERN_ERR "Error Suspend\n");
+ pm_ack = EBUSY;
+ }
+ break;
+ case PM_RESUME:
+ pm_msg.fields.msg_type = PM_NOTIFICATIONS;
+ pm_msg.fields.msg_subtype = PM_RESUME;
+ pm_msg.fields.parm = PM_SUCCESS;
+ /* send the request to IPU*/
+ return_val = notify_send_event(
+ params->remote_proc_id,
+ params->line_id,
+ params->pm_notification_event | \
+ (NOTIFY_SYSTEMKEY << 16),
+ (unsigned int)pm_msg.whole,
+ true);
+ if (return_val < 0)
+ printk(KERN_ERR "ERROR SENDING PM EVENT\n");
+ /* wait until event from IPU (ipu_pm_notify_callback)*/
+ return_val = down_timeout
+ (&handle->pm_event[PM_RESUME]
+ .sem_handle,
+ msecs_to_jiffies(params->timeout));
+ if (WARN_ON((return_val < 0) ||
+ (pm_msg.fields.parm ==
+ PM_NOTIFICATIONS_FAIL))) {
+ printk(KERN_ERR "Error Resume\n");
+ pm_ack = EBUSY;
+ }
+ break;
+ case PM_OTHER:
+ pm_msg.fields.msg_type = PM_NOTIFICATIONS;
+ pm_msg.fields.msg_subtype = PM_OTHER;
+ pm_msg.fields.parm = PM_SUCCESS;
+ /* send the request to IPU*/
+ return_val = notify_send_event(
+ params->remote_proc_id,
+ params->line_id,
+ params->pm_notification_event | \
+ (NOTIFY_SYSTEMKEY << 16),
+ (unsigned int)pm_msg.whole,
+ true);
+ if (return_val < 0)
+ printk(KERN_ERR "ERROR SENDING PM EVENT\n");
+ /* wait until event from IPU (ipu_pm_notify_callback)*/
+ return_val = down_timeout
+ (&handle->pm_event[PM_OTHER]
+ .sem_handle,
+ msecs_to_jiffies(params->timeout));
+ if (WARN_ON((return_val < 0) ||
+ (pm_msg.fields.parm ==
+ PM_NOTIFICATIONS_FAIL))) {
+ printk(KERN_ERR "Error Other\n");
+ pm_ack = EBUSY;
+ }
+ break;
+ }
+ }
+ return pm_ack;
+}
+EXPORT_SYMBOL(ipu_pm_notifications);
+
+/*
+ Function to get sdma channels from PRCM
+ *
+ */
+static inline int ipu_pm_get_sdma_chan(int proc_id, unsigned rcb_num)
+{
+ struct ipu_pm_object *handle;
+ struct ipu_pm_params *params;
+ struct rcb_block *rcb_p;
+
+ /* get the handle to proper ipu pm object */
+ handle = ipu_pm_get_handle(proc_id);
+ if (WARN_ON(unlikely(handle == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ params = handle->params;
+ if (WARN_ON(unlikely(params == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ /* Get pointer to the proper RCB */
+ if (WARN_ON((rcb_num < RCB_MIN) || (rcb_num > RCB_MAX)))
+ return PM_INVAL_RCB_NUM;
+ rcb_p = (struct rcb_block *)&handle->rcb_table->rcb[rcb_num];
+ /* Get number of channels from RCB */
+ pm_sdmachan_num = rcb_p->num_chan;
+ if (WARN_ON((pm_sdmachan_num <= 0) ||
+ (pm_sdmachan_num > SDMA_CHANNELS_MAX)))
+ return PM_INVAL_NUM_CHANNELS;
+
+ /* Request resource using PRCM API */
+ for (ch = 0; ch < pm_sdmachan_num; ch++) {
+ return_val = omap_request_dma(proc_id,
+ "ducati-ss",
+ NULL,
+ NULL,
+ &pm_sdmachan_dummy);
+ if (return_val == 0) {
+ params->pm_sdmachan_counter++;
+ rcb_p->channels[ch] = (unsigned char)pm_sdmachan_dummy;
+ } else
+ goto clean_sdma;
+ }
+ return PM_SUCCESS;
+clean_sdma:
+ /* failure, need to free the channels */
+ for (ch_aux = 0; ch_aux < ch; ch_aux++) {
+ pm_sdmachan_dummy = (int)rcb_p->channels[ch_aux];
+ omap_free_dma(pm_sdmachan_dummy);
+ params->pm_sdmachan_counter--;
+ }
+ return PM_INSUFFICIENT_CHANNELS;
+}
+
+/*
+ Function to get gptimers from PRCM
+ *
+ */
+static inline int ipu_pm_get_gptimer(int proc_id, unsigned rcb_num)
+{
+ struct ipu_pm_object *handle;
+ struct ipu_pm_params *params;
+ struct rcb_block *rcb_p;
+ struct omap_dm_timer *p_gpt = NULL;
+ int pm_gp_num;
+
+ /* get the handle to proper ipu pm object */
+ handle = ipu_pm_get_handle(proc_id);
+ if (WARN_ON(unlikely(handle == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ params = handle->params;
+ if (WARN_ON(unlikely(params == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ /* Get pointer to the proper RCB */
+ if (WARN_ON((rcb_num < RCB_MIN) || (rcb_num > RCB_MAX)))
+ return PM_INVAL_RCB_NUM;
+ rcb_p = (struct rcb_block *)&handle->rcb_table->rcb[rcb_num];
+ /* Request resource using PRCM API */
+ for (pm_gp_num = 0; pm_gp_num < NUM_IPU_TIMERS; pm_gp_num++) {
+ if (GPTIMER_USE_MASK & (1 << ipu_timer_list[pm_gp_num])) {
+ p_gpt = omap_dm_timer_request_specific
+ (ipu_timer_list[pm_gp_num]);
+ } else
+ continue;
+ if (p_gpt != NULL) {
+ /* Clear the bit in the usage mask */
+ GPTIMER_USE_MASK &= ~(1 << ipu_timer_list[pm_gp_num]);
+ break;
+ }
+ }
+ if (p_gpt == NULL)
+ return PM_NO_GPTIMER;
+ else {
+ /* Store the gptimer number and base address */
+ rcb_p->fill9 = ipu_timer_list[pm_gp_num];
+ rcb_p->mod_base_addr = (unsigned)p_gpt;
+ params->pm_gptimer_counter++;
+ return PM_SUCCESS;
+ }
+}
+
+/*
+ Function to get an i2c bus
+ *
+ */
+static inline int ipu_pm_get_i2c_bus(int proc_id, unsigned rcb_num)
+{
+ struct ipu_pm_object *handle;
+ struct ipu_pm_params *params;
+ struct rcb_block *rcb_p;
+ struct clk *p_i2c_clk;
+ int i2c_clk_status;
+ char i2c_name[10];
+
+ /* get the handle to proper ipu pm object */
+ handle = ipu_pm_get_handle(proc_id);
+ if (WARN_ON(unlikely(handle == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ params = handle->params;
+ if (WARN_ON(unlikely(params == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ /* Get pointer to the proper RCB */
+ if (WARN_ON((rcb_num < RCB_MIN) || (rcb_num > RCB_MAX)))
+ return PM_INVAL_RCB_NUM;
+ rcb_p = (struct rcb_block *)&handle->rcb_table->rcb[rcb_num];
+
+ pm_i2c_bus_num = rcb_p->fill9;
+ if (WARN_ON((pm_i2c_bus_num < I2C_BUS_MIN) ||
+ (pm_i2c_bus_num > I2C_BUS_MAX)))
+ return PM_INVAL_NUM_I2C;
+
+ /* building the name for i2c_clk */
+ sprintf(i2c_name, "i2c%d_fck", pm_i2c_bus_num);
+
+ /* Request resource using PRCM API */
+ p_i2c_clk = omap_clk_get_by_name(i2c_name);
+ if (p_i2c_clk == 0)
+ return PM_NO_I2C;
+ i2c_clk_status = clk_enable(p_i2c_clk);
+ if (i2c_clk_status != 0)
+ return PM_NO_I2C;
+ rcb_p->mod_base_addr = (unsigned)p_i2c_clk;
+ params->pm_i2c_bus_counter++;
+
+ return PM_SUCCESS;
+}
+
+/*
+ Function to get gpio
+ *
+ */
+static inline int ipu_pm_get_gpio(int proc_id, unsigned rcb_num)
+{
+ struct ipu_pm_object *handle;
+ struct ipu_pm_params *params;
+ struct rcb_block *rcb_p;
+
+ /* get the handle to proper ipu pm object */
+ handle = ipu_pm_get_handle(proc_id);
+ if (WARN_ON(unlikely(handle == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ params = handle->params;
+ if (WARN_ON(unlikely(params == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ /* Get pointer to the proper RCB */
+ if (WARN_ON((rcb_num < RCB_MIN) || (rcb_num > RCB_MAX)))
+ return PM_INVAL_RCB_NUM;
+ rcb_p = (struct rcb_block *)&handle->rcb_table->rcb[rcb_num];
+
+ pm_gpio_num = rcb_p->fill9;
+ return_val = gpio_request(pm_gpio_num , "ducati-ss");
+ if (return_val != 0)
+ return PM_NO_GPIO;
+ params->pm_gpio_counter++;
+
+ return PM_SUCCESS;
+}
+
+/*
+ Function to get a regulator
+ *
+ */
+static inline int ipu_pm_get_regulator(int proc_id, unsigned rcb_num)
+{
+ struct ipu_pm_object *handle;
+ struct ipu_pm_params *params;
+ struct rcb_block *rcb_p;
+ struct regulator *p_regulator = NULL;
+ u8 pm_reg_voltage_index;
+ s32 retval = 0;
+ /*
+ * There are 5 bits to set the voltage. The maximum error is half a
+ * step (steps / 2); we add this value to the minimum voltage shared
+ * in rcb->data[0] to provide the nearest value to that minimum:
+ * fixed_voltage -> min_voltage + max_error
+ */
+ u32 fixed_voltage;
+ u32 max_error = (100000 / 2);
+
+ /* get the handle to proper ipu pm object */
+ handle = ipu_pm_get_handle(proc_id);
+ if (WARN_ON(unlikely(handle == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ params = handle->params;
+ if (WARN_ON(unlikely(params == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ /* Get pointer to the proper RCB */
+ if (WARN_ON((rcb_num < RCB_MIN) || (rcb_num > RCB_MAX)))
+ return PM_INVAL_RCB_NUM;
+ rcb_p = (struct rcb_block *)&handle->rcb_table->rcb[rcb_num];
+
+ pm_regulator_num = rcb_p->fill9;
+ if (WARN_ON((pm_regulator_num < REGULATOR_MIN) ||
+ (pm_regulator_num > REGULATOR_MAX)))
+ return PM_INVAL_REGULATOR;
+
+ /*
+ * FIXME: Only 1 regulator is provided; if more are provided
+ * this check is not valid.
+ */
+ if (WARN_ON(params->pm_regulator_counter > 0))
+ return PM_INVAL_REGULATOR;
+
+ /*
+ * Fix the voltage to give the nearest value to
+ * the minimum by adding the maximum error.
+ * rcb_p->data[0] contains the minimum voltage
+ * rcb_p->data[1] contains the maximum voltage
+ */
+ fixed_voltage = rcb_p->data[0] + max_error;
+ /* 5 bits to represent the voltage */
+ pm_reg_voltage_index = ((fixed_voltage - 1000000)/100000)+1;
+
+ /*
+ * FIXME: Disable/set_voltage the regulator with a hack; once the
+ * regulator API is fully working this will be removed.
+ * This is only for Phoenix ES1.0
+ */
+ pm_reg_voltage_index |= 0x80;
+ retval = twl_i2c_write_u8(TWL6030_MODULE_ID1, 0x00, VMMC_CFG_VOLTAGE);
+ if (retval)
+ goto exit;
+ retval = twl_i2c_write_u8(TWL6030_MODULE_ID1, 0x80, VCXIO_CFG_TRANS);
+ if (retval)
+ goto exit;
+ retval = twl_i2c_write_u8(TWL6030_MODULE_ID0, CAM_2_ENABLE,
+ VAUX3_CFG_STATE);
+ if (retval)
+ goto exit;
+ retval = twl_i2c_write_u8(TWL6030_MODULE_ID0, pm_reg_voltage_index,
+ VAUX3_CFG_VOLTAGE);
+ if (retval)
+ goto exit;
+
+ /*
+ * FIXME: The real value will be stored once the regulator API
+ * is fully working.
+ */
+ rcb_p->mod_base_addr = (unsigned)p_regulator;
+ params->pm_regulator_counter++;
+
+ return PM_SUCCESS;
+exit:
+ return PM_INVAL_REGULATOR;
+}
+
+/*
+ Function to release sdma channels to PRCM
+ *
+ */
+static inline int ipu_pm_rel_sdma_chan(int proc_id, unsigned rcb_num)
+{
+ struct ipu_pm_object *handle;
+ struct ipu_pm_params *params;
+ struct rcb_block *rcb_p;
+
+ /* get the handle to proper ipu pm object */
+ handle = ipu_pm_get_handle(proc_id);
+ if (WARN_ON(unlikely(handle == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ params = handle->params;
+ if (WARN_ON(unlikely(params == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ /* Get pointer to the proper RCB */
+ if (WARN_ON((rcb_num < RCB_MIN) || (rcb_num > RCB_MAX)))
+ return PM_INVAL_RCB_NUM;
+
+ rcb_p = (struct rcb_block *)&handle->rcb_table->rcb[rcb_num];
+
+ /* Release resource using PRCM API */
+ pm_sdmachan_num = rcb_p->num_chan;
+ for (ch = 0; ch < pm_sdmachan_num; ch++) {
+ pm_sdmachan_dummy = (int)rcb_p->channels[ch];
+ omap_free_dma(pm_sdmachan_dummy);
+ params->pm_sdmachan_counter--;
+ }
+ return PM_SUCCESS;
+}
+
+/*
+ Function to release gptimer to PRCM
+ *
+ */
+static inline int ipu_pm_rel_gptimer(int proc_id, unsigned rcb_num)
+{
+ struct ipu_pm_object *handle;
+ struct ipu_pm_params *params;
+ struct rcb_block *rcb_p;
+ struct omap_dm_timer *p_gpt;
+
+ /* get the handle to proper ipu pm object */
+ handle = ipu_pm_get_handle(proc_id);
+ if (WARN_ON(unlikely(handle == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ params = handle->params;
+ if (WARN_ON(unlikely(params == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ /* Get pointer to the proper RCB */
+ if (WARN_ON((rcb_num < RCB_MIN) || (rcb_num > RCB_MAX)))
+ return PM_INVAL_RCB_NUM;
+
+ rcb_p = (struct rcb_block *)&handle->rcb_table->rcb[rcb_num];
+
+ p_gpt = (struct omap_dm_timer *)rcb_p->mod_base_addr;
+ pm_gptimer_num = rcb_p->fill9;
+
+ /* Set the usage mask for reuse */
+ GPTIMER_USE_MASK |= (1 << pm_gptimer_num);
+
+ /* Release resource using PRCM API */
+ if (p_gpt != NULL)
+ omap_dm_timer_free(p_gpt);
+ rcb_p->mod_base_addr = 0;
+ params->pm_gptimer_counter--;
+ return PM_SUCCESS;
+}
+
+/*
+ Function to release an i2c bus
+ *
+ */
+static inline int ipu_pm_rel_i2c_bus(int proc_id, unsigned rcb_num)
+{
+ struct ipu_pm_object *handle;
+ struct ipu_pm_params *params;
+ struct rcb_block *rcb_p;
+ struct clk *p_i2c_clk;
+
+ /* get the handle to proper ipu pm object */
+ handle = ipu_pm_get_handle(proc_id);
+ if (WARN_ON(unlikely(handle == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ params = handle->params;
+ if (WARN_ON(unlikely(params == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ /* Get pointer to the proper RCB */
+ if (WARN_ON((rcb_num < RCB_MIN) || (rcb_num > RCB_MAX)))
+ return PM_INVAL_RCB_NUM;
+
+ rcb_p = (struct rcb_block *)&handle->rcb_table->rcb[rcb_num];
+ p_i2c_clk = (struct clk *)rcb_p->mod_base_addr;
+
+ /* Release resource using PRCM API */
+ clk_disable(p_i2c_clk);
+ rcb_p->mod_base_addr = 0;
+ params->pm_i2c_bus_counter--;
+
+ return PM_SUCCESS;
+}
+
+/*
+ Function to release gpio
+ *
+ */
+static inline int ipu_pm_rel_gpio(int proc_id, unsigned rcb_num)
+{
+ struct ipu_pm_object *handle;
+ struct ipu_pm_params *params;
+ struct rcb_block *rcb_p;
+
+ /* get the handle to proper ipu pm object */
+ handle = ipu_pm_get_handle(proc_id);
+ if (WARN_ON(unlikely(handle == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ params = handle->params;
+ if (WARN_ON(unlikely(params == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ /* Get pointer to the proper RCB */
+ if (WARN_ON((rcb_num < RCB_MIN) || (rcb_num > RCB_MAX)))
+ return PM_INVAL_RCB_NUM;
+ rcb_p = (struct rcb_block *)&handle->rcb_table->rcb[rcb_num];
+
+ pm_gpio_num = rcb_p->fill9;
+ gpio_free(pm_gpio_num);
+ params->pm_gpio_counter--;
+
+ return PM_SUCCESS;
+}
+
+/*
+ Function to release a regulator
+ *
+ */
+static inline int ipu_pm_rel_regulator(int proc_id, unsigned rcb_num)
+{
+ struct ipu_pm_object *handle;
+ struct ipu_pm_params *params;
+ struct rcb_block *rcb_p;
+ struct regulator *p_regulator = NULL;
+ s32 retval = 0;
+
+ /* get the handle to proper ipu pm object */
+ handle = ipu_pm_get_handle(proc_id);
+ if (WARN_ON(unlikely(handle == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ params = handle->params;
+ if (WARN_ON(unlikely(params == NULL)))
+ return PM_NOT_INSTANTIATED;
+
+ /* Get pointer to the proper RCB */
+ if (WARN_ON((rcb_num < RCB_MIN) || (rcb_num > RCB_MAX)))
+ return PM_INVAL_RCB_NUM;
+ rcb_p = (struct rcb_block *)&handle->rcb_table->rcb[rcb_num];
+ p_regulator = (struct regulator *)rcb_p->mod_base_addr;
+
+ /* Release resource using PRCM API */
+ /*
+ * FIXME: Disable/set the regulator voltage with a hack; once the
+ * regulator API is fully working this will be removed.
+ * Add a check for twl write
+ */
+ retval = twl_i2c_write_u8(TWL6030_MODULE_ID1, 0x00, VMMC_CFG_VOLTAGE);
+ if (retval)
+ goto exit;
+ retval = twl_i2c_write_u8(TWL6030_MODULE_ID1, 0x80, VCXIO_CFG_TRANS);
+ if (retval)
+ goto exit;
+ retval = twl_i2c_write_u8(TWL6030_MODULE_ID0, 0x00, VAUX3_CFG_VOLTAGE);
+ if (retval)
+ goto exit;
+ retval = twl_i2c_write_u8(TWL6030_MODULE_ID0, CAM_2_DISABLE,
+ VAUX3_CFG_STATE);
+ if (retval)
+ goto exit;
+ retval = twl_i2c_write_u8(TWL6030_MODULE_ID1, 0x40, VCXIO_CFG_TRANS);
+ if (retval)
+ goto exit;
+
+ rcb_p->mod_base_addr = 0;
+ params->pm_regulator_counter--;
+
+ return PM_SUCCESS;
+exit:
+ return PM_INVAL_REGULATOR;
+}
+
+/*
+ Function to set init parameters
+ *
+ */
+void ipu_pm_params_init(struct ipu_pm_params *params)
+{
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(params == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ memcpy(params, &(pm_params), sizeof(struct ipu_pm_params));
+ return;
+exit:
+ printk(KERN_ERR "ipu_pm_params_init failed status(0x%x)\n", retval);
+}
+EXPORT_SYMBOL(ipu_pm_params_init);
+
+/*
+ Function to calculate ipu_pm mem required
+ *
+ */
+int ipu_pm_mem_req(const struct ipu_pm_params *params)
+{
+ /* Memory required for ipu pm module */
+ /* FIXME: Maybe more than this is needed */
+ return sizeof(struct sms);
+}
+EXPORT_SYMBOL(ipu_pm_mem_req);
+
+/*
+ Function to register events
+ This function will register the events needed for ipu_pm
+ the events reserved for power management are 2 and 3
+ both sysm3 and appm3 will use the same events.
+ */
+int ipu_pm_init_transport(struct ipu_pm_object *handle)
+{
+ s32 status = 0;
+ struct ipu_pm_params *params;
+
+ if (WARN_ON(unlikely(handle == NULL))) {
+ status = -EINVAL;
+ goto pm_register_fail;
+ }
+
+ params = handle->params;
+ if (WARN_ON(unlikely(params == NULL))) {
+ status = -EINVAL;
+ goto pm_register_fail;
+ }
+
+ status = notify_register_event(
+ params->remote_proc_id,
+ params->line_id,
+ params->pm_resource_event | \
+ (NOTIFY_SYSTEMKEY << 16),
+ (notify_fn_notify_cbck)ipu_pm_callback,
+ (void *)NULL);
+ if (status < 0)
+ goto pm_register_fail;
+
+ status = notify_register_event(
+ params->remote_proc_id,
+ params->line_id,
+ params->pm_notification_event | \
+ (NOTIFY_SYSTEMKEY << 16),
+ (notify_fn_notify_cbck)ipu_pm_notify_callback,
+ (void *)NULL);
+
+ if (status < 0) {
+ status = notify_unregister_event(
+ params->remote_proc_id,
+ params->line_id,
+ params->pm_resource_event | \
+ (NOTIFY_SYSTEMKEY << 16),
+ (notify_fn_notify_cbck)ipu_pm_callback,
+ (void *)NULL);
+ if (status < 0)
+ printk(KERN_ERR "ERROR UNREGISTERING PM EVENT\n");
+ goto pm_register_fail;
+ }
+ return status;
+
+pm_register_fail:
+ printk(KERN_ERR "pm register events failed status(0x%x)", status);
+ return status;
+}
+
+/*
+ Function to create ipu pm object
+ *
+ */
+struct ipu_pm_object *ipu_pm_create(const struct ipu_pm_params *params)
+{
+ int i;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(params == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (params->remote_proc_id == SYS_M3) {
+ pm_handle_sysm3 = kmalloc(sizeof(struct ipu_pm_object),
+ GFP_ATOMIC);
+
+ if (WARN_ON(unlikely(pm_handle_sysm3 == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ pm_handle_sysm3->rcb_table = (struct sms *)params->shared_addr;
+
+ pm_handle_sysm3->pm_event = kzalloc(sizeof(struct pm_event)
+ * params->pm_num_events, GFP_KERNEL);
+
+ if (WARN_ON(unlikely(pm_handle_sysm3->pm_event == NULL))) {
+ retval = -EINVAL;
+ kfree(pm_handle_sysm3);
+ goto exit;
+ }
+
+ /* Each event has its own sem */
+ for (i = 0; i < params->pm_num_events; i++) {
+ sema_init(&pm_handle_sysm3->pm_event[i].sem_handle, 0);
+ pm_handle_sysm3->pm_event[i].event_type = i;
+ }
+
+ pm_handle_sysm3->params = kzalloc(sizeof(struct ipu_pm_params)
+ , GFP_KERNEL);
+
+ if (WARN_ON(unlikely(pm_handle_sysm3->params == NULL))) {
+ retval = -EINVAL;
+ kfree(pm_handle_sysm3->pm_event);
+ kfree(pm_handle_sysm3);
+ goto exit;
+ }
+
+ memcpy(pm_handle_sysm3->params, params,
+ sizeof(struct ipu_pm_params));
+
+ /* Check the SW version on both sides */
+ if (WARN_ON(pm_handle_sysm3->rcb_table->pm_version !=
+ PM_VERSION))
+ printk(KERN_WARNING "Mismatch in PM version Host:0x%x "
+ "Remote:0x%x", PM_VERSION,
+ pm_handle_sysm3->rcb_table->pm_version);
+
+ return pm_handle_sysm3;
+ } else {/* remote_proc_id == APP_M3 */
+ pm_handle_appm3 = kmalloc(sizeof(struct ipu_pm_object),
+ GFP_ATOMIC);
+
+ if (WARN_ON(unlikely(pm_handle_appm3 == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ pm_handle_appm3->rcb_table = (struct sms *)params->shared_addr;
+
+ pm_handle_appm3->pm_event = kzalloc(sizeof(struct pm_event)
+ * params->pm_num_events, GFP_KERNEL);
+
+ if (WARN_ON(unlikely(pm_handle_appm3->pm_event == NULL))) {
+ retval = -EINVAL;
+ kfree(pm_handle_appm3);
+ goto exit;
+ }
+
+ /* Each event has its own sem */
+ for (i = 0; i < params->pm_num_events; i++) {
+ sema_init(&pm_handle_appm3->pm_event[i].sem_handle, 0);
+ pm_handle_appm3->pm_event[i].event_type = i;
+ }
+
+ pm_handle_appm3->params = kzalloc(sizeof(struct ipu_pm_params)
+ , GFP_KERNEL);
+
+ if (WARN_ON(unlikely(pm_handle_appm3->params == NULL))) {
+ retval = -EINVAL;
+ kfree(pm_handle_appm3->pm_event);
+ kfree(pm_handle_appm3);
+ goto exit;
+ }
+
+ memcpy(pm_handle_appm3->params, params,
+ sizeof(struct ipu_pm_params));
+
+ /* Check the SW version on both sides */
+ if (WARN_ON(pm_handle_appm3->rcb_table->pm_version !=
+ PM_VERSION))
+ printk(KERN_WARNING "Mismatch in PM version Host:0x%x "
+ "Remote:0x%x", PM_VERSION,
+ pm_handle_appm3->rcb_table->pm_version);
+
+ return pm_handle_appm3;
+ }
+
+exit:
+ printk(KERN_ERR "ipu_pm_create failed! "
+ "status = 0x%x\n", retval);
+ return NULL;
+}
+
+/*
+ Function to delete ipu pm object
+ *
+ */
+void ipu_pm_delete(struct ipu_pm_object *handle)
+{
+ s32 retval = 0;
+ struct ipu_pm_params *params;
+
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ params = handle->params;
+ if (WARN_ON(unlikely(params == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ /* Release the shared RCB */
+ handle->rcb_table = NULL;
+
+ kfree(handle->pm_event);
+ if (params->remote_proc_id == SYS_M3)
+ pm_handle_sysm3 = NULL;
+ else
+ pm_handle_appm3 = NULL;
+ kfree(handle->params);
+ kfree(handle);
+ return;
+exit:
+ printk(KERN_ERR "ipu_pm_delete is already NULL "
+ "status = 0x%x\n", retval);
+}
+
+/*
+ Function to get ipu pm object
+ *
+ */
+static inline struct ipu_pm_object *ipu_pm_get_handle(int proc_id)
+{
+ if (proc_id == SYS_M3)
+ return pm_handle_sysm3;
+ else if (proc_id == APP_M3)
+ return pm_handle_appm3;
+ else
+ return NULL;
+}
+
+/*
+ Get the default configuration for the ipu_pm module.
+ needed in ipu_pm_setup.
+ */
+void ipu_pm_get_config(struct ipu_pm_config *cfg)
+{
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(cfg == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (atomic_cmpmask_and_lt(&(ipu_pm_state.ref_count),
+ IPU_PM_MAKE_MAGICSTAMP(0),
+ IPU_PM_MAKE_MAGICSTAMP(1)) == true)
+ memcpy(cfg, &ipu_pm_state.def_cfg,
+ sizeof(struct ipu_pm_config));
+ else
+ memcpy(cfg, &ipu_pm_state.cfg, sizeof(struct ipu_pm_config));
+ return;
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "ipu_pm_get_config failed! status = 0x%x",
+ retval);
+ }
+ return;
+}
+EXPORT_SYMBOL(ipu_pm_get_config);
+
+/*
+ Function to setup ipu pm object
+ This function is called in platform_setup()
+ TODO
+ This function will load the default configuration for ipu_pm.
+ In this function we can decide what is going to be controlled
+ by ipu_pm (DVFS, NOTIFICATIONS, ...); this configuration
+ can be changed at run-time.
+ */
+int ipu_pm_setup(struct ipu_pm_config *cfg)
+{
+ struct ipu_pm_config tmp_cfg;
+ s32 retval = 0;
+ struct mutex *lock = NULL;
+
+ /* This sets the ref_count variable if it is not initialized; the upper
+ * 16 bits are written with the module ID to ensure correctness of the
+ * ref_count variable.
+ */
+ atomic_cmpmask_and_set(&ipu_pm_state.ref_count,
+ IPU_PM_MAKE_MAGICSTAMP(0),
+ IPU_PM_MAKE_MAGICSTAMP(0));
+ if (atomic_inc_return(&ipu_pm_state.ref_count)
+ != IPU_PM_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ if (cfg == NULL) {
+ ipu_pm_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+
+ /* Create a default gate handle for local module protection */
+ lock = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (lock == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+ mutex_init(lock);
+ ipu_pm_state.gate_handle = lock;
+
+ /* No proc attached yet */
+ pm_handle_appm3 = NULL;
+ pm_handle_sysm3 = NULL;
+
+ memcpy(&ipu_pm_state.cfg, cfg, sizeof(struct ipu_pm_config));
+ ipu_pm_state.is_setup = true;
+ return retval;
+
+exit:
+ printk(KERN_ERR "ipu_pm_setup failed! retval = 0x%x", retval);
+ return retval;
+}
+EXPORT_SYMBOL(ipu_pm_setup);
+
+/*
+ Function to attach ipu pm object
+ This function is called in ipc_attach()
+ TODO
+ This function will create the object based on the remoteproc id
+ and save the handle.
+ It also receives the shared address pointer to use in the rcb
+ */
+int ipu_pm_attach(u16 remote_proc_id, void *shared_addr)
+{
+ struct ipu_pm_params params;
+ struct ipu_pm_object *handle;
+ s32 retval = 0;
+
+ ipu_pm_params_init(&params);
+ params.remote_proc_id = remote_proc_id;
+ params.shared_addr = (void *)shared_addr;
+ params.line_id = LINE_ID;
+ params.shared_addr_size = ipu_pm_mem_req(NULL);
+
+ handle = ipu_pm_create(&params);
+
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ retval = ipu_pm_init_transport(handle);
+
+ if (retval < 0)
+ goto exit;
+
+ return retval;
+exit:
+ printk(KERN_ERR "ipu_pm_attach failed! retval = 0x%x", retval);
+ return retval;
+}
+EXPORT_SYMBOL(ipu_pm_attach);
+
+/*
+ Function to detach ipu pm object
+ This function is called in ipc_detach()
+ TODO
+ This function will delete the object based on the remoteproc id
+ and its saved handle.
+ */
+int ipu_pm_detach(u16 remote_proc_id)
+{
+ struct ipu_pm_object *handle;
+ struct ipu_pm_params *params;
+ s32 retval = 0;
+
+ /* get the handle to proper ipu pm object */
+ handle = ipu_pm_get_handle(remote_proc_id);
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ params = handle->params;
+ if (WARN_ON(unlikely(params == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ /* unregister the events used for ipu_pm */
+ retval = notify_unregister_event(
+ params->remote_proc_id,
+ params->line_id,
+ params->pm_resource_event | (NOTIFY_SYSTEMKEY << 16),
+ (notify_fn_notify_cbck)ipu_pm_callback,
+ (void *)NULL);
+ if (retval < 0) {
+ printk(KERN_ERR "ERROR UNREGISTERING PM EVENT\n");
+ goto exit;
+ }
+ retval = notify_unregister_event(
+ params->remote_proc_id,
+ params->line_id,
+ params->pm_notification_event | (NOTIFY_SYSTEMKEY << 16),
+ (notify_fn_notify_cbck)ipu_pm_notify_callback,
+ (void *)NULL);
+ if (retval < 0) {
+ printk(KERN_ERR "ERROR UNREGISTERING PM EVENT\n");
+ goto exit;
+ }
+
+ /* Deleting the handle based on remote_proc_id */
+ ipu_pm_delete(handle);
+ return retval;
+exit:
+ printk(KERN_ERR "ipu_pm_detach failed handle null retval 0x%x", retval);
+ return retval;
+}
+EXPORT_SYMBOL(ipu_pm_detach);
+
+/*
+ Function to destroy ipu_pm module
+ this function will destroy the shared region 1(?)
+ and all the other structs created to set the configuration
+ */
+int ipu_pm_destroy(void)
+{
+ s32 retval = 0;
+ struct mutex *lock = NULL;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &ipu_pm_state.ref_count,
+ IPU_PM_MAKE_MAGICSTAMP(0),
+ IPU_PM_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (!(atomic_dec_return(&ipu_pm_state.ref_count)
+ == IPU_PM_MAKE_MAGICSTAMP(0))) {
+ retval = 1;
+ goto exit;
+ }
+
+ if (WARN_ON(ipu_pm_state.gate_handle == NULL)) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(ipu_pm_state.gate_handle);
+ if (retval)
+ goto exit;
+
+ lock = ipu_pm_state.gate_handle;
+ ipu_pm_state.gate_handle = NULL;
+ mutex_unlock(lock);
+ kfree(lock);
+ return retval;
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "ipu_pm_destroy failed, retval: %x\n",
+ retval);
+ }
+ return retval;
+}
+EXPORT_SYMBOL(ipu_pm_destroy);
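A note on the lifecycle the file above implements: ipu_pm_setup() initializes module state, ipu_pm_attach() creates the per-processor object and registers the two Notify events, ipu_pm_notifications() round-trips a suspend/resume event to the remote core, and ipu_pm_detach()/ipu_pm_destroy() tear everything down. A minimal sketch of that call order, not part of the patch; shared_addr is a placeholder and the SYS_M3 id (2) comes from the constants at the top of ipu_pm.c:

/* Sketch (not part of the patch): bring-up and tear-down order for ipu_pm
 * against the SYS_M3 core. shared_addr must point at the shared RCB table. */
static int example_ipu_pm_lifecycle(void *shared_addr)
{
	int status;

	status = ipu_pm_setup(NULL);	/* NULL selects the default config */
	if (status < 0)
		return status;

	status = ipu_pm_attach(2 /* SYS_M3 */, shared_addr);
	if (status < 0)
		goto out_destroy;

	/* Round-trip a suspend notification; returns EBUSY on timeout/failure */
	status = ipu_pm_notifications(PM_SUSPEND);

	ipu_pm_detach(2 /* SYS_M3 */);
out_destroy:
	ipu_pm_destroy();
	return status;
}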
diff --git a/drivers/dsp/syslink/ipu_pm/ipu_pm.h b/drivers/dsp/syslink/ipu_pm/ipu_pm.h
new file mode 100644
index 000000000000..6d1ab5d1c6ba
--- /dev/null
+++ b/drivers/dsp/syslink/ipu_pm/ipu_pm.h
@@ -0,0 +1,317 @@
+/*
+* ipu_pm.h
+*
+* Syslink IPU Power Management support functions for TI OMAP processors.
+*
+* Copyright (C) 2009-2010 Texas Instruments, Inc.
+*
+* This package is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+*
+* --------------------------------------------------------------------
+* | rcb_num | | | | |
+* | msg_type | | | | |
+* | sub_type | | | | |
+* | rqst_cpu | 1-|word | | |
+* | extd_mem_flag | | | | |
+* | num_chan | | | | |
+* | fill9 | | | | |
+* |-------------------------------------| ------ 4-words 4-words |
+* | process_id | 1-word | | |
+* |-------------------------------------| ------ | | |
+* | sem_hnd | 1-word | | |
+* |-------------------------------------| ------ | | |
+* | mod_base_addr | 1-word | | |
+* |-------------------------------------| ------ ----- ----- |
+* | channels[0] | data[0] | datax[0] | | | | |
+* | channels[1] | | | 1-word | | RCB_SIZE
+* | channels[2] | | | | | | =
+* | channels[3] | | | | | | 8WORDS
+* |--------------|---------|------------| ------ | | |
+* | channels[4] | data[0] | datax[1] | | | | |
+* | channels[5] | | | 1-word | RCB_SIZE-5 |
+* | channels[6] | | | | | | |
+* | channels[7] | | | | RCB_SIZE-4 | |
+* |--------------|---------|------------| ------ | | |
+* | channels[8] | data[0] | datax[2] | | | | |
+* | channels[9] | | | 1-word | | |
+* | channels[10] | | | | | | |
+* | channels[11] | | | | | | |
+* |--------------|---------|------------| ------ | ----- |
+* | channels[12] | data[0] |extd_mem_hnd| | | | |
+* | channels[13] | | | 1-word | 1-word |
+* | channels[14] | | | | | | |
+* | channels[15] | | | | | | |
+* --------------------------------------------------------------------
+*
+*The Ducati Power Management sub-system uses a structure called RCB_struct,
+*or just RCB, to share information with the MPU about a particular resource
+*involved in the communication. The information stored in this structure is
+*needed to get attributes and other useful data about the resource.
+*The first fields of the RCB resemble the RCB message sent across the Notify
+*driver: it retains the rcb_num, msg_type and msg_subtype from the RCB
+*message as its first 3 fields. The rqst_cpu field indicates which remote
+*processor originates the request/release petition. When a particular
+*resource is requested, some of its parameters should be specified.
+*For devices like GPTimer and GPIO, the most significant attribute is the
+*item ID. This value should be placed in the "fill9" field of the RCB struct.
+*This field should be filled by the requester if asking for a particular
+*resource, or by the receiver if the resource granted is other than the one
+*asked for.
+*
+*Other variables related to the resource are:
+*"sem_hnd", which stores the semaphore handle associated on the Ducati side.
+*We pend on this semaphore when asking for the resource, and it is posted
+*when the resource is granted.
+*"mod_base_addr" is the virtual base address of the resource.
+*"process_id" is the task ID from which the petition for the resource was
+*made.
+*
+*The last 16 bytes of the structure can be interpreted in 3 different ways
+*according to the context:
+*1) If the RCB is for SDMA, the last 16 bytes correspond to an array of 16
+* channels[]. Each entry holds the number of an SDMA channel granted; only
+* as many entries as indicated in num_chan are meaningful in the channels[]
+* array.
+*2) If the extd_mem_flag bit is NOT set, the last 16 bytes are used as a
+* data[] array. Each entry is 4 bytes long, so the maximum number of
+* entries is 4.
+*3) If the extd_mem_flag bit IS set, the last 16 bytes are used as an array
+* datax[] of 3 members, each entry 4 bytes long, plus one additional field
+* "extd_mem_hnd", which is a pointer to the continuation of this datax
+* array.
+*/
+
+#ifndef _IPU_PM_H_
+#define _IPU_PM_H_
+
+#include <linux/types.h>
+#include <linux/semaphore.h>
+
+/* Pm notify ducati driver */
+/* Suspend/resume/other... */
+#define NUMBER_PM_EVENTS 3
+
+#define RCB_SIZE 8
+
+#define DATA_MAX (RCB_SIZE - 4)
+#define DATAX_MAX (RCB_SIZE - 5)
+#define SDMA_CHANNELS_MAX 16
+#define I2C_BUS_MIN 1
+#define I2C_BUS_MAX 4
+#define REGULATOR_MIN 1
+#define REGULATOR_MAX 1
+
+#define GP_TIMER_3 3
+#define GP_TIMER_4 4
+#define GP_TIMER_9 9
+#define GP_TIMER_11 11
+#define NUM_IPU_TIMERS 4
+
+#define RCB_MIN 1
+#define RCB_MAX 33
+
+#define PM_RESOURCE 2
+#define PM_NOTIFICATION 3
+#define PM_SUCCESS 0
+#define PM_FAILURE -1
+#define PM_SHM_BASE_ADDR 0x9cff0000
+
+/*
+ * IPU_PM_MODULEID
+ * Unique module ID
+ */
+#define IPU_PM_MODULEID (0x6A6A)
+
+/* Macro to make a correct module magic number with refCount */
+#define IPU_PM_MAKE_MAGICSTAMP(x) ((IPU_PM_MODULEID << 12u) | (x))
+
+enum pm_failure_codes{
+ PM_INSUFFICIENT_CHANNELS = 1,
+ PM_NO_GPTIMER,
+ PM_NO_GPIO,
+ PM_NO_I2C,
+ PM_NO_REGULATOR,
+ PM_REGULATOR_IN_USE,
+ PM_INVAL_RCB_NUM,
+ PM_INVAL_NUM_CHANNELS,
+ PM_INVAL_NUM_I2C,
+ PM_INVAL_REGULATOR,
+ PM_NOT_INSTANTIATED,
+ PM_UNSUPPORTED
+};
+
+enum pm_msgtype_codes{PM_NULLMSG,
+ PM_ACKNOWLEDGEMENT,
+ PM_REQUEST_RESOURCE,
+ PM_RELEASE_RESOURCE,
+ PM_REQUEST_FAIL,
+ PM_RELEASE_FAIL,
+ PM_REGULATOR_FAIL,
+ PM_NOTIFICATIONS,
+ PM_NOTIFICATIONS_FAIL,
+ PM_ENABLE_RESOURCE,
+ PM_WRITE_RESOURCE,
+ PM_READ_RESOURCE,
+ PM_DISABLE_RESOURCE
+};
+
+enum pm_regulator_action{PM_SET_VOLTAGE,
+ PM_SET_CURRENT,
+ PM_SET_MODE,
+ PM_GET_MODE,
+ PM_GET_CURRENT,
+ PM_GET_VOLTAGE
+};
+
+enum res_type{
+ DUCATI = 0,
+ IVA_HD,
+ ISS,
+ SDMA,
+ GP_TIMER,
+ GP_IO,
+ I2C,
+ REGULATOR
+};
+
+enum pm_event_type{PM_SUSPEND,
+ PM_RESUME,
+ PM_OTHER
+};
+
+struct rcb_message {
+ unsigned rcb_flag:1;
+ unsigned rcb_num:6;
+ unsigned reply_flag:1;
+ unsigned msg_type:4;
+ unsigned msg_subtype:4;
+ unsigned parm:16;
+};
+
+union message_slicer {
+ struct rcb_message fields;
+ int whole;
+};
+
+struct rcb_block {
+ unsigned rcb_num:6;
+ unsigned msg_type:4;
+ unsigned sub_type:4;
+ unsigned rqst_cpu:4;
+ unsigned extd_mem_flag:1;
+ unsigned num_chan:4;
+ unsigned fill9:9;
+
+ unsigned process_id;
+ unsigned *sem_hnd;
+ unsigned mod_base_addr;
+ union {
+ unsigned int data[DATA_MAX];
+ struct {
+ unsigned datax[DATAX_MAX];
+ unsigned extd_mem_hnd;
+ };
+ unsigned char channels[SDMA_CHANNELS_MAX];
+ };
+};
+
+struct sms {
+ unsigned rat;
+ unsigned pm_version;
+ struct rcb_block rcb[RCB_MAX];
+};
+
+struct pm_event {
+ enum pm_event_type event_type;
+ struct semaphore sem_handle;
+};
+
+struct ipu_pm_params {
+ int pm_gptimer_counter;
+ int pm_gpio_counter;
+ int pm_sdmachan_counter;
+ int pm_i2c_bus_counter;
+ int pm_regulator_counter;
+ int timeout;
+ void *shared_addr;
+ int shared_addr_size;
+ int pm_num_events;
+ int pm_resource_event;
+ int pm_notification_event;
+ int proc_id;
+ int remote_proc_id;
+ int line_id;
+ void *gate_mp;
+};
+
+/* This structure defines attributes for initialization of the ipu_pm module. */
+struct ipu_pm_config {
+ u32 reserved;
+};
+
+/* Defines the ipu_pm state object, which contains all the module
+ * specific information. */
+struct ipu_pm_module_object {
+ atomic_t ref_count;
+ /* Reference count */
+ struct ipu_pm_config cfg;
+ /* ipu_pm configuration structure */
+ struct ipu_pm_config def_cfg;
+ /* Default module configuration */
+ struct mutex *gate_handle;
+ /* Handle of gate to be used for local thread safety */
+ bool is_setup;
+ /* Indicates whether the ipu_pm module is setup. */
+};
+
+/* ipu_pm handle; one for each proc SYSM3/APPM3 */
+struct ipu_pm_object {
+ struct sms *rcb_table;
+ struct pm_event *pm_event;
+ struct ipu_pm_params *params;
+};
+
+/* Function for PM resources Callback */
+void ipu_pm_callback(u16 proc_id, u16 line_id, u32 event_id,
+ uint *arg, u32 payload);
+
+/* Function for PM notifications Callback */
+void ipu_pm_notify_callback(u16 proc_id, u16 line_id, u32 event_id,
+ uint *arg, u32 payload);
+
+/* Function for send PM Notifications */
+int ipu_pm_notifications(enum pm_event_type event_type);
+
+/* Function to set init parameters */
+void ipu_pm_params_init(struct ipu_pm_params *params);
+
+/* Function to calculate ipu pm mem */
+int ipu_pm_mem_req(const struct ipu_pm_params *params);
+
+/* Function to config ipu_pm module */
+void ipu_pm_get_config(struct ipu_pm_config *cfg);
+
+/* Function to set up ipu_pm module */
+int ipu_pm_setup(struct ipu_pm_config *cfg);
+
+/* Function to create ipu pm object */
+struct ipu_pm_object *ipu_pm_create(const struct ipu_pm_params *params);
+
+/* Function to delete ipu pm object */
+void ipu_pm_delete(struct ipu_pm_object *handle);
+
+/* Function to destroy ipu_pm module */
+int ipu_pm_destroy(void);
+
+/* Function to attach ipu_pm module */
+int ipu_pm_attach(u16 remote_proc_id, void *shared_addr);
+
+/* Function to deattach ipu_pm module */
+int ipu_pm_detach(u16 remote_proc_id);
+
+/* Function to register the ipu_pm events */
+int ipu_pm_init_transport(struct ipu_pm_object *handle);
+
+#endif
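The header's union message_slicer is what both callbacks above use to crack the 32-bit Notify payload into bit-fields. For illustration, not part of the patch, a sketch of packing a request word and testing the reply flag, using only types and constants defined in this header:

/* Sketch (not part of the patch): pack a PM request into the 32-bit Notify
 * payload and decode it again via union message_slicer. */
static u32 example_pack_request(unsigned rcb_num)
{
	union message_slicer m;

	m.whole = 0;
	m.fields.rcb_num = rcb_num;	/* 6-bit RCB index */
	m.fields.msg_type = PM_REQUEST_RESOURCE;
	m.fields.parm = PM_SUCCESS;
	return (u32)m.whole;
}

static bool example_is_reply(u32 payload)
{
	union message_slicer m;

	m.whole = payload;
	return m.fields.reply_flag;	/* set by the host before the ACK */
}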
diff --git a/drivers/dsp/syslink/multicore_ipc/Kbuild b/drivers/dsp/syslink/multicore_ipc/Kbuild
new file mode 100644
index 000000000000..faf06d8b35b4
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/Kbuild
@@ -0,0 +1,31 @@
+libsyslink_ipc = multiproc.o multiproc_ioctl.o nameserver.o \
+nameserver_ioctl.o nameserver_remote.o nameserver_remotenotify.o \
+listmp.o listmp_ioctl.o sharedregion.o sharedregion_ioctl.o \
+gate.o gatepeterson.o gatehwspinlock.o gatemp.o gatemp_ioctl.o \
+heap.o heapmemmp.o heapmemmp_ioctl.o heapbufmp.o heapbufmp_ioctl.o \
+messageq.o messageq_ioctl.o transportshm.o transportshm_setup.o \
+platform.o ipc.o sysipc_ioctl.o ipc_ioctl.o ipc_drv.o \
+../omap_notify/notify_driver.o ../omap_notify/notify.o \
+../omap_notify/drv_notify.o ../omap_notify/plat/omap4_notify_setup.o \
+../notify_ducatidriver/notify_ducati.o ../ipu_pm/ipu_pm.o
+
+libsyslink_platform = platform_mem.o
+
+obj-$(CONFIG_MPU_SYSLINK_IPC) += syslink_ipc.o
+syslink_ipc-objs = $(libservices) $(libsyslink_ipc)
+
+obj-$(CONFIG_MPU_SYSLINK_PLATFORM) += syslink_platform.o
+syslink_platform-objs = $(libservices) $(libsyslink_platform)
+
+ccflags-y += -Wno-strict-prototypes
+
+#Machine dependent
+ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \
+ -DTICFG_PROC_VER -DTICFG_EVM_TYPE -DCHNL_SMCLASS \
+ -DCHNL_MESSAGES -DUSE_LEVEL_1_MACROS \
+ -DCONFIG_DISABLE_BRIDGE_PM -DDSP_TRACEBUF_DISABLED
+
+#Header files
+ccflags-y += -Iarch/arm/plat-omap/include
+ccflags-y += -Iarch/arm/plat-omap/include/syslink
+
diff --git a/drivers/dsp/syslink/multicore_ipc/gate.c b/drivers/dsp/syslink/multicore_ipc/gate.c
new file mode 100644
index 000000000000..713f22e7fcad
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/gate.c
@@ -0,0 +1,69 @@
+/*
+ * gate.c
+ *
+ * Gate wrapper implementation
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+
+/* Standard headers */
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/* Module level headers */
+#include <igateprovider.h>
+#include <gate.h>
+
+
+/* Structure defining the internal object for the system gate. */
+struct gate_object {
+ IGATEPROVIDER_SUPEROBJECT; /* For inheritance from IGateProvider */
+};
+
+/* Function to enter a Gate */
+int *gate_enter_system(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ return (int *)flags;
+}
+
+/* Function to leave a gate */
+void gate_leave_system(int *key)
+{
+ local_irq_restore((unsigned long) key);
+}
+
+/* Match with IGateProvider */
+static inline int *_gate_enter_system(struct gate_object *obj)
+{
+ (void) obj;
+ return gate_enter_system();
+}
+
+/* Match with IGateProvider */
+static inline void _gate_leave_system(struct gate_object *obj, int *key)
+{
+ (void) obj;
+ gate_leave_system(key);
+}
+
+struct gate_object gate_system_object = {
+ .enter = (int *(*)(void *))_gate_enter_system,
+ .leave = (void (*)(void *, int *))_gate_leave_system,
+};
+
+struct igateprovider_object *gate_system_handle = \
+ (struct igateprovider_object *)&gate_system_object;
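
The system gate above simply brackets a critical section with local_irq_save()/local_irq_restore(), smuggling the saved flags through the returned key. A caller-side sketch (not part of the patch):

	int *key;

	key = gate_enter_system();	/* local interrupts now disabled */
	/* ... touch state shared with interrupt context ... */
	gate_leave_system(key);		/* saved flags restored */

The key must be handed back unchanged: it is the irqsave flags word cast to a pointer, not a real pointer.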
diff --git a/drivers/dsp/syslink/multicore_ipc/gate_remote.c b/drivers/dsp/syslink/multicore_ipc/gate_remote.c
new file mode 100644
index 000000000000..b5cf6871c8b9
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/gate_remote.c
@@ -0,0 +1,40 @@
+/*
+ * gate_remote.c
+ *
+ * This includes the functions to handle remote gates
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/types.h>
+
+/*
+ * ======== gate_remote_enter ========
+ * Purpose:
+ * This function is used to enter a remote gate
+ */
+int gate_remote_enter(void *ghandle)
+{
+ return 0;
+}
+
+/*
+ * ======== gate_remote_leave ========
+ * Purpose:
+ * This function is used to leave a remote gate
+ */
+int gate_remote_leave(void *ghandle, u32 key)
+{
+ key = 0;
+ return 0;
+}
+
diff --git a/drivers/dsp/syslink/multicore_ipc/gatehwspinlock.c b/drivers/dsp/syslink/multicore_ipc/gatehwspinlock.c
new file mode 100644
index 000000000000..c97e57cb30f8
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/gatehwspinlock.c
@@ -0,0 +1,494 @@
+/*
+ * gatehwspinlock.c
+ *
+ * Hardware-based spinlock gate for mutual exclusion of shared memory.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <syslink/atomic_linux.h>
+#include <multiproc.h>
+#include <sharedregion.h>
+#include <gatemp.h>
+#include <igatempsupport.h>
+#include <igateprovider.h>
+#include <iobject.h>
+#include <gatehwspinlock.h>
+
+
+/* =============================================================================
+ * Macros
+ * =============================================================================
+ */
+
+/* Macro to make a correct module magic number with refCount */
+#define GATEHWSPINLOCK_MAKE_MAGICSTAMP(x) ((GATEHWSPINLOCK_MODULEID << 12u) \
+ | (x))
+/*
+ * structure for gatehwspinlock module state
+ */
+struct gatehwspinlock_module_object {
+ atomic_t ref_count; /* Reference count */
+ struct gatehwspinlock_config cfg;
+ struct gatehwspinlock_config default_cfg;
+	struct gatehwspinlock_params def_inst_params; /* default instance
+						parameters */
+ u32 *base_addr; /* Base address of lock registers */
+ u32 num_locks; /* Maximum number of locks */
+};
+
+/*
+ * Structure defining object for the Gate Spinlock
+ */
+struct gatehwspinlock_object {
+ IGATEPROVIDER_SUPEROBJECT; /* For inheritance from IGateProvider */
+ IOBJECT_SUPEROBJECT; /* For inheritance for IObject */
+ u32 lock_num;
+ u32 nested;
+ void *local_gate;
+};
+
+/*
+ * Variable for holding state of the gatehwspinlock module
+ */
+struct gatehwspinlock_module_object gatehwspinlock_state = {
+ .default_cfg.default_protection = \
+ gatehwspinlock_LOCALPROTECT_INTERRUPT,
+ .default_cfg.num_locks = 32u,
+ .def_inst_params.shared_addr = NULL,
+ .def_inst_params.resource_id = 0x0,
+ .def_inst_params.region_id = 0x0,
+ .num_locks = 32u
+};
+
+static struct gatehwspinlock_module_object *gatehwspinlock_module =
+ &gatehwspinlock_state;
+
+/* =============================================================================
+ * Internal functions
+ * =============================================================================
+ */
+
+/* TODO: figure these out */
+#define gate_enter_system() 0
+#define gate_leave_system(key) {}
+
+/* =============================================================================
+ * APIs
+ * =============================================================================
+ */
+/*
+ * ======== gatehwspinlock_get_config ========
+ * Purpose:
+ * This will get the default configuration parameters for the
+ * gatehwspinlock module
+ */
+void gatehwspinlock_get_config(struct gatehwspinlock_config *config)
+{
+ int *key = 0;
+
+ if (WARN_ON(config == NULL))
+ goto exit;
+
+ key = gate_enter_system();
+ if (atomic_cmpmask_and_lt(&(gatehwspinlock_module->ref_count),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(0),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(1)) == true)
+ memcpy(config, &gatehwspinlock_module->default_cfg,
+ sizeof(struct gatehwspinlock_config));
+ else
+ memcpy(config, &gatehwspinlock_module->cfg,
+ sizeof(struct gatehwspinlock_config));
+ gate_leave_system(key);
+
+exit:
+ return;
+}
+EXPORT_SYMBOL(gatehwspinlock_get_config);
+
+/*
+ * ======== gatehwspinlock_setup ========
+ * Purpose:
+ * This will set up the gatehwspinlock module
+ */
+int gatehwspinlock_setup(const struct gatehwspinlock_config *config)
+{
+ struct gatehwspinlock_config tmp_cfg;
+ int *key = 0;
+
+ key = gate_enter_system();
+
+	/* Initialize ref_count if it has not been set up yet; the upper
+	 * 16 bits carry the module ID to guard against a stale ref_count
+	 */
+ atomic_cmpmask_and_set(&gatehwspinlock_module->ref_count,
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(0),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&gatehwspinlock_module->ref_count)
+ != GATEHWSPINLOCK_MAKE_MAGICSTAMP(1)) {
+ gate_leave_system(key);
+ return 1;
+ }
+
+ if (config == NULL) {
+ gatehwspinlock_get_config(&tmp_cfg);
+ config = &tmp_cfg;
+ }
+ gate_leave_system(key);
+
+ memcpy(&gatehwspinlock_module->cfg, config,
+ sizeof(struct gatehwspinlock_config));
+
+ gatehwspinlock_module->base_addr = (void *)config->base_addr;
+ gatehwspinlock_module->num_locks = config->num_locks;
+
+ return 0;
+
+}
+EXPORT_SYMBOL(gatehwspinlock_setup);
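
A sketch of the expected configuration sequence. The io_base cookie is a placeholder: platform code must supply the real, already-mapped SPINLOCK register base, and the exact type of the base_addr field is defined in gatehwspinlock.h.

	struct gatehwspinlock_config cfg;
	s32 status;

	gatehwspinlock_get_config(&cfg);	/* start from defaults */
	cfg.base_addr = (u32)io_base;		/* placeholder register base */
	cfg.num_locks = 32;
	status = gatehwspinlock_setup(&cfg);
	/* 0 on first setup, 1 if the module was already set up */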
+
+/*
+ * ======== gatehwspinlock_destroy ========
+ * Purpose:
+ * This will destroy the gatehwspinlock module
+ */
+int gatehwspinlock_destroy(void)
+{
+ s32 retval = 0;
+ int *key = 0;
+
+ key = gate_enter_system();
+
+ if (atomic_cmpmask_and_lt(&(gatehwspinlock_module->ref_count),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(0),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (!(atomic_dec_return(&gatehwspinlock_module->ref_count)
+ == GATEHWSPINLOCK_MAKE_MAGICSTAMP(0))) {
+ gate_leave_system(key);
+ retval = 1;
+ goto exit;
+ }
+ gate_leave_system(key);
+
+ memset(&gatehwspinlock_module->cfg, 0,
+ sizeof(struct gatehwspinlock_config));
+ return 0;
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "gatehwspinlock_destroy failed status:%x\n",
+ retval);
+ }
+ return retval;
+
+}
+EXPORT_SYMBOL(gatehwspinlock_destroy);
+
+/*
+ * ======== gatehwspinlock_get_num_instances ========
+ * Purpose:
+ * Function to return the number of instances configured in the module.
+ */
+u32 gatehwspinlock_get_num_instances(void)
+{
+ return gatehwspinlock_module->num_locks;
+}
+EXPORT_SYMBOL(gatehwspinlock_get_num_instances);
+
+/*
+ * ======== gatehwspinlock_locks_init ========
+ * Purpose:
+ * Function to initialize the locks.
+ */
+void gatehwspinlock_locks_init(void)
+{
+ u32 i;
+
+ for (i = 0; i < gatehwspinlock_module->num_locks; i++)
+ gatehwspinlock_module->base_addr[i] = 0;
+}
+EXPORT_SYMBOL(gatehwspinlock_locks_init);
+
+/*
+ * ======== gatehwspinlock_params_init ========
+ * Purpose:
+ * This will initialize this config-params structure with
+ * supplier-specified defaults before instance creation
+ */
+void gatehwspinlock_params_init(struct gatehwspinlock_params *params)
+{
+ int *key = 0;
+
+ key = gate_enter_system();
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(gatehwspinlock_module->ref_count),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(0),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+ if (WARN_ON(params == NULL))
+ goto exit;
+
+ gate_leave_system(key);
+ memcpy(params, &(gatehwspinlock_module->def_inst_params),
+ sizeof(struct gatehwspinlock_params));
+ return;
+
+exit:
+ gate_leave_system(key);
+ return;
+}
+EXPORT_SYMBOL(gatehwspinlock_params_init);
+
+/*
+ * ======== gatehwspinlock_create ========
+ * Purpose:
+ * This will create a new instance of the gatehwspinlock module
+ */
+void *gatehwspinlock_create(enum igatempsupport_local_protect local_protect,
+ const struct gatehwspinlock_params *params)
+{
+ void *handle = NULL;
+ struct gatehwspinlock_object *obj = NULL;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(gatehwspinlock_module->ref_count),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(0),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(params == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(params->shared_addr == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ obj = kzalloc(sizeof(struct gatehwspinlock_object), GFP_KERNEL);
+ if (obj == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ IGATEPROVIDER_OBJECTINITIALIZER(obj, gatehwspinlock);
+
+ /* Create the local gate */
+ obj->local_gate = gatemp_create_local(local_protect);
+ if (obj->local_gate == NULL) {
+ retval = GATEHWSPINLOCK_E_FAIL;
+ goto exit;
+ }
+
+ obj->lock_num = params->resource_id;
+ obj->nested = 0;
+
+ handle = obj;
+ return handle;
+
+exit:
+	kfree(obj);	/* safe when obj is NULL; frees a half-built object */
+	printk(KERN_ERR "gatehwspinlock_create failed status: %x\n", retval);
+	return NULL;
+}
+EXPORT_SYMBOL(gatehwspinlock_create);
+
+/*
+ * ======== gatehwspinlock_delete ========
+ * Purpose:
+ * This will delete an instance of the gatehwspinlock module
+ */
+int gatehwspinlock_delete(void **gphandle)
+{
+ struct gatehwspinlock_object *obj = NULL;
+ s32 retval;
+
+ if (atomic_cmpmask_and_lt(&(gatehwspinlock_module->ref_count),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(0),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(gphandle == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(*gphandle == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct gatehwspinlock_object *)(*gphandle);
+
+ /* No need to delete the local gate, as it is gatemp module wide
+ * local mutex. */
+
+ kfree(obj);
+ *gphandle = NULL;
+
+ return 0;
+
+exit:
+ printk(KERN_ERR "gatehwspinlock_delete failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(gatehwspinlock_delete);
+
+
+/*
+ * ======== gatehwspinlock_enter ========
+ * Purpose:
+ * This will enter the gatehwspinlock instance
+ */
+int *gatehwspinlock_enter(void *gphandle)
+{
+ struct gatehwspinlock_object *obj = NULL;
+ s32 retval = 0;
+ int *key = 0;
+ VOLATILE u32 *base_addr = (VOLATILE u32 *)
+ gatehwspinlock_module->base_addr;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(gatehwspinlock_module->ref_count),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(0),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(gphandle == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct gatehwspinlock_object *)gphandle;
+
+	/* Enter local gate */
+ if (obj->local_gate != NULL) {
+ retval = mutex_lock_interruptible(
+ (struct mutex *)obj->local_gate);
+ if (retval)
+ goto exit;
+ }
+
+	/* If the gate has already been entered, only track the nesting
+	 * depth and return */
+ obj->nested++;
+ if (obj->nested > 1)
+ return key;
+
+	/* Acquire the hardware spinlock: on OMAP, reading the lock
+	 * register claims a free lock and returns 0 */
+ while (1) {
+ if (base_addr[obj->lock_num] == 0)
+ break;
+ }
+
+exit:
+ if (retval < 0)
+ printk(KERN_ERR "gatehwspinlock_enter failed! status = 0x%x",
+ retval);
+ return key;
+}
+EXPORT_SYMBOL(gatehwspinlock_enter);
+
+/*
+ * ======== gatehwspinlock_leave ========
+ * Purpose:
+ * This will leave the gatehwspinlock instance
+ */
+void gatehwspinlock_leave(void *gphandle, int *key)
+{
+ struct gatehwspinlock_object *obj = NULL;
+ VOLATILE u32 *base_addr = (VOLATILE u32 *)
+ gatehwspinlock_module->base_addr;
+ s32 retval = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(gatehwspinlock_module->ref_count),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(0),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(gphandle == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct gatehwspinlock_object *)gphandle;
+ obj->nested--;
+ /* Leave the spinlock if the leave() is not nested */
+ if (obj->nested == 0)
+ base_addr[obj->lock_num] = 0;
+	/* Leave local gate */
+	if (obj->local_gate != NULL)
+		mutex_unlock(obj->local_gate);
+
+exit:
+ if (retval < 0)
+ printk(KERN_ERR "gatehwspinlock_leave failed! status = 0x%x",
+ retval);
+ return;
+}
+EXPORT_SYMBOL(gatehwspinlock_leave);
+
+/*
+ * ======== gatehwspinlock_get_resource_id ========
+ */
+u32 gatehwspinlock_get_resource_id(void *handle)
+{
+ struct gatehwspinlock_object *obj = NULL;
+ s32 retval = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(gatehwspinlock_module->ref_count),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(0),
+ GATEHWSPINLOCK_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(handle == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct gatehwspinlock_object *)handle;
+
+ return obj->lock_num;
+
+exit:
+ printk(KERN_ERR "gatehwspinlock_get_resource_id failed status: %x\n",
+ retval);
+ return (u32)-1;
+}
+EXPORT_SYMBOL(gatehwspinlock_get_resource_id);
+
+/*
+ * ======== gatehwspinlock_shared_mem_req ========
+ * Purpose:
+ * This will give the amount of shared memory required
+ * for creation of each instance
+ */
+u32 gatehwspinlock_shared_mem_req(const struct gatehwspinlock_params *params)
+{
+ return 0;
+}
+EXPORT_SYMBOL(gatehwspinlock_shared_mem_req);
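
Putting the instance-level API together, assuming gatehwspinlock_setup() has already run. The shared_addr buffer and the local_protect value are placeholders; as the code above shows, create only checks shared_addr for non-NULL and takes the hardware lock number from resource_id.

	struct gatehwspinlock_params params;
	void *gate;
	int *key;

	gatehwspinlock_params_init(&params);
	params.resource_id = 0;			/* hardware lock number */
	params.shared_addr = shared_addr;	/* placeholder, must be non-NULL */

	gate = gatehwspinlock_create(local_protect, &params);
	if (gate != NULL) {
		key = gatehwspinlock_enter(gate);
		/* cross-processor critical section */
		gatehwspinlock_leave(gate, key);
		gatehwspinlock_delete(&gate);
	}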
diff --git a/drivers/dsp/syslink/multicore_ipc/gatemp.c b/drivers/dsp/syslink/multicore_ipc/gatemp.c
new file mode 100644
index 000000000000..4c7244168bbd
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/gatemp.c
@@ -0,0 +1,1846 @@
+/*
+ * gatemp.c
+ *
+ * Gate wrapper implementation
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Utilities headers */
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+
+/* Syslink utilities headers */
+#include <syslink/atomic_linux.h>
+
+/* Syslink module headers */
+#include <multiproc.h>
+#include <igateprovider.h>
+#include <igatempsupport.h>
+#include <iobject.h>
+#include <gate.h>
+/*#include <memory.h>
+#include <bitops.h>
+#include <ti/syslink/utils/Cache.h>
+*/
+#include <nameserver.h>
+#include <sharedregion.h>
+
+/* Module level headers */
+#include <gatemp.h>
+#include <gatempdefs.h>
+
+
+/* -----------------------------------------------------------------------------
+ * Macros
+ * -----------------------------------------------------------------------------
+ */
+/* VERSION */
+#define GATEMP_VERSION (1)
+
+/* CREATED */
+#define GATEMP_CREATED (0x11202009)
+
+/* PROXYORDER_SYSTEM */
+#define GATEMP_PROXYORDER_SYSTEM (0)
+
+/* PROXYORDER_CUSTOM1 */
+#define GATEMP_PROXYORDER_CUSTOM1 (1)
+
+/* PROXYORDER_CUSTOM2 */
+#define GATEMP_PROXYORDER_CUSTOM2 (2)
+
+/* PROXYORDER_NUM */
+#define GATEMP_PROXYORDER_NUM (3)
+
+/* Macro to make a correct module magic number with refCount */
+#define GATEMP_MAKE_MAGICSTAMP(x) \
+ ((GATEMP_MODULEID << 12u) | (x))
+
+/* Helper macros */
+#define GETREMOTE(mask) ((enum gatemp_remote_protect)(mask >> 8))
+#define GETLOCAL(mask) ((enum gatemp_local_protect)(mask & 0xFF))
+#define SETMASK(remote_protect, local_protect) \
+ ((u32)(remote_protect << 8 | local_protect))
+
+
+/* Name of the reserved NameServer used for GateMP. */
+#define GATEMP_NAMESERVER "GateMP"
+
+#define ROUND_UP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
+
+#define Gate_enterSystem() (int *)0
+
+#define Gate_leaveSystem(key) (void)0
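
A worked example of the mask helpers: the remote protection level is packed into the upper byte and the local level into the lower byte, so a single u16 in shared memory (gatemp_attrs.mask) describes both.

	u16 mask = SETMASK(GATEMP_REMOTEPROTECT_SYSTEM,
			GATEMP_LOCALPROTECT_INTERRUPT);

	GETREMOTE(mask);	/* == GATEMP_REMOTEPROTECT_SYSTEM */
	GETLOCAL(mask);		/* == GATEMP_LOCALPROTECT_INTERRUPT */

Similarly, ROUND_UP(sizeof(struct gatemp_attrs), 128) pads the attrs up to a 128-byte cache line, which is how proxy_attrs ends up line-aligned in the instance-init code below.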
+
+/* -----------------------------------------------------------------------------
+ * Structs & Enums
+ * -----------------------------------------------------------------------------
+ */
+/* Attrs */
+struct gatemp_attrs {
+ u16 mask;
+ u16 creator_proc_id;
+ u32 arg;
+ u32 status;
+};
+
+/* Structure defining state of GateMP Module */
+struct gatemp_module_state {
+ void *name_server;
+ int num_remote_system;
+ int num_remote_custom1;
+ int num_remote_custom2;
+ u8 *remote_system_in_use_alloc;
+ u8 *remote_custom1_in_use_alloc;
+ u8 *remote_custom2_in_use_alloc;
+ void **remote_system_gates_alloc;
+ void **remote_custom1_gates_alloc;
+ void **remote_custom2_gates_alloc;
+ u8 *remote_system_in_use;
+ u8 *remote_custom1_in_use;
+ u8 *remote_custom2_in_use;
+ void **remote_system_gates;
+ void **remote_custom1_gates;
+ void **remote_custom2_gates;
+ struct igateprovider_object *gate_hwi;
+ struct mutex *gate_mutex;
+ struct igateprovider_object *gate_null;
+ struct gatemp_object *default_gate;
+ int proxy_map[GATEMP_PROXYORDER_NUM];
+ atomic_t ref_count;
+ struct gatemp_config cfg;
+ /* Current config values */
+ struct gatemp_config default_cfg;
+ /* default config values */
+ struct gatemp_params def_inst_params;
+	/* default instance parameters */
+ bool is_owner;
+ /* Indicates if this processor is the owner */
+ atomic_t attach_ref_count;
+ /* Attach/detach reference count */
+};
+
+/* Structure defining instance of GateMP Module */
+struct gatemp_object {
+ IGATEPROVIDER_SUPEROBJECT; /* For inheritance from IGateProvider */
+ IOBJECT_SUPEROBJECT; /* For inheritance for IObject */
+ enum gatemp_remote_protect remote_protect;
+ enum gatemp_local_protect local_protect;
+ void *ns_key;
+ int num_opens;
+ u16 creator_proc_id;
+ bool cache_enabled;
+ struct gatemp_attrs *attrs;
+ u16 region_id;
+ uint alloc_size;
+ void *proxy_attrs;
+ u32 resource_id;
+ void *gate_handle;
+ enum ipc_obj_type obj_type; /* from shared region? */
+};
+
+/* Reserved */
+struct gatemp_reserved {
+ u32 version;
+};
+
+/* Localgate */
+struct gatemp_local_gate {
+ struct igateprovider_object *local_gate;
+ int ref_count;
+};
+
+/*!
+ * @brief Structure defining parameters for the GateMP module.
+ */
+struct _gatemp_params {
+ char *name;
+ u32 region_id;
+ void *shared_addr;
+ enum gatemp_local_protect local_protect;
+ enum gatemp_remote_protect remote_protect;
+ u32 resource_id;
+ bool open_flag;
+};
+
+/* -----------------------------------------------------------------------------
+ * Forward declaration
+ * -----------------------------------------------------------------------------
+ */
+static void gatemp_set_region0_reserved(void *shared_addr);
+static void gatemp_clear_region0_reserved(void);
+static void gatemp_open_region0_reserved(void *shared_addr);
+static void gatemp_close_region0_reserved(void *shared_addr);
+static void gatemp_set_default_remote(void *handle);
+static uint gatemp_get_free_resource(u8 *in_use, int num);
+static struct gatemp_object *_gatemp_create(
+ const struct _gatemp_params *params);
+
+/* -----------------------------------------------------------------------------
+ * Globals
+ * -----------------------------------------------------------------------------
+ */
+static struct gatemp_module_state gatemp_state = {
+ .default_cfg.num_resources = 32,
+ .default_cfg.max_name_len = 32,
+ .default_cfg.default_protection = GATEMP_LOCALPROTECT_INTERRUPT,
+ .def_inst_params.shared_addr = 0x0,
+ .def_inst_params.region_id = 0x0,
+ .default_gate = NULL
+};
+
+static struct gatemp_module_state *gatemp_module = &gatemp_state;
+static struct gatemp_object *gatemp_first_object;
+
+/* -----------------------------------------------------------------------------
+ * APIs
+ * -----------------------------------------------------------------------------
+ */
+
+void gatemp_get_config(struct gatemp_config *cfg)
+{
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(cfg == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (atomic_cmpmask_and_lt(&(gatemp_module->ref_count),
+ GATEMP_MAKE_MAGICSTAMP(0),
+ GATEMP_MAKE_MAGICSTAMP(1)) == true) {
+ /* Setup has not yet been called */
+ memcpy((void *)cfg, &gatemp_module->default_cfg,
+ sizeof(struct gatemp_config));
+ } else {
+ memcpy((void *)cfg, &gatemp_module->cfg,
+ sizeof(struct gatemp_config));
+ }
+
+exit:
+ if (retval < 0)
+ printk(KERN_ERR "gatemp_get_config failed! status = 0x%x",
+ retval);
+ return;
+}
+
+s32 gatemp_setup(const struct gatemp_config *cfg)
+{
+ s32 retval = 0;
+ struct gatemp_config tmp_cfg;
+ int i;
+ struct nameserver_params params;
+
+	/* Initialize ref_count if it has not been set up yet; the upper
+	 * 16 bits carry the module ID to guard against a stale ref_count.
+	 */
+ atomic_cmpmask_and_set(&gatemp_module->ref_count,
+ GATEMP_MAKE_MAGICSTAMP(0),
+ GATEMP_MAKE_MAGICSTAMP(0));
+ if (atomic_inc_return(&gatemp_module->ref_count)
+ != GATEMP_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ if (cfg == NULL) {
+ gatemp_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+
+ gatemp_module->default_gate = NULL;
+ for (i = 0; i < GATEMP_PROXYORDER_NUM; i++)
+ gatemp_module->proxy_map[i] = i;
+
+ if ((void *)gatemp_remote_custom1_proxy_create
+ == (void *)gatemp_remote_system_proxy_create) {
+ gatemp_module->proxy_map[GATEMP_PROXYORDER_CUSTOM1] =
+ gatemp_module->proxy_map[GATEMP_PROXYORDER_SYSTEM];
+ }
+
+ if ((void *) gatemp_remote_system_proxy_create
+ == (void *) gatemp_remote_custom2_proxy_create) {
+ gatemp_module->proxy_map[GATEMP_PROXYORDER_CUSTOM2] =
+ gatemp_module->proxy_map[GATEMP_PROXYORDER_SYSTEM];
+ } else if ((void *) gatemp_remote_custom2_proxy_create
+ == (void *) gatemp_remote_custom1_proxy_create) {
+ gatemp_module->proxy_map[GATEMP_PROXYORDER_CUSTOM2] =
+ gatemp_module->proxy_map[GATEMP_PROXYORDER_CUSTOM1];
+ }
+
+ /* Create MutexPri gate */
+ gatemp_module->gate_mutex = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (gatemp_module->gate_mutex == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+ mutex_init(gatemp_module->gate_mutex);
+
+ /* create Nameserver */
+ nameserver_params_init(&params);
+ params.max_runtime_entries = cfg->max_runtime_entries;
+ params.max_name_len = cfg->max_name_len;
+ params.max_value_len = 2 * sizeof(u32);
+ gatemp_module->name_server = nameserver_create(GATEMP_NAMESERVER,
+ &params);
+ if (gatemp_module->name_server == NULL) {
+ retval = -1;
+ goto error_nameserver;
+ }
+
+	/* Get the number of configured instances from the plugged-in
+	 * proxy gates */
+ gatemp_module->num_remote_system = \
+ gatemp_remote_system_proxy_get_num_instances();
+ gatemp_module->num_remote_custom1 = \
+ gatemp_remote_custom1_proxy_get_num_instances();
+ gatemp_module->num_remote_custom2 = \
+ gatemp_remote_custom2_proxy_get_num_instances();
+ gatemp_module->remote_system_in_use_alloc = \
+ kzalloc((sizeof(u8) * cfg->num_resources), GFP_KERNEL);
+ if (gatemp_module->remote_system_in_use_alloc == NULL) {
+ retval = -ENOMEM;
+ goto error_remote_system_fail;
+ }
+ gatemp_module->remote_system_in_use = \
+ gatemp_module->remote_system_in_use_alloc;
+
+ gatemp_module->remote_custom1_in_use_alloc = \
+ kzalloc((sizeof(u8) * cfg->num_resources), GFP_KERNEL);
+ if (gatemp_module->remote_custom1_in_use_alloc == NULL) {
+ retval = -ENOMEM;
+ goto error_remote_custom1_fail;
+ }
+ gatemp_module->remote_custom1_in_use = \
+ gatemp_module->remote_custom1_in_use_alloc;
+
+ gatemp_module->remote_custom2_in_use_alloc = \
+ kzalloc((sizeof(u8) * cfg->num_resources), GFP_KERNEL);
+ if (gatemp_module->remote_custom2_in_use_alloc == NULL) {
+ retval = -ENOMEM;
+ goto error_remote_custom2_fail;
+ }
+ gatemp_module->remote_custom2_in_use = \
+ gatemp_module->remote_custom2_in_use_alloc;
+
+ if (gatemp_module->num_remote_system) {
+ gatemp_module->remote_system_gates_alloc = kzalloc(
+ (sizeof(void *) * gatemp_module->num_remote_system),
+ GFP_KERNEL);
+ if (gatemp_module->remote_system_gates_alloc == NULL) {
+ retval = -ENOMEM;
+ goto error_remote_system_gates_fail;
+ }
+ } else
+ gatemp_module->remote_system_gates_alloc = NULL;
+ gatemp_module->remote_system_gates = \
+ gatemp_module->remote_system_gates_alloc;
+
+ if (gatemp_module->num_remote_custom1) {
+ gatemp_module->remote_custom1_gates_alloc = kzalloc(
+ (sizeof(void *) * gatemp_module->num_remote_custom1),
+ GFP_KERNEL);
+ if (gatemp_module->remote_custom1_gates_alloc == NULL) {
+ retval = -ENOMEM;
+ goto error_remote_custom1_gates_fail;
+ }
+ } else
+ gatemp_module->remote_custom1_gates_alloc = NULL;
+ gatemp_module->remote_custom1_gates = \
+ gatemp_module->remote_custom1_gates_alloc;
+
+ if (gatemp_module->num_remote_custom2) {
+ gatemp_module->remote_custom2_gates_alloc = kzalloc(
+ (sizeof(void *) * gatemp_module->num_remote_custom2),
+ GFP_KERNEL);
+ if (gatemp_module->remote_custom2_gates_alloc == NULL) {
+ retval = -ENOMEM;
+ goto error_remote_custom2_gates_fail;
+ }
+ } else
+ gatemp_module->remote_custom2_gates_alloc = NULL;
+ gatemp_module->remote_custom2_gates = \
+ gatemp_module->remote_custom2_gates_alloc;
+
+ /* Copy the cfg */
+ memcpy((void *) &gatemp_module->cfg, (void *) cfg,
+ sizeof(struct gatemp_config));
+ return 0;
+
+error_remote_custom2_gates_fail:
+ kfree(gatemp_module->remote_custom1_gates_alloc);
+ gatemp_module->remote_custom1_gates_alloc = NULL;
+ gatemp_module->remote_custom1_gates = NULL;
+error_remote_custom1_gates_fail:
+ kfree(gatemp_module->remote_system_gates_alloc);
+ gatemp_module->remote_system_gates_alloc = NULL;
+ gatemp_module->remote_system_gates = NULL;
+error_remote_system_gates_fail:
+ kfree(gatemp_module->remote_custom2_in_use_alloc);
+ gatemp_module->remote_custom2_in_use_alloc = NULL;
+ gatemp_module->remote_custom2_in_use = NULL;
+error_remote_custom2_fail:
+ kfree(gatemp_module->remote_custom1_in_use_alloc);
+ gatemp_module->remote_custom1_in_use_alloc = NULL;
+ gatemp_module->remote_custom1_in_use = NULL;
+error_remote_custom1_fail:
+ kfree(gatemp_module->remote_system_in_use_alloc);
+ gatemp_module->remote_system_in_use_alloc = NULL;
+ gatemp_module->remote_system_in_use = NULL;
+error_remote_system_fail:
+ if (gatemp_module->name_server)
+ nameserver_delete(&gatemp_module->name_server);
+error_nameserver:
+ kfree(gatemp_module->gate_mutex);
+ gatemp_module->gate_mutex = NULL;
+exit:
+ printk(KERN_ERR "gatemp_setup failed! status = 0x%x", retval);
+ return retval;
+}
+
+s32 gatemp_destroy(void)
+{
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(gatemp_module->ref_count),
+ GATEMP_MAKE_MAGICSTAMP(0),
+ GATEMP_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (!(atomic_dec_return(&gatemp_module->ref_count)
+ == GATEMP_MAKE_MAGICSTAMP(0))) {
+ retval = 1;
+ goto exit;
+ }
+
+ kfree(gatemp_module->gate_mutex);
+ gatemp_module->gate_mutex = NULL;
+
+ if (gatemp_module->name_server)
+ nameserver_delete(&gatemp_module->name_server);
+
+ kfree(gatemp_module->remote_system_in_use_alloc);
+ gatemp_module->remote_system_in_use_alloc = NULL;
+ gatemp_module->remote_system_in_use = NULL;
+
+ kfree(gatemp_module->remote_custom1_in_use_alloc);
+ gatemp_module->remote_custom1_in_use_alloc = NULL;
+ gatemp_module->remote_custom1_in_use = NULL;
+
+ kfree(gatemp_module->remote_custom2_in_use_alloc);
+ gatemp_module->remote_custom2_in_use_alloc = NULL;
+ gatemp_module->remote_custom2_in_use = NULL;
+
+ kfree(gatemp_module->remote_system_gates_alloc);
+ gatemp_module->remote_system_gates_alloc = NULL;
+ gatemp_module->remote_system_gates = NULL;
+
+ kfree(gatemp_module->remote_custom1_gates_alloc);
+ gatemp_module->remote_custom1_gates_alloc = NULL;
+ gatemp_module->remote_custom1_gates = NULL;
+
+ kfree(gatemp_module->remote_custom2_gates_alloc);
+ gatemp_module->remote_custom2_gates_alloc = NULL;
+ gatemp_module->remote_custom2_gates = NULL;
+
+ /* Clear cfg area */
+ memset((void *) &gatemp_module->cfg, 0, sizeof(struct gatemp_config));
+ gatemp_module->is_owner = false;
+ return 0;
+
+exit:
+ if (retval < 0)
+ printk(KERN_ERR "gatemp_destroy failed! status = 0x%x", retval);
+ return retval;
+}
+
+static void _gatemp_get_shared_params(struct gatemp_params *sparams,
+ const struct _gatemp_params *params)
+{
+ sparams->name = params->name;
+ sparams->region_id = params->region_id;
+ sparams->shared_addr = params->shared_addr;
+ sparams->local_protect = \
+ (enum gatemp_local_protect) params->local_protect;
+ sparams->remote_protect = \
+ (enum gatemp_remote_protect) params->remote_protect;
+}
+
+void gatemp_params_init(struct gatemp_params *params)
+{
+ params->name = NULL;
+ params->region_id = 0;
+ params->shared_addr = NULL;
+ params->local_protect = GATEMP_LOCALPROTECT_INTERRUPT;
+ params->remote_protect = GATEMP_REMOTEPROTECT_SYSTEM;
+}
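
Creator-side usage of the defaults above, as a sketch; the gate name is hypothetical and gatemp_create itself is declared in gatemp.h rather than shown in this hunk.

	struct gatemp_params params;
	void *gate;

	gatemp_params_init(&params);	/* supplier defaults */
	params.name = "myGate";		/* hypothetical NameServer name */
	params.region_id = 0;		/* back the gate with SharedRegion 0 */
	gate = gatemp_create(&params);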
+
+int gatemp_instance_init(struct gatemp_object *obj,
+ const struct _gatemp_params *params)
+{
+ int *key;
+ void *remote_handle;
+ gatemp_remote_system_proxy_params system_params;
+ gatemp_remote_custom1_proxy_params custom1_params;
+ gatemp_remote_custom2_proxy_params custom2_params;
+ u32 min_align;
+ u32 offset;
+ u32 *shared_shm_base;
+ struct gatemp_params sparams;
+ u32 ns_value[2];
+ uint cache_line_size = 0;
+ int retval = 0;
+ void *region_heap;
+
+ /* No parameter check since this function will be called internally */
+
+ /* Initialize resource_id to an invalid value */
+ obj->resource_id = (u32)-1;
+
+ /* Open GateMP instance */
+ if (params->open_flag == true) {
+ /* all open work done here except for remote gate_handle */
+ obj->local_protect = params->local_protect;
+ obj->remote_protect = params->remote_protect;
+ obj->ns_key = 0;
+ obj->num_opens = 1;
+ obj->creator_proc_id = MULTIPROC_INVALIDID;
+ obj->attrs = (struct gatemp_attrs *)params->shared_addr;
+ obj->region_id = sharedregion_get_id((void *)obj->attrs);
+ obj->cache_enabled = \
+ sharedregion_is_cache_enabled(obj->region_id);
+ obj->obj_type = IPC_OBJTYPE_OPENDYNAMIC;
+
+ /* Assert that the buffer is in a valid shared region */
+ if (obj->region_id == SHAREDREGION_INVALIDREGIONID)
+ retval = 1;
+
+ if (retval == 0) {
+ cache_line_size = sharedregion_get_cache_line_size(
+ obj->region_id);
+
+ obj->alloc_size = 0;
+
+ /*min_align = Memory_getMaxDefaultTypeAlign();*/
+ min_align = 4;
+ if (cache_line_size > min_align)
+ min_align = cache_line_size;
+
+ offset = ROUND_UP(sizeof(struct gatemp_attrs), \
+ min_align);
+ obj->proxy_attrs = (void *)((u32)obj->attrs + offset);
+ }
+ goto proxy_work;
+ }
+
+ /* Create GateMP instance */
+ obj->local_protect = params->local_protect;
+ obj->remote_protect = params->remote_protect;
+ obj->ns_key = 0;
+ obj->num_opens = 0;
+ obj->creator_proc_id = multiproc_self();
+
+ /* No Remote Protection needed, just create the local protection */
+ if (obj->remote_protect == GATEMP_REMOTEPROTECT_NONE) {
+ /* Creating a local gate (Attrs is in local memory) */
+ /* all work done here and return */
+ obj->gate_handle = gatemp_create_local(obj->local_protect);
+
+ if (params->shared_addr != NULL) {
+ obj->attrs = params->shared_addr;
+ obj->obj_type = IPC_OBJTYPE_CREATEDYNAMIC;
+ /* Need cache settings since attrs is in shared mem */
+ obj->region_id = \
+ sharedregion_get_id((void *)obj->attrs);
+ obj->cache_enabled = \
+ sharedregion_is_cache_enabled(obj->region_id);
+ } else {
+ obj->obj_type = IPC_OBJTYPE_LOCAL;
+ obj->cache_enabled = false; /* local */
+ obj->region_id = SHAREDREGION_INVALIDREGIONID;
+ /* Using default target alignment */
+ obj->attrs = kmalloc(sizeof(struct gatemp_attrs),
+ GFP_KERNEL);
+ if (obj->attrs == NULL)
+ return 2;
+ }
+
+ if (retval == 0) {
+ obj->attrs->arg = (u32)obj;
+ obj->attrs->mask = SETMASK(obj->remote_protect,
+ obj->local_protect);
+ obj->attrs->creator_proc_id = obj->creator_proc_id;
+ obj->attrs->status = GATEMP_CREATED;
+#if 0
+ if (obj->cache_enabled) {
+ /* Need to write back memory if cache is enabled
+ * because cache will be invalidated during
+ * open_by_addr */
+ Cache_wbInv(obj->attrs,
+ sizeof(struct gatemp_attrs),
+ Cache_Type_ALL, true);
+ }
+#endif
+ if (params->name != NULL) {
+ /* Top 16 bits = procId of creator. Bottom 16
+ * bits = '0' if local, '1' otherwise */
+ ns_value[0] = (u32)obj->attrs;
+ ns_value[1] = multiproc_self() << 16;
+ obj->ns_key = nameserver_add(
+ gatemp_module->name_server,
+ params->name, &ns_value,
+ 2 * sizeof(u32));
+ }
+ }
+ goto proxy_work;
+ }
+
+ /* Create remote protection */
+ if (params->shared_addr == NULL) {
+		/* If shared_addr == NULL we are creating dynamically from
+		 * the heap */
+ obj->obj_type = IPC_OBJTYPE_CREATEDYNAMIC_REGION;
+ obj->region_id = params->region_id;
+ _gatemp_get_shared_params(&sparams, params);
+ obj->alloc_size = gatemp_shared_mem_req(&sparams);
+ obj->cache_enabled = sharedregion_is_cache_enabled(
+ obj->region_id);
+
+ /* The region heap will do the alignment */
+ region_heap = sharedregion_get_heap(obj->region_id);
+ WARN_ON(region_heap == NULL);
+ obj->attrs = sl_heap_alloc(region_heap, obj->alloc_size, 0);
+ if (obj->attrs == NULL)
+ retval = 3;
+
+ if (retval == 0) {
+ cache_line_size = sharedregion_get_cache_line_size(
+ obj->region_id);
+ /*min_align = Memory_getMaxDefaultTypeAlign();*/
+ min_align = 4;
+
+ if (cache_line_size > min_align)
+ min_align = cache_line_size;
+
+ offset = ROUND_UP(sizeof(struct gatemp_attrs), \
+ min_align);
+ obj->proxy_attrs = (void *)((u32)obj->attrs + offset);
+ }
+ } else { /* creating using shared_addr */
+ obj->region_id = sharedregion_get_id(params->shared_addr);
+ /* Assert that the buffer is in a valid shared region */
+ if (obj->region_id == SHAREDREGION_INVALIDREGIONID)
+ retval = 4;
+
+ cache_line_size = sharedregion_get_cache_line_size(
+ obj->region_id);
+ /* Assert that shared_addr is cache aligned */
+ if ((retval == 0) && (((u32)params->shared_addr % \
+ cache_line_size) != 0))
+ retval = 5;
+
+ if (retval == 0) {
+ obj->obj_type = IPC_OBJTYPE_CREATEDYNAMIC;
+ obj->attrs = (struct gatemp_attrs *)params->shared_addr;
+ obj->cache_enabled = \
+ sharedregion_is_cache_enabled(obj->region_id);
+
+ /*min_align = Memory_getMaxDefaultTypeAlign();*/
+ min_align = 4;
+ if (cache_line_size > min_align)
+ min_align = cache_line_size;
+ offset = ROUND_UP(sizeof(struct gatemp_attrs), \
+ min_align);
+ obj->proxy_attrs = (void *)((u32)obj->attrs + offset);
+ }
+ }
+
+proxy_work:
+ /* Proxy work for open and create done here */
+ switch (obj->remote_protect) {
+ case GATEMP_REMOTEPROTECT_SYSTEM:
+ if (obj->obj_type != IPC_OBJTYPE_OPENDYNAMIC) {
+ /* Created Instance */
+ obj->resource_id = gatemp_get_free_resource(
+ gatemp_module->remote_system_in_use,
+ gatemp_module->num_remote_system);
+ if (obj->resource_id == -1)
+ retval = 6;
+ } else {
+ /* resource_id set by open call */
+ obj->resource_id = params->resource_id;
+ }
+
+ if (retval == 0) {
+ /* Create the proxy object */
+ gatemp_remote_system_proxy_params_init(&system_params);
+ system_params.resource_id = obj->resource_id;
+ system_params.open_flag = \
+ (obj->obj_type == IPC_OBJTYPE_OPENDYNAMIC);
+ system_params.shared_addr = obj->proxy_attrs;
+ system_params.region_id = obj->region_id;
+ remote_handle = gatemp_remote_system_proxy_create(
+ (enum igatempsupport_local_protect)
+ obj->local_protect,
+ &system_params);
+
+ if (remote_handle == NULL)
+ retval = 7;
+
+ if (retval == 0) {
+ /* Finish filling in the object */
+ obj->gate_handle = remote_handle;
+
+ /* Fill in the local array because it is
+ * cooked */
+ key = Gate_enterSystem();
+ gatemp_module->remote_system_gates[
+ obj->resource_id] = (void *)obj;
+ Gate_leaveSystem(key);
+ }
+ }
+ break;
+
+ case GATEMP_REMOTEPROTECT_CUSTOM1:
+ if (obj->obj_type != IPC_OBJTYPE_OPENDYNAMIC) {
+ /* Created Instance */
+ obj->resource_id = gatemp_get_free_resource(
+ gatemp_module->remote_custom1_in_use,
+ gatemp_module->num_remote_custom1);
+ if (obj->resource_id == -1)
+ retval = 6;
+ } else {
+ /* resource_id set by open call */
+ obj->resource_id = params->resource_id;
+ }
+
+ if (retval == 0) {
+ /* Create the proxy object */
+ gatemp_remote_custom1_proxy_params_init(\
+ &custom1_params);
+ custom1_params.resource_id = obj->resource_id;
+ custom1_params.open_flag = \
+ (obj->obj_type == IPC_OBJTYPE_OPENDYNAMIC);
+ custom1_params.shared_addr = obj->proxy_attrs;
+ custom1_params.region_id = obj->region_id;
+ remote_handle = gatemp_remote_custom1_proxy_create(
+ (enum gatemp_local_protect)
+ obj->local_protect,
+ &custom1_params);
+ if (remote_handle == NULL)
+ retval = 7;
+
+ if (retval == 0) {
+ /* Finish filling in the object */
+ obj->gate_handle = remote_handle;
+
+ /* Fill in the local array because it is
+ * cooked */
+ key = Gate_enterSystem();
+ gatemp_module->remote_custom1_gates[
+ obj->resource_id] = (void *)obj;
+ Gate_leaveSystem(key);
+ }
+ }
+ break;
+
+ case GATEMP_REMOTEPROTECT_CUSTOM2:
+ if (obj->obj_type != IPC_OBJTYPE_OPENDYNAMIC) {
+ /* Created Instance */
+ obj->resource_id = gatemp_get_free_resource(
+ gatemp_module->remote_custom2_in_use,
+ gatemp_module->num_remote_custom2);
+ if (obj->resource_id == -1)
+ retval = 6;
+ } else {
+ /* resource_id set by open call */
+ obj->resource_id = params->resource_id;
+ }
+
+ if (retval == 0) {
+ /* Create the proxy object */
+ gatemp_remote_custom2_proxy_params_init(\
+ &custom2_params);
+ custom2_params.resource_id = obj->resource_id;
+ custom2_params.open_flag = \
+ (obj->obj_type == IPC_OBJTYPE_OPENDYNAMIC);
+ custom2_params.shared_addr = obj->proxy_attrs;
+ custom2_params.region_id = obj->region_id;
+ remote_handle = gatemp_remote_custom2_proxy_create(
+ (enum gatemp_local_protect)
+ obj->local_protect,
+ &custom2_params);
+ if (remote_handle == NULL)
+ retval = 7;
+
+ if (retval == 0) {
+ /* Finish filling in the object */
+ obj->gate_handle = remote_handle;
+
+ /* Fill in the local array because it is
+ * cooked */
+ key = Gate_enterSystem();
+ gatemp_module->remote_custom2_gates[
+ obj->resource_id] = (void *)obj;
+ Gate_leaveSystem(key);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* Place Name/Attrs into NameServer table */
+ if ((obj->obj_type != IPC_OBJTYPE_OPENDYNAMIC) && (retval == 0)) {
+ /* Fill in the attrs */
+ obj->attrs->arg = obj->resource_id;
+ obj->attrs->mask = \
+ SETMASK(obj->remote_protect, obj->local_protect);
+ obj->attrs->creator_proc_id = obj->creator_proc_id;
+ obj->attrs->status = GATEMP_CREATED;
+#if 0
+ if (obj->cache_enabled) {
+ Cache_wbInv(obj->attrs, sizeof(struct gatemp_attrs),
+ Cache_Type_ALL, true);
+ }
+#endif
+
+ if (params->name != NULL) {
+ shared_shm_base = sharedregion_get_srptr(obj->attrs,
+ obj->region_id);
+ ns_value[0] = (u32)shared_shm_base;
+ /* Top 16 bits = procId of creator, Bottom 16
+ * bits = '0' if local, '1' otherwise */
+ ns_value[1] = multiproc_self() << 16 | 1;
+ obj->ns_key = nameserver_add(gatemp_module->name_server,
+ params->name, &ns_value,
+ 2 * sizeof(u32));
+ if (obj->ns_key == NULL)
+ retval = 8;
+ }
+ }
+
+ if (retval != 0) {
+ printk(KERN_ERR "gatemp_instance_init failed! status = 0x%x",
+ retval);
+ }
+ return retval;
+}
+
+void gatemp_instance_finalize(struct gatemp_object *obj, int status)
+{
+ int *system_key = (int *)0;
+ gatemp_remote_system_proxy_handle system_handle;
+ gatemp_remote_custom1_proxy_handle custom1_handle;
+ gatemp_remote_custom2_proxy_handle custom2_handle;
+ int retval = 0;
+ void **remote_handles = NULL;
+ u8 *in_use_array = NULL;
+ u32 num_resources = 0;
+
+ /* No parameter check since this function will be called internally */
+
+	/* Cannot be called while num_opens is non-zero. */
+ if (obj->num_opens != 0) {
+ retval = GateMP_E_INVALIDSTATE;
+ goto exit;
+ }
+
+ /* Remove from NameServer */
+ if (obj->ns_key != 0) {
+ nameserver_remove_entry(gatemp_module->name_server,
+ obj->ns_key);
+ }
+ /* Set the status to 0 */
+ if (obj->obj_type != IPC_OBJTYPE_OPENDYNAMIC) {
+ obj->attrs->status = 0;
+#if 0
+ if (obj->cache_enabled)
+ Cache_wbInv(obj->attrs, sizeof(struct gatemp_attrs),
+ Cache_Type_ALL, true);
+#endif
+ }
+
+	/* If the object type is local, memory was allocated from the local
+	 * system heap; obj->attrs may be NULL if that allocation failed in
+	 * gatemp_instance_init.
+	 */
+ if (obj->remote_protect == GATEMP_REMOTEPROTECT_NONE)
+ kfree(obj->attrs);
+
+ /* Delete if a remote gate */
+ switch (obj->remote_protect) {
+ /* Delete proxy instance... need to downCast */
+ case GATEMP_REMOTEPROTECT_SYSTEM:
+ if (obj->gate_handle) {
+ system_handle = (gatemp_remote_system_proxy_handle)
+ (obj->gate_handle);
+ gatemp_remote_system_proxy_delete(&system_handle);
+ }
+ in_use_array = gatemp_module->remote_system_in_use;
+ remote_handles = gatemp_module->remote_system_gates;
+ num_resources = gatemp_module->num_remote_system;
+ break;
+ case GATEMP_REMOTEPROTECT_CUSTOM1:
+ if (obj->gate_handle) {
+ custom1_handle = (gatemp_remote_custom1_proxy_handle)
+ (obj->gate_handle);
+ gatemp_remote_custom1_proxy_delete(&custom1_handle);
+ }
+ in_use_array = gatemp_module->remote_custom1_in_use;
+ remote_handles = gatemp_module->remote_custom1_gates;
+ num_resources = gatemp_module->num_remote_custom1;
+ break;
+ case GATEMP_REMOTEPROTECT_CUSTOM2:
+ if (obj->gate_handle) {
+ custom2_handle = (gatemp_remote_custom2_proxy_handle)
+ (obj->gate_handle);
+ gatemp_remote_custom2_proxy_delete(&custom2_handle);
+ }
+ in_use_array = gatemp_module->remote_custom2_in_use;
+ remote_handles = gatemp_module->remote_custom2_gates;
+ num_resources = gatemp_module->num_remote_custom2;
+ break;
+ case GATEMP_REMOTEPROTECT_NONE:
+ /* Nothing else to finalize. Any alloc'ed memory has already
+ * been freed */
+ return;
+ default:
+ /* Nothing to do */
+ break;
+ }
+
+ /* Clear the handle array entry in local memory */
+ if (obj->resource_id != (uint)-1)
+ remote_handles[obj->resource_id] = NULL;
+
+ if (obj->obj_type != IPC_OBJTYPE_OPENDYNAMIC &&
+ obj->resource_id != (uint)-1) {
+ /* Only enter default gate if not deleting default gate. */
+ if (obj != gatemp_module->default_gate)
+ system_key = gatemp_enter(gatemp_module->default_gate);
+ /* Clear the resource used flag in shared memory */
+ in_use_array[obj->resource_id] = false;
+#if 0
+ if (obj->cache_enabled) {
+ Cache_wbInv(in_use_array, num_resources, Cache_Type_ALL,
+ true);
+ }
+#endif
+ /* Only leave default gate if not deleting default gate. */
+ if (obj != gatemp_module->default_gate)
+ gatemp_leave(gatemp_module->default_gate, system_key);
+ }
+
+ if (obj->obj_type == IPC_OBJTYPE_CREATEDYNAMIC_REGION) {
+ if (obj->attrs) {
+ /* Free memory allocated from the region heap */
+ sl_heap_free(sharedregion_get_heap(obj->region_id),
+ obj->attrs, obj->alloc_size);
+ }
+ }
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "gatemp_instance_finalize failed! "
+ "status = 0x%x", retval);
+ }
+ return;
+}
+
+int *gatemp_enter(void *obj)
+{
+ int *key;
+ struct gatemp_object *gmp_handle = (struct gatemp_object *)obj;
+
+ key = igateprovider_enter(gmp_handle->gate_handle);
+
+ return key;
+}
+
+void gatemp_leave(void *obj, int *key)
+{
+ struct gatemp_object *gmp_handle = (struct gatemp_object *)obj;
+
+ igateprovider_leave(gmp_handle->gate_handle, key);
+}
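
gatemp_enter()/gatemp_leave() are thin forwards to whichever gate provider is plugged into the handle; the key returned by enter must be handed back to leave unchanged:

	int *key;

	key = gatemp_enter(gate);
	/* shared-memory critical section */
	gatemp_leave(gate, key);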
+
+int gatemp_open(char *name, void **handle)
+{
+ u32 *shared_shm_base;
+ int retval;
+ u32 len;
+ void *shared_addr;
+ u32 ns_value[2];
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(gatemp_module->ref_count),
+ GATEMP_MAKE_MAGICSTAMP(0),
+ GATEMP_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(name == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ len = sizeof(ns_value);
+ /* Get the Attrs out of the NameServer instance.
+ * Search all processors. */
+ retval = nameserver_get(gatemp_module->name_server, name, &ns_value,
+ &len, NULL);
+ if (retval < 0) {
+ *handle = NULL;
+ return GateMP_E_NOTFOUND;
+ }
+
+	/* The least significant bit of ns_value[1] == 0 means it is a
+	 * local GateMP, otherwise it is a remote GateMP. */
+ if (!(ns_value[1] & 0x1) && ((ns_value[1] >> 16) != multiproc_self())) {
+ *handle = NULL;
+ return -1;
+ }
+
+ if ((ns_value[1] & 0x1) == 0) {
+ /* Opening a local GateMP locally. The GateMP is created
+ * from a local heap so don't do SharedRegion Ptr conversion. */
+ shared_addr = (u32 *)ns_value[0];
+ } else {
+ /* Opening a remote GateMP. Need to do SR ptr conversion. */
+ shared_shm_base = (u32 *)ns_value[0];
+ shared_addr = sharedregion_get_ptr(shared_shm_base);
+ }
+
+ retval = gatemp_open_by_addr(shared_addr, handle);
+
+exit:
+ if (retval < 0)
+ printk(KERN_ERR "gatemp_open failed! status = 0x%x", retval);
+ return retval;
+}
+
+int gatemp_open_by_addr(void *shared_addr, void **handle)
+{
+ int retval = 0;
+ int *key;
+ struct gatemp_object *obj = NULL;
+ struct _gatemp_params params;
+ struct gatemp_attrs *attrs;
+#if 0
+ u16 region_id;
+#endif
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(gatemp_module->ref_count),
+ GATEMP_MAKE_MAGICSTAMP(0),
+ GATEMP_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(shared_addr == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ attrs = (struct gatemp_attrs *)shared_addr;
+
+#if 0
+ /* get the region id and invalidate attrs is needed */
+ region_id = sharedregion_get_id(shared_addr);
+ if (region_id != SHAREDREGION_INVALIDREGIONID) {
+ if (sharedregion_is_cache_enabled(region_id))
+ Cache_inv(attrs, sizeof(struct gatemp_attrs),
+ Cache_Type_ALL, true);
+ }
+#endif
+
+ if (attrs->status != GATEMP_CREATED) {
+ retval = -1;
+ goto exit;
+ }
+
+ /* Local gate */
+ if (GETREMOTE(attrs->mask) == GATEMP_REMOTEPROTECT_NONE) {
+ if (attrs->creator_proc_id != multiproc_self())
+ retval = GateMP_E_LOCALGATE; /* TBD */
+ else {
+ key = Gate_enterSystem();
+ obj = (void *)attrs->arg;
+ *handle = obj;
+ obj->num_opens++;
+ Gate_leaveSystem(key);
+ }
+ } else {
+ /* Remote case */
+ switch (GETREMOTE(attrs->mask)) {
+ case GATEMP_REMOTEPROTECT_SYSTEM:
+ obj = (struct gatemp_object *)
+ gatemp_module->remote_system_gates[attrs->arg];
+ break;
+
+ case GATEMP_REMOTEPROTECT_CUSTOM1:
+ obj = (struct gatemp_object *)
+ gatemp_module->remote_custom1_gates[attrs->arg];
+ break;
+
+ case GATEMP_REMOTEPROTECT_CUSTOM2:
+ obj = (struct gatemp_object *)
+ gatemp_module->remote_custom2_gates[attrs->arg];
+ break;
+
+ default:
+ break;
+ }
+
+ /* If the object is NULL, then it must have been created on a
+ * remote processor. Need to create a local object. This is
+ * accomplished by setting the open_flag to true. */
+ if ((retval == 0) && (obj == NULL)) {
+ /* Create a GateMP object with the open_flag set to
+ * true */
+ params.name = NULL;
+ params.open_flag = true;
+ params.shared_addr = shared_addr;
+ params.resource_id = attrs->arg;
+ params.local_protect = GETLOCAL(attrs->mask);
+ params.remote_protect = GETREMOTE(attrs->mask);
+
+ obj = _gatemp_create(&params);
+ if (obj == NULL)
+ retval = GateMP_E_FAIL;
+ } else {
+ obj->num_opens++;
+ }
+
+ /* Return the "opened" GateMP instance */
+ *handle = obj;
+ }
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "gatemp_open_by_addr failed! status = 0x%x",
+ retval);
+ }
+ return retval;
+}
+
+int gatemp_close(void **handle)
+{
+ int *key;
+ struct gatemp_object *gate_handle = NULL;
+ int count;
+ int retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(gatemp_module->ref_count),
+ GATEMP_MAKE_MAGICSTAMP(0),
+ GATEMP_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely((handle == NULL) || (*handle == NULL)))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ gate_handle = (struct gatemp_object *)(*handle);
+	/* Cannot be called with num_opens equal to zero: the handle was
+	 * either created (never opened) or has already been closed. */
+ if (unlikely(gate_handle->num_opens == 0)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ key = Gate_enterSystem();
+ count = --gate_handle->num_opens;
+ Gate_leaveSystem(key);
+
+ /* If the gate is remote, call the close function */
+ if (gate_handle->remote_protect != GATEMP_REMOTEPROTECT_NONE) {
+		/* If the count is zero and the gate was opened, then this
+		 * object was created in the open (i.e. the create happened
+		 * on a remote processor). */
+ if ((count == 0) && \
+ (gate_handle->obj_type == IPC_OBJTYPE_OPENDYNAMIC))
+ gatemp_delete((void **)&gate_handle);
+ }
+ *handle = NULL;
+ return 0;
+
+exit:
+ printk(KERN_ERR "gatemp_close failed! status = 0x%x", retval);
+ return retval;
+}
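
The opener-side counterpart to the creator sketch earlier: gatemp_open() resolves the (hypothetical) name through the GateMP NameServer, and gatemp_close() drops the reference, deleting the local shadow object if it was created during the open.

	void *gate = NULL;

	if (gatemp_open("myGate", &gate) == 0) {
		int *key = gatemp_enter(gate);
		/* ... */
		gatemp_leave(gate, key);
		gatemp_close(&gate);
	}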
+
+u32 *gatemp_get_shared_addr(void *obj)
+{
+ u32 *sr_ptr;
+ struct gatemp_object *object = (struct gatemp_object *) obj;
+
+ sr_ptr = sharedregion_get_srptr(object->attrs, object->region_id);
+
+ return sr_ptr;
+}
+
+bool gatemp_query(int qual)
+{
+ return false;
+}
+
+void *gatemp_get_default_remote(void)
+{
+ return gatemp_module->default_gate;
+}
+
+enum gatemp_local_protect gatemp_get_local_protect(struct gatemp_object *obj)
+{
+ WARN_ON(obj == NULL);
+
+ return obj->local_protect;
+}
+
+enum gatemp_remote_protect gatemp_get_remote_protect(struct gatemp_object *obj)
+{
+ WARN_ON(obj == NULL);
+
+ return obj->remote_protect;
+}
+
+void *gatemp_create_local(enum gatemp_local_protect local_protect)
+{
+ void *gate_handle = NULL;
+
+ /* Create the local gate. */
+ switch (local_protect) {
+ case GATEMP_LOCALPROTECT_NONE:
+ /* Plug with the GateNull singleton */
+ gate_handle = NULL;
+ break;
+
+ case GATEMP_LOCALPROTECT_INTERRUPT:
+ /* Plug with the GateHwi singleton */
+ gate_handle = gate_system_handle;
+ break;
+
+ case GATEMP_LOCALPROTECT_TASKLET:
+ /* Plug with the GateSwi singleton */
+ gate_handle = gatemp_module->gate_mutex;
+ break;
+
+ case GATEMP_LOCALPROTECT_THREAD:
+ case GATEMP_LOCALPROTECT_PROCESS:
+ /* Plug with the GateMutexPri singleton */
+ gate_handle = gatemp_module->gate_mutex;
+ break;
+
+ default:
+ break;
+ }
+
+ return gate_handle;
+}
+
+uint gatemp_shared_mem_req(const struct gatemp_params *params)
+{
+ uint mem_req, min_align;
+ u16 region_id;
+ gatemp_remote_system_proxy_params system_params;
+ gatemp_remote_custom1_proxy_params custom1_params;
+ gatemp_remote_custom2_proxy_params custom2_params;
+
+ if (params->shared_addr)
+ region_id = sharedregion_get_id(params->shared_addr);
+ else
+ region_id = params->region_id;
+
+	/*min_align = Memory_getMaxDefaultTypeAlign();*/
+	min_align = 4;
+ if (sharedregion_get_cache_line_size(region_id) > min_align)
+ min_align = sharedregion_get_cache_line_size(region_id);
+
+ mem_req = ROUND_UP(sizeof(struct gatemp_attrs), min_align);
+
+ /* add the amount of shared memory required by proxy */
+ if (params->remote_protect == GATEMP_REMOTEPROTECT_SYSTEM) {
+ gatemp_remote_system_proxy_params_init(&system_params);
+ system_params.region_id = region_id;
+ mem_req += gatemp_remote_system_proxy_shared_mem_req(
+ &system_params);
+ } else if (params->remote_protect == GATEMP_REMOTEPROTECT_CUSTOM1) {
+ gatemp_remote_custom1_proxy_params_init(&custom1_params);
+ custom1_params.region_id = region_id;
+ mem_req += gatemp_remote_custom1_proxy_shared_mem_req(
+ &custom1_params);
+ } else if (params->remote_protect == GATEMP_REMOTEPROTECT_CUSTOM2) {
+ gatemp_remote_custom2_proxy_params_init(&custom2_params);
+ custom2_params.region_id = region_id;
+ mem_req += gatemp_remote_custom2_proxy_shared_mem_req(
+ &custom2_params);
+ }
+
+ return mem_req;
+}
+
+uint gatemp_get_region0_reserved_size(void)
+{
+ uint reserved, min_align;
+
+	/*min_align = Memory_getMaxDefaultTypeAlign();*/
+	min_align = 4;
+
+ if (sharedregion_get_cache_line_size(0) > min_align)
+ min_align = sharedregion_get_cache_line_size(0);
+
+ reserved = ROUND_UP(sizeof(struct gatemp_reserved), min_align);
+
+ reserved += ROUND_UP(gatemp_module->num_remote_system, min_align);
+
+ if (gatemp_module->proxy_map[GATEMP_PROXYORDER_CUSTOM1] ==
+ GATEMP_PROXYORDER_CUSTOM1) {
+ reserved += ROUND_UP(gatemp_module->num_remote_custom1,
+ min_align);
+ }
+
+ if (gatemp_module->proxy_map[GATEMP_PROXYORDER_CUSTOM2] ==
+ GATEMP_PROXYORDER_CUSTOM2) {
+ reserved += ROUND_UP(gatemp_module->num_remote_custom2,
+ min_align);
+ }
+
+ return reserved;
+}
+
+static void gatemp_set_region0_reserved(void *shared_addr)
+{
+ struct gatemp_reserved *reserve;
+ u32 min_align, offset;
+
+	/*min_align = Memory_getMaxDefaultTypeAlign();*/
+	min_align = 4;
+ if (sharedregion_get_cache_line_size(0) > min_align)
+ min_align = sharedregion_get_cache_line_size(0);
+
+ /* setup struct gatemp_reserved fields */
+ reserve = (struct gatemp_reserved *)shared_addr;
+ reserve->version = GATEMP_VERSION;
+
+ if (sharedregion_is_cache_enabled(0)) {
+#if 0
+ Cache_wbInv(shared_addr, sizeof(struct gatemp_reserved),
+ Cache_Type_ALL, true);
+#endif
+ }
+
+ /* initialize the in-use array in shared memory for the system gates. */
+ offset = ROUND_UP(sizeof(struct gatemp_reserved), min_align);
+ gatemp_module->remote_system_in_use =
+ (void *)((u32)shared_addr + offset);
+ memset(gatemp_module->remote_system_in_use, 0,
+ gatemp_module->num_remote_system);
+
+ if (sharedregion_is_cache_enabled(0)) {
+#if 0
+ Cache_wbInv(gatemp_module->remote_system_in_use,
+ gatemp_module->num_remote_system,
+ Cache_Type_ALL, true);
+#endif
+ }
+
+ /* initialize the in-use array in shared memory for the custom1 gates.
+ * Need to check if this proxy is the same as system */
+ offset = ROUND_UP(gatemp_module->num_remote_system, min_align);
+ if (gatemp_module->proxy_map[GATEMP_PROXYORDER_CUSTOM1] ==
+ GATEMP_PROXYORDER_CUSTOM1) {
+ if (gatemp_module->num_remote_custom1 != 0) {
+ gatemp_module->remote_custom1_in_use =
+ gatemp_module->remote_system_in_use + offset;
+ }
+
+ memset(gatemp_module->remote_custom1_in_use, 0,
+ gatemp_module->num_remote_custom1);
+
+ if (sharedregion_is_cache_enabled(0)) {
+#if 0
+ Cache_wbInv(gatemp_module->remote_custom1_in_use,
+ gatemp_module->num_remote_custom1,
+ Cache_Type_ALL, true);
+#endif
+ }
+ } else {
+ gatemp_module->remote_custom1_in_use = \
+ gatemp_module->remote_system_in_use;
+ gatemp_module->remote_custom1_gates = \
+ gatemp_module->remote_system_gates;
+ }
+
+ /* initialize the in-use array in shared memory for the custom2 gates.
+ * Need to check if this proxy is the same as system or custom1 */
+ offset = ROUND_UP(gatemp_module->num_remote_custom1, min_align);
+ if (gatemp_module->proxy_map[GATEMP_PROXYORDER_CUSTOM2] ==
+ GATEMP_PROXYORDER_CUSTOM2) {
+ if (gatemp_module->num_remote_custom2 != 0) {
+ gatemp_module->remote_custom2_in_use =
+ gatemp_module->remote_custom1_in_use + offset;
+ }
+ memset(gatemp_module->remote_custom2_in_use, 0,
+ gatemp_module->num_remote_custom2);
+
+ if (sharedregion_is_cache_enabled(0)) {
+#if 0
+ Cache_wbInv(gatemp_module->remote_custom2_in_use,
+ gatemp_module->num_remote_custom2,
+ Cache_Type_ALL, true);
+#endif
+ }
+ } else if (gatemp_module->proxy_map[GATEMP_PROXYORDER_CUSTOM2] ==
+ GATEMP_PROXYORDER_CUSTOM1) {
+ gatemp_module->remote_custom2_in_use =
+ gatemp_module->remote_custom1_in_use;
+ gatemp_module->remote_custom2_gates =
+ gatemp_module->remote_custom1_gates;
+ } else {
+ gatemp_module->remote_custom2_in_use = \
+ gatemp_module->remote_system_in_use;
+ gatemp_module->remote_custom2_gates = \
+ gatemp_module->remote_system_gates;
+ }
+
+ return;
+}
+
+static void gatemp_clear_region0_reserved(void)
+{
+ printk(KERN_INFO "gatemp_clear_region0_reserved: either nothing to do "
+ "or not implemented");
+}
+
+static void gatemp_open_region0_reserved(void *shared_addr)
+{
+ struct gatemp_reserved *reserve;
+ u32 min_align, offset;
+
+	/*min_align = Memory_getMaxDefaultTypeAlign();*/
+	min_align = 4;
+ if (sharedregion_get_cache_line_size(0) > min_align)
+ min_align = sharedregion_get_cache_line_size(0);
+
+
+ /* setup struct gatemp_reserved fields */
+ reserve = (struct gatemp_reserved *)shared_addr;
+
+ if (reserve->version != GATEMP_VERSION) {
+ /* TBD */
+ return;
+ }
+
+ offset = ROUND_UP(sizeof(struct gatemp_reserved), min_align);
+ gatemp_module->remote_system_in_use = \
+ (void *)((u32)shared_addr + offset);
+
+ /* initialize the in-use array in shared memory for the custom1 gates.
+ * Need to check if this proxy is the same as system */
+ offset = ROUND_UP(gatemp_module->num_remote_system, min_align);
+ if (gatemp_module->proxy_map[GATEMP_PROXYORDER_CUSTOM1] ==
+ GATEMP_PROXYORDER_CUSTOM1) {
+ if (gatemp_module->num_remote_custom1 != 0) {
+ gatemp_module->remote_custom1_in_use =
+ gatemp_module->remote_system_in_use + offset;
+ }
+ } else {
+ gatemp_module->remote_custom1_in_use = \
+ gatemp_module->remote_system_in_use;
+ gatemp_module->remote_custom1_gates = \
+ gatemp_module->remote_system_gates;
+ }
+
+ offset = ROUND_UP(gatemp_module->num_remote_custom1, min_align);
+ if (gatemp_module->proxy_map[GATEMP_PROXYORDER_CUSTOM2] ==
+ GATEMP_PROXYORDER_CUSTOM2) {
+ if (gatemp_module->num_remote_custom2 != 0) {
+ gatemp_module->remote_custom2_in_use =
+ gatemp_module->remote_custom1_in_use + offset;
+ }
+ } else if (gatemp_module->proxy_map[GATEMP_PROXYORDER_CUSTOM2] ==
+ GATEMP_PROXYORDER_CUSTOM1) {
+ gatemp_module->remote_custom2_in_use =
+ gatemp_module->remote_custom1_in_use;
+ gatemp_module->remote_custom2_gates =
+ gatemp_module->remote_custom1_gates;
+ } else {
+ gatemp_module->remote_custom2_in_use = \
+ gatemp_module->remote_system_in_use;
+ gatemp_module->remote_custom2_gates = \
+ gatemp_module->remote_system_gates;
+ }
+
+ return;
+}
+
+static void gatemp_close_region0_reserved(void *shared_addr)
+{
+	printk(KERN_INFO "gatemp_close_region0_reserved: either nothing to do "
+			"or not implemented\n");
+}
+
+static void gatemp_set_default_remote(void *handle)
+{
+ gatemp_module->default_gate = handle;
+}
+
+int gatemp_start(void *shared_addr)
+{
+ struct sharedregion_entry entry;
+ struct gatemp_params gatemp_params;
+ void *default_gate;
+ int retval = 0;
+
+ /* get region 0 information */
+ sharedregion_get_entry(0, &entry);
+
+	/* proceed only if the entry owner proc is specified */
+ if (entry.owner_proc_id != SHAREDREGION_DEFAULTOWNERID) {
+ if (entry.owner_proc_id == multiproc_self()) {
+			/* Initialize the locks if necessary. */
+ gatemp_remote_system_proxy_locks_init();
+ gatemp_remote_custom1_proxy_locks_init();
+ gatemp_remote_custom2_proxy_locks_init();
+ }
+
+ /* Init params for default gate */
+ gatemp_params_init(&gatemp_params);
+ gatemp_params.shared_addr = (void *)((u32)shared_addr +
+ gatemp_get_region0_reserved_size());
+ gatemp_params.local_protect = GATEMP_LOCALPROTECT_TASKLET;
+
+ if (multiproc_get_num_processors() > 1) {
+ gatemp_params.remote_protect = \
+ GATEMP_REMOTEPROTECT_SYSTEM;
+ } else {
+ gatemp_params.remote_protect = \
+ GATEMP_REMOTEPROTECT_NONE;
+ }
+
+ if (entry.owner_proc_id == multiproc_self()) {
+ gatemp_module->is_owner = true;
+
+ /* if owner of the SharedRegion */
+ gatemp_set_region0_reserved(shared_addr);
+
+ /* create default GateMP */
+ default_gate = gatemp_create(&gatemp_params);
+
+ if (default_gate != NULL) {
+ /* set the default GateMP for creator */
+ gatemp_set_default_remote(default_gate);
+ } else {
+ retval = -1;
+ }
+ }
+ }
+
+ if (retval < 0)
+		printk(KERN_ERR "gatemp_start failed! status = 0x%x\n",
+			retval);
+ return retval;
+}
+
+int gatemp_stop(void)
+{
+ int retval = 0;
+
+	/* only the owner of the SharedRegion has cleanup to do */
+	if (gatemp_module->is_owner == true) {
+ gatemp_clear_region0_reserved();
+
+ gatemp_delete((void **)&gatemp_module->default_gate);
+
+		/* clear the default GateMP for the creator */
+ gatemp_set_default_remote(NULL);
+ }
+
+ return retval;
+}
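+
+/*
+ * Typical bring-up/teardown pairing for the two calls above (a minimal
+ * sketch, assuming the usual SysLink order and omitting error handling):
+ *
+ *	struct gatemp_config cfg;
+ *
+ *	gatemp_get_config(&cfg);
+ *	gatemp_setup(&cfg);
+ *	gatemp_start(shared_addr);	-- owner reserves region 0 and
+ *					   creates the default gate
+ *	...
+ *	gatemp_stop();			-- owner deletes the default gate
+ *	gatemp_destroy();
+ */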
+
+
+/*
+ *************************************************************************
+ * Internal functions
+ *************************************************************************
+ */
+uint gatemp_get_free_resource(u8 *in_use, int num)
+{
+ int *key = 0;
+ bool flag = false;
+ uint resource_id;
+ void *default_gate;
+
+ /* Need to look at shared memory. Enter default gate */
+ default_gate = gatemp_get_default_remote();
+
+ if (default_gate)
+ key = gatemp_enter(default_gate);
+
+#if 0
+ /* Invalidate cache before looking at the in-use flags */
+ if (sharedregion_is_cache_enabled(0))
+ Cache_inv(in_use, num * sizeof(u8), Cache_Type_ALL, true);
+#endif
+
+ /* Find a free resource id. Note: zero is reserved on the
+ * system proxy for the default gate. */
+ for (resource_id = 0; resource_id < num; resource_id++) {
+ /* If not in-use, set the in_use to true to prevent other
+ * creates from getting this one. */
+ if (in_use[resource_id] == false) {
+ flag = true;
+
+ /* Denote in shared memory that the resource is used */
+ in_use[resource_id] = true;
+ break;
+ }
+ }
+
+#if 0
+	/* Write back if an in-use flag was changed */
+ if (flag == true && sharedregion_is_cache_enabled(0))
+ Cache_wbInv(in_use, num * sizeof(u8), Cache_Type_ALL, true);
+#endif
+
+ /* Done with the critical section */
+ if (default_gate)
+ gatemp_leave(default_gate, key);
+
+ if (flag == false)
+ resource_id = -1;
+
+ return resource_id;
+}
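+
+/*
+ * The in-use arrays scanned above form a small shared-memory slot
+ * allocator serialized by the default gate. A creator's side looks
+ * roughly like this (a sketch; the matching slot release is assumed to
+ * happen in the instance finalize path):
+ *
+ *	id = gatemp_get_free_resource(gatemp_module->remote_system_in_use,
+ *				gatemp_module->num_remote_system);
+ *	if (id == (uint)-1)
+ *		-- all system gate slots taken; fail the create
+ */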
+
+void *gatemp_create(const struct gatemp_params *params)
+{
+ struct _gatemp_params _params;
+ struct gatemp_object *handle = NULL;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(gatemp_module->ref_count),
+ GATEMP_MAKE_MAGICSTAMP(0),
+ GATEMP_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(params == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(params->shared_addr == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ memset(&_params, 0, sizeof(struct _gatemp_params));
+ memcpy(&_params, params, sizeof(struct gatemp_params));
+
+ handle = _gatemp_create(&_params);
+
+exit:
+ if (retval < 0)
+		printk(KERN_ERR "gatemp_create failed! status = 0x%x\n",
+			retval);
+ return (void *)handle;
+}
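+
+/*
+ * Illustrative create call (a minimal sketch; 'base' stands for memory
+ * the caller has carved out of a SharedRegion and is an assumption):
+ *
+ *	struct gatemp_params params;
+ *	void *gate;
+ *
+ *	gatemp_params_init(&params);
+ *	params.shared_addr = base;
+ *	params.local_protect = GATEMP_LOCALPROTECT_TASKLET;
+ *	params.remote_protect = GATEMP_REMOTEPROTECT_SYSTEM;
+ *	gate = gatemp_create(&params);
+ */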
+
+static struct gatemp_object *_gatemp_create(const struct _gatemp_params *params)
+{
+ struct gatemp_object *obj = NULL;
+ s32 retval = 0;
+ int *key;
+
+ /* No parameter checking since internal function */
+
+ obj = kmalloc(sizeof(struct gatemp_object), GFP_KERNEL);
+ if (obj == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ obj->status = gatemp_instance_init(obj, params);
+ if (obj->status != 0) {
+ retval = -1;
+ goto gatemp_init_fail;
+ }
+
+ key = Gate_enterSystem();
+ if (gatemp_first_object == NULL) {
+ gatemp_first_object = obj;
+ obj->next = NULL;
+ } else {
+ obj->next = gatemp_first_object;
+ gatemp_first_object = obj;
+ }
+ Gate_leaveSystem(key);
+ return (void *)obj;
+
+gatemp_init_fail:
+ kfree(obj);
+ obj = NULL;
+exit:
+	printk(KERN_ERR "_gatemp_create failed! status = 0x%x\n", retval);
+ return (void *)NULL;
+}
+
+int gatemp_delete(void **handle)
+{
+ int *key;
+ struct gatemp_object *temp;
+ bool found = false;
+ int retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(gatemp_module->ref_count),
+ GATEMP_MAKE_MAGICSTAMP(0),
+ GATEMP_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely((handle == NULL) || (*handle == NULL)))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ key = Gate_enterSystem();
+ if ((struct gatemp_object *)*handle == gatemp_first_object) {
+ gatemp_first_object = ((struct gatemp_object *)(*handle))->next;
+ found = true;
+ } else {
+ temp = gatemp_first_object;
+ while (temp) {
+ if (temp->next == (struct gatemp_object *)(*handle)) {
+ temp->next = ((struct gatemp_object *)
+ (*handle))->next;
+ found = true;
+ break;
+ } else {
+ temp = temp->next;
+ }
+ }
+ }
+ Gate_leaveSystem(key);
+
+ if (found == false) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ gatemp_instance_finalize(*handle, ((struct gatemp_object *)
+ (*handle))->status);
+ kfree((*handle));
+ *handle = NULL;
+ return 0;
+
+exit:
+	printk(KERN_ERR "gatemp_delete failed! status = 0x%x\n", retval);
+ return retval;
+}
+
+int gatemp_attach(u16 remote_proc_id, void *shared_addr)
+{
+ int retval = 0;
+ void *gatemp_shared_addr;
+ struct sharedregion_entry entry;
+ void *default_gate;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(gatemp_module->ref_count),
+ GATEMP_MAKE_MAGICSTAMP(0),
+ GATEMP_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(shared_addr == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (atomic_inc_return(&gatemp_module->attach_ref_count) != 1)
+ return 1;
+
+ /* get region 0 information */
+ sharedregion_get_entry(0, &entry);
+
+ gatemp_shared_addr = (void *)((u32)shared_addr +
+ gatemp_get_region0_reserved_size());
+
+ if ((entry.owner_proc_id != multiproc_self()) &&
+ (entry.owner_proc_id != SHAREDREGION_DEFAULTOWNERID)) {
+ gatemp_module->is_owner = false;
+
+ /* if not the owner of the SharedRegion */
+ gatemp_open_region0_reserved(shared_addr);
+
+ /* open the gate by address */
+ retval = gatemp_open_by_addr(gatemp_shared_addr, &default_gate);
+ /* set the default GateMP for opener */
+ if (retval >= 0)
+ gatemp_set_default_remote(default_gate);
+ }
+
+exit:
+ if (retval < 0)
+		printk(KERN_ERR "gatemp_attach failed! status = 0x%x\n",
+			retval);
+ return retval;
+}
+
+int gatemp_detach(u16 remote_proc_id, void *shared_addr)
+{
+ int retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(gatemp_module->ref_count),
+ GATEMP_MAKE_MAGICSTAMP(0),
+ GATEMP_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(shared_addr == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+	if (atomic_dec_return(&gatemp_module->attach_ref_count) != 0)
+ return 1;
+
+	/* only an opener (non-owner) has cleanup to do here */
+ if (gatemp_module->is_owner == false) {
+ gatemp_close_region0_reserved(shared_addr);
+
+ retval = gatemp_close((void **)&gatemp_module->default_gate);
+
+		/* clear the default GateMP for the opener */
+ gatemp_set_default_remote(NULL);
+ }
+
+exit:
+ if (retval < 0)
+		printk(KERN_ERR "gatemp_detach failed! status = 0x%x\n",
+			retval);
+ return retval;
+}
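+
+/*
+ * gatemp_attach()/gatemp_detach() are reference counted through
+ * attach_ref_count: only the first attach opens the default gate and
+ * only the last detach closes it, so per-processor code can pair them
+ * unconditionally (a sketch, assuming the Ipc attach path drives them):
+ *
+ *	gatemp_attach(remote_proc_id, shared_addr);
+ *	...
+ *	gatemp_detach(remote_proc_id, shared_addr);
+ */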
diff --git a/drivers/dsp/syslink/multicore_ipc/gatemp_ioctl.c b/drivers/dsp/syslink/multicore_ipc/gatemp_ioctl.c
new file mode 100644
index 000000000000..7270a76774b9
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/gatemp_ioctl.c
@@ -0,0 +1,356 @@
+/*
+ * gatemp_ioctl.c
+ *
+ * ioctl interface for the GateMP module, which provides multiprocessor
+ * gates protecting shared memory.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <gatemp.h>
+#include <gatemp_ioctl.h>
+#include <sharedregion.h>
+
+/* ioctl interface to gatemp_get_config function*/
+static int gatemp_ioctl_get_config(struct gatemp_cmd_args *cargs)
+{
+ struct gatemp_config config;
+ s32 status = 0;
+ s32 size;
+
+ gatemp_get_config(&config);
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct gatemp_config));
+ if (size)
+ status = -EFAULT;
+
+ cargs->api_status = 0;
+ return status;
+}
+
+/* ioctl interface to gatemp_setup function */
+static int gatemp_ioctl_setup(struct gatemp_cmd_args *cargs)
+{
+ struct gatemp_config config;
+ s32 status = 0;
+ s32 size;
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct gatemp_config));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = gatemp_setup(&config);
+
+exit:
+ return status;
+}
+
+/* ioctl interface to gatemp_destroy function */
+static int gatemp_ioctl_destroy(struct gatemp_cmd_args *cargs)
+{
+ cargs->api_status = gatemp_destroy();
+ return 0;
+}
+
+/* ioctl interface to gatemp_params_init function */
+static int gatemp_ioctl_params_init(struct gatemp_cmd_args *cargs)
+{
+ struct gatemp_params params;
+ s32 status = 0;
+ s32 size;
+
+ gatemp_params_init(&params);
+ size = copy_to_user(cargs->args.params_init.params, &params,
+ sizeof(struct gatemp_params));
+ if (size)
+ status = -EFAULT;
+
+ cargs->api_status = 0;
+ return status;
+}
+
+/* ioctl interface to gatemp_create function */
+static int gatemp_ioctl_create(struct gatemp_cmd_args *cargs)
+{
+ struct gatemp_params params;
+ s32 status = 0;
+ s32 size;
+
+ cargs->api_status = -1;
+ size = copy_from_user(&params, cargs->args.create.params,
+ sizeof(struct gatemp_params));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ if (cargs->args.create.name_len > 0) {
+		params.name = kmalloc(cargs->args.create.name_len + 1,
+							GFP_KERNEL);
+ if (params.name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ params.name[cargs->args.create.name_len] = '\0';
+ size = copy_from_user(params.name,
+ cargs->args.create.params->name,
+ cargs->args.create.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+	}
+
+ params.shared_addr = sharedregion_get_ptr(
+ (u32 *)cargs->args.create.shared_addr_srptr);
+ cargs->args.create.handle = gatemp_create(&params);
+ if (cargs->args.create.handle != NULL)
+ cargs->api_status = 0;
+
+name_from_usr_error:
+	if (cargs->args.create.name_len > 0)
+ kfree(params.name);
+
+exit:
+ return status;
+}
+
+/* ioctl interface to gatemp_ioctl_delete function */
+static int gatemp_ioctl_delete(struct gatemp_cmd_args *cargs)
+
+{
+ cargs->api_status = gatemp_delete(&cargs->args.delete_instance.handle);
+ return 0;
+}
+
+/* ioctl interface to gatemp_open function */
+static int gatemp_ioctl_open(struct gatemp_cmd_args *cargs)
+{
+ struct gatemp_params params;
+ void *handle = NULL;
+ s32 status = 0;
+ s32 size;
+
+ gatemp_params_init(&params);
+ if (cargs->args.open.name_len > 0) {
+		params.name = kmalloc(cargs->args.open.name_len + 1,
+							GFP_KERNEL);
+ if (params.name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ params.name[cargs->args.open.name_len] = '\0';
+ size = copy_from_user(params.name,
+ cargs->args.open.name,
+ cargs->args.open.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+ }
+
+ cargs->api_status = gatemp_open(params.name, &handle);
+ cargs->args.open.handle = handle;
+
+name_from_usr_error:
+ if (cargs->args.open.name_len > 0)
+ kfree(params.name);
+
+exit:
+ return status;
+}
+
+/* ioctl interface to gatemp_close function */
+static int gatemp_ioctl_close(struct gatemp_cmd_args *cargs)
+{
+ cargs->api_status = gatemp_close(&cargs->args.close.handle);
+ return 0;
+}
+
+/* ioctl interface to gatemp_enter function */
+static int gatemp_ioctl_enter(struct gatemp_cmd_args *cargs)
+{
+ cargs->args.enter.flags = gatemp_enter(cargs->args.enter.handle);
+ cargs->api_status = 0;
+ return 0;
+}
+
+/* ioctl interface to gatemp_leave function */
+static int gatemp_ioctl_leave(struct gatemp_cmd_args *cargs)
+{
+ gatemp_leave(cargs->args.leave.handle, cargs->args.leave.flags);
+ cargs->api_status = 0;
+ return 0;
+}
+
+/* ioctl interface to gatemp_shared_mem_req function */
+static int gatemp_ioctl_shared_mem_req(struct gatemp_cmd_args *cargs)
+{
+ struct gatemp_params params;
+ s32 status = 0;
+ s32 size;
+
+ size = copy_from_user(&params, cargs->args.shared_mem_req.params,
+ sizeof(struct gatemp_params));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+	cargs->args.shared_mem_req.ret_val =
+				gatemp_shared_mem_req(&params);
+ cargs->api_status = 0;
+
+exit:
+ return status;
+}
+
+/* ioctl interface to gatemp_open_by_addr function */
+static int gatemp_ioctl_open_by_addr(struct gatemp_cmd_args *cargs)
+{
+	void *shared_addr = NULL;
+ void *handle = NULL;
+ s32 status = 0;
+
+	/* For open by name, the shared_addr_srptr may be invalid */
+ if (cargs->args.open_by_addr.shared_addr_srptr != \
+ SHAREDREGION_INVALIDSRPTR) {
+ shared_addr = sharedregion_get_ptr((u32 *)cargs->args.
+ open_by_addr.shared_addr_srptr);
+ }
+ cargs->api_status = gatemp_open_by_addr(shared_addr, &handle);
+ cargs->args.open.handle = handle;
+
+ return status;
+}
+
+/* ioctl interface to gatemp_ioctl_get_default_remote function */
+static int gatemp_ioctl_get_default_remote(struct gatemp_cmd_args *cargs)
+{
+ void *handle = NULL;
+
+ handle = gatemp_get_default_remote();
+ cargs->args.get_default_remote.handle = handle;
+ cargs->api_status = 0;
+
+ return 0;
+}
+
+/* ioctl interface for gatemp module */
+int gatemp_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ s32 status = 0;
+ s32 size = 0;
+ struct gatemp_cmd_args __user *uarg =
+ (struct gatemp_cmd_args __user *)args;
+ struct gatemp_cmd_args cargs;
+
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+ if (status) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg,
+ sizeof(struct gatemp_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_GATEMP_GETCONFIG:
+ status = gatemp_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_GATEMP_SETUP:
+ status = gatemp_ioctl_setup(&cargs);
+ break;
+
+ case CMD_GATEMP_DESTROY:
+ status = gatemp_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_GATEMP_PARAMS_INIT:
+ status = gatemp_ioctl_params_init(&cargs);
+ break;
+
+ case CMD_GATEMP_CREATE:
+ status = gatemp_ioctl_create(&cargs);
+ break;
+
+ case CMD_GATEMP_DELETE:
+ status = gatemp_ioctl_delete(&cargs);
+ break;
+
+ case CMD_GATEMP_OPEN:
+ status = gatemp_ioctl_open(&cargs);
+ break;
+
+ case CMD_GATEMP_CLOSE:
+ status = gatemp_ioctl_close(&cargs);
+ break;
+
+ case CMD_GATEMP_ENTER:
+ status = gatemp_ioctl_enter(&cargs);
+ break;
+
+ case CMD_GATEMP_LEAVE:
+ status = gatemp_ioctl_leave(&cargs);
+ break;
+
+ case CMD_GATEMP_SHAREDMEMREQ:
+ status = gatemp_ioctl_shared_mem_req(&cargs);
+ break;
+
+ case CMD_GATEMP_OPENBYADDR:
+ status = gatemp_ioctl_open_by_addr(&cargs);
+ break;
+
+ case CMD_GATEMP_GETDEFAULTREMOTE:
+ status = gatemp_ioctl_get_default_remote(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ status = -ENOTTY;
+ break;
+ }
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs, sizeof(struct gatemp_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ return status;
+}
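+
+/*
+ * User-space calling sketch for the dispatcher above (illustrative
+ * only; the file descriptor's device path is an assumption, the
+ * cmd_args layout is from this file):
+ *
+ *	struct gatemp_cmd_args cargs;
+ *
+ *	cargs.args.enter.handle = handle;
+ *	ioctl(fd, CMD_GATEMP_ENTER, &cargs);
+ *	-- cargs.args.enter.flags now holds the key to pass back
+ *	   through CMD_GATEMP_LEAVE
+ */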
diff --git a/drivers/dsp/syslink/multicore_ipc/gatepeterson.c b/drivers/dsp/syslink/multicore_ipc/gatepeterson.c
new file mode 100644
index 000000000000..d49561759f75
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/gatepeterson.c
@@ -0,0 +1,1004 @@
+/*
+ * gatepeterson.c
+ *
+ * The Gate Peterson Algorithm for mutual exclusion of shared memory.
+ * Current implementation works for 2 processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <syslink/atomic_linux.h>
+#include <multiproc.h>
+#include <sharedregion.h>
+#include <gatemp.h>
+#include <igatempsupport.h>
+#include <igateprovider.h>
+#include <iobject.h>
+#include <gatepeterson.h>
+
+
+/* IPC stubs */
+
+#define GATEPETERSON_BUSY 1
+#define GATEPETERSON_FREE 0
+#define GATEPETERSON_VERSION 1
+#define GATEPETERSON_CREATED 0x08201997 /* Stamp to indicate GP
+ was created here */
+
+/* Cache line size */
+#define GATEPETERSON_CACHESIZE 128
+
+/* Macro to make a correct module magic number with ref_count */
+#define GATEPETERSON_MAKE_MAGICSTAMP(x) ((GATEPETERSON_MODULEID << 12) | (x))
+
+/*
+ * structure for gatepeterson module state
+ */
+struct gatepeterson_module_object {
+ atomic_t ref_count; /* Reference count */
+ struct list_head obj_list;
+ struct mutex *mod_lock; /* Lock for obj list */
+ struct gatepeterson_config cfg;
+ struct gatepeterson_config default_cfg;
+	struct gatepeterson_params def_inst_params; /* default instance
+						parameters */
+};
+
+/*
+ * Structure defining attribute parameters for the Gate Peterson module
+ */
+struct gatepeterson_attrs {
+ VOLATILE u16 creator_proc_id;
+ VOLATILE u16 opener_proc_id;
+};
+
+/*
+ * Structure defining internal object for the Gate Peterson
+ */
+struct gatepeterson_object {
+ IGATEPROVIDER_SUPEROBJECT; /* For inheritance from IGateProvider */
+ IOBJECT_SUPEROBJECT; /* For inheritance for IObject */
+ struct list_head elem;
+ VOLATILE struct gatepeterson_attrs *attrs; /* Instance attr */
+ VOLATILE u16 *flag[2]; /* Flags for processors */
+	VOLATILE u16 *turn; /* Indicates whose turn it is now */
+ u16 self_id; /* Self identifier */
+ u16 other_id; /* Other's identifier */
+ u32 nested; /* Counter to track nesting */
+ void *local_gate; /* Local lock handle */
+ enum igatempsupport_local_protect local_protect; /* Type of local
+ protection to be used */
+ struct gatepeterson_params params;
+ u32 ref_count; /* Local reference count */
+ u32 cache_line_size; /* Cache Line Size */
+ bool cache_enabled; /* Is cache enabled? */
+};
+
+
+/*
+ * Variable for holding state of the gatepeterson module
+ */
+struct gatepeterson_module_object gatepeterson_state = {
+ .obj_list = LIST_HEAD_INIT(gatepeterson_state.obj_list),
+ .default_cfg.default_protection = GATEPETERSON_PROTECT_INTERRUPT,
+ .default_cfg.num_instances = 16,
+ .def_inst_params.shared_addr = 0x0,
+ .def_inst_params.resource_id = 0x0,
+ .def_inst_params.region_id = 0x0
+};
+
+static struct gatepeterson_module_object *gatepeterson_module =
+ &gatepeterson_state;
+
+/* =============================================================================
+ * Internal functions
+ * =============================================================================
+ */
+#if 0
+static void *_gatepeterson_create(enum igatempsupport_local_protect
+ local_protect,
+ const struct gatepeterson_params *params,
+ bool create_flag);
+#endif
+
+static void gatepeterson_post_init(struct gatepeterson_object *obj);
+
+#if 0
+static bool gatepeterson_inc_refcount(const struct gatepeterson_params *params,
+ void **handle);
+#endif
+
+/* TODO: figure these out */
+#define gate_enter_system() 0
+#define gate_leave_system(key) {}
+
+/* =============================================================================
+ * APIS
+ * =============================================================================
+ */
+/*
+ * ======== gatepeterson_get_config ========
+ * Purpose:
+ * This will get the default configuration parameters for gatepeterson
+ * module
+ */
+void gatepeterson_get_config(struct gatepeterson_config *config)
+{
+ if (WARN_ON(config == NULL))
+ goto exit;
+
+ if (atomic_cmpmask_and_lt(&(gatepeterson_module->ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true)
+ memcpy(config, &gatepeterson_module->default_cfg,
+ sizeof(struct gatepeterson_config));
+ else
+ memcpy(config, &gatepeterson_module->cfg,
+ sizeof(struct gatepeterson_config));
+
+exit:
+ return;
+}
+EXPORT_SYMBOL(gatepeterson_get_config);
+
+/*
+ * ======== gatepeterson_setup ========
+ * Purpose:
+ * This will setup the gatepeterson module
+ */
+int gatepeterson_setup(const struct gatepeterson_config *config)
+{
+ struct gatepeterson_config tmp_cfg;
+ int *key = 0;
+ s32 retval = 0;
+
+ key = gate_enter_system();
+
+	/* Initialize the ref_count if it has not been set up yet; the upper
+	 * 16 bits are written with the module ID to ensure correctness of
+	 * the ref_count variable
+	 */
+ atomic_cmpmask_and_set(&gatepeterson_module->ref_count,
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&gatepeterson_module->ref_count)
+ != GATEPETERSON_MAKE_MAGICSTAMP(1)) {
+ gate_leave_system(key);
+ return 1;
+ }
+
+ if (config == NULL) {
+ gatepeterson_get_config(&tmp_cfg);
+ config = &tmp_cfg;
+ }
+ gate_leave_system(key);
+
+ memcpy(&gatepeterson_module->cfg, config,
+ sizeof(struct gatepeterson_config));
+
+ gatepeterson_module->mod_lock = kmalloc(sizeof(struct mutex),
+ GFP_KERNEL);
+ if (gatepeterson_module->mod_lock == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+ mutex_init(gatepeterson_module->mod_lock);
+
+ /* Initialize object list */
+ INIT_LIST_HEAD(&gatepeterson_module->obj_list);
+
+ return 0;
+
+exit:
+ atomic_set(&gatepeterson_module->ref_count,
+ GATEPETERSON_MAKE_MAGICSTAMP(0));
+
+ printk(KERN_ERR "gatepeterson_setup failed status: %x\n",
+ retval);
+ return retval;
+}
+EXPORT_SYMBOL(gatepeterson_setup);
+
+/*
+ * ======== gatepeterson_destroy ========
+ * Purpose:
+ * This will destroy the gatepeterson module
+ */
+int gatepeterson_destroy(void)
+{
+ struct gatepeterson_object *obj = NULL;
+ struct mutex *lock = NULL;
+ s32 retval = 0;
+ int *key = 0;
+
+ key = gate_enter_system();
+
+ if (atomic_cmpmask_and_lt(&(gatepeterson_module->ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (!(atomic_dec_return(&gatepeterson_module->ref_count)
+ == GATEPETERSON_MAKE_MAGICSTAMP(0))) {
+ gate_leave_system(key);
+ retval = 1;
+ goto exit;
+ }
+ atomic_set(&gatepeterson_module->ref_count,
+ GATEPETERSON_MAKE_MAGICSTAMP(1));
+	/* Check if any gatepeterson instances have not been
+	 * deleted/closed so far; if there are any, delete or close them
+	 */
+ list_for_each_entry(obj, &gatepeterson_module->obj_list, elem) {
+ gatepeterson_delete((void **)&obj);
+
+ if (list_empty(&gatepeterson_module->obj_list))
+ break;
+ }
+
+ /* Again reset ref_count. */
+ atomic_set(&gatepeterson_module->ref_count,
+ GATEPETERSON_MAKE_MAGICSTAMP(0));
+ gate_leave_system(key);
+
+ retval = mutex_lock_interruptible(gatepeterson_module->mod_lock);
+ if (retval != 0)
+ goto exit;
+
+ lock = gatepeterson_module->mod_lock;
+ gatepeterson_module->mod_lock = NULL;
+ memset(&gatepeterson_module->cfg, 0,
+ sizeof(struct gatepeterson_config));
+ mutex_unlock(lock);
+ kfree(lock);
+ return 0;
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "gatepeterson_destroy failed status:%x\n",
+ retval);
+ }
+ return retval;
+
+}
+EXPORT_SYMBOL(gatepeterson_destroy);
+
+/*
+ * ======== gatepeterson_get_num_instances ========
+ * Purpose:
+ * Function to return the number of instances configured in the module.
+ */
+u32 gatepeterson_get_num_instances(void)
+{
+ return gatepeterson_module->default_cfg.num_instances;
+}
+EXPORT_SYMBOL(gatepeterson_get_num_instances);
+
+/*
+ * ======== gatepeterson_locks_init ========
+ * Purpose:
+ * Function to initialize the locks.
+ */
+inline void gatepeterson_locks_init(void)
+{
+	/* Do nothing */
+}
+
+/*
+ * ======== gatepeterson_params_init ========
+ * Purpose:
+ *	This will initialize this config-params structure with
+ * supplier-specified defaults before instance creation
+ */
+void gatepeterson_params_init(struct gatepeterson_params *params)
+{
+ int *key = 0;
+
+ key = gate_enter_system();
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(gatepeterson_module->ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(params == NULL))
+ goto exit;
+
+ memcpy(params, &(gatepeterson_module->def_inst_params),
+ sizeof(struct gatepeterson_params));
+
+exit:
+ gate_leave_system(key);
+ return;
+}
+EXPORT_SYMBOL(gatepeterson_params_init);
+
+
+int gatepeterson_instance_init(struct gatepeterson_object *obj,
+ enum igatempsupport_local_protect local_protect,
+ const struct gatepeterson_params *params)
+{
+ s32 retval = 0;
+
+ IGATEPROVIDER_OBJECTINITIALIZER(obj, gatepeterson);
+
+ if (params->shared_addr == NULL) {
+ retval = 1;
+ goto exit;
+ }
+
+ /* Create the local gate */
+ obj->local_gate = gatemp_create_local(local_protect);
+ obj->cache_enabled = sharedregion_is_cache_enabled(params->region_id);
+ obj->cache_line_size =
+ sharedregion_get_cache_line_size(params->region_id);
+
+ /* Settings for both the creator and opener */
+ if (obj->cache_line_size > sizeof(struct gatepeterson_attrs)) {
+ obj->attrs = params->shared_addr;
+ obj->flag[0] = (u16 *)((u32)(obj->attrs) +
+ obj->cache_line_size);
+ obj->flag[1] = (u16 *)((u32)(obj->flag[0]) +
+ obj->cache_line_size);
+ obj->turn = (u16 *)((u32)(obj->flag[1]) +
+ obj->cache_line_size);
+ } else {
+ obj->attrs = params->shared_addr;
+ obj->flag[0] = (u16 *)((u32)(obj->attrs) +
+ sizeof(struct gatepeterson_attrs));
+ obj->flag[1] = (u16 *)((u32)(obj->flag[0]) + sizeof(u16));
+ obj->turn = (u16 *)((u32)(obj->flag[1]) + sizeof(u16));
+ }
+ obj->nested = 0;
+
+ if (!params->open_flag) {
+ /* Creating. */
+ obj->self_id = 0;
+ obj->other_id = 1;
+ gatepeterson_post_init(obj);
+ } else {
+#if 0
+ Cache_inv((Ptr)obj->attrs, sizeof(struct gatepeterson_attrs),
+ Cache_Type_ALL, TRUE);
+#endif
+ if (obj->attrs->creator_proc_id == multiproc_self()) {
+ /* Opening locally */
+ obj->self_id = 0;
+ obj->other_id = 1;
+ } else {
+ /* Trying to open a gate remotely */
+ obj->self_id = 1;
+ obj->other_id = 0;
+ if (obj->attrs->opener_proc_id == MULTIPROC_INVALIDID) {
+ /* Opening remotely for the first time */
+ obj->attrs->opener_proc_id = multiproc_self();
+ } else if (obj->attrs->opener_proc_id !=
+ multiproc_self()) {
+ retval = -EACCES;
+ goto exit;
+ }
+#if 0
+ if (obj->cache_enabled) {
+ Cache_wbInv((Ptr)obj->attrs,
+ sizeof(struct gatepeterson_attrs),
+ Cache_Type_ALL, TRUE);
+ }
+#endif
+ }
+ }
+
+exit:
+ if (retval < 0) {
+		printk(KERN_ERR "gatepeterson_instance_init failed! "
+			"status = 0x%x\n", retval);
+ }
+ return retval;
+}
+
+void gatepeterson_instance_finalize(struct gatepeterson_object *obj, int status)
+{
+	switch (status) {
+	case 0:
+ {
+ /* Modify shared memory */
+ obj->attrs->opener_proc_id = MULTIPROC_INVALIDID;
+#if 0
+ Cache_wbInv((Ptr)obj->attrs, sizeof(GatePeterson_Attrs),
+ Cache_Type_ALL, TRUE);
+#endif
+ }
+ /* No break here. Fall through to the next. */
+
+ case 1:
+ {
+ /* Nothing to be done. */
+ }
+ }
+ return;
+}
+
+
+
+#if 0
+/*
+ * ======== gatepeterson_create ========
+ * Purpose:
+ *	This will create a new instance of the gatepeterson module
+ */
+void *gatepeterson_create(enum igatempsupport_local_protect local_protect,
+ const struct gatepeterson_params *params)
+{
+ void *handle = NULL;
+ s32 retval = 0;
+
+ BUG_ON(params == NULL);
+ if (atomic_cmpmask_and_lt(&(gatepeterson_module->ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(params->shared_addr == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ handle = _gatepeterson_create(local_protect, params, true);
+ return handle;
+
+exit:
+ return NULL;
+}
+EXPORT_SYMBOL(gatepeterson_create);
+
+/*
+ * ======== gatepeterson_delete ========
+ * Purpose:
+ *	This will delete an instance of the gatepeterson module
+ */
+int gatepeterson_delete(void **gphandle)
+
+{
+ struct gatepeterson_object *obj = NULL;
+ s32 retval;
+
+ BUG_ON(gphandle == NULL);
+ BUG_ON(*gphandle == NULL);
+ if (atomic_cmpmask_and_lt(&(gatepeterson_module->ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ obj = (struct gatepeterson_object *)(*gphandle);
+ if (unlikely(obj == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (unlikely(obj->attrs == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ /* Check if we have created the GP or not */
+ if (WARN_ON(unlikely(obj->attrs->creator_proc_id !=
+ multiproc_get_id(NULL)))) {
+ retval = -EACCES;
+ goto exit;
+ }
+
+ if (obj->ref_count != 0) {
+ retval = -EBUSY;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(gatepeterson_module->mod_lock);
+ if (retval)
+ goto exit;
+
+ list_del(&obj->elem); /* Remove the GP instance from the GP list */
+ mutex_unlock(gatepeterson_module->mod_lock);
+ /* Modify shared memory */
+ obj->attrs->opener_proc_id = MULTIPROC_INVALIDID;
+#if 0
+ Cache_wbInv((Ptr)obj->attrs, sizeof(struct gatepeterson_attrs),
+ Cache_Type_ALL, true);
+#endif
+
+ kfree(obj);
+ *gphandle = NULL;
+ return 0;
+
+exit:
+ printk(KERN_ERR "gatepeterson_delete failed status: %x\n",
+ retval);
+ return retval;
+}
+EXPORT_SYMBOL(gatepeterson_delete);
+
+#else
+
+/* Override the IObject interface to define create and delete APIs */
+IOBJECT_CREATE1(gatepeterson, enum igatempsupport_local_protect);
+
+#endif
+
+#if 0
+/*
+ * ======== gatepeterson_open ========
+ *	This will open a created instance of the gatepeterson
+ *	module by shared address.
+ * module by shared addr.
+ */
+int gatepeterson_open_by_addr(enum igatempsupport_local_protect local_protect,
+ void *shared_addr, void **handle_ptr)
+{
+ void *temp = NULL;
+ s32 retval = 0;
+ struct gatepeterson_params params;
+
+ BUG_ON(shared_addr == NULL);
+ BUG_ON(handle_ptr == NULL);
+ if (atomic_cmpmask_and_lt(&(gatepeterson_module->ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (shared_addr == NULL) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (handle_ptr == NULL) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (gatepeterson_inc_refcount(&params, &temp)) {
+ retval = -EBUSY;
+ goto exit; /* It's already opened from local processor */
+ }
+
+ gatepeterson_params_init(&params);
+ params.shared_addr = shared_addr;
+ params.region_id = sharedregion_get_id(shared_addr);
+
+ *handle_ptr = _gatepeterson_create(local_protect, &params, false);
+ return 0;
+
+exit:
+ printk(KERN_ERR "gatepeterson_open failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(gatepeterson_open_by_addr);
+
+/*
+ * ======== gatepeterson_close ========
+ * Purpose:
+ *	This will close a previously opened/created instance
+ *	of the gatepeterson module
+ */
+int gatepeterson_close(void **gphandle)
+{
+ struct gatepeterson_object *obj = NULL;
+ struct gatepeterson_params *params = NULL;
+	s32 retval = 0;
+	int *key = NULL;
+
+ BUG_ON(gphandle == NULL);
+ if (atomic_cmpmask_and_lt(&(gatepeterson_module->ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(*gphandle == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct gatepeterson_object *) (*gphandle);
+ if (unlikely(obj == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ key = gatemp_enter(obj->local_gate);
+
+ if (obj->ref_count > 1) {
+ obj->ref_count--;
+ gatemp_leave(obj->local_gate, key);
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(gatepeterson_module->mod_lock);
+ if (retval)
+ goto error_handle;
+
+ list_del(&obj->elem);
+ mutex_unlock(gatepeterson_module->mod_lock);
+ params = &obj->params;
+
+ gatemp_leave(obj->local_gate, key);
+
+	gatemp_delete(&obj->local_gate);
+
+ kfree(obj);
+ *gphandle = NULL;
+ return 0;
+
+error_handle:
+ gatemp_leave(obj->local_gate, key);
+
+exit:
+ printk(KERN_ERR "gatepeterson_close failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(gatepeterson_close);
+#endif
+
+/*
+ * ======== gatepeterson_enter ========
+ * Purpose:
+ *	This will enter the gatepeterson instance
+ */
+int *gatepeterson_enter(void *gphandle)
+{
+ struct gatepeterson_object *obj = NULL;
+ s32 retval = 0;
+ int *key = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(gatepeterson_module->ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(gphandle == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct gatepeterson_object *) gphandle;
+
+ /* Enter local gate */
+ if (obj->local_gate != NULL) {
+ retval = mutex_lock_interruptible(obj->local_gate);
+ if (retval)
+ goto exit;
+ }
+
+ /* If the gate object has already been entered, return the key */
+ obj->nested++;
+ if (obj->nested > 1)
+ return key;
+
+ /* indicate, needs to use the resource. */
+	*((u32 *)obj->flag[obj->self_id]) = GATEPETERSON_BUSY;
+#if 0
+ if (obj->cacheEnabled)
+ Cache_wbInv((Ptr)obj->flag[obj->selfId],
+ obj->cacheLineSize, Cache_Type_ALL, true);
+#endif
+ /* Give away the turn. */
+ *((u32 *)(obj->turn)) = obj->other_id;
+#if 0
+ if (obj->cacheEnabled) {
+ Cache_wbInv((Ptr)obj->turn, obj->cacheLineSize,
+ Cache_Type_ALL, true);
+ Cache_inv((Ptr)obj->flag[obj->otherId], obj->cacheLineSize,
+ Cache_Type_ALL, true);
+ }
+#endif
+
+ /* Wait while other processor is using the resource and has
+ * the turn
+ */
+ while ((*((VOLATILE u32 *) obj->flag[obj->other_id])
+ == GATEPETERSON_BUSY) &&
+ (*((VOLATILE u32 *)obj->turn) == obj->other_id)) {
+		/* Empty loop body, except for the (disabled) cache
+		 * maintenance below */
+#if 0
+ if (obj->cacheEnabled) {
+ Cache_inv((Ptr)obj->flag[obj->otherId], obj->
+ cacheLineSize, Cache_Type_ALL, true);
+ Cache_inv((Ptr)obj->turn, obj->
+ cacheLineSize, Cache_Type_ALL, true);
+ }
+ udelay(10);
+#endif
+ }
+
+ return key;
+
+exit:
+ return 0;
+}
+EXPORT_SYMBOL(gatepeterson_enter);
+
+/*
+ * ======== gatepeterson_leave ========
+ * Purpose:
+ *	This will leave the gatepeterson instance
+ */
+void gatepeterson_leave(void *gphandle, int *key)
+{
+ struct gatepeterson_object *obj = NULL;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(gatepeterson_module->ref_count),
+ GATEPETERSON_MAKE_MAGICSTAMP(0),
+ GATEPETERSON_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ BUG_ON(gphandle == NULL);
+
+ obj = (struct gatepeterson_object *)gphandle;
+
+ /* Release the resource and leave system gate. */
+ obj->nested--;
+ if (obj->nested == 0) {
+ *((VOLATILE u32 *)obj->flag[obj->self_id]) = GATEPETERSON_FREE;
+#if 0
+ if (obj->cacheEnabled)
+ Cache_wbInv((Ptr)obj->flag[obj->selfId],
+ obj->cacheLineSize, Cache_Type_ALL, true);
+#endif
+ }
+ /* Leave local gate */
+ mutex_unlock(obj->local_gate);
+
+exit:
+ return;
+}
+EXPORT_SYMBOL(gatepeterson_leave);
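+
+/*
+ * The enter/leave pair above is classic two-party Peterson mutual
+ * exclusion over shared memory (a protocol sketch; 'self' and 'other'
+ * are the 0/1 identifiers assigned at init):
+ *
+ *	flag[self] = BUSY;	-- declare interest
+ *	turn = other;		-- concede the tie-break
+ *	while (flag[other] == BUSY && turn == other)
+ *		;		-- spin only while the peer wants the
+ *				   gate and holds the turn
+ *	... critical section ...
+ *	flag[self] = FREE;
+ */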
+
+/*
+ * ======== gatepeterson_shared_mem_req ========
+ * Purpose:
+ * This will give the amount of shared memory required
+ * for creation of each instance
+ */
+u32 gatepeterson_shared_mem_req(const struct gatepeterson_params *params)
+{
+ u32 mem_req = 0;
+
+ if (sharedregion_get_cache_line_size(params->region_id) >=
+ sizeof(struct gatepeterson_attrs))
+		/* 4 cache lines: one each for attrs, flag[0], flag[1]
+		 * and turn in shared memory */
+ mem_req = 4 * sharedregion_get_cache_line_size(params->
+ region_id);
+ else
+ mem_req = sizeof(struct gatepeterson_attrs) +
+ sizeof(u16) * 3;
+
+ return mem_req;
+}
+EXPORT_SYMBOL(gatepeterson_shared_mem_req);
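+
+/*
+ * Worked example for the computation above (assuming a 128-byte cache
+ * line, as in GATEPETERSON_CACHESIZE): attrs, flag[0], flag[1] and turn
+ * each occupy a full line, so mem_req = 4 * 128 = 512 bytes. Without
+ * cache alignment the packed form applies instead:
+ * sizeof(struct gatepeterson_attrs) + 3 * sizeof(u16).
+ */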
+
+/*
+ *************************************************************************
+ * Internal functions
+ *************************************************************************
+ */
+#if 0
+/*
+ * ======== _gatepeterson_create ========
+ * Purpose:
+ *	Creates a new instance of the gatepeterson module.
+ *	This is an internal function because both
+ *	gatepeterson_create and gatepeterson_open
+ *	use the same functionality.
+ */
+static void *_gatepeterson_create(enum igatempsupport_local_protect
+ local_protect, const struct gatepeterson_params *params,
+ bool create_flag)
+{
+ int status = 0;
+ struct gatepeterson_object *handle = NULL;
+ struct gatepeterson_obj *obj = NULL;
+
+ handle = kmalloc(sizeof(struct gatepeterson_object), GFP_KERNEL);
+ if (handle == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ obj = kmalloc(sizeof(struct gatepeterson_obj), GFP_KERNEL);
+ if (obj == NULL) {
+ status = -ENOMEM;
+ goto obj_alloc_fail;
+ }
+
+ if (local_protect >= GATEPETERSON_PROTECT_END_VALUE) {
+ status = -EINVAL;
+ goto exit_with_cleanup;
+ }
+
+ handle->obj = obj;
+ handle->enter = &gatepeterson_enter;
+ handle->leave = &gatepeterson_leave;
+
+ /* Create the local gate */
+ obj->local_gate = gatemp_create_local(local_protect);
+ obj->cache_enabled = sharedregion_is_cache_enabled(params->region_id);
+ obj->cache_line_size = sharedregion_get_cache_line_size(params->
+ region_id);
+
+ /* Settings for both the creator and opener */
+ if (obj->cache_line_size > sizeof(struct gatepeterson_attrs)) {
+ obj->attrs = params->shared_addr;
+ obj->flag[0] = (u16 *)((u32)(obj->attrs) + obj->
+ cache_line_size);
+ obj->flag[1] = (u16 *)((u32)(obj->flag[0]) + obj->
+ cache_line_size);
+ obj->turn = (u16 *)((u32)(obj->flag[1]) + obj->
+ cache_line_size);
+ } else {
+ obj->attrs = params->shared_addr;
+ obj->flag[0] = (u16 *)((u32)(obj->attrs) +
+ sizeof(struct gatepeterson_attrs));
+ obj->flag[1] = (u16 *)((u32)(obj->flag[0]) + sizeof(u16));
+ obj->turn = (u16 *)((u32)(obj->flag[1]) + sizeof(u16));
+ }
+ obj->nested = 0;
+
+ if (params->open_flag == false) {
+ /* Creating. */
+ obj->self_id = 0;
+ obj->other_id = 1;
+ obj->ref_count = 0;
+ gatepeterson_post_init(obj);
+ } else {
+#if 0
+ Cache_inv((Ptr)obj->attrs, sizeof(struct gatepeterson_attrs),
+ Cache_Type_ALL, true);
+#endif
+ obj->ref_count = 1;
+ if (obj->attrs->creator_proc_id == multiproc_self()) {
+ /* Opening locally */
+ obj->self_id = 0;
+ obj->other_id = 1;
+ } else {
+ /* Trying to open a gate remotely */
+ obj->self_id = 1;
+ obj->other_id = 0;
+ if (obj->attrs->opener_proc_id == MULTIPROC_INVALIDID)
+ /* Opening remotely for the first time */
+ obj->attrs->opener_proc_id = multiproc_self();
+ else if (obj->attrs->opener_proc_id !=
+ multiproc_self()) {
+ status = -EFAULT;
+ goto exit_with_cleanup;
+ }
+#if 0
+ if (status >= 0) {
+ if (obj->cache_enabled) {
+ Cache_wbInv((Ptr)obj->attrs,
+ sizeof(struct gatepeterson_attrs),
+ Cache_Type_ALL, true);
+ }
+ }
+#endif
+ }
+ }
+
+ status = mutex_lock_interruptible(gatepeterson_module->mod_lock);
+ if (status)
+ goto mod_lock_fail;
+
+ list_add_tail(&obj->elem, &gatepeterson_module->obj_list);
+ mutex_unlock(gatepeterson_module->mod_lock);
+
+ return handle;
+mod_lock_fail:
+exit_with_cleanup:
+ gatemp_delete(&obj->local_gate);
+ kfree(obj);
+
+obj_alloc_fail:
+ kfree(handle);
+ handle = NULL;
+
+exit:
+ if (create_flag == true)
+ printk(KERN_ERR "_gatepeterson_create (create) failed "
+ "status: %x\n", status);
+ else
+ printk(KERN_ERR "_gatepeterson_create (open) failed "
+ "status: %x\n", status);
+
+ return NULL;
+}
+
+#endif
+
+/*
+ * ======== gatepeterson_post_init ========
+ * Purpose:
+ * Function to be called during
+ * 1. module startup to complete the initialization of all static instances
+ * 2. instance_init to complete the initialization of a dynamic instance
+ *
+ * Main purpose is to set up shared memory
+ */
+static void gatepeterson_post_init(struct gatepeterson_object *obj)
+{
+ /* Set up shared memory */
+ *(obj->turn) = 0;
+ *(obj->flag[0]) = 0;
+ *(obj->flag[1]) = 0;
+ obj->attrs->creator_proc_id = multiproc_self();
+ obj->attrs->opener_proc_id = MULTIPROC_INVALIDID;
+#if 0
+ /*
+ * Write everything back to memory. This assumes that obj->attrs is
+ * equal to the shared memory base address
+ */
+ if (obj->cacheEnabled) {
+ Cache_wbInv((Ptr)obj->attrs, sizeof(struct gatepeterson_attrs),
+ Cache_Type_ALL, false);
+ Cache_wbInv((Ptr)(obj->flag[0]), obj->cacheLineSize * 3,
+ Cache_Type_ALL, true);
+ }
+#endif
+}
+
+#if 0
+/*
+ * ======== gatepeterson_inc_refcount ========
+ * Purpose:
+ * This will increment the reference count while opening
+ * a GP instance if it is already opened from local processor
+ */
+static bool gatepeterson_inc_refcount(const struct gatepeterson_params *params,
+ void **handle)
+{
+ struct gatepeterson_object *obj = NULL;
+ s32 retval = 0;
+ bool done = false;
+
+ list_for_each_entry(obj, &gatepeterson_module->obj_list, elem) {
+ if (params->shared_addr != NULL) {
+ if (obj->params.shared_addr == params->shared_addr) {
+ retval = mutex_lock_interruptible(
+ gatepeterson_module->mod_lock);
+ if (retval)
+ break;
+
+ obj->ref_count++;
+ *handle = obj;
+ mutex_unlock(gatepeterson_module->mod_lock);
+ done = true;
+ break;
+ }
+ }
+ }
+
+ return done;
+}
+#endif
diff --git a/drivers/dsp/syslink/multicore_ipc/heap.c b/drivers/dsp/syslink/multicore_ipc/heap.c
new file mode 100644
index 000000000000..b87988a796f0
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/heap.c
@@ -0,0 +1,115 @@
+/*
+ * heap.c
+ *
+ * Heap module manages fixed size buffers that can be used
+ * in a multiprocessor system with shared memory
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+#include <linux/types.h>
+#include <linux/bug.h>
+
+
+#include <heap.h>
+
+
+/*
+ * ======== sl_heap_alloc ========
+ * Purpose:
+ * This will allocate a block of memory of specified
+ * size
+ */
+void *sl_heap_alloc(void *hphandle, u32 size, u32 align)
+{
+ char *block = NULL;
+ struct heap_object *obj = NULL;
+
+ BUG_ON(hphandle == NULL);
+
+ obj = (struct heap_object *)hphandle;
+ BUG_ON(obj->alloc == NULL);
+ block = obj->alloc(hphandle, size, align);
+ return block;
+}
+
+/*
+ * ======== sl_heap_free ========
+ * Purpose:
+ *	This will free a block of memory allocated
+ *	from the heap
+ */
+int sl_heap_free(void *hphandle, void *block, u32 size)
+{
+ struct heap_object *obj = NULL;
+ s32 retval = 0;
+
+ BUG_ON(hphandle == NULL);
+
+ obj = (struct heap_object *)hphandle;
+ BUG_ON(obj->free == NULL);
+ retval = obj->free(hphandle, block, size);
+ return retval;
+}
+
+/*
+ * ======== sl_heap_get_stats ========
+ * Purpose:
+ * This will get the heap memory statistics
+ */
+void sl_heap_get_stats(void *hphandle, struct memory_stats *stats)
+{
+ struct heap_object *obj = NULL;
+
+ BUG_ON(hphandle == NULL);
+ BUG_ON(stats == NULL);
+
+ obj = (struct heap_object *)hphandle;
+ BUG_ON(obj->get_stats == NULL);
+ obj->get_stats(hphandle, stats);
+}
+
+/*
+ * ======== sl_heap_get_extended_stats ========
+ * Purpose:
+ * This will get the heap memory extended statistics
+ */
+void sl_heap_get_extended_stats(void *hphandle,
+ struct heap_extended_stats *stats)
+{
+ struct heap_object *obj = NULL;
+
+ BUG_ON(hphandle == NULL);
+ BUG_ON(stats == NULL);
+
+ obj = (struct heap_object *)hphandle;
+ BUG_ON(obj->get_extended_stats == NULL);
+ obj->get_extended_stats(hphandle, stats);
+}
+
+
+/*
+ * ======== sl_heap_is_blocking ========
+ * Purpose:
+ * Indicates whether the heap may block during an alloc or free call
+ */
+bool sl_heap_is_blocking(void *hphandle)
+{
+ struct heap_object *obj = NULL;
+
+ BUG_ON(hphandle == NULL);
+
+ obj = (struct heap_object *)hphandle;
+ BUG_ON(obj->is_blocking == NULL);
+
+ return obj->is_blocking(hphandle);
+}
+
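+/*
+ * These wrappers dispatch through the function table a concrete heap
+ * (heapbufmp, heapmemmp) installs in its struct heap_object, keeping
+ * callers implementation-agnostic (a sketch; 'heap' is assumed to be
+ * any successfully created heap handle):
+ *
+ *	buf = sl_heap_alloc(heap, 64, sizeof(u32));
+ *	...
+ *	sl_heap_free(heap, buf, 64);
+ */
+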
diff --git a/drivers/dsp/syslink/multicore_ipc/heapbufmp.c b/drivers/dsp/syslink/multicore_ipc/heapbufmp.c
new file mode 100644
index 000000000000..e625789950fc
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/heapbufmp.c
@@ -0,0 +1,1555 @@
+/*
+ * heapbufmp.c
+ *
+ * HeapBufMP manages fixed size buffers that can be used
+ * in a multiprocessor system with shared memory.
+ *
+ * Copyright(C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <atomic_linux.h>
+#include <multiproc.h>
+#include <nameserver.h>
+#include <sharedregion.h>
+#include <gatemp.h>
+#include <heapbufmp.h>
+
+/*
+ * Name of the reserved nameserver used for heapbufmp.
+ */
+#define HEAPBUFMP_NAMESERVER "HeapBufMP"
+/* Macro to make a correct module magic number with ref_count */
+#define HEAPBUFMP_MAKE_MAGICSTAMP(x) ((HEAPBUFMP_MODULEID << 12) | (x))
+/* Max heapbufmp name length */
+#define HEAPBUFMP_MAX_NAME_LEN 32
+/* Max number of runtime entries */
+#define HEAPBUFMP_MAX_RUNTIME_ENTRIES 32
+
+#define ROUND_UP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
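+/*
+ * ROUND_UP rounds 'a' up to the next multiple of 'b', where 'b' must be
+ * a power of two, e.g. ROUND_UP(6, 4) == 8 and ROUND_UP(8, 4) == 8.
+ */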
+
+/*
+ * Structure defining attribute parameters for the heapbufmp module
+ */
+struct heapbufmp_attrs {
+ VOLATILE u32 status; /* Module status */
+ VOLATILE u32 *gatemp_addr; /* gatemp shared address(shm safe) */
+ VOLATILE u32 *buf_ptr; /* Memory managed by instance */
+ VOLATILE u32 num_free_blocks; /* Number of free blocks */
+ VOLATILE u32 min_free_blocks; /* Min number of free blocks */
+ VOLATILE u32 block_size; /* True size of each block */
+ VOLATILE u32 align; /* Alignment of each block */
+ VOLATILE u32 num_blocks; /* Number of individual blocks */
+ VOLATILE u16 exact; /* For 'exact' allocation */
+};
+
+/*
+ * Structure defining processor related information for the
+ * heapbufmp module
+ */
+struct heapbufmp_proc_attrs {
+ bool creator; /* Creator or opener */
+ u16 proc_id; /* Processor identifier */
+ u32 open_count; /* open count in a processor */
+};
+
+/*
+ * Structure for heapbufmp module state
+ */
+struct heapbufmp_module_object {
+ atomic_t ref_count; /* Reference count */
+ void *nameserver; /* Nameserver handle */
+ struct list_head obj_list; /* List holding created objects */
+ struct mutex *local_lock; /* lock for protecting obj_list */
+ struct heapbufmp_config cfg; /* Current config values */
+ struct heapbufmp_config default_cfg; /* Default config values */
+ struct heapbufmp_params default_inst_params; /* Default instance
+ creation parameters */
+};
+
+struct heapbufmp_module_object heapbufmp_state = {
+ .obj_list = LIST_HEAD_INIT(heapbufmp_state.obj_list),
+ .default_cfg.max_name_len = HEAPBUFMP_MAX_NAME_LEN,
+ .default_cfg.max_runtime_entries = HEAPBUFMP_MAX_RUNTIME_ENTRIES,
+ .default_cfg.track_allocs = false,
+ .default_inst_params.gate = NULL,
+ .default_inst_params.exact = false,
+ .default_inst_params.name = NULL,
+ .default_inst_params.align = 1u,
+ .default_inst_params.num_blocks = 0u,
+ .default_inst_params.block_size = 0u,
+ .default_inst_params.region_id = 0,
+ .default_inst_params.shared_addr = NULL,
+};
+
+/* Pointer to module state */
+static struct heapbufmp_module_object *heapbufmp_module = &heapbufmp_state;
+
+/*
+ * Structure for the handle for the heapbufmp
+ */
+struct heapbufmp_obj {
+ struct list_head list_elem; /* Used for creating a linked list */
+ struct heapbufmp_attrs *attrs; /* The shared attributes structure */
+ void *gate; /* Lock used for critical region management */
+ void *ns_key; /* nameserver key required for remove */
+ bool cache_enabled; /* Whether to do cache calls */
+ u16 region_id; /* shared region index */
+ u32 alloc_size; /* Size of allocated shared memory */
+ char *buf; /* Pointer to allocated memory */
+ void *free_list; /* List of free buffers */
+ u32 block_size; /* Adjusted block_size */
+ u32 align; /* Adjusted alignment */
+ u32 num_blocks; /* Number of blocks in buffer */
+ bool exact; /* Exact match flag */
+ struct heapbufmp_proc_attrs owner; /* owner processor info */
+ void *top; /* Pointer to the top object */
+ struct heapbufmp_params params; /* The creation parameter structure */
+};
+
+#define heapbufmp_object heap_object
+
+/* =============================================================================
+ * Forward declarations of internal functions
+ * =============================================================================
+ */
+static int heapbufmp_post_init(struct heapbufmp_object *handle);
+
+/* =============================================================================
+ * APIs called directly by applications
+ * =============================================================================
+ */
+/*
+ * ======== heapbufmp_get_config ========
+ * Purpose:
+ * This will get default configuration for the
+ * heapbufmp module
+ */
+int heapbufmp_get_config(struct heapbufmp_config *cfgparams)
+{
+ s32 retval = 0;
+
+ BUG_ON(cfgparams == NULL);
+
+ if (cfgparams == NULL) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (atomic_cmpmask_and_lt(&(heapbufmp_module->ref_count),
+ HEAPBUFMP_MAKE_MAGICSTAMP(0),
+ HEAPBUFMP_MAKE_MAGICSTAMP(1)) == true)
+ memcpy(cfgparams, &heapbufmp_module->default_cfg,
+ sizeof(struct heapbufmp_config));
+ else
+ memcpy(cfgparams, &heapbufmp_module->cfg,
+ sizeof(struct heapbufmp_config));
+ return 0;
+error:
+ printk(KERN_ERR "heapbufmp_get_config failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapbufmp_get_config);
+
+/*
+ * ======== heapbufmp_setup ========
+ * Purpose:
+ * This will setup the heapbufmp module
+ *
+ * This function sets up the heapbufmp module. This function
+ * must be called before any other instance-level APIs can be
+ * invoked.
+ * Module-level configuration needs to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, then heapbufmp_get_config can be called to get
+ * the configuration filled with the default values. After this,
+ * only the required configuration values can be changed. If the
+ * user does not wish to make any change in the default parameters,
+ * the application can simply call heapbufmp_setup with NULL
+ * parameters. The default parameters would get automatically used.
+ */
+int heapbufmp_setup(const struct heapbufmp_config *cfg)
+{
+ struct nameserver_params params;
+ struct heapbufmp_config tmp_cfg;
+ s32 retval = 0;
+
+	/* Initialize the ref_count if it has not been set up yet; the upper
+	 * 16 bits are written with the module ID to ensure correctness of
+	 * the ref_count variable
+	 */
+ atomic_cmpmask_and_set(&heapbufmp_module->ref_count,
+ HEAPBUFMP_MAKE_MAGICSTAMP(0),
+ HEAPBUFMP_MAKE_MAGICSTAMP(0));
+ if (atomic_inc_return(&heapbufmp_module->ref_count)
+ != HEAPBUFMP_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ if (cfg == NULL) {
+ heapbufmp_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+
+ if (cfg->max_name_len == 0 ||
+ cfg->max_name_len > HEAPBUFMP_MAX_NAME_LEN) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /* Initialize the parameters */
+ nameserver_params_init(&params);
+ params.max_value_len = sizeof(u32);
+ params.max_name_len = cfg->max_name_len;
+ params.max_runtime_entries = cfg->max_runtime_entries;
+
+ /* Create the nameserver for modules */
+ heapbufmp_module->nameserver =
+ nameserver_create(HEAPBUFMP_NAMESERVER, &params);
+ if (heapbufmp_module->nameserver == NULL) {
+ retval = -EFAULT;
+ goto error;
+ }
+
+ /* Construct the list object */
+ INIT_LIST_HEAD(&heapbufmp_module->obj_list);
+ /* Copy config info */
+ memcpy(&heapbufmp_module->cfg, cfg, sizeof(struct heapbufmp_config));
+ /* Create a lock for protecting list object */
+	heapbufmp_module->local_lock = kmalloc(sizeof(struct mutex),
+							GFP_KERNEL);
+	if (heapbufmp_module->local_lock == NULL) {
+		retval = -ENOMEM;
+		heapbufmp_destroy();
+		goto error;
+	}
+	mutex_init(heapbufmp_module->local_lock);
+
+ return 0;
+
+error:
+ printk(KERN_ERR "heapbufmp_setup failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapbufmp_setup);
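+
+/*
+ * Module bring-up sketch for the setup call above (error handling
+ * omitted; the tweaked field is only an example):
+ *
+ *	struct heapbufmp_config cfg;
+ *
+ *	heapbufmp_get_config(&cfg);
+ *	cfg.max_runtime_entries = 16;
+ *	heapbufmp_setup(&cfg);
+ *	...
+ *	heapbufmp_destroy();
+ */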
+
+/*
+ * ======== heapbufmp_destroy ========
+ * Purpose:
+ * This will destroy the heapbufmp module
+ */
+int heapbufmp_destroy(void)
+{
+ s32 retval = 0;
+ struct mutex *lock = NULL;
+ struct heapbufmp_obj *obj = NULL;
+
+ if (atomic_cmpmask_and_lt(&(heapbufmp_module->ref_count),
+ HEAPBUFMP_MAKE_MAGICSTAMP(0),
+ HEAPBUFMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (atomic_dec_return(&heapbufmp_module->ref_count)
+ == HEAPBUFMP_MAKE_MAGICSTAMP(0)) {
+ /* Temporarily increment ref_count here. */
+ atomic_set(&heapbufmp_module->ref_count,
+ HEAPBUFMP_MAKE_MAGICSTAMP(1));
+
+		/* Check if any heapbufmp instances have not been
+		 * deleted/closed so far; if there are any, delete or close
+		 * them
+		 */
+ list_for_each_entry(obj, &heapbufmp_module->obj_list,
+ list_elem) {
+ if (obj->owner.proc_id == multiproc_get_id(NULL))
+ retval = heapbufmp_delete(&obj->top);
+ else
+ retval = heapbufmp_close(obj->top);
+
+ if (list_empty(&heapbufmp_module->obj_list))
+ break;
+
+ if (retval < 0)
+ goto error;
+ }
+
+ /* Again reset ref_count. */
+ atomic_set(&heapbufmp_module->ref_count,
+ HEAPBUFMP_MAKE_MAGICSTAMP(0));
+
+ if (likely(heapbufmp_module->nameserver != NULL)) {
+ retval = nameserver_delete(&heapbufmp_module->
+ nameserver);
+ if (unlikely(retval != 0))
+ goto error;
+ }
+
+ /* Delete the list lock */
+ lock = heapbufmp_module->local_lock;
+ retval = mutex_lock_interruptible(lock);
+ if (retval)
+ goto error;
+
+ heapbufmp_module->local_lock = NULL;
+ mutex_unlock(lock);
+ kfree(lock);
+ memset(&heapbufmp_module->cfg, 0,
+ sizeof(struct heapbufmp_config));
+ }
+
+ return 0;
+
+error:
+ printk(KERN_ERR "heapbufmp_destroy failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapbufmp_destroy);
+
+/*
+ * ======== heapbufmp_params_init ========
+ * Purpose:
+ *      This will get the initialization params for a heapbufmp
+ * module instance
+ */
+void heapbufmp_params_init(struct heapbufmp_params *params)
+{
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(heapbufmp_module->ref_count),
+ HEAPBUFMP_MAKE_MAGICSTAMP(0),
+ HEAPBUFMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ BUG_ON(params == NULL);
+
+ memcpy(params, &heapbufmp_module->default_inst_params,
+ sizeof(struct heapbufmp_params));
+
+ return;
+error:
+ printk(KERN_ERR "heapbufmp_params_init failed status: %x\n", retval);
+}
+EXPORT_SYMBOL(heapbufmp_params_init);
+
+/*
+ * ======== _heapbufmp_create ========
+ * Purpose:
+ * This will create a new instance of heapbufmp module
+ * This is an internal function as both heapbufmp_create
+ * and heapbufmp_open use the functionality
+ *
+ * NOTE: The lock to protect the shared memory area
+ * used by heapbufmp is provided by the consumer of
+ * heapbufmp module
+ */
+int _heapbufmp_create(void **handle_ptr, const struct heapbufmp_params *params,
+ u32 create_flag)
+{
+ s32 retval = 0;
+ struct heapbufmp_obj *obj = NULL;
+ struct heapbufmp_object *handle = NULL;
+ void *gate_handle = NULL;
+ void *local_addr = NULL;
+ u32 *shared_shm_base;
+ u32 min_align;
+
+ if (atomic_cmpmask_and_lt(&(heapbufmp_module->ref_count),
+ HEAPBUFMP_MAKE_MAGICSTAMP(0),
+ HEAPBUFMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ /* No need for parameter checks, since this is an internal function. */
+
+ /* Initialize return parameter. */
+ *handle_ptr = NULL;
+
+ handle = kmalloc(sizeof(struct heapbufmp_object), GFP_KERNEL);
+ if (handle == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ obj = kmalloc(sizeof(struct heapbufmp_obj), GFP_KERNEL);
+ if (obj == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ handle->obj = (struct heapbufmp_obj *)obj;
+ handle->alloc = &heapbufmp_alloc;
+ handle->free = &heapbufmp_free;
+ handle->get_stats = &heapbufmp_get_stats;
+ handle->is_blocking = &heapbufmp_isblocking;
+
+ obj->ns_key = NULL;
+ obj->alloc_size = 0;
+
+	/* Put in local list */
+ retval = mutex_lock_interruptible(heapbufmp_module->local_lock);
+ if (retval < 0)
+ goto error;
+
+ INIT_LIST_HEAD(&obj->list_elem);
+	list_add(&obj->list_elem, &heapbufmp_module->obj_list);
+ mutex_unlock(heapbufmp_module->local_lock);
+
+ if (create_flag == false) {
+ obj->owner.creator = false;
+ obj->owner.open_count = 0;
+ obj->owner.proc_id = MULTIPROC_INVALIDID;
+ obj->top = handle;
+
+ obj->attrs = (struct heapbufmp_attrs *) params->shared_addr;
+
+ /* No need to Cache_inv- already done in openByAddr() */
+ obj->align = obj->attrs->align;
+ obj->num_blocks = obj->attrs->num_blocks;
+ obj->block_size = obj->attrs->block_size;
+ obj->exact = obj->attrs->exact;
+ obj->buf = sharedregion_get_ptr((u32 *)obj->attrs->
+ buf_ptr);
+ obj->region_id = sharedregion_get_id(obj->buf);
+
+ /* Set min_align */
+ min_align = 4; /* memory_get_max_default_type_align(); */
+ if (sharedregion_get_cache_line_size(obj->region_id) >
+ min_align)
+ min_align = sharedregion_get_cache_line_size(obj->
+ region_id);
+ obj->cache_enabled = sharedregion_is_cache_enabled(obj->
+ region_id);
+
+ local_addr = sharedregion_get_ptr((u32 *)obj->attrs->
+ gatemp_addr);
+ retval = gatemp_open_by_addr(local_addr, &gate_handle);
+
+ if (retval < 0) {
+ retval = -EFAULT;
+ goto error;
+ }
+ obj->gate = gate_handle;
+
+ /* Open the ListMP */
+ local_addr = (void *) ROUND_UP(((u32)obj->attrs
+ + sizeof(struct heapbufmp_attrs)),
+ min_align);
+ retval = listmp_open_by_addr(local_addr, &(obj->free_list));
+
+ if (retval < 0) {
+ retval = -EFAULT;
+ goto error;
+ }
+ } else {
+ obj->owner.creator = true;
+ obj->owner.open_count = 1;
+ obj->owner.proc_id = multiproc_self();
+ obj->top = handle;
+
+ /* Creating the gate */
+ if (params->gate != NULL)
+ obj->gate = params->gate;
+ else {
+ /* If no gate specified, get the default system gate */
+ obj->gate = gatemp_get_default_remote();
+ }
+
+ if (obj->gate == NULL) {
+ retval = -EFAULT;
+ goto error;
+ }
+
+ obj->exact = params->exact;
+ obj->align = params->align;
+ obj->num_blocks = params->num_blocks;
+
+ if (params->shared_addr == NULL) {
+ /* Creating using a shared region ID */
+			/* A NULL name is allowed for an anonymous heap
+			 * that is not to be opened by name.
+ */
+ /* Will be allocated in post_init */
+ obj->attrs = NULL;
+ obj->region_id = params->region_id;
+ } else {
+ /* Creating using shared_addr */
+ obj->region_id = sharedregion_get_id(
+ params->shared_addr);
+
+ /* Assert that the buffer is in a valid shared
+ * region
+ */
+ if (obj->region_id == SHAREDREGION_INVALIDREGIONID) {
+ retval = -EFAULT;
+ goto error;
+ } else if (((u32) params->shared_addr
+ % sharedregion_get_cache_line_size(obj->
+ region_id) != 0)) {
+ retval = -EFAULT;
+ goto error;
+ }
+ obj->attrs = (struct heapbufmp_attrs *)
+ params->shared_addr;
+ }
+
+ obj->cache_enabled = sharedregion_is_cache_enabled(
+ obj->region_id);
+
+ /* Fix the alignment (alignment may be needed even if
+ * cache is disabled)
+ */
+ obj->align = 4; /* memory_get_max_default_type_align(); */
+ if (sharedregion_get_cache_line_size(obj->region_id) >
+ obj->align)
+ obj->align = sharedregion_get_cache_line_size(
+ obj->region_id);
+
+ /* Round the block_size up by the adjusted alignment */
+ obj->block_size = ROUND_UP(params->block_size, obj->align);
+
+ retval = heapbufmp_post_init(handle);
+ if (retval < 0) {
+ retval = -EFAULT;
+ goto error;
+ }
+
+ /* Populate the params member */
+ memcpy(&obj->params, params, sizeof(struct heapbufmp_params));
+ if (params->name != NULL) {
+ obj->params.name = kmalloc(strlen(params->name) + 1,
+ GFP_KERNEL);
+ if (obj->params.name == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+ strncpy(obj->params.name, params->name,
+ strlen(params->name) + 1);
+ }
+
+ /* We will store a shared pointer in the NameServer */
+ shared_shm_base = sharedregion_get_srptr((void *)obj->attrs,
+ obj->region_id);
+ if (obj->params.name != NULL) {
+ obj->ns_key = nameserver_add_uint32(
+ heapbufmp_module->nameserver,
+ params->name,
+ (u32)shared_shm_base);
+ if (obj->ns_key == NULL) {
+ retval = -EFAULT;
+ goto error;
+ }
+ }
+ }
+
+ *handle_ptr = (void *)handle;
+ return retval;
+
+
+error:
+ /* Do whatever cleanup is required*/
+ if (create_flag == true)
+ heapbufmp_delete(handle_ptr);
+ else
+ heapbufmp_close(handle_ptr);
+
+ printk(KERN_ERR "_heapbufmp_create failed status: %x\n", retval);
+ return retval;
+}
+
+/*
+ * ======== heapbufmp_create ========
+ * Purpose:
+ * This will create a new instance of heapbufmp module
+ */
+void *heapbufmp_create(const struct heapbufmp_params *params)
+{
+ s32 retval = 0;
+ struct heapbufmp_object *handle = NULL;
+ struct heapbufmp_params sparams;
+
+ BUG_ON(params == NULL);
+
+ if (atomic_cmpmask_and_lt(&(heapbufmp_module->ref_count),
+ HEAPBUFMP_MAKE_MAGICSTAMP(0),
+ HEAPBUFMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (params == NULL) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (params->block_size == 0) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (params->num_blocks == 0) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ memcpy(&sparams, (void *)params, sizeof(struct heapbufmp_params));
+ retval = _heapbufmp_create((void **)&handle, &sparams, true);
+ if (retval < 0)
+ goto error;
+
+ return (void *)handle;
+
+error:
+ printk(KERN_ERR "heapbufmp_create failed status: %x\n", retval);
+ return (void *)handle;
+}
+EXPORT_SYMBOL(heapbufmp_create);
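+
+/*
+ * Example (illustrative sketch, not from the original sources): creating
+ * an instance backed by shared region 0; the name "myHeap" and the sizes
+ * are hypothetical values:
+ *
+ *	struct heapbufmp_params params;
+ *	void *heap;
+ *
+ *	heapbufmp_params_init(&params);
+ *	params.name = "myHeap";
+ *	params.region_id = 0;
+ *	params.block_size = 256;
+ *	params.num_blocks = 8;
+ *	heap = heapbufmp_create(&params);
+ *	if (heap == NULL)
+ *		return -ENOMEM;
+ */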
+
+/*
+ * ======== heapbufmp_delete ========
+ * Purpose:
+ * This will delete an instance of heapbufmp module
+ */
+int heapbufmp_delete(void **handle_ptr)
+{
+ int status = 0;
+ struct heapbufmp_object *handle = NULL;
+ struct heapbufmp_obj *obj = NULL;
+ struct heapbufmp_params *params = NULL;
+ struct heapbufmp_object *region_heap = NULL;
+ s32 retval = 0;
+ int *key;
+
+ if (atomic_cmpmask_and_lt(&(heapbufmp_module->ref_count),
+ HEAPBUFMP_MAKE_MAGICSTAMP(0),
+ HEAPBUFMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(handle_ptr == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ handle = (struct heapbufmp_object *)(*handle_ptr);
+ if (WARN_ON(handle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ obj = (struct heapbufmp_obj *)handle->obj;
+ if (obj != NULL) {
+ if (obj->owner.proc_id != multiproc_self()) {
+ status = -ENODEV;
+ goto error;
+ }
+
+ /* Take the local lock */
+ key = gatemp_enter(obj->gate);
+
+ if (obj->owner.open_count > 1) {
+ retval = -ENODEV;
+ goto device_busy_error;
+ }
+
+ retval = mutex_lock_interruptible(heapbufmp_module->local_lock);
+ if (retval < 0)
+ goto lock_error;
+
+		/* Remove from the local list */
+ list_del(&obj->list_elem);
+
+ mutex_unlock(heapbufmp_module->local_lock);
+
+ params = (struct heapbufmp_params *) &obj->params;
+
+ if (likely(params->name != NULL)) {
+ if (likely(obj->ns_key != NULL)) {
+ nameserver_remove_entry(heapbufmp_module->
+ nameserver, obj->ns_key);
+ obj->ns_key = NULL;
+ }
+ kfree(params->name);
+ }
+
+ /* Set status to 'not created' */
+ if (obj->attrs != NULL) {
+#if 0
+ obj->attrs->status = 0;
+ if (obj->cache_enabled) {
+ cache_wbinv(obj->attrs, sizeof(struct
+ heapbufmp_attrs), CACHE_TYPE_ALL,
+ true);
+ }
+#endif
+ }
+
+ /* Release the shared lock */
+ gatemp_leave(obj->gate, key);
+
+ if (obj->free_list != NULL)
+ /* Free the list */
+ listmp_delete(&obj->free_list);
+
+ /* If necessary, free shared memory if memory is internally
+ * allocated
+ */
+ region_heap = sharedregion_get_heap(obj->region_id);
+
+ if ((region_heap != NULL) &&
+ (obj->params.shared_addr == NULL) &&
+ (obj->attrs != NULL)) {
+ sl_heap_free(region_heap, obj->attrs, obj->alloc_size);
+ }
+
+ kfree(obj);
+ kfree(handle);
+
+ *handle_ptr = NULL;
+ } else { /* obj == NULL */
+ kfree(handle);
+ *handle_ptr = NULL;
+ }
+
+
+ return 0;
+
+lock_error:
+device_busy_error:
+ gatemp_leave(obj->gate, key);
+
+error:
+ printk(KERN_ERR "heapbufmp_delete failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapbufmp_delete);
+
+/*
+ * ======== heapbufmp_open ========
+ * Purpose:
+ *      This will open a created instance of the heapbufmp
+ * module
+ */
+int heapbufmp_open(char *name, void **handle_ptr)
+{
+ s32 retval = 0;
+ u32 *shared_shm_base = SHAREDREGION_INVALIDSRPTR;
+ u32 *shared_addr = NULL;
+ struct heapbufmp_obj *obj = NULL;
+ bool done_flag = false;
+ struct list_head *elem = NULL;
+
+ BUG_ON(name == NULL);
+ BUG_ON(handle_ptr == NULL);
+
+ if (unlikely(
+ atomic_cmpmask_and_lt(&(heapbufmp_module->ref_count),
+ HEAPBUFMP_MAKE_MAGICSTAMP(0),
+ HEAPBUFMP_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (name == NULL) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (handle_ptr == NULL) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /* First check in the local list */
+ list_for_each(elem, &heapbufmp_module->obj_list) {
+ obj = (struct heapbufmp_obj *)elem;
+ if (obj->params.name != NULL) {
+ if (strcmp(obj->params.name, name) == 0) {
+ retval = mutex_lock_interruptible(
+ heapbufmp_module->local_lock);
+ if (retval < 0)
+ goto error;
+ /* Check if we have created the heapbufmp or
+ * not
+ */
+ if (obj->owner.proc_id == multiproc_self())
+ obj->owner.open_count++;
+
+ *handle_ptr = (void *)obj->top;
+ mutex_unlock(heapbufmp_module->local_lock);
+ done_flag = true;
+ break;
+ }
+ }
+ }
+
+ if (likely(done_flag == false)) {
+ /* Find in name server */
+ retval = nameserver_get_uint32(heapbufmp_module->nameserver,
+ name,
+ &shared_shm_base,
+ NULL);
+ if (unlikely(retval < 0))
+ goto error;
+
+ /*
+ * Convert from shared region pointer to local address
+ */
+ shared_addr = sharedregion_get_ptr(shared_shm_base);
+ if (unlikely(shared_addr == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ retval = heapbufmp_open_by_addr(shared_addr, handle_ptr);
+
+ if (unlikely(retval < 0))
+ goto error;
+ }
+
+ return 0;
+
+error:
+ printk(KERN_ERR "heapbufmp_open failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapbufmp_open);
+
+/*
+ * ======== heapbufmp_close ========
+ * Purpose:
+ *      This will close a previously opened/created instance
+ * of heapbufmp module
+ */
+int heapbufmp_close(void **handle_ptr)
+{
+ struct heapbufmp_object *handle = NULL;
+ struct heapbufmp_obj *obj = NULL;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(heapbufmp_module->ref_count),
+ HEAPBUFMP_MAKE_MAGICSTAMP(0),
+ HEAPBUFMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(handle_ptr == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (WARN_ON(*handle_ptr == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ handle = (struct heapbufmp_object *)(*handle_ptr);
+ obj = (struct heapbufmp_obj *)handle->obj;
+
+ if (obj != NULL) {
+ retval = mutex_lock_interruptible(heapbufmp_module->
+ local_lock);
+ if (retval)
+ goto error;
+
+ /* opening an instance created locally */
+ if (obj->owner.proc_id == multiproc_self())
+ obj->owner.open_count--;
+
+		/* Check if HeapBufMP is opened on the same processor
+ * and this is the last closure.
+ */
+ if ((obj->owner.creator == false)
+ && (obj->owner.open_count == 0)) {
+ list_del(&obj->list_elem);
+
+ if (obj->free_list != NULL)
+ /* Close the list */
+ listmp_close(&obj->free_list);
+
+ if (obj->gate != NULL)
+ /* Close the instance gate */
+ gatemp_close(&obj->gate);
+
+ /* Now free the handle */
+ kfree(obj);
+ obj = NULL;
+ kfree(handle);
+ *handle_ptr = NULL;
+ }
+
+ mutex_unlock(heapbufmp_module->local_lock);
+ } else {
+ kfree(handle);
+ *handle_ptr = NULL;
+ }
+ return 0;
+
+error:
+ printk(KERN_ERR "heapbufmp_close failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapbufmp_close);
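+
+/*
+ * Example (illustrative sketch, not from the original sources): opening
+ * an instance created elsewhere by name and closing it again; "myHeap"
+ * is a hypothetical name:
+ *
+ *	void *heap = NULL;
+ *
+ *	if (heapbufmp_open("myHeap", &heap) < 0)
+ *		return -ENOENT;
+ *	heapbufmp_close(&heap);
+ */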
+
+/*
+ * ======== heapbufmp_alloc ========
+ * Purpose:
+ *      This will allocate a block of memory
+ */
+void *heapbufmp_alloc(void *hphandle, u32 size, u32 align)
+{
+ char *block = NULL;
+ struct heapbufmp_object *handle = NULL;
+ struct heapbufmp_obj *obj = NULL;
+ int *key;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(heapbufmp_module->ref_count),
+ HEAPBUFMP_MAKE_MAGICSTAMP(0),
+ HEAPBUFMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+ if (WARN_ON(hphandle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+ if (WARN_ON(size == 0)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ handle = (struct heapbufmp_object *)(hphandle);
+ obj = (struct heapbufmp_obj *)handle->obj;
+ if (WARN_ON(obj == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (WARN_ON(unlikely(size > obj->block_size))) {
+ retval = -EINVAL;
+ goto error;
+ }
+ if (WARN_ON(unlikely((obj->exact == true)
+ && (size != obj->block_size)))) {
+ retval = -EINVAL;
+ goto error;
+ }
+ if (WARN_ON(unlikely(align > obj->align))) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /*key = gatemp_enter(obj->gate); gate protection acquired in listmp */
+ block = listmp_get_head((struct listmp_object *) obj->free_list);
+ if (unlikely(block == NULL)) {
+ retval = -ENOMEM;
+ goto error;
+ }
+ key = gatemp_enter(obj->gate); /*gatemp call moved down */
+	if (unlikely(heapbufmp_module->cfg.track_allocs)) {
+#if 0
+		/* Make sure the attrs are not in cache */
+		if (unlikely(obj->cache_enabled)) {
+			Cache_inv((Ptr) obj->attrs,
+				sizeof(heapbufmp_attrs),
+				Cache_Type_ALL,
+				true);
+		}
+#endif
+		/* Update the tracking counters only when track_allocs is
+		 * enabled, consistent with heapbufmp_free()
+		 */
+		obj->attrs->num_free_blocks--;
+
+		if (obj->attrs->num_free_blocks
+			< obj->attrs->min_free_blocks) {
+			/* save the new minimum */
+			obj->attrs->min_free_blocks =
+					obj->attrs->num_free_blocks;
+		}
+	}
+#if 0
+ /* Make sure the attrs are written out to memory */
+ if (EXPECT_false(obj->cacheEnabled == true)) {
+ Cache_wbInv((Ptr) obj->attrs,
+ sizeof(heapbufmp_attrs),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+ gatemp_leave(obj->gate, key);
+
+ return block;
+error:
+ printk(KERN_ERR "heapbufmp_alloc failed status: %x\n", retval);
+ return NULL;
+}
+EXPORT_SYMBOL(heapbufmp_alloc);
+
+/*
+ * ======== heapbufmp_free ========
+ * Purpose:
+ * This will free a block of memory
+ */
+int heapbufmp_free(void *hphandle, void *block, u32 size)
+{
+ struct heapbufmp_object *handle = NULL;
+ s32 retval = 0;
+ struct heapbufmp_obj *obj = NULL;
+ int *key;
+
+ if (atomic_cmpmask_and_lt(&(heapbufmp_module->ref_count),
+ HEAPBUFMP_MAKE_MAGICSTAMP(0),
+ HEAPBUFMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+ if (WARN_ON(hphandle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+ if (WARN_ON(block == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ handle = (struct heapbufmp_object *)(hphandle);
+ obj = (struct heapbufmp_obj *)handle->obj;
+ if (WARN_ON(obj == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /* key = gatemp_enter(obj->gate); */
+ retval = listmp_put_tail(obj->free_list, block);
+ if (unlikely(retval < 0)) {
+ retval = -EFAULT;
+ goto error;
+ }
+ key = gatemp_enter(obj->gate); /*gatemp call moved down */
+ if (unlikely(heapbufmp_module->cfg.track_allocs)) {
+#if 0
+ /* Make sure the attrs are not in cache */
+ if (EXPECT_false(obj->cacheEnabled == true)) {
+ Cache_inv((Ptr) obj->attrs,
+ sizeof(heapbufmp_attrs),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+
+ obj->attrs->num_free_blocks++;
+#if 0
+ /* Make sure the attrs are written out to memory */
+ if (EXPECT_false(obj->cacheEnabled == true)) {
+ Cache_wbInv((Ptr) obj->attrs,
+ sizeof(heapbufmp_attrs),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+ }
+
+ gatemp_leave(obj->gate, key);
+
+ return 0;
+
+error:
+ printk(KERN_ERR "heapbufmp_free failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapbufmp_free);
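+
+/*
+ * Example (illustrative sketch, not from the original sources):
+ * allocating and releasing one block from a created/opened instance
+ * "heap"; the size and alignment are hypothetical:
+ *
+ *	void *block;
+ *
+ *	block = heapbufmp_alloc(heap, 256, 0);
+ *	if (block == NULL)
+ *		return -ENOMEM;
+ *	heapbufmp_free(heap, block, 256);
+ */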
+
+/*
+ * ======== heapbufmp_get_stats ========
+ * Purpose:
+ * This will get memory statistics
+ */
+void heapbufmp_get_stats(void *hphandle, struct memory_stats *stats)
+{
+ struct heapbufmp_object *object = NULL;
+ struct heapbufmp_obj *obj = NULL;
+ int *key;
+ s32 retval = 0;
+ u32 block_size;
+
+ if (atomic_cmpmask_and_lt(&(heapbufmp_module->ref_count),
+ HEAPBUFMP_MAKE_MAGICSTAMP(0),
+ HEAPBUFMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+ if (WARN_ON(hphandle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+ if (WARN_ON(stats == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ object = (struct heapbufmp_object *)(hphandle);
+ obj = (struct heapbufmp_obj *)object->obj;
+ if (WARN_ON(obj == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ block_size = obj->attrs->block_size;
+
+ if (unlikely(heapbufmp_module->cfg.track_allocs)) {
+
+ key = gatemp_enter(obj->gate);
+#if 0
+ /* Make sure the attrs are not in cache */
+ if (EXPECT_false(obj->cacheEnabled == true)) {
+ Cache_inv((Ptr) obj->attrs,
+ sizeof(heapbufmp_attrs),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+
+ stats->total_free_size = block_size * obj->attrs->
+ num_free_blocks;
+ stats->largest_free_size = (obj->attrs->num_free_blocks > 0) ?
+ block_size : 0; /* determined later */
+
+ gatemp_leave(obj->gate, key);
+ } else {
+ /* Tracking disabled */
+ stats->total_free_size = 0;
+ stats->largest_free_size = 0;
+ }
+ return;
+
+error:
+ if (retval < 0)
+ printk(KERN_ERR "heapbufmp_get_stats status: [0x%x]\n",
+ retval);
+}
+EXPORT_SYMBOL(heapbufmp_get_stats);
+
+/*
+ * ======== heapbufmp_isblocking ========
+ * Purpose:
+ * Indicate whether the heap may block during an alloc or free call
+ */
+bool heapbufmp_isblocking(void *handle)
+{
+ bool isblocking = false;
+ s32 retval = 0;
+
+ if (WARN_ON(handle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /* TBD: Figure out how to determine whether the gate is blocking */
+ isblocking = true;
+
+ /* retval true Heap blocks during alloc/free calls */
+ /* retval false Heap does not block during alloc/free calls */
+ return isblocking;
+
+error:
+ printk(KERN_ERR "heapbufmp_isblocking status: %x\n", retval);
+ return isblocking;
+}
+EXPORT_SYMBOL(heapbufmp_isblocking);
+
+/*
+ * ======== heapbufmp_get_extended_stats ========
+ * Purpose:
+ * This will get extended statistics
+ */
+void heapbufmp_get_extended_stats(void *hphandle,
+ struct heapbufmp_extended_stats *stats)
+{
+ s32 retval = 0;
+ struct heapbufmp_object *object = NULL;
+ struct heapbufmp_obj *obj = NULL;
+ int *key;
+
+ if (atomic_cmpmask_and_lt(&(heapbufmp_module->ref_count),
+ HEAPBUFMP_MAKE_MAGICSTAMP(0),
+ HEAPBUFMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+ if (WARN_ON(heapbufmp_module->nameserver == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+ if (WARN_ON(hphandle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ object = (struct heapbufmp_object *)(hphandle);
+ obj = (struct heapbufmp_obj *)object->obj;
+ if (WARN_ON(obj == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+#if 0
+ /* Make sure the attrs are not in cache */
+ if (EXPECT_false(obj->cacheEnabled == true)) {
+ Cache_inv((Ptr) obj->attrs,
+ sizeof(heapbufmp_attrs),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+ /*
+	 * The maximum number of allocations for this HeapBufMP (at any
+	 * given instant in time during its lifetime) is computed as follows:
+ *
+ * max_allocated_blocks = obj->num_blocks - obj->min_free_blocks
+ *
+ * Note that max_allocated_blocks is *not* the maximum allocation
+ * count, but rather the maximum allocations seen at any snapshot of
+ * time in the HeapBufMP instance.
+ */
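+	/* For example (hypothetical numbers): with num_blocks = 8 and
+	 * min_free_blocks = 3, max_allocated_blocks = 8 - 3 = 5, i.e. at
+	 * most five blocks were ever in use at the same time, however
+	 * many allocations were made in total.
+	 */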
+ key = gatemp_enter(obj->gate);
+ /* if nothing has been alloc'ed yet, return 0 */
+ if ((s32)(obj->attrs->min_free_blocks) == -1)
+ stats->max_allocated_blocks = 0;
+ else
+ stats->max_allocated_blocks = obj->attrs->num_blocks
+ - obj->attrs->min_free_blocks;
+
+ /*
+ * current # of alloc'ed blocks is computed
+ * using curr # of free blocks
+ */
+ stats->num_allocated_blocks = obj->attrs->num_blocks
+ - obj->attrs->num_free_blocks;
+
+ gatemp_leave(obj->gate, key);
+
+ return;
+
+error:
+ printk(KERN_ERR "heapbufmp_get_extended_stats status: %x\n",
+ retval);
+}
+EXPORT_SYMBOL(heapbufmp_get_extended_stats);
+
+/*
+ * ======== heapbufmp_shared_mem_req ========
+ * Purpose:
+ *      This will get the amount of shared memory required for
+ *      the creation of each instance
+ */
+int heapbufmp_shared_mem_req(const struct heapbufmp_params *params)
+{
+ int mem_req = 0;
+ struct listmp_params listmp_params;
+ u32 buf_align = 0;
+ u32 block_size = 0;
+
+ s32 status = 0;
+ u32 region_id;
+ u32 min_align;
+
+ if (WARN_ON(params == NULL)) {
+ status = -EINVAL;
+ goto error;
+ }
+ if (WARN_ON(params->block_size == 0)) {
+ status = -EINVAL;
+ goto error;
+ }
+ if (WARN_ON(params->num_blocks == 0)) {
+ status = -EINVAL;
+ goto error;
+ }
+
+ if (params->shared_addr == NULL)
+ region_id = params->region_id;
+ else
+ region_id = sharedregion_get_id(params->shared_addr);
+
+ if (region_id == SHAREDREGION_INVALIDREGIONID) {
+ status = -EFAULT;
+ goto error;
+ }
+
+ buf_align = params->align;
+
+ min_align = 4; /* memory_get_default_type_align() */
+ if (sharedregion_get_cache_line_size(region_id) > min_align)
+ min_align = sharedregion_get_cache_line_size(region_id);
+
+ if (buf_align < min_align)
+ buf_align = min_align;
+
+ /* Determine the actual block size */
+ block_size = ROUND_UP(params->block_size, buf_align);
+
+ /* Add size of HeapBufMP Attrs */
+ mem_req = ROUND_UP(sizeof(struct heapbufmp_attrs), min_align);
+
+ /*
+ * Add size of ListMP Attrs. No need to init params since it's
+ * not used to create.
+ */
+ listmp_params_init(&listmp_params);
+ listmp_params.region_id = region_id;
+ mem_req += listmp_shared_mem_req(&listmp_params);
+
+ /* Round by the buffer alignment */
+ mem_req = ROUND_UP(mem_req, buf_align);
+
+ /*
+ * Add the buffer size. No need to subsequently round because the
+ * product should be a multiple of cacheLineSize if cache alignment
+ * is enabled
+ */
+ mem_req += (block_size * params->num_blocks);
+
+ return mem_req;
+error:
+ printk(KERN_ERR "heapbufmp_shared_mem_req status: %x\n",
+ status);
+ return mem_req;
+}
+EXPORT_SYMBOL(heapbufmp_shared_mem_req);
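+
+/*
+ * Example (illustrative sketch, hypothetical values): for a region with
+ * a 128-byte cache line, params->block_size = 100 and
+ * params->num_blocks = 10, min_align becomes 128, the effective block
+ * size is rounded up to 128, and the total requirement is:
+ *
+ *	ROUND_UP(sizeof(struct heapbufmp_attrs), 128)
+ *	+ listmp_shared_mem_req(&listmp_params), rounded up to 128
+ *	+ (128 * 10) for the buffer itself
+ */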
+
+
+/*
+ * ======== heapbufmp_open_by_addr ========
+ * Purpose:
+ * Open existing heapbufmp based on address
+ */
+int
+heapbufmp_open_by_addr(void *shared_addr, void **handle_ptr)
+{
+ s32 retval = 0;
+ bool done_flag = false;
+ struct heapbufmp_attrs *attrs = NULL;
+ u16 id = 0;
+ struct heapbufmp_params params;
+ struct heapbufmp_obj *obj = NULL;
+
+ if (unlikely(atomic_cmpmask_and_lt(&(heapbufmp_module->ref_count),
+ HEAPBUFMP_MAKE_MAGICSTAMP(0),
+ HEAPBUFMP_MAKE_MAGICSTAMP(1))
+ == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+ if (unlikely(handle_ptr == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /* First check in the local list */
+ list_for_each_entry(obj, &heapbufmp_module->obj_list, list_elem) {
+ if (obj->params.shared_addr == shared_addr) {
+ retval = mutex_lock_interruptible(heapbufmp_module->
+ local_lock);
+ if (retval < 0)
+ goto error;
+
+ if (obj->owner.proc_id == multiproc_self())
+ obj->owner.open_count++;
+
+ mutex_unlock(heapbufmp_module->local_lock);
+ *handle_ptr = obj->top;
+ done_flag = true;
+ break;
+ }
+ }
+
+ /* If not already existing locally, create object locally for open. */
+ if (unlikely(done_flag == false)) {
+ heapbufmp_params_init(&params);
+ params.shared_addr = shared_addr;
+ attrs = (struct heapbufmp_attrs *) shared_addr;
+ id = sharedregion_get_id(shared_addr);
+#if 0
+ if (unlikely(sharedregion_is_cache_enabled(id))) {
+ Cache_inv(attrs,
+ sizeof(heapbufmp_attrs),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+ if (unlikely(attrs->status != HEAPBUFMP_CREATED)) {
+ *handle_ptr = NULL;
+ retval = -ENOENT;
+ goto error;
+ }
+
+ retval = _heapbufmp_create(handle_ptr, &params, false);
+
+ if (unlikely(retval < 0))
+ goto error;
+ }
+ return 0;
+
+error:
+ printk(KERN_ERR "heapbufmp_open_by_addr status: %x\n",
+ retval);
+
+ return retval;
+}
+
+
+/* =========================================================================
+ * Internal functions
+ * =========================================================================
+ */
+/*
+ * Shared memory Layout:
+ *
+ * sharedAddr -> ---------------------------
+ * | heapbufmp_attrs |
+ * | (min_align PADDING) |
+ * |-------------------------|
+ * | ListMP shared instance |
+ * | (bufAlign PADDING) |
+ * |-------------------------|
+ * | HeapBufMP BUFFER |
+ * |-------------------------|
+ */
+
+
+/*
+ * ======== heapbufmp_post_init ========
+ * Purpose:
+ * Slice and dice the buffer up into the correct size blocks and
+ * add to the freelist.
+ */
+int heapbufmp_post_init(struct heapbufmp_object *handle)
+{
+ s32 retval = 0;
+ char *buf = NULL;
+ struct heapbufmp_obj *obj = NULL;
+ struct heapbufmp_object *region_heap = NULL;
+ struct heapbufmp_params params;
+ struct listmp_params listmp_params;
+ u32 min_align;
+ u32 i;
+
+ obj = (struct heapbufmp_obj *)handle->obj;
+ if (WARN_ON(obj == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ min_align = 4; /* memory_get_default_type_align() */
+ if (sharedregion_get_cache_line_size(obj->region_id) > min_align)
+ min_align = sharedregion_get_cache_line_size(obj->region_id);
+
+ if (obj->attrs == NULL) {
+ heapbufmp_params_init(&params);
+ params.region_id = obj->region_id;
+ params.num_blocks = obj->num_blocks;
+ params.align = obj->align;
+ params.block_size = obj->block_size;
+ obj->alloc_size = heapbufmp_shared_mem_req(&params);
+
+ region_heap = sharedregion_get_heap(obj->region_id);
+ if (region_heap == NULL) {
+ retval = -EFAULT;
+ goto error;
+ }
+
+ obj->attrs = sl_heap_alloc(region_heap, obj->alloc_size,
+ min_align);
+ if (obj->attrs == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+ }
+
+ /* Store the GateMP sharedAddr in the HeapBuf Attrs */
+ obj->attrs->gatemp_addr = gatemp_get_shared_addr(obj->gate);
+
+ /* Create the free_list */
+ listmp_params_init(&listmp_params);
+ listmp_params.shared_addr = (void *)ROUND_UP((u32)obj->attrs
+ + sizeof(struct heapbufmp_attrs),
+ min_align);
+ listmp_params.gatemp_handle = obj->gate;
+ obj->free_list = listmp_create(&listmp_params);
+ if (obj->free_list == NULL) {
+ retval = -EFAULT;
+ goto error;
+ }
+
+	/* obj->buf gets alignment-adjusted just below */
+ obj->buf = (void *)((u32)listmp_params.shared_addr
+ + listmp_shared_mem_req(&listmp_params));
+ buf = obj->buf = (char *)ROUND_UP((u32)obj->buf, obj->align);
+
+ obj->attrs->num_free_blocks = obj->num_blocks;
+	obj->attrs->min_free_blocks = (u32)-1;
+ obj->attrs->block_size = obj->block_size;
+ obj->attrs->align = obj->align;
+ obj->attrs->num_blocks = obj->num_blocks;
+ obj->attrs->exact = obj->exact ? 1 : 0;
+
+ /* Put a SRPtr in attrs */
+ obj->attrs->buf_ptr = sharedregion_get_srptr(obj->buf,
+ obj->region_id);
+ BUG_ON(obj->attrs->buf_ptr == SHAREDREGION_INVALIDSRPTR);
+
+ /*
+ * Split the buffer into blocks that are length "block_size" and
+ * add into the free_list Queue.
+ */
+ for (i = 0; i < obj->num_blocks; i++) {
+ /* Add the block to the free_list */
+ retval = listmp_put_tail(obj->free_list,
+ (struct listmp_elem *)buf);
+ if (retval < 0) {
+ retval = -EFAULT;
+ goto created_free_list_error;
+ }
+
+ buf = (char *)((u32)buf + obj->block_size);
+ }
+
+ /* Last thing, set the status */
+ obj->attrs->status = HEAPBUFMP_CREATED;
+#if 0
+ if (unlikely(obj->cacheEnabled)) {
+ Cache_wbInv((Ptr) obj->attrs,
+ sizeof(heapbufmp_attrs),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+ return 0;
+
+created_free_list_error:
+ listmp_delete(&obj->free_list);
+
+error:
+	printk(KERN_ERR "heapbufmp_post_init status: %x\n", retval);
+ return retval;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/heapbufmp_ioctl.c b/drivers/dsp/syslink/multicore_ipc/heapbufmp_ioctl.c
new file mode 100644
index 000000000000..539c4eaa6a8b
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/heapbufmp_ioctl.c
@@ -0,0 +1,459 @@
+/*
+ * heapbufmp_ioctl.c
+ *
+ * Heap module manages fixed size buffers that can be used
+ * in a multiprocessor system with shared memory.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <sharedregion.h>
+#include <heap.h>
+#include <heapbufmp.h>
+#include <heapbufmp_ioctl.h>
+
+/*
+ * ======== heapbufmp_ioctl_alloc ========
+ * Purpose:
+ * This ioctl interface to heapbufmp_alloc function
+ */
+static int heapbufmp_ioctl_alloc(struct heapbufmp_cmd_args *cargs)
+{
+ u32 *block_srptr = SHAREDREGION_INVALIDSRPTR;
+ void *block;
+	s32 index = SHAREDREGION_INVALIDREGIONID;
+ s32 status = 0;
+
+ block = heapbufmp_alloc(cargs->args.alloc.handle,
+ cargs->args.alloc.size,
+ cargs->args.alloc.align);
+	if (block != NULL) {
+		index = sharedregion_get_id(block);
+		BUG_ON(index == SHAREDREGION_INVALIDREGIONID);
+		block_srptr = sharedregion_get_srptr(block, index);
+		BUG_ON(block_srptr == SHAREDREGION_INVALIDSRPTR);
+	}
+	/* A failure in heapbufmp_alloc() is indicated by a NULL pointer.
+	 * That condition is not treated as an error here; whatever the
+	 * heapbuf module returned is passed back, so the ioctl succeeds
+	 * even though the actual call may have failed inside heapbuf.
+	 */
+	cargs->args.alloc.block_srptr = block_srptr;
+ cargs->api_status = 0;
+ return status;
+}
+
+/*
+ * ======== heapbufmp_ioctl_free ========
+ * Purpose:
+ * This ioctl interface to heapbufmp_free function
+ */
+static int heapbufmp_ioctl_free(struct heapbufmp_cmd_args *cargs)
+{
+ char *block;
+
+ block = sharedregion_get_ptr(cargs->args.free.block_srptr);
+ BUG_ON(block == NULL);
+ cargs->api_status = heapbufmp_free(cargs->args.free.handle, block,
+ cargs->args.free.size);
+ return 0;
+}
+
+/*
+ * ======== heapbufmp_ioctl_params_init ========
+ * Purpose:
+ * This ioctl interface to heapbufmp_params_init function
+ */
+static int heapbufmp_ioctl_params_init(struct heapbufmp_cmd_args *cargs)
+{
+ struct heapbufmp_params params;
+ s32 status = 0;
+ u32 size;
+
+ heapbufmp_params_init(&params);
+ cargs->api_status = 0;
+ size = copy_to_user(cargs->args.params_init.params, &params,
+ sizeof(struct heapbufmp_params));
+ if (size)
+ status = -EFAULT;
+
+ return status;
+}
+
+/*
+ * ======== heapbufmp_ioctl_create ========
+ * Purpose:
+ * This ioctl interface to heapbufmp_create function
+ */
+static int heapbufmp_ioctl_create(struct heapbufmp_cmd_args *cargs)
+{
+ struct heapbufmp_params params;
+ s32 status = 0;
+ u32 size;
+ void *handle = NULL;
+
+ size = copy_from_user(&params, cargs->args.create.params,
+ sizeof(struct heapbufmp_params));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ if (cargs->args.create.name_len > 0) {
+		params.name = kmalloc(cargs->args.create.name_len + 1,
+					GFP_KERNEL);
+ if (params.name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ params.name[cargs->args.create.name_len] = '\0';
+ size = copy_from_user(params.name,
+ cargs->args.create.params->name,
+ cargs->args.create.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+ }
+
+ params.shared_addr = sharedregion_get_ptr((u32 *)
+ cargs->args.create.shared_addr_srptr);
+ params.gate = cargs->args.create.knl_gate;
+ handle = heapbufmp_create(&params);
+ cargs->args.create.handle = handle;
+ cargs->api_status = 0;
+
+name_from_usr_error:
+ if (cargs->args.create.name_len > 0)
+ kfree(params.name);
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== heapbufmp_ioctl_delete ========
+ * Purpose:
+ * This ioctl interface to heapbufmp_delete function
+ */
+static int heapbufmp_ioctl_delete(struct heapbufmp_cmd_args *cargs)
+{
+ cargs->api_status = heapbufmp_delete(&cargs->args.delete.handle);
+ return 0;
+}
+
+/*
+ * ======== heapbufmp_ioctl_open ========
+ * Purpose:
+ * This ioctl interface to heapbufmp_open function
+ */
+static int heapbufmp_ioctl_open(struct heapbufmp_cmd_args *cargs)
+{
+ s32 status = 0;
+ u32 size = 0;
+ void *handle = NULL;
+ char *name = NULL;
+
+ if (cargs->args.open.name_len > 0) {
+		name = kmalloc(cargs->args.open.name_len + 1, GFP_KERNEL);
+		if (name == NULL) {
+			status = -ENOMEM;
+			goto exit;
+		}
+
+		size = copy_from_user(name, cargs->args.open.name,
+					cargs->args.open.name_len);
+		if (size) {
+			status = -EFAULT;
+			kfree(name);
+			goto exit;
+		}
+		name[cargs->args.open.name_len] = '\0';
+	}
+
+	cargs->api_status = heapbufmp_open(name, &handle);
+ cargs->args.open.handle = handle;
+
+ if (cargs->args.open.name_len > 0)
+ kfree(name);
+exit:
+ return status;
+}
+
+/*
+ * ======== heapbufmp_ioctl_open_by_addr ========
+ * Purpose:
+ * This ioctl interface to heapbufmp_open_by_addr function
+ */
+static int heapbufmp_ioctl_open_by_addr(struct heapbufmp_cmd_args *cargs)
+{
+ void *handle = NULL;
+ void *shared_addr;
+
+ shared_addr = sharedregion_get_ptr(cargs->args.
+ open_by_addr.shared_addr_srptr);
+ cargs->api_status = heapbufmp_open_by_addr(
+ shared_addr, &handle);
+ cargs->args.open_by_addr.handle = handle;
+
+ return 0;
+}
+
+
+/*
+ * ======== heapbufmp_ioctl_close ========
+ * Purpose:
+ * This ioctl interface to heapbufmp_close function
+ */
+static int heapbufmp_ioctl_close(struct heapbufmp_cmd_args *cargs)
+{
+ cargs->api_status = heapbufmp_close(&cargs->args.close.handle);
+ return 0;
+}
+
+/*
+ * ======== heapbufmp_ioctl_shared_mem_req ========
+ * Purpose:
+ * This ioctl interface to heapbufmp_shared_mem_req function
+ */
+static int heapbufmp_ioctl_shared_mem_req(struct heapbufmp_cmd_args *cargs)
+{
+ struct heapbufmp_params params;
+ s32 status = 0;
+ ulong size;
+
+ size = copy_from_user(&params, cargs->args.shared_mem_req.params,
+ sizeof(struct heapbufmp_params));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ cargs->args.shared_mem_req.bytes = heapbufmp_shared_mem_req(&params);
+ cargs->api_status = 0;
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== heapbufmp_ioctl_get_config ========
+ * Purpose:
+ * This ioctl interface to heapbufmp_get_config function
+ */
+static int heapbufmp_ioctl_get_config(struct heapbufmp_cmd_args *cargs)
+{
+ struct heapbufmp_config config;
+ s32 status = 0;
+ ulong size;
+
+ cargs->api_status = heapbufmp_get_config(&config);
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct heapbufmp_config));
+ if (size)
+ status = -EFAULT;
+
+ return status;
+}
+
+/*
+ * ======== heapbufmp_ioctl_setup ========
+ * Purpose:
+ * This ioctl interface to heapbufmp_setup function
+ */
+static int heapbufmp_ioctl_setup(struct heapbufmp_cmd_args *cargs)
+{
+ struct heapbufmp_config config;
+ s32 status = 0;
+ ulong size;
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct heapbufmp_config));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = heapbufmp_setup(&config);
+
+exit:
+ return status;
+}
+/*
+ * ======== heapbufmp_ioctl_destroy ========
+ * Purpose:
+ * This ioctl interface to heapbufmp_destroy function
+ */
+static int heapbufmp_ioctl_destroy(struct heapbufmp_cmd_args *cargs)
+{
+ cargs->api_status = heapbufmp_destroy();
+ return 0;
+}
+
+
+/*
+ * ======== heapbufmp_ioctl_get_stats ========
+ * Purpose:
+ * This ioctl interface to heapbufmp_get_stats function
+ */
+static int heapbufmp_ioctl_get_stats(struct heapbufmp_cmd_args *cargs)
+{
+ struct memory_stats stats;
+ s32 status = 0;
+ ulong size;
+
+ heapbufmp_get_stats(cargs->args.get_stats.handle, &stats);
+ cargs->api_status = 0;
+
+ size = copy_to_user(cargs->args.get_stats.stats, &stats,
+ sizeof(struct memory_stats));
+ if (size)
+ status = -EFAULT;
+
+ return status;
+}
+
+/*
+ * ======== heapbufmp_ioctl_get_extended_stats ========
+ * Purpose:
+ * This ioctl interface to heapbufmp_get_extended_stats function
+ */
+static int heapbufmp_ioctl_get_extended_stats(struct heapbufmp_cmd_args *cargs)
+{
+ struct heapbufmp_extended_stats stats;
+ s32 status = 0;
+ ulong size;
+ heapbufmp_get_extended_stats(cargs->args.get_extended_stats.
+ handle, &stats);
+ cargs->api_status = 0;
+
+ size = copy_to_user(cargs->args.get_extended_stats.stats, &stats,
+ sizeof(struct heapbufmp_extended_stats));
+ if (size)
+ status = -EFAULT;
+
+ return status;
+}
+
+/*
+ * ======== heapbufmp_ioctl ========
+ * Purpose:
+ * This ioctl interface for heapbuf module
+ */
+int heapbufmp_ioctl(struct inode *pinode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ s32 status = 0;
+ s32 size = 0;
+ struct heapbufmp_cmd_args __user *uarg =
+ (struct heapbufmp_cmd_args __user *)args;
+ struct heapbufmp_cmd_args cargs;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+ if (status) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg,
+ sizeof(struct heapbufmp_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_HEAPBUFMP_ALLOC:
+ status = heapbufmp_ioctl_alloc(&cargs);
+ break;
+
+ case CMD_HEAPBUFMP_FREE:
+ status = heapbufmp_ioctl_free(&cargs);
+ break;
+
+ case CMD_HEAPBUFMP_PARAMS_INIT:
+ status = heapbufmp_ioctl_params_init(&cargs);
+ break;
+
+ case CMD_HEAPBUFMP_CREATE:
+ status = heapbufmp_ioctl_create(&cargs);
+ break;
+
+ case CMD_HEAPBUFMP_DELETE:
+ status = heapbufmp_ioctl_delete(&cargs);
+ break;
+
+ case CMD_HEAPBUFMP_OPEN:
+ status = heapbufmp_ioctl_open(&cargs);
+ break;
+
+ case CMD_HEAPBUFMP_OPENBYADDR:
+ status = heapbufmp_ioctl_open_by_addr(&cargs);
+ break;
+
+ case CMD_HEAPBUFMP_CLOSE:
+ status = heapbufmp_ioctl_close(&cargs);
+ break;
+
+ case CMD_HEAPBUFMP_SHAREDMEMREQ:
+ status = heapbufmp_ioctl_shared_mem_req(&cargs);
+ break;
+
+ case CMD_HEAPBUFMP_GETCONFIG:
+ status = heapbufmp_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_HEAPBUFMP_SETUP:
+ status = heapbufmp_ioctl_setup(&cargs);
+ break;
+
+ case CMD_HEAPBUFMP_DESTROY:
+ status = heapbufmp_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_HEAPBUFMP_GETSTATS:
+ status = heapbufmp_ioctl_get_stats(&cargs);
+ break;
+
+ case CMD_HEAPBUFMP_GETEXTENDEDSTATS:
+ status = heapbufmp_ioctl_get_extended_stats(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ status = -ENOTTY;
+ break;
+ }
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs,
+ sizeof(struct heapbufmp_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ return status;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/heapmemmp.c b/drivers/dsp/syslink/multicore_ipc/heapmemmp.c
new file mode 100644
index 000000000000..c5e1b9e163cd
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/heapmemmp.c
@@ -0,0 +1,1669 @@
+/*
+ * heapmemmp.c
+ *
+ * Heap module manages variable size buffers that can be used
+ * in a multiprocessor system with shared memory.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <atomic_linux.h>
+#include <multiproc.h>
+#include <nameserver.h>
+#include <sharedregion.h>
+#include <gatemp.h>
+#include <heapmemmp.h>
+
+/*
+ * Name of the reserved nameserver used for heapmemmp.
+ */
+#define HEAPMEMMP_NAMESERVER "HeapMemMP"
+#define HEAPMEMMP_MAX_NAME_LEN 32
+#define HEAPMEMMP_MAX_RUNTIME_ENTRIES 32
+#define HEAPMEMMP_CACHESIZE 128
+/* brief Macro to make a correct module magic number with ref_count */
+#define HEAPMEMMP_MAKE_MAGICSTAMP(x) ((HEAPMEMMP_MODULEID << 12) | (x))
+
+#define ROUND_UP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
+
+/*
+ * Structure defining processor related information for the
+ * heapmemmp module
+ */
+struct heapmemmp_proc_attrs {
+ bool creator; /* Creator or opener */
+ u16 proc_id; /* Processor identifier */
+ u32 open_count; /* open count in a processor */
+};
+
+/*
+ * heapmemmp header structure
+ */
+struct heapmemmp_header {
+ u32 *next; /* SRPtr to next header */
+ u32 size; /* Size of this segment */
+};
+
+/*
+ * Structure defining attribute parameters for the heapmemmp module
+ */
+struct heapmemmp_attrs {
+ VOLATILE u32 status; /* Module status */
+ VOLATILE u32 *buf_ptr; /* Memory managed by instance */
+ VOLATILE struct heapmemmp_header head; /* header */
+ VOLATILE u32 *gatemp_addr; /* gatemp shared address (shm safe) */
+};
+
+/*
+ * Structure for heapmemmp module state
+ */
+struct heapmemmp_module_object {
+ atomic_t ref_count; /* Reference count */
+ void *nameserver; /* Nameserver handle */
+ struct list_head obj_list; /* List holding created objects */
+ struct mutex *local_lock; /* lock for protecting obj_list */
+ struct heapmemmp_config cfg; /* Current config values */
+ struct heapmemmp_config default_cfg; /* Default config values */
+ struct heapmemmp_params default_inst_params; /* Default instance
+ creation parameters */
+};
+
+struct heapmemmp_module_object heapmemmp_state = {
+ .obj_list = LIST_HEAD_INIT(heapmemmp_state.obj_list),
+ .default_cfg.max_name_len = HEAPMEMMP_MAX_NAME_LEN,
+ .default_cfg.max_runtime_entries = HEAPMEMMP_MAX_RUNTIME_ENTRIES,
+ .default_inst_params.gate = NULL,
+ .default_inst_params.name = NULL,
+ .default_inst_params.region_id = 0,
+ .default_inst_params.shared_addr = NULL,
+ .default_inst_params.shared_buf_size = 0,
+};
+
+/* Pointer to module state */
+static struct heapmemmp_module_object *heapmemmp_module = &heapmemmp_state;
+
+/*
+ * Structure for the handle for the heapmemmp
+ */
+struct heapmemmp_obj {
+ struct list_head list_elem; /* Used for creating a linked list */
+ struct heapmemmp_attrs *attrs; /* The shared attributes structure */
+ void *gate; /* Lock used for critical region management */
+ void *ns_key; /* nameserver key required for remove */
+ bool cache_enabled; /* Whether to do cache calls */
+ u16 region_id; /* shared region index */
+ u32 alloc_size; /* Size of allocated shared memory */
+ char *buf; /* Pointer to allocated memory */
+ u32 min_align; /* Minimum alignment required */
+ u32 buf_size; /* Buffer Size */
+ struct heapmemmp_proc_attrs owner; /* owner processor info */
+ void *top; /* Pointer to the top object */
+ struct heapmemmp_params params; /* The creation parameter structure */
+};
+
+#define heapmemmp_object heap_object
+
+/* =============================================================================
+ * Forward declarations of internal functions
+ * =============================================================================
+ */
+static int heapmemmp_post_init(struct heapmemmp_object *handle);
+
+/* =============================================================================
+ * APIs called directly by applications
+ * =============================================================================
+ */
+
+/*
+ * This will get default configuration for the
+ * heapmemmp module
+ */
+int heapmemmp_get_config(struct heapmemmp_config *cfgparams)
+{
+ s32 retval = 0;
+
+ BUG_ON(cfgparams == NULL);
+
+ if (cfgparams == NULL) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (atomic_cmpmask_and_lt(&(heapmemmp_module->ref_count),
+ HEAPMEMMP_MAKE_MAGICSTAMP(0),
+ HEAPMEMMP_MAKE_MAGICSTAMP(1)) == true)
+ memcpy(cfgparams, &heapmemmp_module->default_cfg,
+ sizeof(struct heapmemmp_config));
+ else
+ memcpy(cfgparams, &heapmemmp_module->cfg,
+ sizeof(struct heapmemmp_config));
+ return 0;
+error:
+ printk(KERN_ERR "heapmemmp_get_config failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapmemmp_get_config);
+
+/*
+ * This will setup the heapmemmp module
+ *
+ * This function sets up the heapmemmp module. This function
+ * must be called before any other instance-level APIs can be
+ * invoked.
+ * Module-level configuration needs to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, then heapmemmp_get_config can be called to get
+ * the configuration filled with the default values. After this,
+ * only the required configuration values can be changed. If the
+ * user does not wish to make any change in the default parameters,
+ * the application can simply call heapmemmp_setup with NULL
+ * parameters. The default parameters would get automatically used.
+ */
+int heapmemmp_setup(const struct heapmemmp_config *cfg)
+{
+ struct nameserver_params params;
+ struct heapmemmp_config tmp_cfg;
+ s32 retval = 0;
+
+	/* Initialize the ref_count if it is not yet initialized; the upper
+	 * bits are written with the module ID to ensure correctness of the
+	 * ref_count variable
+ */
+ atomic_cmpmask_and_set(&heapmemmp_module->ref_count,
+ HEAPMEMMP_MAKE_MAGICSTAMP(0),
+ HEAPMEMMP_MAKE_MAGICSTAMP(0));
+ if (atomic_inc_return(&heapmemmp_module->ref_count)
+ != HEAPMEMMP_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ if (cfg == NULL) {
+ heapmemmp_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+
+ if (cfg->max_name_len == 0 ||
+ cfg->max_name_len > HEAPMEMMP_MAX_NAME_LEN) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /* Initialize the parameters */
+ nameserver_params_init(&params);
+ params.max_value_len = sizeof(u32);
+	params.max_name_len = cfg->max_name_len;
+	params.max_runtime_entries = cfg->max_runtime_entries;
+
+ /* Create the nameserver for modules */
+ heapmemmp_module->nameserver =
+ nameserver_create(HEAPMEMMP_NAMESERVER, &params);
+ if (heapmemmp_module->nameserver == NULL) {
+ retval = -EFAULT;
+ goto error;
+ }
+
+ /* Construct the list object */
+ INIT_LIST_HEAD(&heapmemmp_module->obj_list);
+ /* Copy config info */
+ memcpy(&heapmemmp_module->cfg, cfg, sizeof(struct heapmemmp_config));
+ /* Create a lock for protecting list object */
+	heapmemmp_module->local_lock = kmalloc(sizeof(struct mutex),
+						GFP_KERNEL);
+	if (heapmemmp_module->local_lock == NULL) {
+		retval = -ENOMEM;
+		heapmemmp_destroy();
+		goto error;
+	}
+	mutex_init(heapmemmp_module->local_lock);
+
+ return 0;
+
+error:
+ printk(KERN_ERR "heapmemmp_setup failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapmemmp_setup);
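+
+/*
+ * Example (illustrative sketch): the configuration sequence described
+ * above, with a hypothetical override of max_runtime_entries:
+ *
+ *	struct heapmemmp_config cfg;
+ *
+ *	heapmemmp_get_config(&cfg);
+ *	cfg.max_runtime_entries = 16;
+ *	if (heapmemmp_setup(&cfg) < 0)
+ *		return -EFAULT;
+ */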
+
+/*
+ * This will destroy the heapmemmp module
+ */
+int heapmemmp_destroy(void)
+{
+ s32 retval = 0;
+ struct mutex *lock = NULL;
+	struct heapmemmp_obj *obj = NULL;
+	struct heapmemmp_obj *temp = NULL;
+
+ if (atomic_cmpmask_and_lt(&(heapmemmp_module->ref_count),
+ HEAPMEMMP_MAKE_MAGICSTAMP(0),
+ HEAPMEMMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (atomic_dec_return(&heapmemmp_module->ref_count)
+ == HEAPMEMMP_MAKE_MAGICSTAMP(0)) {
+ /* Temporarily increment ref_count here. */
+ atomic_set(&heapmemmp_module->ref_count,
+ HEAPMEMMP_MAKE_MAGICSTAMP(1));
+
+ /* Check if any heapmemmp instances have not been
+		 * deleted/closed so far. If there are any, delete or close them
+ */
+		list_for_each_entry_safe(obj, temp,
+				&heapmemmp_module->obj_list, list_elem) {
+			if (obj->owner.proc_id == multiproc_get_id(NULL))
+				retval = heapmemmp_delete(&obj->top);
+			else
+				retval = heapmemmp_close(&obj->top);
+
+			if (retval < 0)
+				goto error;
+
+			if (list_empty(&heapmemmp_module->obj_list))
+				break;
+		}
+
+ /* Again reset ref_count. */
+ atomic_set(&heapmemmp_module->ref_count,
+ HEAPMEMMP_MAKE_MAGICSTAMP(0));
+
+ if (likely(heapmemmp_module->nameserver != NULL)) {
+ retval = nameserver_delete(&heapmemmp_module->
+ nameserver);
+ if (unlikely(retval != 0))
+ goto error;
+ }
+
+ /* Delete the list lock */
+ lock = heapmemmp_module->local_lock;
+ retval = mutex_lock_interruptible(lock);
+ if (retval)
+ goto error;
+
+ heapmemmp_module->local_lock = NULL;
+ mutex_unlock(lock);
+ kfree(lock);
+ memset(&heapmemmp_module->cfg, 0,
+ sizeof(struct heapmemmp_config));
+ }
+
+ return 0;
+
+error:
+ printk(KERN_ERR "heapmemmp_destroy failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapmemmp_destroy);
+
+/*
+ * This will get the initialization params for a heapmemmp
+ * module instance
+ */
+void heapmemmp_params_init(struct heapmemmp_params *params)
+{
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(heapmemmp_module->ref_count),
+ HEAPMEMMP_MAKE_MAGICSTAMP(0),
+ HEAPMEMMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ BUG_ON(params == NULL);
+
+ memcpy(params, &heapmemmp_module->default_inst_params,
+ sizeof(struct heapmemmp_params));
+
+ return;
+error:
+ printk(KERN_ERR "heapmemmp_params_init failed status: %x\n", retval);
+}
+EXPORT_SYMBOL(heapmemmp_params_init);
+
+/*
+ * This will create a new instance of heapmemmp module
+ * This is an internal function as both heapmemmp_create
+ * and heapmemmp_open use the functionality
+ *
+ * NOTE: The lock to protect the shared memory area
+ * used by heapmemmp is provided by the consumer of
+ * heapmemmp module
+ */
+int _heapmemmp_create(void **handle_ptr, const struct heapmemmp_params *params,
+ u32 create_flag)
+{
+ s32 retval = 0;
+ struct heapmemmp_obj *obj = NULL;
+ struct heapmemmp_object *handle = NULL;
+ void *gate_handle = NULL;
+ void *local_addr = NULL;
+ u32 *shared_shm_base;
+
+ if (atomic_cmpmask_and_lt(&(heapmemmp_module->ref_count),
+ HEAPMEMMP_MAKE_MAGICSTAMP(0),
+ HEAPMEMMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ BUG_ON(handle_ptr == NULL);
+
+ BUG_ON(params == NULL);
+
+ /* No need for parameter checks, since this is an internal function. */
+
+ /* Initialize return parameter. */
+ *handle_ptr = NULL;
+
+ handle = kmalloc(sizeof(struct heapmemmp_object), GFP_KERNEL);
+ if (handle == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ obj = kmalloc(sizeof(struct heapmemmp_obj), GFP_KERNEL);
+ if (obj == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ handle->obj = (struct heapmemmp_obj *)obj;
+ handle->alloc = &heapmemmp_alloc;
+ handle->free = &heapmemmp_free;
+ handle->get_stats = &heapmemmp_get_stats;
+ handle->is_blocking = &heapmemmp_isblocking;
+
+ obj->ns_key = NULL;
+ obj->alloc_size = 0;
+
+	/* Put in local list */
+ retval = mutex_lock_interruptible(heapmemmp_module->local_lock);
+ if (retval < 0)
+ goto error;
+
+ INIT_LIST_HEAD(&obj->list_elem);
+	list_add(&obj->list_elem, &heapmemmp_module->obj_list);
+ mutex_unlock(heapmemmp_module->local_lock);
+
+ if (create_flag == false) {
+ obj->owner.creator = false;
+ obj->owner.open_count = 0;
+ obj->owner.proc_id = MULTIPROC_INVALIDID;
+ obj->top = handle;
+
+ obj->attrs = (struct heapmemmp_attrs *) params->shared_addr;
+
+ /* No need to Cache_inv- already done in openByAddr() */
+ obj->buf = (char *) sharedregion_get_ptr((u32 *)obj->
+ attrs->buf_ptr);
+ obj->buf_size = obj->attrs->head.size;
+ obj->region_id = sharedregion_get_id(obj->buf);
+ obj->cache_enabled = sharedregion_is_cache_enabled(obj->
+ region_id);
+
+ /* Set min_align */
+ obj->min_align = sizeof(struct heapmemmp_header);
+ if (sharedregion_get_cache_line_size(obj->region_id)
+ > obj->min_align) {
+ obj->min_align = sharedregion_get_cache_line_size(
+ obj->region_id);
+ }
+
+ local_addr = sharedregion_get_ptr((u32 *)obj->attrs->
+ gatemp_addr);
+ retval = gatemp_open_by_addr(local_addr, &gate_handle);
+ if (retval < 0) {
+ retval = -EFAULT;
+ goto error;
+ }
+ obj->gate = gate_handle;
+
+
+ } else {
+ obj->owner.creator = true;
+ obj->owner.open_count = 1;
+ obj->owner.proc_id = multiproc_self();
+ obj->top = handle;
+
+ /* Creating the gate */
+ if (params->gate != NULL)
+ obj->gate = params->gate;
+ else {
+ /* If no gate specified, get the default system gate */
+ obj->gate = gatemp_get_default_remote();
+ }
+
+ if (obj->gate == NULL) {
+ retval = -EFAULT;
+ goto error;
+ }
+
+ obj->buf_size = params->shared_buf_size;
+
+ if (params->shared_addr == NULL) {
+ /* Creating using a shared region ID */
+			/* A NULL name is allowed for an anonymous heap
+			 * that is not to be opened by name.
+ */
+ /* Will be allocated in post_init */
+ obj->attrs = NULL;
+ obj->region_id = params->region_id;
+ } else {
+ /* Creating using shared_addr */
+ obj->region_id = sharedregion_get_id(params->
+ shared_addr);
+
+ /* Assert that the buffer is in a valid shared
+ * region
+ */
+ if (obj->region_id == SHAREDREGION_INVALIDREGIONID) {
+ retval = -EFAULT;
+ goto error;
+ } else if ((u32) params->shared_addr
+ % sharedregion_get_cache_line_size(obj->
+ region_id) != 0) {
+ retval = -EFAULT;
+ goto error;
+ }
+ /* obj->buf will get alignment-adjusted in
+ * postInit
+ */
+ obj->buf = (char *)((u32)params->shared_addr + \
+ sizeof(struct heapmemmp_attrs));
+ obj->attrs = (struct heapmemmp_attrs *)
+ params->shared_addr;
+ }
+
+ obj->cache_enabled = sharedregion_is_cache_enabled(
+ obj->region_id);
+
+ /* Set min_align */
+ obj->min_align = sizeof(struct heapmemmp_header);
+ if (sharedregion_get_cache_line_size(obj->region_id)
+ > obj->min_align)
+ obj->min_align = sharedregion_get_cache_line_size(
+ obj->region_id);
+ retval = heapmemmp_post_init(handle);
+ if (retval < 0) {
+ retval = -EFAULT;
+ goto error;
+ }
+
+ /* Populate the params member */
+ memcpy(&obj->params, params, sizeof(struct heapmemmp_params));
+ if (params->name != NULL) {
+ obj->params.name = kmalloc(strlen(params->name) + 1,
+ GFP_KERNEL);
+ if (obj->params.name == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+ strncpy(obj->params.name, params->name,
+ strlen(params->name) + 1);
+ }
+
+ /* We will store a shared pointer in the NameServer */
+ shared_shm_base = sharedregion_get_srptr(obj->attrs,
+ obj->region_id);
+ if (obj->params.name != NULL) {
+ obj->ns_key =
+ nameserver_add_uint32(heapmemmp_module->nameserver,
+ params->name,
+ (u32) shared_shm_base);
+ if (obj->ns_key == NULL) {
+ retval = -EFAULT;
+ goto error;
+ }
+ }
+ }
+
+ *handle_ptr = (void *)handle;
+ return retval;
+
+error:
+ /* Do whatever cleanup is required*/
+ if (create_flag == true)
+ heapmemmp_delete(handle_ptr);
+ else
+ heapmemmp_close(handle_ptr);
+ printk(KERN_ERR "_heapmemmp_create failed status: %x\n", retval);
+ return retval;
+}
+
+/*
+ * This will create a new instance of heapmemmp module
+ */
+void *heapmemmp_create(const struct heapmemmp_params *params)
+{
+ s32 retval = 0;
+ struct heapmemmp_object *handle = NULL;
+ struct heapmemmp_params sparams;
+
+ BUG_ON(params == NULL);
+
+ if (atomic_cmpmask_and_lt(&(heapmemmp_module->ref_count),
+ HEAPMEMMP_MAKE_MAGICSTAMP(0),
+ HEAPMEMMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (params == NULL) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (params->shared_buf_size == 0) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ memcpy(&sparams, (void *)params, sizeof(struct heapmemmp_params));
+	retval = _heapmemmp_create((void **)&handle, &sparams, true);
+ if (retval < 0)
+ goto error;
+
+ return (void *)handle;
+
+error:
+ printk(KERN_ERR "heapmemmp_create failed status: %x\n", retval);
+ return (void *)handle;
+}
+EXPORT_SYMBOL(heapmemmp_create);
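+
+/*
+ * A minimal creation sketch (illustrative only): it assumes SharedRegion 0
+ * has been configured and that gatemp_get_default_remote() returns a valid
+ * gate; the instance name "myHeap" is a made-up example.
+ *
+ *	struct heapmemmp_params params;
+ *	void *heap;
+ *
+ *	heapmemmp_params_init(&params);
+ *	params.name = "myHeap";
+ *	params.region_id = 0;
+ *	params.shared_buf_size = 0x1000;
+ *	heap = heapmemmp_create(&params);
+ *	if (heap == NULL)
+ *		return -ENOMEM;
+ *	...
+ *	heapmemmp_delete(&heap);
+ */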
+
+/*
+ * This will delete an instance of heapmemmp module
+ */
+int heapmemmp_delete(void **handle_ptr)
+{
+ struct heapmemmp_object *handle = NULL;
+ struct heapmemmp_obj *obj = NULL;
+ struct heapmemmp_params *params = NULL;
+ struct heapmemmp_object *region_heap = NULL;
+ s32 retval = 0;
+	int *key = NULL;
+
+ if (atomic_cmpmask_and_lt(&(heapmemmp_module->ref_count),
+ HEAPMEMMP_MAKE_MAGICSTAMP(0),
+ HEAPMEMMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(handle_ptr == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ handle = (struct heapmemmp_object *)(*handle_ptr);
+ if (WARN_ON(handle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ obj = (struct heapmemmp_obj *)handle->obj;
+ if (obj != NULL) {
+ if (obj->owner.proc_id != multiproc_self()) {
+			retval = -ENODEV;
+ goto error;
+ }
+
+ /* Take the local lock */
+ key = gatemp_enter(obj->gate);
+
+ if (obj->owner.open_count > 1) {
+ retval = -ENODEV;
+ goto device_busy_error;
+ }
+
+ retval = mutex_lock_interruptible(heapmemmp_module->local_lock);
+ if (retval < 0)
+ goto lock_error;
+
+		/* Remove from the local list */
+ list_del(&obj->list_elem);
+
+ mutex_unlock(heapmemmp_module->local_lock);
+
+ params = (struct heapmemmp_params *) &obj->params;
+
+ if (likely(params->name != NULL)) {
+ if (likely(obj->ns_key != NULL)) {
+ nameserver_remove_entry(heapmemmp_module->
+ nameserver, obj->ns_key);
+ obj->ns_key = NULL;
+ }
+ kfree(params->name);
+ }
+
+ /* Set status to 'not created' */
+ if (obj->attrs != NULL) {
+#if 0
+ obj->attrs->status = 0;
+ if (obj->cache_enabled) {
+ cache_wbinv(obj->attrs,
+ sizeof(struct heapmemmp_attrs),
+ CACHE_TYPE_ALL, true);
+ }
+#endif
+ }
+
+ /* Release the shared lock */
+ gatemp_leave(obj->gate, key);
+
+ /* If necessary, free shared memory if memory is internally
+ * allocated
+ */
+ region_heap = sharedregion_get_heap(obj->region_id);
+
+ if ((region_heap != NULL) &&
+ (obj->params.shared_addr == NULL) &&
+ (obj->attrs != NULL)) {
+ sl_heap_free(region_heap, obj->attrs, obj->alloc_size);
+ }
+
+		kfree(obj);
+		kfree(handle);
+
+ *handle_ptr = NULL;
+ } else { /* obj == NULL */
+ kfree(handle);
+ *handle_ptr = NULL;
+ }
+
+ return 0;
+
+lock_error:
+device_busy_error:
+ gatemp_leave(obj->gate, key);
+
+error:
+ printk(KERN_ERR "heapmemmp_delete failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapmemmp_delete);
+
+/*
+ * This will open a created instance of the heapmemmp
+ * module
+ */
+int heapmemmp_open(char *name, void **handle_ptr)
+{
+ s32 retval = 0;
+ u32 *shared_shm_base = SHAREDREGION_INVALIDSRPTR;
+ void *shared_addr = NULL;
+ struct heapmemmp_obj *obj = NULL;
+ bool done_flag = false;
+ struct list_head *elem = NULL;
+
+ BUG_ON(name == NULL);
+ BUG_ON(handle_ptr == NULL);
+
+ if (unlikely(
+ atomic_cmpmask_and_lt(&(heapmemmp_module->ref_count),
+ HEAPMEMMP_MAKE_MAGICSTAMP(0),
+ HEAPMEMMP_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (name == NULL) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (handle_ptr == NULL) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /* First check in the local list */
+ list_for_each(elem, &heapmemmp_module->obj_list) {
+ obj = (struct heapmemmp_obj *)elem;
+ if (obj->params.name != NULL) {
+ if (strcmp(obj->params.name, name)
+ == 0) {
+ retval = mutex_lock_interruptible(
+ heapmemmp_module->local_lock);
+ if (retval < 0)
+ goto error;
+				/* Check whether we created this heapmemmp */
+ if (obj->owner.proc_id == multiproc_self())
+ obj->owner.open_count++;
+
+ *handle_ptr = (void *)obj->top;
+ mutex_unlock(heapmemmp_module->local_lock);
+ done_flag = true;
+ break;
+ }
+ }
+ }
+
+ if (likely(done_flag == false)) {
+ /* Find in name server */
+ retval = nameserver_get_uint32(heapmemmp_module->nameserver,
+ name,
+ &shared_shm_base,
+ NULL);
+ if (unlikely(retval < 0))
+ goto error;
+
+ /*
+ * Convert from shared region pointer to local address
+ */
+ shared_addr = sharedregion_get_ptr(shared_shm_base);
+ if (unlikely(shared_addr == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ retval = heapmemmp_open_by_addr(shared_addr, handle_ptr);
+
+ if (unlikely(retval < 0))
+ goto error;
+ }
+
+ return 0;
+
+error:
+ printk(KERN_ERR "heapmemmp_open failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapmemmp_open);
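+
+/*
+ * Opening by name from another client (illustrative sketch; "myHeap" must
+ * match the name the creating processor passed to heapmemmp_create()):
+ *
+ *	void *heap = NULL;
+ *
+ *	if (heapmemmp_open("myHeap", &heap) == 0) {
+ *		...
+ *		heapmemmp_close(&heap);
+ *	}
+ */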
+
+/*
+ * This will close a previously opened/created instance
+ * of the heapmemmp module
+ */
+int heapmemmp_close(void **handle_ptr)
+{
+ struct heapmemmp_object *handle = NULL;
+ struct heapmemmp_obj *obj = NULL;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(heapmemmp_module->ref_count),
+ HEAPMEMMP_MAKE_MAGICSTAMP(0),
+ HEAPMEMMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(handle_ptr == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (WARN_ON(*handle_ptr == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ handle = (struct heapmemmp_object *)(*handle_ptr);
+ obj = (struct heapmemmp_obj *)handle->obj;
+
+ if (obj != NULL) {
+ retval = mutex_lock_interruptible(heapmemmp_module->
+ local_lock);
+ if (retval)
+ goto error;
+
+ /* opening an instance created locally */
+ if (obj->owner.proc_id == multiproc_self())
+ obj->owner.open_count--;
+
+ /* Check if HeapMemMP is opened on same processor and
+ * this is the last closure.
+ */
+ if ((obj->owner.creator == false) &&
+ (obj->owner.open_count == 0)) {
+ list_del(&obj->list_elem);
+
+ if (obj->gate != NULL) {
+ /* Close the instance gate */
+ gatemp_close(&obj->gate);
+ }
+
+ /* Now free the handle */
+ kfree(obj);
+ obj = NULL;
+ kfree(handle);
+ *handle_ptr = NULL;
+ }
+
+ mutex_unlock(heapmemmp_module->local_lock);
+ } else {
+ kfree(handle);
+ *handle_ptr = NULL;
+ }
+ return 0;
+
+error:
+ printk(KERN_ERR "heapmemmp_close failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapmemmp_close);
+
+/*
+ * This will allocate a block of memory
+ */
+void *heapmemmp_alloc(void *hphandle, u32 size, u32 align)
+{
+ char *alloc_addr = NULL;
+ struct heapmemmp_object *handle = NULL;
+ struct heapmemmp_obj *obj = NULL;
+	int *key = NULL;
+ struct heapmemmp_header *prev_header;
+ struct heapmemmp_header *new_header;
+ struct heapmemmp_header *cur_header;
+ u32 cur_size;
+ u32 adj_size;
+ u32 remain_size;
+ u32 adj_align;
+ u32 offset;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(heapmemmp_module->ref_count),
+ HEAPMEMMP_MAKE_MAGICSTAMP(0),
+ HEAPMEMMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+ if (WARN_ON(hphandle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+ if (WARN_ON(size == 0)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ handle = (struct heapmemmp_object *)(hphandle);
+ obj = (struct heapmemmp_obj *)handle->obj;
+ if (WARN_ON(obj == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ adj_size = size;
+
+	/* Make the requested size a multiple of min_align */
+ offset = (adj_size & (obj->min_align - 1));
+ if (offset != 0)
+ adj_size += (obj->min_align - offset);
+
+	/*
+	 * Make sure the alignment is at least as large as obj->min_align.
+	 * Note: adj_align must be a power of 2 (by function constraint) and
+	 * obj->min_align is also a power of 2.
+	 */
+ adj_align = align;
+ if (adj_align == 0)
+ adj_align = obj->min_align;
+
+ if (adj_align & (obj->min_align - 1))
+ /* adj_align is less than obj->min_align */
+ adj_align = obj->min_align;
+
+	/* No need to Cache_inv attrs - 'head' should be constant */
+ prev_header = (struct heapmemmp_header *) &obj->attrs->head;
+
+ key = gatemp_enter(obj->gate);
+ /*
+ * The block will be allocated from cur_header. Maintain a pointer to
+ * prev_header so prev_header->next can be updated after the alloc.
+ */
+#if 0
+ if (unlikely(obj->cache_enabled))
+ Cache_inv(prev_header,
+ sizeof(struct heapmemmp_header),
+ Cache_Type_ALL,
+ true); /* A1 */
+#endif
+ cur_header = (struct heapmemmp_header *)
+ sharedregion_get_ptr(prev_header->next);
+ /* A1 */
+
+ /* Loop over the free list. */
+ while (cur_header != NULL) {
+#if 0
+ /* Invalidate cur_header */
+ if (unlikely(obj->cache_enabled))
+ Cache_inv(cur_header,
+ sizeof(struct heapmemmp_header),
+ Cache_Type_ALL,
+ true); /* A2 */
+#endif
+
+ cur_size = cur_header->size;
+
+ /*
+ * Determine the offset from the beginning to make sure
+ * the alignment request is honored.
+ */
+ offset = (u32)cur_header & (adj_align - 1);
+ if (offset)
+ offset = adj_align - offset;
+
+ /* Internal Assert that offset is a multiple of */
+ /* obj->min_align */
+		if (((offset & (obj->min_align - 1)) != 0)) {
+			retval = -EINVAL;
+			/* Drop the gate before bailing out */
+			gatemp_leave(obj->gate, key);
+			goto error;
+		}
+ /* big enough? */
+
+ /* This is the "else" part of the next if block, but we are */
+ /* moving it here to save indent space */
+ if (cur_size < (adj_size + offset)) {
+ prev_header = cur_header;
+ cur_header = sharedregion_get_ptr(cur_header->next);
+ /* We can quit this iteration of the while loop here */
+ continue;
+ }
+
+ /* if (cur_size >= (adj_size + offset)) */
+
+ /* Set the pointer that will be returned. */
+ /* Alloc from front */
+ alloc_addr = (char *) ((u32) cur_header + offset);
+ /*
+ * Determine the remaining memory after the
+ * allocated block.
+ * Note: this cannot be negative because of above
+ * comparison.
+ */
+ remain_size = cur_size - adj_size - offset;
+
+ /* Internal Assert that remain_size is a multiple of
+ * obj->min_align
+ */
+ if (((remain_size & (obj->min_align - 1)) != 0)) {
+			alloc_addr = NULL;
+ break;
+ }
+ /*
+ * If there is memory at the beginning (due to alignment
+ * requirements), maintain it in the list.
+ *
+ * offset and remain_size must be multiples of sizeof(struct
+ * heapmemmp_header). Therefore the address of the new_header
+ * below must be a multiple of the sizeof(struct
+ * heapmemmp_header), thus maintaining the requirement.
+ */
+ if (offset) {
+ /* Adjust the cur_header size accordingly */
+ cur_header->size = offset; /* B2 */
+ /* Cache wb at end of this if block */
+
+ /*
+ * If there is remaining memory, add into the free
+ * list.
+ * Note: no need to coalesce and we have heapmemmp
+ * locked so it is safe.
+ */
+ if (offset && remain_size) {
+ new_header = (struct heapmemmp_header *)
+ ((u32) alloc_addr + adj_size);
+
+ /* cur_header has been inv at top of 'while' */
+ /* loop */
+ new_header->next = cur_header->next; /* B1 */
+ new_header->size = remain_size; /* B1 */
+#if 0
+ if (unlikely(obj->cache_enabled))
+ /* Writing back cur_header will */
+ /* cache-wait */
+ Cache_wbInv(new_header,
+ sizeof(struct
+ heapmemmp_header),
+ Cache_Type_ALL,
+ false); /* B1 */
+#endif
+
+ cur_header->next = sharedregion_get_srptr
+ (new_header,
+ obj->region_id);
+ BUG_ON(cur_header->next
+ == SHAREDREGION_INVALIDSRPTR);
+ }
+#if 0
+ /* Write back (and invalidate) new_header and */
+ /* cur_header */
+ if (unlikely(obj->cache_enabled))
+ /* B2 */
+ Cache_wbInv(cur_header,
+ sizeof(struct heapmemmp_header),
+ Cache_Type_ALL,
+ true);
+#endif
+ } else if (remain_size) {
+ /*
+ * If there is any remaining, link it in,
+ * else point to the next free block.
+ * Note: no need to coalesce and we have heapmemmp
+ * locked so it is safe.
+ */
+
+ new_header = (struct heapmemmp_header *)
+ ((u32) alloc_addr + adj_size);
+
+ new_header->next = cur_header->next; /* A2, B3 */
+ new_header->size = remain_size; /* B3 */
+
+#if 0
+ if (unlikely(obj->cache_enabled))
+ /* Writing back prev_header will cache-wait */
+ Cache_wbInv(new_header,
+ sizeof(struct heapmemmp_header),
+ Cache_Type_ALL,
+ false); /* B3 */
+#endif
+
+ /* B4 */
+ prev_header->next = sharedregion_get_srptr(new_header,
+ obj->region_id);
+ } else
+ /* cur_header has been inv at top of 'while' loop */
+ prev_header->next = cur_header->next; /* A2, B4 */
+
+#if 0
+ if (unlikely(obj->cache_enabled))
+ /* B4 */
+ Cache_wbInv(prev_header,
+ sizeof(struct heapmemmp_header),
+ Cache_Type_ALL,
+ true);
+#endif
+
+ /* Success, return the allocated memory */
+ break;
+
+ }
+
+ gatemp_leave(obj->gate, key);
+
+ if (alloc_addr == NULL)
+ printk(KERN_ERR "heapmemmp_alloc returned NULL\n");
+ return alloc_addr;
+
+error:
+ printk(KERN_ERR "heapmemmp_alloc failed status: %x\n", retval);
+ return NULL;
+}
+EXPORT_SYMBOL(heapmemmp_alloc);
+
+/*
+ * This will free a block of memory
+ */
+int heapmemmp_free(void *hphandle, void *addr, u32 size)
+{
+ struct heapmemmp_object *handle = NULL;
+ s32 retval = 0;
+ struct heapmemmp_obj *obj = NULL;
+	int *key = NULL;
+ struct heapmemmp_header *next_header;
+ struct heapmemmp_header *new_header;
+ struct heapmemmp_header *cur_header;
+ u32 offset;
+
+ if (atomic_cmpmask_and_lt(&(heapmemmp_module->ref_count),
+ HEAPMEMMP_MAKE_MAGICSTAMP(0),
+ HEAPMEMMP_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto error;
+ }
+ if (WARN_ON(hphandle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+ if (WARN_ON(addr == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ handle = (struct heapmemmp_object *)(hphandle);
+ obj = (struct heapmemmp_obj *)handle->obj;
+ if (WARN_ON(obj == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * obj->attrs never changes, doesn't need Gate protection
+ * and Cache invalidate
+ */
+ cur_header = (struct heapmemmp_header *) &(obj->attrs->head);
+
+ /* Restore size to actual allocated size */
+ offset = size & (obj->min_align - 1);
+ if (offset != 0)
+ size += obj->min_align - offset;
+
+ key = gatemp_enter(obj->gate);
+
+ new_header = (struct heapmemmp_header *) addr;
+
+#if 0
+	if (unlikely(obj->cache_enabled)) {
+ /* A1 */
+ Cache_inv(cur_header,
+ sizeof(struct heapmemmp_header),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+ next_header = sharedregion_get_ptr(cur_header->next);
+
+ if (unlikely(!(((u32)new_header >= (u32)obj->buf)
+ && ((u32)new_header + size
+ <= (u32)obj->buf + obj->buf_size)))) {
+		retval = -EFAULT;
+		goto error_leave;
+ }
+ /* Go down freelist and find right place for buf */
+ while ((next_header != NULL) && (next_header < new_header)) {
+#if 0
+		if (unlikely(obj->cache_enabled))
+ Cache_inv(next_header,
+ sizeof(struct heapmemmp_header),
+ Cache_Type_ALL,
+ true); /* A2 */
+#endif
+
+ /* Make sure the addr is not in this free block */
+		if (unlikely((u32)new_header <
+ ((u32)next_header + next_header->size))) {
+ /* A2 */
+			retval = -EFAULT;
+			goto error_leave;
+ }
+
+ cur_header = next_header;
+ /* A2 */
+ next_header = sharedregion_get_ptr(next_header->next);
+ }
+
+ new_header->next = sharedregion_get_srptr(next_header,
+ obj->region_id);
+ new_header->size = size;
+
+ /* B1, A1 */
+ cur_header->next = sharedregion_get_srptr(new_header,
+ obj->region_id);
+
+ /* Join contiguous free blocks */
+ if (next_header != NULL) {
+ /*
+ * Verify the free size is not overlapping. Not all cases
+ * are detectable, but it is worth a shot. Note: only do
+ * this assert if next_header is non-NULL.
+ */
+ if (unlikely(((u32)new_header + size) > (u32)next_header)) {
+ /* A2 */
+			retval = -EFAULT;
+			goto error_leave;
+ }
+ /* Join with upper block */
+ if (((u32)new_header + size) == (u32)next_header) {
+#if 0
+			if (unlikely(obj->cache_enabled))
+ Cache_inv(next_header,
+ sizeof(struct heapmemmp_header),
+ Cache_Type_ALL,
+ true);
+#endif
+ new_header->next = next_header->next; /* A2, B2 */
+ new_header->size += next_header->size; /* A2, B2 */
+ /* Don't Cache_wbInv, this will be done later */
+ }
+ }
+
+ /*
+ * Join with lower block. Make sure to check to see if not the
+ * first block. No need to invalidate attrs since head
+ * shouldn't change.
+ */
+ if ((cur_header != &obj->attrs->head)
+ && (((u32) cur_header + cur_header->size)
+ == (u32) new_header)) {
+ /*
+ * Don't Cache_inv new_header since new_header has
+ * data that hasn't been written back yet (B2)
+ */
+ cur_header->next = new_header->next; /* B1, B2 */
+ cur_header->size += new_header->size; /* B1, B2 */
+ }
+#if 0
+	if (unlikely(obj->cache_enabled)) {
+ Cache_wbInv(cur_header,
+ sizeof(struct heapmemmp_header),
+ Cache_Type_ALL,
+ false); /* B1 */
+ Cache_wbInv(new_header,
+ sizeof(struct heapmemmp_header),
+ Cache_Type_ALL,
+ true); /* B2 */
+ }
+#endif
+
+ gatemp_leave(obj->gate, key);
+ return 0;
+
+error_leave:
+	/* Error paths taken while the gate is held exit through here */
+	gatemp_leave(obj->gate, key);
+
+error:
+ printk(KERN_ERR "heapmemmp_free failed status: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(heapmemmp_free);
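+
+/*
+ * Alloc/free sketch (illustrative only; "heap" is a handle obtained from
+ * heapmemmp_create() or heapmemmp_open()). The caller must pass the same
+ * size to heapmemmp_free() that it passed to heapmemmp_alloc(); both
+ * paths round the size up to min_align in the same way, so the rounded
+ * block is returned to the free list intact:
+ *
+ *	void *buf = heapmemmp_alloc(heap, 128, 0);
+ *
+ *	if (buf != NULL) {
+ *		...
+ *		heapmemmp_free(heap, buf, 128);
+ *	}
+ */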
+
+/*
+ * This will get memory statistics
+ */
+void heapmemmp_get_stats(void *hphandle, struct memory_stats *stats)
+{
+ struct heapmemmp_object *object = NULL;
+ struct heapmemmp_obj *obj = NULL;
+ struct heapmemmp_header *cur_header = NULL;
+	int *key = NULL;
+ s32 status = 0;
+
+ if (atomic_cmpmask_and_lt(&(heapmemmp_module->ref_count),
+ HEAPMEMMP_MAKE_MAGICSTAMP(0),
+ HEAPMEMMP_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto error;
+ }
+ if (WARN_ON(hphandle == NULL)) {
+ status = -EINVAL;
+ goto error;
+ }
+ if (WARN_ON(stats == NULL)) {
+ status = -EINVAL;
+ goto error;
+ }
+
+ object = (struct heapmemmp_object *)(hphandle);
+ obj = (struct heapmemmp_obj *)object->obj;
+ if (WARN_ON(obj == NULL)) {
+ status = -EINVAL;
+ goto error;
+ }
+
+ stats->total_size = obj->buf_size;
+ stats->total_free_size = 0; /* determined later */
+ stats->largest_free_size = 0; /* determined later */
+
+ key = gatemp_enter(obj->gate);
+ cur_header = sharedregion_get_ptr(obj->attrs->head.next);
+
+ while (cur_header != NULL) {
+#if 0
+ /* Invalidate cur_header */
+		if (unlikely(obj->cache_enabled)) {
+ Cache_inv(cur_header,
+ sizeof(struct heapmemmp_header),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+ stats->total_free_size += cur_header->size;
+ if (stats->largest_free_size < cur_header->size)
+ stats->largest_free_size = cur_header->size;
+
+		/* This condition is required to avoid assertions during the
+		 * call to sharedregion_get_ptr because at the end of the
+		 * walk cur_header->next becomes
+		 * SHAREDREGION_INVALIDSRPTR.
+		 */
+ if (cur_header->next != SHAREDREGION_INVALIDSRPTR)
+ cur_header = sharedregion_get_ptr(cur_header->next);
+ else
+ cur_header = NULL;
+ }
+
+ gatemp_leave(obj->gate, key);
+error:
+ if (status < 0)
+ printk(KERN_ERR "heapmemmp_get_stats status: %x\n", status);
+}
+EXPORT_SYMBOL(heapmemmp_get_stats);
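+
+/*
+ * Stats query sketch (illustrative only); the struct memory_stats fields
+ * are byte counts:
+ *
+ *	struct memory_stats stats;
+ *
+ *	heapmemmp_get_stats(heap, &stats);
+ *	printk(KERN_INFO "free %u of %u, largest free block %u\n",
+ *		stats.total_free_size, stats.total_size,
+ *		stats.largest_free_size);
+ */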
+
+/*
+ * Indicate whether the heap may block during an alloc or free call
+ */
+bool heapmemmp_isblocking(void *handle)
+{
+ bool isblocking = false;
+ s32 retval = 0;
+
+ if (WARN_ON(handle == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /* TBD: Figure out how to determine whether the gate is blocking */
+ isblocking = true;
+
+ /* retval true Heap blocks during alloc/free calls */
+ /* retval false Heap does not block during alloc/free calls */
+ return isblocking;
+
+error:
+ printk(KERN_ERR "heapmemmp_isblocking status: %x\n", retval);
+ return isblocking;
+}
+EXPORT_SYMBOL(heapmemmp_isblocking);
+
+/*
+ * This will get extended statistics
+ */
+void heapmemmp_get_extended_stats(void *hphandle,
+ struct heapmemmp_extended_stats *stats)
+{
+ int status = 0;
+ struct heapmemmp_object *object = NULL;
+ struct heapmemmp_obj *obj = NULL;
+
+ if (atomic_cmpmask_and_lt(&(heapmemmp_module->ref_count),
+ HEAPMEMMP_MAKE_MAGICSTAMP(0),
+ HEAPMEMMP_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto error;
+ }
+ if (WARN_ON(heapmemmp_module->nameserver == NULL)) {
+ status = -EINVAL;
+ goto error;
+ }
+ if (WARN_ON(hphandle == NULL)) {
+ status = -EINVAL;
+ goto error;
+ }
+ if (WARN_ON(stats == NULL)) {
+ status = -EINVAL;
+ goto error;
+ }
+
+ object = (struct heapmemmp_object *)hphandle;
+ obj = (struct heapmemmp_obj *)object->obj;
+ if (WARN_ON(obj == NULL)) {
+ status = -EINVAL;
+ goto error;
+ }
+
+ stats->buf = obj->buf;
+ stats->size = obj->buf_size;
+
+ return;
+
+error:
+ printk(KERN_ERR "heapmemmp_get_extended_stats status: %x\n",
+ status);
+}
+EXPORT_SYMBOL(heapmemmp_get_extended_stats);
+
+/*
+ * This will get the amount of shared memory required for
+ * the creation of each instance
+ */
+int heapmemmp_shared_mem_req(const struct heapmemmp_params *params)
+{
+ int mem_req = 0;
+ s32 retval = 0;
+ u32 region_id;
+ u32 min_align;
+
+ if (params == NULL) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (params->shared_addr == NULL)
+ region_id = params->region_id;
+ else
+ region_id = sharedregion_get_id(params->shared_addr);
+
+ if (region_id == SHAREDREGION_INVALIDREGIONID) {
+ retval = -EFAULT;
+ goto error;
+ }
+
+ min_align = sizeof(struct heapmemmp_header);
+ if (sharedregion_get_cache_line_size(region_id) > min_align)
+ min_align = sharedregion_get_cache_line_size(region_id);
+
+ /* Add size of heapmemmp Attrs */
+ mem_req = ROUND_UP(sizeof(struct heapmemmp_attrs), min_align);
+
+ /* Add the buffer size */
+ mem_req += params->shared_buf_size;
+
+ /* Make sure the size is a multiple of min_align (round down) */
+ mem_req = (mem_req / min_align) * min_align;
+
+ return mem_req;
+
+error:
+ printk(KERN_ERR "heapmemmp_shared_mem_req retval: %x\n",
+ retval);
+ return mem_req;
+}
+EXPORT_SYMBOL(heapmemmp_shared_mem_req);
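+
+/*
+ * Worked example (assuming a 128-byte cache line for the region, so
+ * min_align = 128, and sizeof(struct heapmemmp_attrs) <= 128): for
+ * shared_buf_size = 0x1000,
+ *
+ *	mem_req = ROUND_UP(sizeof(struct heapmemmp_attrs), 128) + 0x1000
+ *	        = 0x80 + 0x1000 = 0x1080
+ *
+ * which is already a multiple of 128, so the final round-down leaves
+ * 0x1080 bytes of shared memory required.
+ */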
+
+
+/*
+ * Open existing heapmemmp based on address
+ */
+int
+heapmemmp_open_by_addr(void *shared_addr, void **handle_ptr)
+{
+ s32 retval = 0;
+ bool done_flag = false;
+ struct heapmemmp_attrs *attrs = NULL;
+ struct list_head *elem = NULL;
+ u16 id = 0;
+ struct heapmemmp_params params;
+ struct heapmemmp_obj *obj = NULL;
+
+ BUG_ON(handle_ptr == NULL);
+ BUG_ON(shared_addr == NULL);
+
+ if (unlikely(atomic_cmpmask_and_lt(&(heapmemmp_module->ref_count),
+ HEAPMEMMP_MAKE_MAGICSTAMP(0),
+ HEAPMEMMP_MAKE_MAGICSTAMP(1))
+ == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (unlikely(handle_ptr == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /* First check in the local list */
+	list_for_each(elem, (struct list_head *)&heapmemmp_module->obj_list) {
+		obj = (struct heapmemmp_obj *)elem;
+		if (obj->params.shared_addr == shared_addr) {
+ retval = mutex_lock_interruptible(heapmemmp_module->
+ local_lock);
+ if (retval < 0)
+ goto error;
+
+ if (obj->owner.proc_id == multiproc_self())
+ obj->owner.open_count++;
+
+ mutex_unlock(heapmemmp_module->local_lock);
+ *handle_ptr = obj->top;
+ done_flag = true;
+ break;
+ }
+ }
+
+ /* If not already existing locally, create object locally for open. */
+ if (unlikely(done_flag == false)) {
+ heapmemmp_params_init(&params);
+ params.shared_addr = shared_addr;
+ attrs = (struct heapmemmp_attrs *) shared_addr;
+ id = sharedregion_get_id(shared_addr);
+#if 0
+ if (unlikely(sharedregion_is_cache_enabled(id))) {
+ Cache_inv(attrs,
+ sizeof(struct heapmemmp_attrs),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+ if (unlikely(attrs->status != HEAPMEMMP_CREATED)) {
+ *handle_ptr = NULL;
+ retval = -ENOENT;
+ goto error;
+ }
+
+ retval = _heapmemmp_create(handle_ptr, &params, false);
+
+ if (unlikely(retval < 0))
+ goto error;
+ }
+ return 0;
+
+error:
+ printk(KERN_ERR "heapmemmp_open_by_addr status: %x\n",
+ retval);
+
+ return retval;
+}
+
+
+/* =============================================================================
+ * Internal functions
+ * =============================================================================
+ */
+/*
+ * Slice and dice the buffer up into the correct size blocks and
+ * add to the freelist.
+ */
+int heapmemmp_post_init(struct heapmemmp_object *handle)
+{
+ s32 retval = 0;
+ struct heapmemmp_obj *obj = NULL;
+ struct heapmemmp_object *region_heap = NULL;
+ struct heapmemmp_params params;
+
+ BUG_ON(handle == NULL);
+
+ obj = (struct heapmemmp_obj *) handle->obj;
+ if (obj->attrs == NULL) {
+ heapmemmp_params_init(&params);
+ params.region_id = obj->region_id;
+ params.shared_buf_size = obj->buf_size;
+ obj->alloc_size = heapmemmp_shared_mem_req(&params);
+ region_heap = sharedregion_get_heap(obj->region_id);
+
+ if (region_heap == NULL) {
+ retval = -EFAULT;
+ goto error;
+ }
+
+ obj->attrs = sl_heap_alloc(region_heap,
+ obj->alloc_size,
+ obj->min_align);
+
+ if (obj->attrs == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+ obj->buf = (void *)((u32)obj->attrs +
+ sizeof(struct heapmemmp_attrs));
+ }
+
+ /* Round obj->buf up by obj->min_align */
+ obj->buf = (void *) ROUND_UP((u32)obj->buf, (obj->min_align));
+
+ if (unlikely(obj->buf_size
+ < sharedregion_get_cache_line_size(obj->region_id))) {
+ retval = -EFAULT;
+ goto error;
+ }
+
+ /* Make sure the size is a multiple of obj->min_align */
+ obj->buf_size = (obj->buf_size / obj->min_align) * obj->min_align;
+
+ obj->attrs->gatemp_addr = gatemp_get_shared_addr(obj->gate);
+ obj->attrs->buf_ptr = sharedregion_get_srptr(obj->buf, obj->region_id);
+
+ /* Store computed obj->buf_size in shared mem */
+ obj->attrs->head.size = obj->buf_size;
+
+ /* Place the initial header */
+ heapmemmp_restore((struct heapmemmp_object *) handle);
+
+ /* Last thing, set the status */
+ obj->attrs->status = HEAPMEMMP_CREATED;
+#if 0
+	if (unlikely(obj->cache_enabled))
+		Cache_wbInv((void *) obj->attrs,
+			sizeof(struct heapmemmp_attrs),
+ Cache_Type_ALL,
+ true);
+#endif
+
+ return 0;
+error:
+ printk(KERN_ERR "heapmemmp_post_init status: %x\n",
+ retval);
+ return retval;
+}
+
+
+/*
+ * Restore an instance to its original created state.
+ */
+void
+heapmemmp_restore(void *handle)
+{
+ struct heapmemmp_header *beg_header = NULL;
+ struct heapmemmp_obj *obj = NULL;
+
+ obj = ((struct heapmemmp_object *) handle)->obj;
+ BUG_ON(obj == NULL);
+
+ /*
+ * Fill in the top of the memory block
+ * next: pointer will be NULL (end of the list)
+ * size: size of this block
+	 * NOTE: no need to Cache_inv because obj->attrs->buf_ptr
+ * should be const
+ */
+ beg_header = (struct heapmemmp_header *) obj->buf;
+ beg_header->next = (u32 *)SHAREDREGION_INVALIDSRPTR;
+ beg_header->size = obj->buf_size;
+
+ obj->attrs->head.next = (u32 *)obj->attrs->buf_ptr;
+#if 0
+	if (unlikely(obj->cache_enabled)) {
+		Cache_wbInv((void *)&(obj->attrs->head),
+			sizeof(struct heapmemmp_header),
+			Cache_Type_ALL,
+			false);
+		Cache_wbInv(beg_header,
+ sizeof(struct heapmemmp_header),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+}
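+
+/*
+ * After restore, the shared buffer holds exactly one free block and the
+ * free list head in the shared attrs points at it (illustrative layout):
+ *
+ *	attrs->head.next ---> obj->buf: [ next = INVALIDSRPTR ]
+ *	                                [ size = obj->buf_size ]
+ *	                                [ ...free memory...    ]
+ */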
diff --git a/drivers/dsp/syslink/multicore_ipc/heapmemmp_ioctl.c b/drivers/dsp/syslink/multicore_ipc/heapmemmp_ioctl.c
new file mode 100644
index 000000000000..56a0828c2098
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/heapmemmp_ioctl.c
@@ -0,0 +1,478 @@
+/*
+ * heapmemmp_ioctl.c
+ *
+ * The heapmemmp module manages variable size buffers that can be used
+ * in a multiprocessor system with shared memory.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <heap.h>
+#include <heapmemmp_ioctl.h>
+#include <sharedregion.h>
+
+/*
+ * ======== heapmemmp_ioctl_alloc ========
+ * Purpose:
+ *  This is the ioctl interface to the heapmemmp_alloc function
+ */
+static int heapmemmp_ioctl_alloc(struct heapmemmp_cmd_args *cargs)
+{
+ u32 *block_srptr = SHAREDREGION_INVALIDSRPTR;
+ void *block;
+ s32 index;
+ s32 status = 0;
+
+ block = heapmemmp_alloc(cargs->args.alloc.handle,
+ cargs->args.alloc.size,
+ cargs->args.alloc.align);
+	if (block != NULL) {
+		index = sharedregion_get_id(block);
+		BUG_ON(index == SHAREDREGION_INVALIDREGIONID);
+		block_srptr = sharedregion_get_srptr(block, index);
+		BUG_ON(block_srptr == SHAREDREGION_INVALIDSRPTR);
+	}
+	/* On failure, heapmemmp_alloc returns a NULL pointer. That is not
+	 * treated as an ioctl error here; whatever the heapmem module
+	 * returned is passed back to the caller, so the ioctl succeeds
+	 * even though the underlying allocation may have failed.
+	 */
+	cargs->args.alloc.block_srptr = block_srptr;
+	cargs->api_status = 0;
+ return status;
+}
+
+/*
+ * ======== heapmemmp_ioctl_free ========
+ * Purpose:
+ *  This is the ioctl interface to the heapmemmp_free function
+ */
+static int heapmemmp_ioctl_free(struct heapmemmp_cmd_args *cargs)
+{
+ char *block;
+
+ block = sharedregion_get_ptr(cargs->args.free.block_srptr);
+ BUG_ON(block == NULL);
+ cargs->api_status = heapmemmp_free(cargs->args.free.handle, block,
+ cargs->args.free.size);
+ return 0;
+}
+
+/*
+ * ======== heapmemmp_ioctl_params_init ========
+ * Purpose:
+ *  This is the ioctl interface to the heapmemmp_params_init function
+ */
+static int heapmemmp_ioctl_params_init(struct heapmemmp_cmd_args *cargs)
+{
+ struct heapmemmp_params params;
+ s32 status = 0;
+ u32 size;
+
+ heapmemmp_params_init(&params);
+ cargs->api_status = 0;
+ size = copy_to_user(cargs->args.params_init.params, &params,
+ sizeof(struct heapmemmp_params));
+ if (size)
+ status = -EFAULT;
+
+ return status;
+}
+
+/*
+ * ======== heapmemmp_ioctl_create ========
+ * Purpose:
+ *  This is the ioctl interface to the heapmemmp_create function
+ */
+static int heapmemmp_ioctl_create(struct heapmemmp_cmd_args *cargs)
+{
+ struct heapmemmp_params params;
+ s32 status = 0;
+ u32 size;
+ void *handle = NULL;
+
+ size = copy_from_user(&params, cargs->args.create.params,
+ sizeof(struct heapmemmp_params));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ if (cargs->args.create.name_len > 0) {
+		/* Allocate one extra byte for the terminating '\0' */
+		params.name = kmalloc(cargs->args.create.name_len + 1,
+					GFP_KERNEL);
+		if (params.name == NULL) {
+			status = -ENOMEM;
+			goto exit;
+		}
+
+		size = copy_from_user(params.name,
+					cargs->args.create.params->name,
+					cargs->args.create.name_len);
+		params.name[cargs->args.create.name_len] = '\0';
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+ }
+
+ params.shared_addr = sharedregion_get_ptr((u32 *)
+ cargs->args.create.shared_addr_srptr);
+ params.gate = cargs->args.create.knl_gate;
+ handle = heapmemmp_create(&params);
+ cargs->args.create.handle = handle;
+ cargs->api_status = 0;
+
+name_from_usr_error:
+ if (cargs->args.create.name_len > 0)
+ kfree(params.name);
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== heapmemmp_ioctl_delete ========
+ * Purpose:
+ *  This is the ioctl interface to the heapmemmp_delete function
+ */
+static int heapmemmp_ioctl_delete(struct heapmemmp_cmd_args *cargs)
+{
+ cargs->api_status = heapmemmp_delete(&cargs->args.delete.handle);
+ return 0;
+}
+
+/*
+ * ======== heapmemmp_ioctl_open ========
+ * Purpose:
+ *  This is the ioctl interface to the heapmemmp_open function
+ */
+static int heapmemmp_ioctl_open(struct heapmemmp_cmd_args *cargs)
+{
+ s32 status = 0;
+ u32 size = 0;
+ void *handle = NULL;
+ char *name = NULL;
+
+ if (cargs->args.open.name_len > 0) {
+		/* Allocate one extra byte for the terminating '\0' */
+		name = kmalloc(cargs->args.open.name_len + 1, GFP_KERNEL);
+		if (name == NULL) {
+			status = -ENOMEM;
+			goto exit;
+		}
+
+		size = copy_from_user(name, cargs->args.open.name,
+					cargs->args.open.name_len);
+		if (size) {
+			kfree(name);
+			status = -EFAULT;
+			goto exit;
+		}
+		name[cargs->args.open.name_len] = '\0';
+	}
+
+	/* Use the kernel-side copy of the name, not the user pointer */
+	cargs->api_status = heapmemmp_open(name, &handle);
+ cargs->args.open.handle = handle;
+
+ if (cargs->args.open.name_len > 0)
+ kfree(name);
+exit:
+ return status;
+}
+
+/*
+ * ======== heapmemmp_ioctl_open_by_addr ========
+ * Purpose:
+ *  This is the ioctl interface to the heapmemmp_open_by_addr function
+ */
+static int heapmemmp_ioctl_open_by_addr(struct heapmemmp_cmd_args *cargs)
+{
+ void *handle = NULL;
+
+ cargs->api_status = heapmemmp_open_by_addr((void *)
+ cargs->args.open_by_addr.shared_addr_srptr,
+ &handle);
+ cargs->args.open_by_addr.handle = handle;
+
+ return 0;
+}
+
+
+/*
+ * ======== heapmemmp_ioctl_close ========
+ * Purpose:
+ *  This is the ioctl interface to the heapmemmp_close function
+ */
+static int heapmemmp_ioctl_close(struct heapmemmp_cmd_args *cargs)
+{
+ cargs->api_status = heapmemmp_close(cargs->args.close.handle);
+ return 0;
+}
+
+/*
+ * ======== heapmemmp_ioctl_shared_mem_req ========
+ * Purpose:
+ *  This is the ioctl interface to the heapmemmp_shared_mem_req function
+ */
+static int heapmemmp_ioctl_shared_mem_req(struct heapmemmp_cmd_args *cargs)
+{
+ struct heapmemmp_params params;
+ s32 status = 0;
+ ulong size;
+ u32 bytes;
+
+ size = copy_from_user(&params, cargs->args.shared_mem_req.params,
+ sizeof(struct heapmemmp_params));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+ if (params.shared_addr != NULL) {
+ params.shared_addr = sharedregion_get_ptr(
+ cargs->args.shared_mem_req.shared_addr_srptr);
+ }
+ bytes = heapmemmp_shared_mem_req(&params);
+ cargs->args.shared_mem_req.bytes = bytes;
+ cargs->api_status = 0;
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== heapmemmp_ioctl_get_config ========
+ * Purpose:
+ *  This is the ioctl interface to the heapmemmp_get_config function
+ */
+static int heapmemmp_ioctl_get_config(struct heapmemmp_cmd_args *cargs)
+{
+ struct heapmemmp_config config;
+ s32 status = 0;
+ ulong size;
+
+ cargs->api_status = heapmemmp_get_config(&config);
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct heapmemmp_config));
+ if (size)
+ status = -EFAULT;
+
+ return status;
+}
+
+/*
+ * ======== heapmemmp_ioctl_setup ========
+ * Purpose:
+ *  This is the ioctl interface to the heapmemmp_setup function
+ */
+static int heapmemmp_ioctl_setup(struct heapmemmp_cmd_args *cargs)
+{
+ struct heapmemmp_config config;
+ s32 status = 0;
+ ulong size;
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct heapmemmp_config));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = heapmemmp_setup(&config);
+
+exit:
+ return status;
+}
+/*
+ * ======== heapmemmp_ioctl_destroy ========
+ * Purpose:
+ *  This is the ioctl interface to the heapmemmp_destroy function
+ */
+static int heapmemmp_ioctl_destroy(struct heapmemmp_cmd_args *cargs)
+{
+ cargs->api_status = heapmemmp_destroy();
+ return 0;
+}
+
+
+/*
+ * ======== heapmemmp_ioctl_get_stats ========
+ * Purpose:
+ *  This is the ioctl interface to the heapmemmp_get_stats function
+ */
+static int heapmemmp_ioctl_get_stats(struct heapmemmp_cmd_args *cargs)
+{
+ struct memory_stats stats;
+ s32 status = 0;
+ ulong size;
+
+ heapmemmp_get_stats(cargs->args.get_stats.handle, &stats);
+ cargs->api_status = 0;
+
+ size = copy_to_user(cargs->args.get_stats.stats, &stats,
+ sizeof(struct memory_stats));
+ if (size)
+ status = -EFAULT;
+
+ return status;
+}
+
+/*
+ * ======== heapmemmp_ioctl_get_extended_stats ========
+ * Purpose:
+ *  This is the ioctl interface to the heapmemmp_get_extended_stats function
+ */
+static int heapmemmp_ioctl_get_extended_stats(struct heapmemmp_cmd_args *cargs)
+{
+ struct heapmemmp_extended_stats stats;
+ s32 status = 0;
+ ulong size;
+
+ heapmemmp_get_extended_stats(cargs->args.get_extended_stats.handle,
+ &stats);
+ cargs->api_status = 0;
+
+ size = copy_to_user(cargs->args.get_extended_stats.stats, &stats,
+ sizeof(struct heapmemmp_extended_stats));
+ if (size)
+ status = -EFAULT;
+
+ return status;
+}
+
+/*
+ * ======== heapmemmp_ioctl_restore ========
+ * Purpose:
+ *  This is the ioctl interface to the heapmemmp_restore function
+ */
+static int heapmemmp_ioctl_restore(struct heapmemmp_cmd_args *cargs)
+{
+ heapmemmp_restore(cargs->args.restore.handle);
+ cargs->api_status = 0;
+ return 0;
+}
+
+/*
+ * ======== heapmemmp_ioctl ========
+ * Purpose:
+ *  This is the ioctl dispatcher for the heapmemmp module
+ */
+int heapmemmp_ioctl(struct inode *pinode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ s32 status = 0;
+ s32 size = 0;
+ struct heapmemmp_cmd_args __user *uarg =
+ (struct heapmemmp_cmd_args __user *)args;
+ struct heapmemmp_cmd_args cargs;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+ if (status) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg,
+ sizeof(struct heapmemmp_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_HEAPMEMMP_ALLOC:
+ status = heapmemmp_ioctl_alloc(&cargs);
+ break;
+
+ case CMD_HEAPMEMMP_FREE:
+ status = heapmemmp_ioctl_free(&cargs);
+ break;
+
+ case CMD_HEAPMEMMP_PARAMS_INIT:
+ status = heapmemmp_ioctl_params_init(&cargs);
+ break;
+
+ case CMD_HEAPMEMMP_CREATE:
+ status = heapmemmp_ioctl_create(&cargs);
+ break;
+
+ case CMD_HEAPMEMMP_DELETE:
+ status = heapmemmp_ioctl_delete(&cargs);
+ break;
+
+ case CMD_HEAPMEMMP_OPEN:
+ status = heapmemmp_ioctl_open(&cargs);
+ break;
+
+ case CMD_HEAPMEMMP_OPENBYADDR:
+ status = heapmemmp_ioctl_open_by_addr(&cargs);
+ break;
+
+ case CMD_HEAPMEMMP_CLOSE:
+ status = heapmemmp_ioctl_close(&cargs);
+ break;
+
+ case CMD_HEAPMEMMP_SHAREDMEMREQ:
+ status = heapmemmp_ioctl_shared_mem_req(&cargs);
+ break;
+
+ case CMD_HEAPMEMMP_GETCONFIG:
+ status = heapmemmp_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_HEAPMEMMP_SETUP:
+ status = heapmemmp_ioctl_setup(&cargs);
+ break;
+
+ case CMD_HEAPMEMMP_DESTROY:
+ status = heapmemmp_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_HEAPMEMMP_GETSTATS:
+ status = heapmemmp_ioctl_get_stats(&cargs);
+ break;
+
+ case CMD_HEAPMEMMP_GETEXTENDEDSTATS:
+ status = heapmemmp_ioctl_get_extended_stats(&cargs);
+ break;
+
+ case CMD_HEAPMEMMP_RESTORE:
+ status = heapmemmp_ioctl_restore(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ status = -ENOTTY;
+ break;
+ }
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs,
+ sizeof(struct heapmemmp_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ return status;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/ipc.c b/drivers/dsp/syslink/multicore_ipc/ipc.c
new file mode 100644
index 000000000000..7bcc697ce8b7
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/ipc.c
@@ -0,0 +1,1550 @@
+/*
+ * ipc.c
+ *
+ * This module is primarily used to configure IPC-wide settings and
+ * initialize IPC at runtime
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+
+/* Standard headers */
+#include <linux/types.h>
+#include <linux/module.h>
+
+#include <syslink/atomic_linux.h>
+
+/* Module headers */
+#include <multiproc.h>
+#include <ipc.h>
+#include <platform.h>
+
+#include <gatemp.h>
+#include <sharedregion.h>
+#include <notify.h>
+#include <notify_ducatidriver.h>
+#include <notify_setup_proxy.h>
+
+#include <heap.h>
+#include <heapbufmp.h>
+#include <heapmemmp.h>
+
+#include <messageq.h>
+#include <nameserver.h>
+#include <nameserver_remotenotify.h>
+
+/* Ipu Power Management Header (ipu_pm) */
+#include "../ipu_pm/ipu_pm.h"
+/* =============================================================================
+ * Macros
+ * =============================================================================
+ */
+/* Macro to make a correct module magic number with ref_count */
+#define IPC_MAKE_MAGICSTAMP(x) ((IPC_MODULEID << 16u) | (x))
+
+/* flag for starting processor synchronization */
+#define IPC_PROCSYNCSTART 1
+
+/* flag for finishing processor synchronization */
+#define IPC_PROCSYNCFINISH 2
+
+#define ROUND_UP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
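+/* e.g. ROUND_UP(0x105, 0x80) == 0x180; b must be a power of two */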
+
+/* =============================================================================
+ * Enums & Structures
+ * =============================================================================
+ */
+
+/* The structure used for reserving memory in SharedRegion */
+struct ipc_reserved {
+ VOLATILE u32 started_key;
+ u32 *notify_sr_ptr;
+ u32 *nsrn_sr_ptr;
+ u32 *transport_sr_ptr;
+ u32 *ipu_pm_sr_ptr;
+ u32 *config_list_head;
+};
+
+
+/* head of the config list */
+struct ipc_config_head {
+ VOLATILE u32 first;
+ /* Address of first config entry */
+};
+
+
+/*
+ * This structure captures configuration details of a module/instance
+ * written by a slave to synchronize with a remote slave/HOST
+ */
+struct ipc_config_entry {
+ VOLATILE u32 remote_proc_id;
+ /* Remote processor identifier */
+ VOLATILE u32 local_proc_id;
+ /* Config Entry owner processor identifier */
+ VOLATILE u32 tag;
+ /* Unique tag to distinguish config from other config entries */
+ VOLATILE u32 size;
+ /* Size of the config pointer */
+ VOLATILE u32 next;
+ /* Address of next config entry (In SRPtr format) */
+};
+
+/*
+ * This structure defines the fields that are to be configured
+ * between the executing processor and a remote processor.
+ */
+struct ipc_entry {
+ u16 remote_proc_id; /* the remote processor id */
+ bool setup_notify; /* whether to setup Notify */
+ bool setup_messageq; /* whether to setup messageq */
+ bool setup_ipu_pm; /* whether to setup ipu_pm */
+};
+
+/* Ipc instance structure. */
+struct ipc_proc_entry {
+ void *local_config_list;
+ void *remote_config_list;
+ void *user_obj;
+ bool slave;
+ struct ipc_entry entry;
+ bool is_attached;
+};
+
+
+/* Module state structure */
+struct ipc_module_state {
+ s32 ref_count;
+ atomic_t start_ref_count;
+ void *ipc_shared_addr;
+ void *gatemp_shared_addr;
+ enum ipc_proc_sync proc_sync;
+ struct ipc_config cfg;
+ struct ipc_proc_entry proc_entry[MULTIPROC_MAXPROCESSORS];
+};
+
+
+/* =============================================================================
+ * Forward declaration
+ * =============================================================================
+ */
+/*
+ * ======== ipc_get_master_addr() ========
+ */
+static void *ipc_get_master_addr(u16 remote_proc_id, void *shared_addr);
+
+/*
+ * ======== ipc_get_region0_reserved_size ========
+ * Returns the amount of memory to be reserved for Ipc in SharedRegion 0.
+ *
+ * This is used for synchronizing processors.
+ */
+static u32 ipc_get_region0_reserved_size(void);
+
+/*
+ * ======== ipc_get_slave_addr() ========
+ */
+static void *ipc_get_slave_addr(u16 remote_proc_id, void *shared_addr);
+
+/*
+ * ======== ipc_proc_sync_start ========
+ * Starts the process of synchronizing processors.
+ *
+ * Shared memory in region 0 will be used. The processor which owns
+ * SharedRegion 0 writes its reserve memory address in region 0
+ * to let the other processors know it has started. It then spins
+ * until the other processors start. The other processors write their
+ * reserve memory address in region 0 to let the owner processor
+ * know they've started. The other processors then spin until the
+ * owner processor writes to let them know it has finished the process
+ * of synchronization before continuing.
+ */
+static int ipc_proc_sync_start(u16 remote_proc_id, void *shared_addr);
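+
+/*
+ * Illustrative sequence of the handshake described above, for the owner
+ * of SharedRegion 0 and one other processor:
+ *
+ *	owner: writes its reserved slot, spins until other has written
+ *	other: writes its reserved slot, spins until owner signals done
+ *	owner: ipc_proc_sync_finish() -> other stops spinning, both proceed
+ */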
+
+/*
+ * ======== ipc_proc_sync_finish ========
+ * Finishes the process of synchronizing processors.
+ *
+ * Each processor writes its reserve memory address in SharedRegion 0
+ * to let the other processors know it has finished the process of
+ * synchronization.
+ */
+static int ipc_proc_sync_finish(u16 remote_proc_id, void *shared_addr);
+
+/*
+ * ======== ipc_reserved_size_per_proc ========
+ * The amount of memory required to be reserved per processor.
+ */
+static u32 ipc_reserved_size_per_proc(void);
+
+/* TODO: figure these out */
+#define gate_enter_system() 0
+#define gate_leave_system(key) {}
+
+/* =============================================================================
+ * Globals
+ * =============================================================================
+ */
+static struct ipc_module_state ipc_module_state = {
+ .proc_sync = IPC_PROCSYNC_ALL,
+ .ref_count = 0,
+};
+static struct ipc_module_state *ipc_module = &ipc_module_state;
+
+/* =============================================================================
+ * APIs
+ * =============================================================================
+ */
+/*
+ * ========== ipc_attach ==========
+ * attaches to a remote processor
+ */
+int ipc_attach(u16 remote_proc_id)
+{
+ int status = 0;
+#if 0
+ u32 reserved_size = ipc_reserved_size_per_proc();
+ bool cache_enabled = sharedregion_is_cache_enabled(0);
+#endif
+ void *notify_shared_addr;
+ void *msgq_shared_addr;
+ void *nsrn_shared_addr;
+ void *ipu_pm_shared_addr;
+ u32 notify_mem_req;
+ VOLATILE struct ipc_reserved *slave;
+ struct ipc_proc_entry *ipc;
+
+ /* determine if self is master or slave */
+ if (multiproc_self() < remote_proc_id)
+ ipc_module->proc_entry[remote_proc_id].slave = true;
+ else
+ ipc_module->proc_entry[remote_proc_id].slave = false;
+
+ /* determine the slave's slot */
+ slave = ipc_get_slave_addr(remote_proc_id, ipc_module->ipc_shared_addr);
+#if 0
+ if (cache_enabled)
+ Cache_inv((void *)slave, reserved_size, Cache_Type_ALL, true);
+#endif
+	/* get the attach parameters associated with remote_proc_id */
+ ipc = &(ipc_module->proc_entry[remote_proc_id]);
+
+ /* Synchronize the processors. */
+ status = ipc_proc_sync_start(remote_proc_id, ipc_module->
+ ipc_shared_addr);
+
+ if (status < 0)
+ printk(KERN_ERR "ipc_attach : ipc_proc_sync_start "
+ "failed [0x%x]\n", status);
+ else
+ printk(KERN_ERR "ipc_proc_sync_start : status [0x%x]\n",
+ status);
+
+
+ if (status >= 0) {
+		/* must be called before sharedregion_attach */
+ status = gatemp_attach(remote_proc_id, ipc_module->
+ gatemp_shared_addr);
+ if (status < 0)
+ printk(KERN_ERR "ipc_attach : gatemp_attach "
+ "failed [0x%x]\n", status);
+ else
+ printk(KERN_ERR "gatemp_attach : status [0x%x]\n",
+ status);
+
+ }
+
+ /* retrieves the SharedRegion Heap handles */
+ if (status >= 0) {
+ status = sharedregion_attach(remote_proc_id);
+ if (status < 0)
+ printk(KERN_ERR "ipc_attach : sharedregion_attach "
+ "failed [0x%x]\n", status);
+ else
+ printk(KERN_ERR "sharedregion_attach : status "
+ "[0x%x]\n", status);
+ }
+
+	/* attach Notify if it is not yet attached and internal setup
+	 * was requested */
+ if (status >= 0 && !notify_is_registered(remote_proc_id, 0) &&
+ (ipc->entry.setup_notify)) {
+ /* call notify_attach */
+ if (ipc_module->proc_entry[remote_proc_id].slave) {
+ notify_mem_req = notify_shared_mem_req(remote_proc_id,
+ ipc_module->ipc_shared_addr);
+ notify_shared_addr = sl_heap_alloc(
+ sharedregion_get_heap(0),
+ notify_mem_req,
+ sharedregion_get_cache_line_size(0));
+ memset(notify_shared_addr, 0, notify_mem_req);
+ slave->notify_sr_ptr = sharedregion_get_srptr(
+ notify_shared_addr, 0);
+ if (slave->notify_sr_ptr ==
+ SHAREDREGION_INVALIDSRPTR) {
+ status = IPC_E_FAIL;
+ printk(KERN_ERR "ipc_attach : "
+ "sharedregion_get_srptr "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR
+ "sharedregion_get_srptr : "
+ "status [0x%x]\n", status);
+ }
+ } else {
+ notify_shared_addr = sharedregion_get_ptr(slave->
+ notify_sr_ptr);
+ if (notify_shared_addr == NULL) {
+ status = IPC_E_FAIL;
+ printk(KERN_ERR "ipc_attach : "
+ "sharedregion_get_ptr "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR
+ "sharedregion_get_ptr : "
+ "status [0x%x]\n", status);
+ }
+ }
+
+ if (status >= 0) {
+ status = notify_attach(remote_proc_id,
+ notify_shared_addr);
+ if (status < 0)
+ printk(KERN_ERR "ipc_attach : "
+ "notify_attach "
+ "failed [0x%x]\n", status);
+ else
+ printk(KERN_ERR
+ "notify_attach : "
+ "status [0x%x]\n", status);
+ }
+ }
+
+	/* Must come after Notify attach because ipu_pm depends on Notify */
+ if (status >= 0 && ipc->entry.setup_notify && ipc->entry.setup_ipu_pm) {
+ if (ipc_module->proc_entry[remote_proc_id].slave) {
+ ipu_pm_shared_addr = sl_heap_alloc
+ (sharedregion_get_heap(0),
+ ipu_pm_mem_req(NULL),
+ sharedregion_get_cache_line_size(0));
+
+ slave->ipu_pm_sr_ptr =
+ sharedregion_get_srptr(ipu_pm_shared_addr, 0);
+ if (slave->ipu_pm_sr_ptr ==
+ SHAREDREGION_INVALIDSRPTR) {
+ status = IPC_E_FAIL;
+ printk(KERN_ERR "ipc_attach : "
+ "sharedregion_get_srptr "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR
+ "sharedregion_get_srptr : "
+ "status [0x%x]\n", status);
+ }
+ } else {
+ ipu_pm_shared_addr =
+ sharedregion_get_ptr(slave->ipu_pm_sr_ptr);
+ if (ipu_pm_shared_addr == NULL) {
+ status = IPC_E_FAIL;
+ printk(KERN_ERR "ipc_attach : "
+ "sharedregion_get_ptr "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR
+ "sharedregion_get_ptr : "
+ "status [0x%x]\n", status);
+ }
+ }
+
+ if (status >= 0) {
+			/* attach ipu_pm to the remote processor */
+ status = ipu_pm_attach(remote_proc_id,
+ ipu_pm_shared_addr);
+
+ if (status < 0)
+ printk(KERN_ERR "ipc_attach : "
+ "ipu_pm_attach "
+ "failed [0x%x]\n", status);
+ else
+ printk(KERN_ERR
+ "ipu_pm_attach : "
+ "status [0x%x]\n", status);
+ }
+ }
+	/* Must come after gatemp_start because it depends on the default
+	 * GateMP */
+ if (status >= 0 && ipc->entry.setup_notify) {
+ if (ipc_module->proc_entry[remote_proc_id].slave) {
+ nsrn_shared_addr = sl_heap_alloc(
+ sharedregion_get_heap(0),
+ nameserver_remotenotify_shared_mem_req(
+ NULL),
+ sharedregion_get_cache_line_size(0));
+
+ slave->nsrn_sr_ptr =
+ sharedregion_get_srptr(nsrn_shared_addr, 0);
+ if (slave->nsrn_sr_ptr == SHAREDREGION_INVALIDSRPTR) {
+ status = IPC_E_FAIL;
+ printk(KERN_ERR "ipc_attach : "
+ "sharedregion_get_srptr "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR
+ "sharedregion_get_srptr : "
+ "status [0x%x]\n", status);
+ }
+ } else {
+ nsrn_shared_addr =
+ sharedregion_get_ptr(slave->nsrn_sr_ptr);
+ if (nsrn_shared_addr == NULL) {
+ status = IPC_E_FAIL;
+ printk(KERN_ERR "ipc_attach : "
+ "sharedregion_get_ptr "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR
+ "sharedregion_get_ptr : "
+ "status [0x%x]\n", status);
+ }
+ }
+
+ if (status >= 0) {
+ /* create the nameserver_remotenotify instances */
+ status = nameserver_remotenotify_attach(remote_proc_id,
+ nsrn_shared_addr);
+
+ if (status < 0)
+ printk(KERN_ERR "ipc_attach : "
+ "nameserver_remotenotify_attach "
+ "failed [0x%x]\n", status);
+ else
+ printk(KERN_ERR
+ "nameserver_remotenotify_attach : "
+ "status [0x%x]\n", status);
+ }
+ }
+
+	/* Must come after gatemp_start because it depends on the default
+	 * GateMP */
+ if (status >= 0 && ipc->entry.setup_messageq) {
+ if (ipc_module->proc_entry[remote_proc_id].slave) {
+ msgq_shared_addr = sl_heap_alloc
+ (sharedregion_get_heap(0),
+ messageq_shared_mem_req(ipc_module->
+ ipc_shared_addr),
+ sharedregion_get_cache_line_size(0));
+
+ slave->transport_sr_ptr =
+ sharedregion_get_srptr(msgq_shared_addr, 0);
+ if (slave->transport_sr_ptr ==
+ SHAREDREGION_INVALIDSRPTR) {
+ status = IPC_E_FAIL;
+ printk(KERN_ERR "ipc_attach : "
+ "sharedregion_get_srptr "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR
+ "sharedregion_get_srptr : "
+ "status [0x%x]\n", status);
+ }
+ } else {
+ msgq_shared_addr = sharedregion_get_ptr(slave->
+ transport_sr_ptr);
+ if (msgq_shared_addr == NULL) {
+ status = IPC_E_FAIL;
+ printk(KERN_ERR "ipc_attach : "
+ "sharedregion_get_ptr "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR
+ "sharedregion_get_ptr : "
+ "status [0x%x]\n", status);
+ }
+ }
+
+ if (status >= 0) {
+ /* create the messageq Transport instances */
+ status = messageq_attach(remote_proc_id,
+ msgq_shared_addr);
+ if (status < 0)
+ printk(KERN_ERR "ipc_attach : "
+ "messageq_attach "
+ "failed [0x%x]\n", status);
+ else
+ printk(KERN_ERR
+ "messageq_attach : "
+ "status [0x%x]\n", status);
+ }
+ }
+#if 0
+ if (cache_enabled) {
+ if (ipc_module->proc_entry[remote_proc_id].slave)
+ Cache_wbInv((void *)slave, reserved_size,
+ Cache_Type_ALL, true);
+ }
+#endif
+
+ if (status >= 0) {
+ /* Finish the processor synchronization */
+ status = ipc_proc_sync_finish(remote_proc_id,
+ ipc_module->ipc_shared_addr);
+ if (status < 0)
+ printk(KERN_ERR "ipc_attach : "
+ "ipc_proc_sync_finish "
+ "failed [0x%x]\n", status);
+ else
+ printk(KERN_ERR
+ "ipc_proc_sync_finish : "
+ "status [0x%x]\n", status);
+ }
+
+ if (status >= 0)
+ ipc->is_attached = true;
+ else
+ printk(KERN_ERR "ipc_attach failed! status = 0x%x\n", status);
+
+ return status;
+}
+
+
+/*
+ * ============= ipc_detach ==============
+ * detaches from a remote processor
+ */
+int ipc_detach(u16 remote_proc_id)
+{
+ int status = 0;
+#if 0
+ u32 reserved_size = ipc_reserved_size_per_proc();
+	bool cache_enabled = sharedregion_is_cache_enabled(0);
+#endif
+ void *ipu_pm_shared_addr;
+ void *notify_shared_addr;
+ void *nsrn_shared_addr;
+ void *msgq_shared_addr;
+ VOLATILE struct ipc_reserved *slave;
+ VOLATILE struct ipc_reserved *master;
+ struct ipc_proc_entry *ipc;
+ u32 nsrn_mem_req = nameserver_remotenotify_shared_mem_req(NULL);
+ /* prefetching into local variable because of
+ later space restrictions */
+
+	/* get the parameters associated with remote_proc_id */
+ ipc = &(ipc_module->proc_entry[remote_proc_id]);
+
+ if (ipc->is_attached == false) {
+ status = IPC_E_INVALIDSTATE;
+ goto exit;
+ }
+
+ /* determine the slave's slot */
+ slave = ipc_get_slave_addr(remote_proc_id, ipc_module->
+ ipc_shared_addr);
+
+ if (slave != NULL) {
+#if 0
+ if (unlikely(cache_enabled))
+ Cache_inv((void *) slave, reserved_size,
+ Cache_Type_ALL, true);
+#endif
+ if (ipc->entry.setup_messageq) {
+ /* call messageq_detach for remote processor */
+ status = messageq_detach(remote_proc_id);
+ if (status < 0)
+ printk(KERN_ERR "ipc_detach : "
+ "messageq_detach "
+ "failed [0x%x]\n", status);
+ else
+ printk(KERN_ERR
+ "messageq_detach : "
+ "status [0x%x]\n", status);
+
+ /* free the memory if slave processor */
+ if (ipc_module->proc_entry[remote_proc_id].slave) {
+ /* get the pointer to messageq transport
+ instance */
+ msgq_shared_addr = sharedregion_get_ptr(
+ slave->transport_sr_ptr);
+
+ if (msgq_shared_addr != NULL) {
+ /* free the memory back to sharedregion
+ 0 heap */
+ sl_heap_free(sharedregion_get_heap(0),
+ msgq_shared_addr,
+ messageq_shared_mem_req(
+ msgq_shared_addr));
+ }
+
+ /* set the pointer for messageq transport
+ instance back to invalid */
+ slave->transport_sr_ptr =
+ SHAREDREGION_INVALIDSRPTR;
+ }
+ }
+
+ if (ipc->entry.setup_notify) {
+ /* call nameserver_remotenotify_detach for
+ remote processor */
+ status = nameserver_remotenotify_detach(
+ remote_proc_id);
+ if (status < 0)
+ printk(KERN_ERR "ipc_detach : "
+ "nameserver_remotenotify_detach "
+ "failed [0x%x]\n", status);
+ else
+ printk(KERN_ERR
+ "nameserver_remotenotify_detach : "
+ "status [0x%x]\n", status);
+
+ /* free the memory if slave processor */
+ if (ipc_module->proc_entry[remote_proc_id].slave) {
+ /* get the pointer to NSRN instance */
+ nsrn_shared_addr = sharedregion_get_ptr(
+ slave->nsrn_sr_ptr);
+
+ if (nsrn_shared_addr != NULL)
+ /* free the memory back to
+ SharedRegion 0 heap */
+ sl_heap_free(sharedregion_get_heap(0),
+ nsrn_shared_addr,
+ nsrn_mem_req);
+
+ /* set the pointer for NSRN instance back
+ to invalid */
+ slave->nsrn_sr_ptr =
+ SHAREDREGION_INVALIDSRPTR;
+ }
+ }
+
+ if (ipc->entry.setup_notify && ipc->entry.setup_ipu_pm) {
+ /* call ipu_pm_detach for remote processor */
+ status = ipu_pm_detach(remote_proc_id);
+ if (status < 0)
+ printk(KERN_ERR "ipc_detach : "
+ "ipu_pm_detach "
+ "failed [0x%x]\n", status);
+ else
+ printk(KERN_ERR
+ "ipu_pm_detach : "
+ "status [0x%x]\n", status);
+
+ /* free the memory if slave processor */
+ if (ipc_module->proc_entry[remote_proc_id].slave) {
+				/* get the pointer to the ipu_pm instance */
+				ipu_pm_shared_addr = sharedregion_get_ptr(
+							slave->ipu_pm_sr_ptr);
+
+				if (ipu_pm_shared_addr != NULL)
+					/* free the memory back to
+					SharedRegion 0 heap */
+					sl_heap_free(sharedregion_get_heap(0),
+						ipu_pm_shared_addr,
+						ipu_pm_mem_req(NULL));
+
+				/* set the pointer for the ipu_pm instance
+				back to invalid */
+ slave->ipu_pm_sr_ptr =
+ SHAREDREGION_INVALIDSRPTR;
+ }
+ }
+
+ if (ipc->entry.setup_notify) {
+ /* call notify_detach for remote processor */
+ status = notify_detach(remote_proc_id);
+ if (status < 0)
+ printk(KERN_ERR "ipc_detach : "
+ "notify_detach "
+ "failed [0x%x]\n", status);
+ else
+ printk(KERN_ERR
+ "notify_detach : "
+ "status [0x%x]\n", status);
+
+ /* free the memory if slave processor */
+ if (ipc_module->proc_entry[remote_proc_id].slave) {
+ /* get the pointer to Notify instance */
+ notify_shared_addr = sharedregion_get_ptr(
+ slave->notify_sr_ptr);
+
+ if (notify_shared_addr != NULL) {
+ /* free the memory back to
+ SharedRegion 0 heap */
+ sl_heap_free(sharedregion_get_heap(0),
+ notify_shared_addr,
+ notify_shared_mem_req(
+ remote_proc_id,
+ notify_shared_addr));
+ }
+
+ /* set the pointer for Notify instance
+ back to invalid */
+ slave->notify_sr_ptr =
+ SHAREDREGION_INVALIDSRPTR;
+ }
+ }
+
+		/* Reset the sync flags so a future attach can synchronize
+		 * again; this must happen whether or not caching is
+		 * enabled.
+		 */
+		if (ipc_module->proc_entry[remote_proc_id].slave) {
+			slave->started_key = 0;
+			slave->config_list_head =
+				SHAREDREGION_INVALIDSRPTR;
+#if 0
+			if (unlikely(cache_enabled))
+				Cache_wbInv((void *)slave, reserved_size,
+						Cache_Type_ALL, true);
+#endif
+		} else {
+			/* determine the master's slot */
+			master = ipc_get_master_addr(remote_proc_id,
+					ipc_module->ipc_shared_addr);
+
+			if (master != NULL) {
+				master->started_key = 0;
+				master->config_list_head =
+					SHAREDREGION_INVALIDSRPTR;
+#if 0
+				if (unlikely(cache_enabled))
+					Cache_wbInv((void *) master,
+							reserved_size,
+							Cache_Type_ALL,
+							true);
+#endif
+			}
+		}
+
+ /* Now detach the SharedRegion */
+ status = sharedregion_detach(remote_proc_id);
+ BUG_ON(status < 0);
+
+ /* Now detach the GateMP */
+ status = gatemp_detach(remote_proc_id, ipc_module->
+ gatemp_shared_addr);
+ BUG_ON(status < 0);
+ }
+ ipc->is_attached = false;
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "ipc_detach failed with status [0x%x]\n",
+ status);
+ return status;
+}
+
+
+/*
+ * ======== ipc_control ========
+ * Function to dispatch control commands (load/start/stop
+ * callbacks) to the platform layer for a slave processor
+ */
+int
+ipc_control(u16 proc_id, u32 cmd_id, void *arg)
+{
+ int status = IPC_S_SUCCESS;
+
+ switch (cmd_id) {
+ case IPC_CONTROLCMD_LOADCALLBACK:
+ {
+#if defined CONFIG_SYSLINK_USE_SYSMGR
+ status = platform_load_callback(proc_id, arg);
+ if (status < 0)
+ printk(KERN_ERR "ipc_control : platform_load_callback "
+ "failed [0x%x]\n", status);
+#endif
+ }
+ break;
+
+ case IPC_CONTROLCMD_STARTCALLBACK:
+ {
+#if defined CONFIG_SYSLINK_USE_SYSMGR
+ status = platform_start_callback(proc_id, arg);
+ if (status < 0)
+ printk(KERN_ERR "ipc_control : platform_start_callback"
+ " failed [0x%x]\n", status);
+#endif
+ }
+ break;
+
+ case IPC_CONTROLCMD_STOPCALLBACK:
+ {
+#if defined CONFIG_SYSLINK_USE_SYSMGR
+ status = platform_stop_callback(proc_id, arg);
+ if (status < 0)
+ printk(KERN_ERR "ipc_control : platform_stop_callback"
+ " failed [0x%x]\n", status);
+#endif
+ }
+ break;
+
+ default:
+ {
+ status = -EINVAL;
+		printk(KERN_ERR "ipc_control : invalid "
+			"command code [0x%x]\n", cmd_id);
+ }
+ break;
+ }
+
+ return status;
+}
+
+
+/*
+ * ======== ipc_get_master_addr ========
+ */
+void *ipc_get_master_addr(u16 remote_proc_id, void *shared_addr)
+{
+ u32 reserved_size = ipc_reserved_size_per_proc();
+ int slot;
+ u16 master_id;
+ VOLATILE struct ipc_reserved *master;
+
+ /* determine the master's proc_id and slot */
+ if (multiproc_self() < remote_proc_id) {
+ master_id = remote_proc_id;
+ slot = multiproc_self();
+ } else {
+ master_id = multiproc_self();
+ slot = remote_proc_id;
+ }
+
+ /* determine the reserve address for master between self and remote */
+ master = (struct ipc_reserved *)((u32)shared_addr +
+ ((master_id * reserved_size) +
+ (slot * sizeof(struct ipc_reserved))));
+
+ return (void *)master;
+}
+
+/*
+ * ======== ipc_get_region0_reserved_size ========
+ */
+u32 ipc_get_region0_reserved_size(void)
+{
+ u32 reserved_size = ipc_reserved_size_per_proc();
+
+ /* Calculate the total amount to reserve */
+ reserved_size = reserved_size * multiproc_get_num_processors();
+
+ return reserved_size;
+}
+
+/*
+ * ======== ipc_get_slave_addr ========
+ */
+void *ipc_get_slave_addr(u16 remote_proc_id, void *shared_addr)
+{
+ u32 reserved_size = ipc_reserved_size_per_proc();
+ int slot;
+ u16 slave_id;
+ VOLATILE struct ipc_reserved *slave;
+
+ /* determine the slave's proc_id and slot */
+ if (multiproc_self() < remote_proc_id) {
+ slave_id = multiproc_self();
+ slot = remote_proc_id - 1;
+ } else {
+ slave_id = remote_proc_id;
+ slot = multiproc_self() - 1;
+ }
+
+ /* determine the reserve address for slave between self and remote */
+ slave = (struct ipc_reserved *)((u32)shared_addr +
+ ((slave_id * reserved_size) +
+ (slot * sizeof(struct ipc_reserved))));
+
+ return (void *)slave;
+}
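+
+/*
+ * Layout summary for the two address helpers above: the reserved area
+ * at the start of SharedRegion 0 holds one block of
+ * ipc_reserved_size_per_proc() bytes per processor, and each block
+ * contains one struct ipc_reserved slot per peer. For a given pair of
+ * processors, the master entry lives in the higher proc_id's block and
+ * the slave entry in the lower one, so both sides compute the same two
+ * addresses:
+ *
+ *   entry = shared_addr + (proc_id * reserved_size)
+ *                       + (slot * sizeof(struct ipc_reserved))
+ */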
+
+/*
+ * ======== ipc_proc_sync_start ========
+ * The owner of SharedRegion 0 writes to its reserved memory address
+ * in region 0 to let the other processors know it has started.
+ * It then spins until the other processors start.
+ * The other processors write to their reserved memory addresses in
+ * region 0 to let the owner processor know they have started.
+ * The other processors then spin until the owner processor writes
+ * to let them know that it has finished the synchronization process
+ * before continuing.
+ */
+int ipc_proc_sync_start(u16 remote_proc_id, void *shared_addr)
+{
+#if 0
+ u32 reserved_size = ipc_reserved_size_per_proc();
+ bool cache_enabled = sharedregion_is_cache_enabled(0);
+#endif
+ int status = 0;
+ VOLATILE struct ipc_reserved *self;
+ VOLATILE struct ipc_reserved *remote;
+ struct ipc_proc_entry *ipc;
+
+ /* don't do any synchronization if proc_sync is NONE */
+ if (ipc_module->proc_sync != IPC_PROCSYNC_NONE) {
+ /* determine self and remote pointers */
+ if (ipc_module->proc_entry[remote_proc_id].slave) {
+ self = ipc_get_slave_addr(remote_proc_id, shared_addr);
+ remote = ipc_get_master_addr(remote_proc_id,
+ shared_addr);
+ } else {
+ self = ipc_get_master_addr(remote_proc_id, shared_addr);
+ remote = ipc_get_slave_addr(remote_proc_id,
+ shared_addr);
+ }
+
+ /* construct the config list */
+ ipc = &(ipc_module->proc_entry[remote_proc_id]);
+
+ ipc->local_config_list = (void *)&self->config_list_head;
+ ipc->remote_config_list = (void *)&remote->config_list_head;
+
+ ((struct ipc_config_head *)ipc->local_config_list)->first =
+ (u32)SHAREDREGION_INVALIDSRPTR;
+#if 0
+ if (cache_enabled) {
+ Cache_wbInv(ipc->local_config_list,
+ reserved_size,
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+
+ if (ipc_module->proc_entry[remote_proc_id].slave) {
+ /* set my processor's reserved key to start */
+ self->started_key = IPC_PROCSYNCSTART;
+#if 0
+ /* write back my processor's reserve key */
+ if (cache_enabled)
+ Cache_wbInv((void *)self, reserved_size,
+ Cache_Type_ALL, true);
+
+ /* wait for remote processor to start */
+ if (cache_enabled)
+ Cache_inv((void *)remote, reserved_size,
+ Cache_Type_ALL, true);
+#endif
+			/* the slave path ends here: fail if the master has
+			   not yet published its start key, otherwise fall
+			   through and exit successfully */
+			if (remote->started_key != IPC_PROCSYNCSTART)
+				status = IPC_E_FAIL;
+			goto exit;
+ }
+
+#if 0
+ /* wait for remote processor to start */
+ Cache_inv((void *)remote, reserved_size, Cache_Type_ALL, true);
+#endif
+ if ((self->started_key != IPC_PROCSYNCSTART) &&
+ (remote->started_key != IPC_PROCSYNCSTART)) {
+ status = IPC_E_FAIL;
+ goto exit;
+ }
+
+ if (status >= 0) {
+ /* set my processor's reserved key to start */
+ self->started_key = IPC_PROCSYNCSTART;
+#if 0
+ /* write my processor's reserve key back */
+ if (cache_enabled)
+ Cache_wbInv((void *)self, reserved_size,
+ Cache_Type_ALL, true);
+
+ /* wait for remote processor to finish */
+ Cache_inv((void *)remote, reserved_size,
+ Cache_Type_ALL, true);
+#endif
+ if (remote->started_key != IPC_PROCSYNCFINISH) {
+ status = IPC_E_FAIL;
+ goto exit;
+ }
+ }
+ }
+exit:
+ if (status < 0)
+ printk(KERN_ERR "ipc_proc_sync_start failed: status [0x%x]\n",
+ status);
+	else
+		printk(KERN_INFO "ipc_proc_sync_start done\n");
+
+ return status;
+}
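+
+/*
+ * Note on failure semantics: ipc_start() retries ipc_attach() (and
+ * hence this handshake) until it succeeds, so IPC_E_FAIL here usually
+ * means the peer has not yet published its start key rather than a
+ * permanent error.
+ */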
+
+/*
+ * ======== ipc_proc_sync_finish ========
+ * Each processor writes to its reserved memory address in
+ * SharedRegion 0 to let the other processors know it has finished
+ * the process of synchronization.
+ */
+int ipc_proc_sync_finish(u16 remote_proc_id, void *shared_addr)
+{
+#if 0
+ u32 reserved_size = ipc_reserved_size_per_proc();
+ bool cache_enabled = sharedregion_is_cache_enabled(0);
+#endif
+ VOLATILE struct ipc_reserved *self;
+ VOLATILE struct ipc_reserved *remote;
+
+ /* don't do any synchronization if proc_sync is NONE */
+ if (ipc_module->proc_sync != IPC_PROCSYNC_NONE) {
+ /* determine self pointer */
+ if (ipc_module->proc_entry[remote_proc_id].slave) {
+ self = ipc_get_slave_addr(remote_proc_id, shared_addr);
+ remote = ipc_get_master_addr(remote_proc_id,
+ shared_addr);
+ } else {
+ self = ipc_get_master_addr(remote_proc_id,
+ shared_addr);
+ remote = ipc_get_slave_addr(remote_proc_id,
+ shared_addr);
+ }
+ /* set my processor's reserved key to finish */
+ self->started_key = IPC_PROCSYNCFINISH;
+#if 0
+ /* write back my processor's reserve key */
+ if (cache_enabled)
+ Cache_wbInv((void *)self, reserved_size,
+ Cache_Type_ALL, true);
+#endif
+ /* if slave processor, wait for remote to finish sync */
+ if (ipc_module->proc_entry[remote_proc_id].slave) {
+ /* wait for remote processor to finish */
+ do {
+#if 0
+				if (cache_enabled) {
+					Cache_inv((void *)remote,
+						reserved_size,
+						Cache_Type_ALL, true);
+ }
+#endif
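+				/* busy-wait on shared memory until the
+				   remote side writes its finish key; the
+				   cache maintenance that would normally
+				   pace this loop is compiled out above */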
+ } while (remote->started_key != IPC_PROCSYNCFINISH);
+ }
+ }
+
+ return IPC_S_SUCCESS;
+}
+
+/*
+ * ======== ipc_read_config ========
+ */
+int ipc_read_config(u16 remote_proc_id, u32 tag, void *cfg, u32 size)
+{
+#if 0
+ bool cache_enabled = sharedregion_is_cache_enabled(0);
+#endif
+ int status = IPC_E_FAIL;
+ VOLATILE struct ipc_config_entry *entry;
+
+ if (ipc_module->ref_count == 0) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (ipc_module->proc_entry[remote_proc_id].is_attached == false) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+
+#if 0
+ if (cache_enabled) {
+ Cache_inv(ipc_module->proc_entry[remote_proc_id].
+ remote_config_list,
+ sharedregion_get_cache_line_size(0),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+ entry = (struct ipc_config_entry *)((struct ipc_config_head *)
+ ipc_module->proc_entry[remote_proc_id].remote_config_list)->
+ first;
+
+ while ((u32 *)entry != SHAREDREGION_INVALIDSRPTR) {
+ entry = (struct ipc_config_entry *)
+ sharedregion_get_ptr((u32 *)entry);
+ if (entry == NULL) {
+ status = IPC_E_FAIL;
+ goto exit;
+ }
+#if 0
+ /* Traverse the list to find the tag */
+ if (cache_enabled) {
+ Cache_inv((void *)entry,
+ size + sizeof(struct ipc_config_entry),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+
+		if ((entry->remote_proc_id == multiproc_self()) &&
+			(entry->local_proc_id == remote_proc_id) &&
+			(entry->tag == tag)) {
+
+			if (size == entry->size) {
+				memcpy(cfg,
+					(void *)((u32)entry + sizeof(struct
+					ipc_config_entry)),
+					entry->size);
+				status = IPC_S_SUCCESS;
+			} else
+				status = IPC_E_FAIL;
+			/* matching entry found; stop walking the list */
+			break;
+		} else {
+			entry = (struct ipc_config_entry *)entry->next;
+		}
+ }
+
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "ipc_read_config failed: status [0x%x]\n",
+ status);
+
+ return status;
+}
+
+/*
+ * ======== ipc_reserved_size_per_proc ========
+ */
+u32 ipc_reserved_size_per_proc(void)
+{
+ u32 reserved_size = sizeof(struct ipc_reserved) *
+ multiproc_get_num_processors();
+ u32 cache_line_size = sharedregion_get_cache_line_size(0);
+
+ /* Calculate amount to reserve per processor */
+ if (cache_line_size > reserved_size)
+ /* Use cache_line_size if larger than reserved_size */
+ reserved_size = cache_line_size;
+ else
+ /* Round reserved_size to cache_line_size */
+ reserved_size = ROUND_UP(reserved_size, cache_line_size);
+
+ return reserved_size;
+}
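+
+/*
+ * Example (sizes are illustrative): with 4 processors and a 128-byte
+ * cache line, 4 * sizeof(struct ipc_reserved) is rounded up to the
+ * next multiple of 128 bytes, and ipc_get_region0_reserved_size()
+ * then reserves one such block per processor, i.e. 4x that amount.
+ */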
+
+/*
+ * ======== ipc_write_config ========
+ */
+int ipc_write_config(u16 remote_proc_id, u32 tag, void *cfg, u32 size)
+{
+#if 0
+ bool cache_enabled = sharedregion_is_cache_enabled(0);
+#endif
+ u32 cache_line_size = sharedregion_get_cache_line_size(0);
+ int status = IPC_S_SUCCESS;
+ struct ipc_config_entry *entry;
+
+ if (ipc_module->ref_count == 0) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (ipc_module->proc_entry[remote_proc_id].is_attached == false) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ /* Allocate memory from the shared heap (System Heap) */
+ entry = sl_heap_alloc(sharedregion_get_heap(0),
+ size + sizeof(struct ipc_config_entry),
+ cache_line_size);
+
+ if (entry == NULL) {
+ status = IPC_E_FAIL;
+ goto exit;
+ }
+
+ entry->remote_proc_id = remote_proc_id;
+ entry->local_proc_id = multiproc_self();
+ entry->tag = tag;
+ entry->size = size;
+ memcpy((void *)((u32)entry + sizeof(struct ipc_config_entry)),
+ cfg, size);
+
+	/* Push the new entry at the head of the config linked-list. When
+	   the list is empty, 'first' already holds
+	   SHAREDREGION_INVALIDSRPTR, which becomes the terminator of the
+	   new entry. */
+	entry->next = ((struct ipc_config_head *)ipc_module->
+		proc_entry[remote_proc_id].local_config_list)->first;
+
+	((struct ipc_config_head *)ipc_module->
+		proc_entry[remote_proc_id].local_config_list)->first =
+		(u32)sharedregion_get_srptr(entry, 0);
+
+	if (((struct ipc_config_head *)ipc_module->
+		proc_entry[remote_proc_id].local_config_list)->first
+		== (u32)SHAREDREGION_INVALIDSRPTR)
+		status = IPC_E_FAIL;
+#if 0
+ if (cache_enabled) {
+ Cache_wbInv(ipc_module->proc_entry[remote_proc_id].
+ local_config_list,
+ sharedregion_get_cache_line_size(0),
+ Cache_Type_ALL,
+ false);
+
+ Cache_wbInv(entry,
+ size + sizeof(struct ipc_config_entry),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "ipc_write_config failed: status [0x%x]\n",
+ status);
+
+ return status;
+}
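+
+/*
+ * ipc_write_config() and ipc_read_config() form a pair: the writer
+ * pushes a tagged entry onto its local config list in SharedRegion 0,
+ * and the reader walks the peer's remote_config_list matching
+ * (proc_id, tag) and copies the payload out when the sizes agree.
+ */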
+
+
+/*
+ * ======== ipc_start ========
+ */
+int ipc_start(void)
+{
+ int status = 0;
+ int i;
+ struct sharedregion_entry entry;
+ void *ipc_shared_addr;
+ void *gatemp_shared_addr;
+ struct gatemp_params gatemp_params;
+ bool line_available;
+
+	/* This sets the start_ref_count variable if it is not initialized;
+	 * the upper 16 bits are written with the module ID to ensure
+	 * correctness of the start_ref_count variable.
+	 */
+ atomic_cmpmask_and_set(&(ipc_module->start_ref_count),
+ IPC_MAKE_MAGICSTAMP(0),
+ IPC_MAKE_MAGICSTAMP(0));
+ if (atomic_inc_return(&(ipc_module->start_ref_count))
+ != IPC_MAKE_MAGICSTAMP(1u)) {
+ status = IPC_S_SUCCESS;
+ goto exit;
+ }
+
+ /* get region 0 information */
+ sharedregion_get_entry(0, &entry);
+
+ /* if entry is not valid then return */
+ if (entry.is_valid == false) {
+ status = IPC_E_FAIL;
+ goto exit;
+ }
+	/*
+	 * Need to reserve memory in region 0 for processor synchronization.
+	 * This must be done before sharedregion_start().
+	 */
+ ipc_shared_addr = sharedregion_reserve_memory(0,
+ ipc_get_region0_reserved_size());
+
+ /* must reserve memory for gatemp before sharedregion_start() */
+ gatemp_shared_addr = sharedregion_reserve_memory(0,
+ gatemp_get_region0_reserved_size());
+
+	/* Init params for default gate (must match those in gatemp_start()) */
+ gatemp_params_init(&gatemp_params);
+ gatemp_params.local_protect = GATEMP_LOCALPROTECT_TASKLET;
+
+ if (multiproc_get_num_processors() > 1)
+ gatemp_params.remote_protect = GATEMP_REMOTEPROTECT_SYSTEM;
+ else
+ gatemp_params.remote_protect = GATEMP_REMOTEPROTECT_NONE;
+
+ /* reserve memory for default gate before SharedRegion_start() */
+ sharedregion_reserve_memory(0, gatemp_shared_mem_req(&gatemp_params));
+
+ /* clear the reserved memory */
+ sharedregion_clear_reserved_memory();
+
+ /* Set shared addresses */
+ ipc_module->ipc_shared_addr = ipc_shared_addr;
+ ipc_module->gatemp_shared_addr = gatemp_shared_addr;
+
+ /* create default GateMP, must be called before sharedregion_start */
+ status = gatemp_start(ipc_module->gatemp_shared_addr);
+ if (status < 0) {
+ status = IPC_E_FAIL;
+ goto exit;
+ }
+
+ /* create HeapMemMP in each SharedRegion */
+ status = sharedregion_start();
+ if (status < 0) {
+ status = IPC_E_FAIL;
+ goto exit;
+ }
+
+ /* Call attach for all procs if proc_sync is ALL */
+ if (ipc_module->proc_sync == IPC_PROCSYNC_ALL) {
+ /* Must attach to owner first to get default GateMP and
+ * HeapMemMP */
+ if (multiproc_self() != entry.owner_proc_id) {
+ do {
+ status = ipc_attach(entry.owner_proc_id);
+ } while (status < 0);
+ }
+
+ /* Loop to attach to all other processors */
+ for (i = 0; i < multiproc_get_num_processors(); i++) {
+ if ((i == multiproc_self())
+ || (i == entry.owner_proc_id))
+ continue;
+ line_available =
+ notify_setup_proxy_int_line_available(i);
+ if (!line_available)
+ continue;
+ /* call Ipc_attach for every remote processor */
+ do {
+ status = ipc_attach(i);
+ } while (status < 0);
+ }
+ }
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "ipc_start failed: status [0x%x]\n",
+ status);
+
+ return status;
+}
+
+
+/*
+ * ======== ipc_stop ========
+ */
+int ipc_stop(void)
+{
+ int status = IPC_S_SUCCESS;
+ int tmp_status = IPC_S_SUCCESS;
+ struct sharedregion_entry entry;
+ struct gatemp_params gatemp_params;
+
+ if (unlikely(atomic_cmpmask_and_lt(&(ipc_module->start_ref_count),
+ IPC_MAKE_MAGICSTAMP(0),
+ IPC_MAKE_MAGICSTAMP(1)) == true)) {
+ status = IPC_E_FAIL;
+ goto exit;
+ }
+
+ if (likely(atomic_dec_return(&ipc_module->start_ref_count)
+ == IPC_MAKE_MAGICSTAMP(0))) {
+ /* get region 0 information */
+ sharedregion_get_entry(0, &entry);
+
+ /* if entry is not valid then return */
+ if (entry.is_valid == false) {
+ status = IPC_E_FAIL;
+ goto exit;
+ }
+
+
+ /*
+ * Need to unreserve memory in region 0 for processor
+		 * synchronization. This must be done before
+		 * sharedregion_stop().
+ */
+ sharedregion_unreserve_memory(0,
+ ipc_get_region0_reserved_size());
+
+ /* must unreserve memory for GateMP before
+ sharedregion_stop() */
+ sharedregion_unreserve_memory(0,
+ gatemp_get_region0_reserved_size());
+
+		/* Init params for default gate (must match those
+		   in gatemp_stop()) */
+ gatemp_params_init(&gatemp_params);
+ gatemp_params.local_protect = GATEMP_LOCALPROTECT_TASKLET;
+
+ if (multiproc_get_num_processors() > 1)
+ gatemp_params.remote_protect =
+ GATEMP_REMOTEPROTECT_SYSTEM;
+ else
+ gatemp_params.remote_protect =
+ GATEMP_REMOTEPROTECT_NONE;
+
+ /* unreserve memory for default gate before
+ sharedregion_stop() */
+ sharedregion_unreserve_memory(0,
+ gatemp_shared_mem_req(&gatemp_params));
+
+ /* Delete heapmemmp in each sharedregion */
+ status = sharedregion_stop();
+ if (status < 0) {
+ status = IPC_E_FAIL;
+ goto exit;
+ }
+
+ /* delete default gatemp, must be called after
+ * sharedregion_stop
+ */
+ tmp_status = gatemp_stop();
+ if ((tmp_status < 0) && (status >= 0)) {
+ status = IPC_E_FAIL;
+ goto exit;
+ }
+
+ ipc_module->gatemp_shared_addr = NULL;
+ ipc_module->ipc_shared_addr = NULL;
+ }
+exit:
+ if (status < 0)
+ printk(KERN_ERR "ipc_stop failed: status [0x%x]\n", status);
+
+ return status;
+}
+
+
+/*
+ * ======== ipc_get_config ========
+ */
+void ipc_get_config(struct ipc_config *cfg_params)
+{
+ int key;
+ int status = 0;
+
+	if (WARN_ON(cfg_params == NULL)) {
+		status = -EINVAL;
+		goto exit;
+	}
+
+ key = gate_enter_system();
+ if (ipc_module->ref_count == 0)
+ cfg_params->proc_sync = IPC_PROCSYNC_ALL;
+ else
+ memcpy((void *) cfg_params, (void *) &ipc_module->cfg,
+ sizeof(struct ipc_config));
+
+ gate_leave_system(key);
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "ipc_get_config failed: status [0x%x]\n",
+ status);
+
+}
+
+
+/* Sets up ipc for this processor. */
+int ipc_setup(const struct ipc_config *cfg)
+{
+ int status = IPC_S_SUCCESS;
+ struct ipc_config tmp_cfg;
+ int key;
+ int i;
+
+ key = gate_enter_system();
+ ipc_module->ref_count++;
+
+	/* Only the first caller performs the actual setup; ref_count is
+	 * protected by the system gate.
+	 */
+ if (ipc_module->ref_count > 1) {
+ status = IPC_S_ALREADYSETUP;
+ gate_leave_system(key);
+ goto exit;
+ }
+
+ gate_leave_system(key);
+ if (cfg == NULL) {
+ ipc_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+
+ /* Copy the cfg */
+ memcpy(&ipc_module->cfg, cfg, sizeof(struct ipc_config));
+
+ ipc_module->proc_sync = cfg->proc_sync;
+
+ status = platform_setup();
+ if (status < 0) {
+ key = gate_enter_system();
+ ipc_module->ref_count--;
+ gate_leave_system(key);
+ status = IPC_E_FAIL;
+ goto exit;
+ }
+
+ /* Following can be done regardless of status */
+ for (i = 0; i < multiproc_get_num_processors(); i++)
+ ipc_module->proc_entry[i].is_attached = false;
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "ipc_setup failed: status [0x%x]\n", status);
+
+ return status;
+}
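+
+/*
+ * A plausible bring-up order suggested by this file: ipc_setup(),
+ * then ipc_create() per remote processor, then ipc_start() (which
+ * attaches), with teardown in reverse via ipc_stop() and
+ * ipc_destroy().
+ */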
+
+
+/*
+ * ======== ipc_destroy ========
+ * Destroys ipc for this processor.
+ */
+int ipc_destroy(void)
+{
+ int status = IPC_S_SUCCESS;
+ int key;
+
+ key = gate_enter_system();
+ ipc_module->ref_count--;
+
+ if (ipc_module->ref_count < 0) {
+ gate_leave_system(key);
+ status = IPC_E_INVALIDSTATE;
+ goto exit;
+ }
+
+ if (ipc_module->ref_count == 0) {
+ gate_leave_system(key);
+ status = platform_destroy();
+ if (status < 0) {
+ status = IPC_E_FAIL;
+ goto exit;
+ }
+ } else
+ gate_leave_system(key);
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "ipc_destroy failed: status [0x%x]\n", status);
+
+ return status;
+}
+
+
+/*
+ * ======== ipc_create ========
+ * Creates an ipc instance for a remote processor.
+ */
+int ipc_create(u16 remote_proc_id, struct ipc_params *params)
+{
+ ipc_module->proc_entry[remote_proc_id].entry.setup_messageq =
+ params->setup_messageq;
+ ipc_module->proc_entry[remote_proc_id].entry.setup_notify =
+ params->setup_notify;
+ ipc_module->proc_entry[remote_proc_id].entry.setup_ipu_pm =
+ params->setup_ipu_pm;
+ ipc_module->proc_entry[remote_proc_id].entry.remote_proc_id =
+ remote_proc_id;
+
+ /* Assert that the proc_sync is same as configured for the module. */
+ BUG_ON(ipc_module->proc_sync != params->proc_sync);
+
+ return IPC_S_SUCCESS;
+}
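+
+/*
+ * ipc_create() only records the per-processor transport configuration;
+ * the actual resources are created and released by the attach/detach
+ * paths, which consult these setup_* flags (see ipc_detach() above).
+ */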
diff --git a/drivers/dsp/syslink/multicore_ipc/ipc_drv.c b/drivers/dsp/syslink/multicore_ipc/ipc_drv.c
new file mode 100644
index 000000000000..bf6966a0ba3d
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/ipc_drv.c
@@ -0,0 +1,240 @@
+/*
+ * ipc_drv.c
+ *
+ * IPC driver module.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/fs.h>
+#include <linux/moduleparam.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <ipc_ioctl.h>
+#include <drv_notify.h>
+#include <nameserver.h>
+
+#define IPC_NAME "syslink_ipc"
+#define IPC_MAJOR 0
+#define IPC_MINOR 0
+#define IPC_DEVICES 1
+
+struct ipc_device {
+ struct cdev cdev;
+};
+
+struct ipc_device *ipc_device;
+static struct class *ipc_class;
+
+s32 ipc_major = IPC_MAJOR;
+s32 ipc_minor = IPC_MINOR;
+char *ipc_name = IPC_NAME;
+
+module_param(ipc_name, charp, 0);
+MODULE_PARM_DESC(ipc_name, "Device name, default = syslink_ipc");
+
+module_param(ipc_major, int, 0); /* Driver's major number */
+MODULE_PARM_DESC(ipc_major, "Major device number, default = 0 (auto)");
+
+module_param(ipc_minor, int, 0); /* Driver's minor number */
+MODULE_PARM_DESC(ipc_minor, "Minor device number, default = 0 (auto)");
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_LICENSE("GPL v2");
+
+/*
+ * ======== ipc_open ========
+ * This function is invoked when an application
+ * opens handle to the ipc driver
+ */
+int ipc_open(struct inode *inode, struct file *filp)
+{
+ s32 retval = 0;
+ struct ipc_device *dev;
+
+ dev = container_of(inode->i_cdev, struct ipc_device, cdev);
+ filp->private_data = dev;
+ return retval;
+}
+
+/*
+ * ======== ipc_release ========
+ * This function is invoked when an application
+ * closes handle to the ipc driver
+ */
+int ipc_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/*
+ * ======== ipc_ioctl ========
+ * This function provides IO interface to the
+ * ipc driver
+ */
+int ipc_ioctl(struct inode *ip, struct file *filp, u32 cmd, ulong arg)
+{
+ s32 retval = 0;
+ void __user *argp = (void __user *)arg;
+
+	/* Verify the memory and ensure that it is not in kernel
+	   address space
+	 */
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ retval = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ retval = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
+
+	if (retval)
+		return -EFAULT;
+
+	return ipc_ioc_router(cmd, (ulong)argp);
+}
+
+const struct file_operations ipc_fops = {
+ .open = ipc_open,
+ .release = ipc_release,
+ .ioctl = ipc_ioctl,
+ .read = notify_drv_read,
+ .mmap = notify_drv_mmap,
+};
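+
+/*
+ * Note: read and mmap are serviced by the notify driver, so a single
+ * device node carries both the ipc ioctl interface and notify event
+ * delivery to user space.
+ */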
+
+/*
+ * ======== ipc_modules_init ========
+ * IPC initialization routine; initializes the various
+ * sub-components (modules) of IPC.
+ */
+static int ipc_modules_init(void)
+{
+ /* Setup the notify_drv module */
+ _notify_drv_setup();
+
+ return 0;
+}
+
+/*
+ * ======== ipc_modules_exit ========
+ * IPC cleanup routine; cleans up the various
+ * sub-components (modules) of IPC.
+ */
+static void ipc_modules_exit(void)
+{
+ /* Destroy the notify_drv module */
+ _notify_drv_destroy();
+}
+
+/*
+ * ======== ipc_init ========
+ * Initialization routine. Executed when the driver is
+ * loaded (as a kernel module), or when the system
+ * is booted (when included as part of the kernel
+ * image).
+ */
+static int __init ipc_init(void)
+{
+	dev_t dev;
+	s32 retval = 0;
+
+	retval = alloc_chrdev_region(&dev, ipc_minor, IPC_DEVICES,
+							ipc_name);
+	if (retval < 0) {
+		printk(KERN_ERR "ipc_init: can't get major %x\n", ipc_major);
+		goto exit;
+	}
+	ipc_major = MAJOR(dev);
+
+	ipc_device = kzalloc(sizeof(struct ipc_device), GFP_KERNEL);
+	if (!ipc_device) {
+		printk(KERN_ERR "ipc_init: memory allocation failed for "
+			"ipc_device\n");
+		retval = -ENOMEM;
+		goto unreg_exit;
+	}
+
+ retval = ipc_modules_init();
+ if (retval) {
+ printk(KERN_ERR "ipc_init: ipc initialization failed\n");
+ goto unreg_exit;
+	}
+ ipc_class = class_create(THIS_MODULE, "syslink_ipc");
+ if (IS_ERR(ipc_class)) {
+ printk(KERN_ERR "ipc_init: error creating ipc class\n");
+ retval = PTR_ERR(ipc_class);
+ goto unreg_exit;
+ }
+
+ device_create(ipc_class, NULL, MKDEV(ipc_major, ipc_minor), NULL,
+ ipc_name);
+ cdev_init(&ipc_device->cdev, &ipc_fops);
+ ipc_device->cdev.owner = THIS_MODULE;
+ retval = cdev_add(&ipc_device->cdev, dev, IPC_DEVICES);
+ if (retval) {
+ printk(KERN_ERR "ipc_init: failed to add the ipc device\n");
+ goto class_exit;
+ }
+ return retval;
+
+class_exit:
+ class_destroy(ipc_class);
+
+unreg_exit:
+ unregister_chrdev_region(dev, IPC_DEVICES);
+ kfree(ipc_device);
+
+exit:
+ return retval;
+}
+
+/*
+ * ======== ipc_exit ========
+ * This function is invoked during unlinking of ipc
+ * module from the kernel. ipc resources are
+ * freed in this function.
+ */
+static void __exit ipc_exit(void)
+{
+ dev_t devno;
+
+ ipc_modules_exit();
+ devno = MKDEV(ipc_major, ipc_minor);
+ if (ipc_device) {
+ cdev_del(&ipc_device->cdev);
+ kfree(ipc_device);
+ }
+ unregister_chrdev_region(devno, IPC_DEVICES);
+ if (ipc_class) {
+ /* remove the device from sysfs */
+ device_destroy(ipc_class, MKDEV(ipc_major, ipc_minor));
+ class_destroy(ipc_class);
+ }
+}
+
+/*
+ * ipc driver initialization and de-initialization functions
+ */
+module_init(ipc_init);
+module_exit(ipc_exit);
diff --git a/drivers/dsp/syslink/multicore_ipc/ipc_ioctl.c b/drivers/dsp/syslink/multicore_ipc/ipc_ioctl.c
new file mode 100644
index 000000000000..252f23ea73eb
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/ipc_ioctl.c
@@ -0,0 +1,69 @@
+/*
+ * ipc_ioctl.c
+ *
+ * This is the collection of ioctl functions that invoke various ipc
+ * module-level functions based on user commands
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/uaccess.h>
+
+#include <ipc_ioctl.h>
+#include <multiproc_ioctl.h>
+#include <nameserver_ioctl.h>
+#include <heapbufmp_ioctl.h>
+#include <sharedregion_ioctl.h>
+#include <gatemp_ioctl.h>
+#include <listmp_ioctl.h>
+#include <messageq_ioctl.h>
+#include <sysipc_ioctl.h>
+/*#include <sysmemmgr_ioctl.h>*/
+#include <heapmemmp_ioctl.h>
+#include <drv_notify.h>
+
+/*
+ * This will route the ioctl commands to proper modules
+ */
+int ipc_ioc_router(u32 cmd, ulong arg)
+{
+ s32 retval = 0;
+ u32 ioc_nr = _IOC_NR(cmd);
+
+ if (ioc_nr >= MULTIPROC_BASE_CMD && ioc_nr <= MULTIPROC_END_CMD)
+ retval = multiproc_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= NAMESERVER_BASE_CMD && ioc_nr <= NAMESERVER_END_CMD)
+ retval = nameserver_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= HEAPBUFMP_BASE_CMD && ioc_nr <= HEAPBUFMP_END_CMD)
+ retval = heapbufmp_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= SHAREDREGION_BASE_CMD &&
+ ioc_nr <= SHAREDREGION_END_CMD)
+ retval = sharedregion_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= GATEMP_BASE_CMD && ioc_nr <= GATEMP_END_CMD)
+ retval = gatemp_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= LISTMP_BASE_CMD && ioc_nr <= LISTMP_END_CMD)
+ retval = listmp_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= MESSAGEQ_BASE_CMD && ioc_nr <= MESSAGEQ_END_CMD)
+ retval = messageq_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= IPC_BASE_CMD && ioc_nr <= IPC_END_CMD)
+ retval = sysipc_ioctl(NULL, NULL, cmd, arg);
+/* else if (ioc_nr >= SYSMEMMGR_BASE_CMD && ioc_nr <= SYSMEMMGR_END_CMD)
+ retval = sysmemmgr_ioctl(NULL, NULL, cmd, arg);*/
+ else if (ioc_nr >= HEAPMEMMP_BASE_CMD && ioc_nr <= HEAPMEMMP_END_CMD)
+ retval = heapmemmp_ioctl(NULL, NULL, cmd, arg);
+ else if (ioc_nr >= NOTIFY_BASE_CMD && ioc_nr <= NOTIFY_END_CMD)
+ retval = notify_drv_ioctl(NULL, NULL, cmd, arg);
+ else
+ retval = -ENOTTY;
+
+ return retval;
+}
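+
+/*
+ * The dispatch above assumes the per-module BASE/END command ranges
+ * are disjoint; a cmd whose _IOC_NR falls in none of them is rejected
+ * with -ENOTTY, the conventional errno for an unknown ioctl.
+ */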
diff --git a/drivers/dsp/syslink/multicore_ipc/listmp.c b/drivers/dsp/syslink/multicore_ipc/listmp.c
new file mode 100644
index 000000000000..466a7cbab65d
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/listmp.c
@@ -0,0 +1,1472 @@
+/*
+ * listmp.c
+ *
+ * listmp is a linked-list based module designed for use in a
+ * multi-processor environment; it provides a means of communication
+ * between different processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+#include <linux/module.h>
+
+/* Utilities headers */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+/* Syslink headers */
+#include <syslink/atomic_linux.h>
+
+/* Module level headers */
+#include <multiproc.h>
+#include <nameserver.h>
+#include <sharedregion.h>
+#include <gatemp.h>
+#include "_listmp.h"
+#include <listmp.h>
+
+
+/* =============================================================================
+ * Globals
+ * =============================================================================
+ */
+/* Macro to make a correct module magic number with ref_count */
+#define LISTMP_MAKE_MAGICSTAMP(x) ((LISTMP_MODULEID << 12u) | (x))
+
+/* Name of the reserved NameServer used for listmp. */
+#define LISTMP_NAMESERVER "ListMP"
+
+#define ROUND_UP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
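+/* note: the mask form of ROUND_UP above is only correct when b is a
+   power of two (as cache-line sizes are) */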
+
+/* =============================================================================
+ * Structures and Enums
+ * =============================================================================
+ */
+/* structure for listmp module state */
+struct listmp_module_object {
+ atomic_t ref_count;
+ /* Reference count */
+ void *ns_handle;
+ /* Handle to the local NameServer used for storing listmp objects */
+ struct list_head obj_list;
+ /* List holding created listmp objects */
+ struct mutex *local_lock;
+ /* Handle to lock for protecting obj_list */
+ struct listmp_config cfg;
+ /* Current config values */
+ struct listmp_config default_cfg;
+ /* Default config values */
+ struct listmp_params default_inst_params;
+ /* Default instance creation parameters */
+};
+
+/* Structure for the internal Handle for the listmp. */
+struct listmp_object{
+ struct list_head list_elem;
+ /* Used for creating a linked list */
+ VOLATILE struct listmp_attrs *attrs;
+ /* Shared memory attributes */
+ void *ns_key;
+ /* nameserver key required for remove */
+ void *gatemp_handle;
+ /* Gate for critical regions */
+ u32 alloc_size;
+ /* Shared memory allocated */
+ u16 region_id;
+ /* SharedRegion ID */
+ bool cache_enabled;
+ /* Whether to do cache calls */
+ struct listmp_proc_attrs owner;
+ /* Creator's attributes associated with an instance */
+ struct listmp_params params;
+ /* the parameter structure */
+ void *top;
+ /* Pointer to the top Object */
+};
+
+/* =============================================================================
+ * Globals
+ * =============================================================================
+ */
+/* Variable for holding the state of the listmp module. */
+static struct listmp_module_object listmp_state = {
+ .default_cfg.max_runtime_entries = 32,
+ .default_cfg.max_name_len = 32,
+ .default_inst_params.shared_addr = 0,
+ .default_inst_params.name = NULL,
+ .default_inst_params.gatemp_handle = NULL,
+ .default_inst_params.region_id = 0,
+};
+
+/* Pointer to the listmp module state */
+static struct listmp_module_object *listmp_module = &listmp_state;
+
+/* =============================================================================
+ * Function definitions
+ * =============================================================================
+ */
+/* Creates a new instance of the listmp module. This is an internal
+ * function because listmp_create and listmp_open both use the same
+ * functionality. */
+static int _listmp_create(struct listmp_object **handle_ptr,
+ struct listmp_params *params, u32 create_flag);
+
+
+/* =============================================================================
+ * Function API's
+ * =============================================================================
+ */
+/* Function to get configuration parameters to setup the listmp module. */
+void listmp_get_config(struct listmp_config *cfg_params)
+{
+ int status = 0;
+
+ if (WARN_ON(unlikely(cfg_params == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true) {
+ /* If setup has not yet been called) */
+ memcpy(cfg_params, &listmp_module->default_cfg,
+ sizeof(struct listmp_config));
+ } else {
+ memcpy(cfg_params, &listmp_module->cfg,
+ sizeof(struct listmp_config));
+ }
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_get_config failed: status = 0x%x\n",
+ status);
+ }
+ return;
+}
+
+/* Function to setup the listmp module. */
+int listmp_setup(const struct listmp_config *cfg)
+{
+ int status = 0;
+ int status1 = 0;
+ void *nshandle = NULL;
+ struct nameserver_params params;
+ struct listmp_config tmp_cfg;
+
+ /* This sets the ref_count variable if not initialized, upper 16 bits is
+ * written with module Id to ensure correctness of ref_count variable.
+ */
+ atomic_cmpmask_and_set(&listmp_module->ref_count,
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(0));
+ if (atomic_inc_return(&listmp_module->ref_count)
+ != LISTMP_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ if (cfg == NULL) {
+ listmp_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+
+ if (WARN_ON(cfg->max_name_len == 0)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* Initialize the parameters */
+ nameserver_params_init(&params);
+ params.max_value_len = 4;
+ params.max_name_len = cfg->max_name_len;
+ /* Create the nameserver for modules */
+ nshandle = nameserver_create(LISTMP_NAMESERVER, &params);
+ if (unlikely(nshandle == NULL)) {
+ status = LISTMP_E_FAIL;
+ goto exit;
+ }
+ listmp_module->ns_handle = nshandle;
+
+ /* Construct the list object */
+ INIT_LIST_HEAD(&listmp_module->obj_list);
+ /* Create a lock for protecting list object */
+ listmp_module->local_lock = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (listmp_module->local_lock == NULL) {
+ status = -ENOMEM;
+ goto clean_nameserver;
+ }
+ mutex_init(listmp_module->local_lock);
+
+ /* Copy the cfg */
+ memcpy(&listmp_module->cfg, cfg, sizeof(struct listmp_config));
+ return 0;
+
+clean_nameserver:
+ status1 = nameserver_delete(&(listmp_module->ns_handle));
+ WARN_ON(status1 < 0);
+ atomic_set(&listmp_module->ref_count, LISTMP_MAKE_MAGICSTAMP(0));
+exit:
+ printk(KERN_ERR "listmp_setup failed! status = 0x%x\n", status);
+ return status;
+}
+
+/* Function to destroy the listmp module. */
+int listmp_destroy(void)
+{
+ int status = 0;
+ int status1 = 0;
+ struct list_head *elem = NULL;
+ struct list_head *head = &listmp_module->obj_list;
+ struct list_head *next;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (!(atomic_dec_return(&listmp_module->ref_count) == \
+ LISTMP_MAKE_MAGICSTAMP(0))) {
+ status = 1;
+ goto exit;
+ }
+
+ /* Temporarily increment ref_count here. */
+ atomic_set(&listmp_module->ref_count, LISTMP_MAKE_MAGICSTAMP(1));
+ /* Check if any listmp instances have not been
+ * deleted so far. If not, delete them. */
+ for (elem = (head)->next; elem != (head); elem = next) {
+ /* Retain the next pointer so it doesn't get overwritten */
+ next = elem->next;
+ if (((struct listmp_object *) elem)->owner.proc_id == \
+ multiproc_self()) {
+ status1 = listmp_delete((void **)
+ &(((struct listmp_object *)elem)->top));
+ WARN_ON(status1 < 0);
+ } else {
+ status1 = listmp_close((void **)
+ &(((struct listmp_object *)elem)->top));
+ WARN_ON(status1 < 0);
+ }
+ }
+
+ if (likely(listmp_module->ns_handle != NULL)) {
+ /* Delete the nameserver for modules */
+ status = nameserver_delete(&(listmp_module->ns_handle));
+ WARN_ON(status < 0);
+ }
+
+ /* Destruct the list object */
+ list_del(&listmp_module->obj_list);
+ /* Delete the list lock */
+ kfree(listmp_module->local_lock);
+ listmp_module->local_lock = NULL;
+
+ memset(&listmp_module->cfg, 0, sizeof(struct listmp_config));
+
+ /* Again reset ref_count. */
+ atomic_set(&listmp_module->ref_count, LISTMP_MAKE_MAGICSTAMP(0));
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_destroy failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
+
+/* Function to initialize the config-params structure with supplier-specified
+ * defaults before instance creation. */
+void listmp_params_init(struct listmp_params *params)
+{
+ s32 status = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(params == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ memcpy((void *)params, (void *)&listmp_module->default_inst_params,
+ sizeof(struct listmp_params));
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_params_init failed! status = 0x%x\n",
+ status);
+ }
+ return;
+}
+
+/* Creates a new instance of listmp module. */
+void *listmp_create(const struct listmp_params *params)
+{
+ s32 status = 0;
+ struct listmp_object *obj = NULL;
+ struct listmp_params sparams;
+ u32 key;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(params == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ memcpy(&sparams, params, sizeof(struct listmp_params));
+
+ key = mutex_lock_interruptible(listmp_module->local_lock);
+ if (key)
+ goto exit;
+ status = _listmp_create(&obj, &sparams, (u32) true);
+ mutex_unlock(listmp_module->local_lock);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_create failed! status = 0x%x\n",
+ status);
+ }
+ return (void *)obj;
+}
+
+/* Deletes an instance of a listmp object. */
+int listmp_delete(void **listmp_handleptr)
+{
+ int status = 0;
+ struct listmp_object *obj = NULL;
+ struct listmp_params *params = NULL;
+ u32 key;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(listmp_handleptr == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(*listmp_handleptr == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct listmp_object *)(*listmp_handleptr);
+ params = (struct listmp_params *)&obj->params;
+
+	if (unlikely(obj->owner.proc_id != multiproc_self())) {
+		status = -ENODEV;
+		goto exit;
+	}
+	/* delete is only legal when exactly one (creator) reference is
+	   still open */
+	if (unlikely(obj->owner.open_count != 1)) {
+		status = -ENODEV;
+		goto exit;
+	}
+
+ /* Remove from the local list */
+ key = mutex_lock_interruptible(listmp_module->local_lock);
+ list_del(&obj->list_elem);
+ mutex_unlock(listmp_module->local_lock);
+
+ if (likely(params->name != NULL)) {
+ /* Free memory for the name */
+ kfree(params->name);
+ /* Remove from the name server */
+ if (obj->ns_key != NULL) {
+ nameserver_remove_entry(listmp_module->ns_handle,
+ obj->ns_key);
+ obj->ns_key = NULL;
+ }
+ }
+
+ /* Now free the obj */
+ kfree(obj);
+ obj = NULL;
+ *listmp_handleptr = NULL;
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_delete failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
+
+/* Function to open a listmp instance */
+int listmp_open(char *name, void **listmp_handleptr)
+{
+ int status = 0;
+ void *shared_addr = NULL;
+ bool done_flag = false;
+ struct list_head *elem;
+ u32 key;
+ u32 shared_shm_base;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(listmp_handleptr == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(name == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* First check in the local list */
+ list_for_each(elem, &listmp_module->obj_list) {
+ if (((struct listmp_object *)elem)->params.name != NULL) {
+ if (strcmp(((struct listmp_object *)elem)->params.name,
+ name) == 0) {
+ key = mutex_lock_interruptible(
+ listmp_module->local_lock);
+ if (((struct listmp_object *)elem)
+ ->owner.proc_id == multiproc_self())
+ ((struct listmp_object *)elem)
+ ->owner.open_count++;
+ mutex_unlock(listmp_module->local_lock);
+ *listmp_handleptr = \
+ (((struct listmp_object *)elem)->top);
+ done_flag = true;
+ break;
+ }
+ }
+ }
+
+ if (likely(done_flag == false)) {
+ /* Find in name server */
+ status = nameserver_get_uint32(listmp_module->ns_handle,
+ name, &shared_shm_base, NULL);
+ if (unlikely(status < 0)) {
+ status = ((status == -ENOENT) ? status : -1);
+ goto exit;
+ }
+ shared_addr = sharedregion_get_ptr((u32 *)shared_shm_base);
+ if (unlikely(shared_addr == NULL)) {
+ status = LISTMP_E_FAIL;
+ goto exit;
+ }
+ status = listmp_open_by_addr(shared_addr, listmp_handleptr);
+ }
+
+#if 0
+ if (status >= 0) {
+ attrs = (struct listmp_attrs *) (params->shared_addr);
+ if (unlikely(attrs->status != (LISTMP_CREATED)))
+ status = LISTMP_E_NOTCREATED;
+ else if (unlikely(attrs->version !=
+ (LISTMP_VERSION)))
+ status = LISTMP_E_VERSION;
+ }
+
+ if (likely(status >= 0))
+ *listmp_handleptr = (listmp_handle)
+ _listmp_create(params, false);
+#endif
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "listmp_open failed! status = 0x%x\n", status);
+ return status;
+}
+
+/* Function to open a listmp instance by address */
+int listmp_open_by_addr(void *shared_addr, void **listmp_handleptr)
+{
+ int status = 0;
+ bool done_flag = false;
+ struct listmp_params params;
+ struct list_head *elem;
+ u32 key;
+ struct listmp_attrs *attrs;
+ u16 id;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(listmp_handleptr == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(shared_addr == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* First check in the local list */
+ list_for_each(elem, &listmp_module->obj_list) {
+ if (((struct listmp_object *)elem)->params.shared_addr == \
+ shared_addr) {
+ key = mutex_lock_interruptible(
+ listmp_module->local_lock);
+ if (((struct listmp_object *)elem)->owner.proc_id == \
+ multiproc_self())
+ ((struct listmp_object *)elem)
+ ->owner.open_count++;
+ mutex_unlock(listmp_module->local_lock);
+ *listmp_handleptr = \
+ (((struct listmp_object *)elem)->top);
+ done_flag = true;
+ break;
+ }
+ }
+
+ if (likely(done_flag == false)) {
+ listmp_params_init(&params);
+ params.shared_addr = shared_addr;
+
+ attrs = (struct listmp_attrs *)(shared_addr);
+ id = sharedregion_get_id(shared_addr);
+#if 0
+ if (sharedregion_is_cache_enabled(id))
+ Cache_inv(in_use, num * sizeof(u8), Cache_Type_ALL,
+ true);
+#endif
+ if (unlikely(attrs->status != LISTMP_CREATED)) {
+ *listmp_handleptr = NULL;
+ status = -ENOENT;
+ } else {
+ key = mutex_lock_interruptible(
+ listmp_module->local_lock);
+ status = _listmp_create((struct listmp_object **)
+ listmp_handleptr, &params,
+ (u32) false);
+ mutex_unlock(listmp_module->local_lock);
+ }
+ }
+
+exit:
+ if (status < 0) {
+		printk(KERN_ERR "listmp_open_by_addr failed! "
+			"status = 0x%x\n", status);
+ }
+ return status;
+}
+
+/* Function to close a previously opened instance */
+int listmp_close(void **listmp_handleptr)
+{
+ int status = 0;
+ struct listmp_object *obj = NULL;
+ struct listmp_params *params = NULL;
+ u32 key;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(listmp_handleptr == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(*listmp_handleptr == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct listmp_object *)(*listmp_handleptr);
+ params = (struct listmp_params *)&obj->params;
+
+ key = mutex_lock_interruptible(listmp_module->local_lock);
+ if (unlikely(obj->owner.proc_id == multiproc_self()))
+ (obj)->owner.open_count--;
+
+	/* only non-creators free the local object on close; the creator
+	   must use listmp_delete */
+ if (likely((((struct listmp_object *)obj)->owner.creator == false))) {
+ list_del(&obj->list_elem);
+		if (params->name != NULL)
+			/* free the memory for the name */
+ kfree(params->name);
+ gatemp_close(&obj->gatemp_handle);
+
+ kfree(obj);
+ obj = NULL;
+ *listmp_handleptr = NULL;
+ }
+
+ mutex_unlock(listmp_module->local_lock);
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "listmp_close failed! status = 0x%x\n", status);
+ return status;
+}
+
+/* Function to check if the shared memory list is empty */
+bool listmp_empty(void *listmp_handle)
+{
+ int status = 0;
+ bool is_empty = false;
+ struct listmp_object *obj = NULL;
+ int *key;
+ struct listmp_elem *shared_head;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(listmp_handle == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct listmp_object *)listmp_handle;
+ key = gatemp_enter(obj->gatemp_handle);
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)&(obj->attrs->head),
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+
+ /* true if list is empty */
+ shared_head = (struct listmp_elem *)(sharedregion_get_srptr(
+ (void *)&(obj->attrs->head), obj->region_id));
+ dsb();
+ if (obj->attrs->head.next == shared_head)
+ is_empty = true;
+
+ gatemp_leave(obj->gatemp_handle, key);
+
+exit:
+ return is_empty;
+}
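+
+/*
+ * The emptiness test above works entirely in shared-pointer space: an
+ * empty list's head.next holds the SRPtr of the head itself, so
+ * comparing against the head's own SRPtr avoids translating shared
+ * pointers back to local addresses.
+ */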
+
+/* Retrieves the gatemp handle associated with the listmp instance. */
+void *listmp_get_gate(void *listmp_handle)
+{
+ struct listmp_object *obj = NULL;
+ void *gatemp_handle = NULL;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(listmp_handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct listmp_object *)listmp_handle;
+ gatemp_handle = obj->gatemp_handle;
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "listmp_get_gate failed! status = 0x%x",
+ retval);
+ }
+ return gatemp_handle;
+}
+
+/* Function to get head element from a shared memory list */
+void *listmp_get_head(void *listmp_handle)
+{
+ struct listmp_object *obj = NULL;
+ struct listmp_elem *elem = NULL;
+ struct listmp_elem *local_head_next = NULL;
+ struct listmp_elem *local_next = NULL;
+ s32 retval = 0;
+ int *key = NULL;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(listmp_handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct listmp_object *)listmp_handle;
+ key = gatemp_enter(obj->gatemp_handle);
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)&(obj->attrs->head),
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+
+ local_head_next = sharedregion_get_ptr((u32 *)obj->attrs->head.next);
+ WARN_ON(local_head_next == NULL);
+ dsb();
+ /* See if the listmp_object was empty */
+ if (local_head_next != (struct listmp_elem *)&obj->attrs->head) {
+ /* Elem to return */
+ elem = local_head_next;
+ WARN_ON(elem == NULL);
+ dsb();
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)local_head_next,
+ sizeof(struct listmp_elem), Cache_Type_ALL,
+ true);
+ }
+#endif
+ local_next = sharedregion_get_ptr((u32 *)elem->next);
+ WARN_ON(local_next == NULL);
+
+ /* Fix the head of the list next pointer */
+ obj->attrs->head.next = elem->next;
+ dsb();
+ /* Fix the prev pointer of the new first elem on the list */
+ local_next->prev = local_head_next->prev;
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)&(obj->attrs->head),
+ sizeof(struct listmp_elem), Cache_Type_ALL,
+ true);
+ Cache_inv((void *)local_next,
+ sizeof(struct listmp_elem), Cache_Type_ALL,
+ true);
+ }
+#endif
+ }
+ gatemp_leave(obj->gatemp_handle, key);
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "listmp_get_head failed! status = 0x%x",
+ retval);
+ }
+ return elem;
+}
+
+/* Function to get tail element from a shared memory list */
+void *listmp_get_tail(void *listmp_handle)
+{
+ struct listmp_object *obj = NULL;
+ struct listmp_elem *elem = NULL;
+ int *key;
+ struct listmp_elem *local_head_prev = NULL;
+ struct listmp_elem *local_prev = NULL;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(listmp_module->ns_handle == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(listmp_handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct listmp_object *)listmp_handle;
+ key = gatemp_enter(obj->gatemp_handle);
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)&(obj->attrs->head),
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+
+ local_head_prev = sharedregion_get_ptr((u32 *)obj->attrs->head.prev);
+ WARN_ON(local_head_prev == NULL);
+
+ /* See if the listmp_object was empty */
+ if (local_head_prev != (struct listmp_elem *)&obj->attrs->head) {
+ /* Elem to return */
+ elem = local_head_prev;
+ WARN_ON(elem == NULL);
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)local_head_prev,
+ sizeof(struct listmp_elem), Cache_Type_ALL,
+ true);
+ }
+#endif
+ local_prev = sharedregion_get_ptr((u32 *)elem->prev);
+ WARN_ON(local_prev == NULL);
+
+ /* Fix the head of the list prev pointer */
+ obj->attrs->head.prev = elem->prev;
+ /* Fix the next pointer of the new last elem on the list */
+ local_prev->next = local_head_prev->next;
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)&(obj->attrs->head),
+ sizeof(struct listmp_elem), Cache_Type_ALL,
+ true);
+ Cache_inv((void *)local_prev,
+ sizeof(struct listmp_elem), Cache_Type_ALL,
+ true);
+ }
+#endif
+ }
+ gatemp_leave(obj->gatemp_handle, key);
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "listmp_get_tail failed! status = 0x%x",
+ retval);
+ }
+ return elem;
+}
+
+/* Function to put head element into a shared memory list */
+int listmp_put_head(void *listmp_handle, struct listmp_elem *elem)
+{
+ int status = 0;
+ struct listmp_object *obj = NULL;
+ struct listmp_elem *local_next_elem = NULL;
+ int *key;
+ struct listmp_elem *shared_elem = NULL;
+ u32 index;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(listmp_handle == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(elem == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct listmp_object *)listmp_handle;
+ dsb();
+ index = sharedregion_get_id(elem);
+ shared_elem = (struct listmp_elem *)sharedregion_get_srptr((void *)elem,
+ index);
+ WARN_ON((u32 *)shared_elem == SHAREDREGION_INVALIDSRPTR);
+ dsb();
+
+ key = gatemp_enter(obj->gatemp_handle);
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)&(obj->attrs->head),
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+ /* Add the new elem into the list */
+ elem->next = obj->attrs->head.next;
+ dsb();
+ local_next_elem = sharedregion_get_ptr((u32 *)elem->next);
+ WARN_ON(local_next_elem == NULL);
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)local_next_elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+ elem->prev = local_next_elem->prev;
+ local_next_elem->prev = shared_elem;
+ obj->attrs->head.next = shared_elem;
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ /* Need to do cache operations */
+ Cache_inv((void *)local_next_elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ Cache_inv((void *)&(obj->attrs->head),
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ /* writeback invalidate only the elem structure */
+ Cache_inv((void *)elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+ gatemp_leave(obj->gatemp_handle, key);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_put_head failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
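+
+/*
+ * Ordering note for the put/insert paths: the new element's own links
+ * are written first, with dsb() barriers, before any neighbour or head
+ * pointer is redirected to it; together with the gatemp lock this
+ * keeps the shared list consistent for remote readers.
+ */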
+
+/* Function to put tail element into a shared memory list */
+int listmp_put_tail(void *listmp_handle, struct listmp_elem *elem)
+{
+ int status = 0;
+ struct listmp_object *obj = NULL;
+ int *key;
+ struct listmp_elem *local_prev_elem = NULL;
+ struct listmp_elem *shared_elem = NULL;
+ u32 index;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(listmp_handle == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(elem == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct listmp_object *)listmp_handle;
+ dsb();
+ /* Safe to do outside the gate */
+ index = sharedregion_get_id(elem);
+ shared_elem = (struct listmp_elem *)sharedregion_get_srptr((void *)elem,
+ index);
+ WARN_ON((u32 *)shared_elem == SHAREDREGION_INVALIDSRPTR);
+ dsb();
+
+ key = gatemp_enter(obj->gatemp_handle);
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)&(obj->attrs->head),
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+ elem->prev = obj->attrs->head.prev;
+ dsb();
+ local_prev_elem = sharedregion_get_ptr((u32 *)elem->prev);
+ WARN_ON(local_prev_elem == NULL);
+ dsb();
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)local_next_elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+ /* Add the new elem into the list */
+ elem->next = local_prev_elem->next;
+ local_prev_elem->next = shared_elem;
+ obj->attrs->head.prev = shared_elem;
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ /* Need to do cache operations */
+ Cache_inv((void *)local_prev_elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ Cache_inv((void *)&(obj->attrs->head),
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ /* writeback invalidate only the elem structure */
+ Cache_inv((void *)elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+ gatemp_leave(obj->gatemp_handle, key);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_put_tail failed! "
+ "status = 0x%x\n", status);
+ }
+ return status;
+}
+
+/* Function to insert an element into a shared memory list */
+int listmp_insert(void *listmp_handle, struct listmp_elem *new_elem,
+ struct listmp_elem *cur_elem)
+{
+ int status = 0;
+ struct listmp_object *obj = NULL;
+ struct listmp_elem *local_prev_elem = NULL;
+ int *key;
+ struct listmp_elem *shared_new_elem = NULL;
+ struct listmp_elem *shared_cur_elem = NULL;
+ u32 index;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(new_elem == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(cur_elem == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct listmp_object *)listmp_handle;
+ dsb();
+ /* Get SRPtr for new_elem */
+ index = sharedregion_get_id(new_elem);
+ shared_new_elem = (struct listmp_elem *)
+ sharedregion_get_srptr((void *)new_elem, index);
+ WARN_ON((u32 *)shared_new_elem == SHAREDREGION_INVALIDSRPTR);
+ dsb();
+ /* Get SRPtr for cur_elem */
+ index = sharedregion_get_id(cur_elem);
+ shared_cur_elem = (struct listmp_elem *)
+ sharedregion_get_srptr((void *)cur_elem, index);
+ WARN_ON((u32 *)shared_cur_elem == SHAREDREGION_INVALIDSRPTR);
+ dsb();
+
+ key = gatemp_enter(obj->gatemp_handle);
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)cur_elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+ local_prev_elem = sharedregion_get_ptr((u32 *)cur_elem->prev);
+ WARN_ON(local_prev_elem == NULL);
+ dsb();
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)local_prev_elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+ new_elem->next = shared_cur_elem;
+ new_elem->prev = cur_elem->prev;
+ local_prev_elem->next = shared_new_elem;
+ cur_elem->prev = shared_new_elem;
+ dsb();
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ /* Need to do cache operations */
+ Cache_inv((void *)local_prev_elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ Cache_inv((void *)cur_elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ /* writeback invalidate only the elem structure */
+ Cache_inv((void *)new_elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+ gatemp_leave(obj->gatemp_handle, key);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_insert failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
+
+/* Function to remove an element from a shared memory list */
+int listmp_remove(void *listmp_handle, struct listmp_elem *elem)
+{
+ int status = 0;
+ struct listmp_object *obj = NULL;
+ struct listmp_elem *local_prev_elem = NULL;
+ struct listmp_elem *local_next_elem = NULL;
+ int *key;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(elem == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct listmp_object *)listmp_handle;
+
+ key = gatemp_enter(obj->gatemp_handle);
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+ local_prev_elem = sharedregion_get_ptr((u32 *)elem->prev);
+ local_next_elem = sharedregion_get_ptr((u32 *)elem->next);
+ dsb();
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ /* Need to do cache operations */
+ Cache_inv((void *)local_prev_elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ Cache_inv((void *)local_next_elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+ local_prev_elem->next = elem->next;
+ local_next_elem->prev = elem->prev;
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ /* Need to do cache operations */
+ Cache_inv((void *)local_prev_elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ Cache_inv((void *)local_next_elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+ gatemp_leave(obj->gatemp_handle, key);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "listmp_remove failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
+
+/* Function to traverse to next element in shared memory list */
+void *listmp_next(void *listmp_handle, struct listmp_elem *elem)
+{
+ int status = 0;
+ struct listmp_object *obj = NULL;
+ struct listmp_elem *ret_elem = NULL;
+ int *key;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ obj = (struct listmp_object *)listmp_handle;
+
+ key = gatemp_enter(obj->gatemp_handle);
+ /* If element is NULL start at head */
+ if (elem == NULL)
+ elem = (struct listmp_elem *)&obj->attrs->head;
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+ ret_elem = sharedregion_get_ptr((u32 *)elem->next);
+ WARN_ON(ret_elem == NULL);
+ /* NULL if list is empty */
+ if (ret_elem == (struct listmp_elem *)&obj->attrs->head)
+ ret_elem = NULL;
+ gatemp_leave(obj->gatemp_handle, key);
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "listmp_next failed! status = 0x%x\n", status);
+ return ret_elem;
+}
+
+/* Function to traverse to prev element in shared memory list */
+void *listmp_prev(void *listmp_handle, struct listmp_elem *elem)
+{
+ int status = 0;
+ struct listmp_object *obj = NULL;
+ struct listmp_elem *ret_elem = NULL;
+ int *key;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(listmp_module->ref_count),
+ LISTMP_MAKE_MAGICSTAMP(0),
+ LISTMP_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ obj = (struct listmp_object *)listmp_handle;
+
+ key = gatemp_enter(obj->gatemp_handle);
+ /* If element is NULL start at head */
+ if (elem == NULL)
+ elem = (struct listmp_elem *)&obj->attrs->head;
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)elem,
+ sizeof(struct listmp_elem), Cache_Type_ALL, true);
+ }
+#endif
+ ret_elem = sharedregion_get_ptr((u32 *)elem->prev);
+ WARN_ON(ret_elem == NULL);
+ /* NULL if list is empty */
+ if (ret_elem == (struct listmp_elem *)&obj->attrs->head)
+ ret_elem = NULL;
+ gatemp_leave(obj->gatemp_handle, key);
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "listmp_prev failed! status = 0x%x\n", status);
+ return ret_elem;
+}
+
+/* Function to return the amount of shared memory required for creation of
+ * each instance. */
+uint listmp_shared_mem_req(const struct listmp_params *params)
+{
+ int retval = 0;
+ uint mem_req = 0;
+ uint min_align;
+ u16 region_id;
+
+ if (WARN_ON(unlikely(params == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (params->shared_addr == NULL)
+ region_id = params->region_id;
+ else
+ region_id = sharedregion_get_id(params->shared_addr);
+ WARN_ON(region_id == SHAREDREGION_INVALIDREGIONID);
+
+	/*min_align = Memory_getMaxDefaultTypeAlign();*/
+	min_align = 4;
+ if (sharedregion_get_cache_line_size(region_id) > min_align)
+ min_align = sharedregion_get_cache_line_size(region_id);
+
+ mem_req = ROUND_UP(sizeof(struct listmp_attrs), min_align);
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "listmp_shared_mem_req failed! status = 0x%x\n",
+ retval);
+ }
+ return mem_req;
+}
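+
+/*
+ * Illustrative example (values hypothetical): for a region with a
+ * 128-byte cache line size, min_align becomes 128 and the function
+ * returns sizeof(struct listmp_attrs) rounded up to 128 bytes.
+ */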
+
+/* Clears a listmp element's pointers */
+static void _listmp_elem_clear(struct listmp_elem *elem)
+{
+ u32 *shared_elem;
+ int id;
+
+ WARN_ON(elem == NULL);
+
+ id = sharedregion_get_id(elem);
+ shared_elem = sharedregion_get_srptr(elem, id);
+ elem->next = elem->prev = (struct listmp_elem *)shared_elem;
+#if 0
+	/* Note: obj is not in scope here; re-enabling this block would
+	 * require passing in the owning object's cache_enabled flag. */
+	if (unlikely(obj->cache_enabled)) {
+		Cache_inv((void *)elem,
+			sizeof(struct listmp_elem), Cache_Type_ALL, true);
+	}
+#endif
+}
+
+/* Creates a new instance of the listmp module. This is an internal
+ * function because both listmp_create and listmp_open use the same
+ * functionality. */
+static int _listmp_create(struct listmp_object **handle_ptr,
+ struct listmp_params *params, u32 create_flag)
+{
+ int status = 0;
+ struct listmp_object *obj = NULL;
+ void *local_addr = NULL;
+ u32 *shared_shm_base;
+ struct listmp_params sparams;
+ u16 name_len;
+
+ if (WARN_ON(unlikely(handle_ptr == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(params == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+	/* Allow the local lock to be omitted; no protection is done if a
+	 * local lock is not provided.
+	 */
+ /* Create the handle */
+ obj = kzalloc(sizeof(struct listmp_object), GFP_KERNEL);
+ *handle_ptr = obj;
+ if (obj == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ /* Populate the params member */
+ memcpy((void *)&obj->params, (void *)params,
+ sizeof(struct listmp_params));
+
+ if (create_flag == false) {
+ /* Update attrs */
+ obj->attrs = (struct listmp_attrs *)params->shared_addr;
+ obj->region_id = sharedregion_get_id((void *)&obj->attrs->head);
+ obj->cache_enabled = sharedregion_is_cache_enabled(
+ obj->region_id);
+ /* get the local address of the SRPtr */
+ local_addr = sharedregion_get_ptr(obj->attrs->gatemp_addr);
+ status = gatemp_open_by_addr(local_addr, &(obj->gatemp_handle));
+ if (status < 0)
+ goto error;
+ } else {
+ INIT_LIST_HEAD(&obj->list_elem);
+
+ /* init the gate */
+ if (params->gatemp_handle != NULL)
+ obj->gatemp_handle = params->gatemp_handle;
+ else
+ obj->gatemp_handle = gatemp_get_default_remote();
+ if (obj->gatemp_handle == NULL)
+ goto error;
+
+ if (params->shared_addr == NULL) {
+ obj->region_id = params->region_id;
+ obj->cache_enabled = sharedregion_is_cache_enabled(
+ obj->region_id);
+
+ listmp_params_init(&sparams);
+ sparams.region_id = params->region_id;
+ obj->alloc_size = listmp_shared_mem_req(&sparams);
+
+			/* HeapMemMP will do the alignment */
+ obj->attrs = sl_heap_alloc(
+ sharedregion_get_heap(obj->region_id),
+ obj->alloc_size,
+ 0);
+ if (obj->attrs == NULL) {
+ status = -ENOMEM;
+ goto error;
+ }
+ } else {
+ obj->region_id = sharedregion_get_id(
+ params->shared_addr);
+ if (unlikely(obj->region_id == \
+ SHAREDREGION_INVALIDREGIONID)) {
+ status = -1;
+ goto error;
+ }
+ if (((u32) params->shared_addr % \
+ sharedregion_get_cache_line_size(
+ obj->region_id)) != 0) {
+ status = -EFAULT;
+ goto error;
+ }
+
+ obj->cache_enabled = sharedregion_is_cache_enabled(
+ obj->region_id);
+ obj->attrs = (struct listmp_attrs *)params->shared_addr;
+ }
+
+ _listmp_elem_clear((struct listmp_elem *)&obj->attrs->head);
+ obj->attrs->gatemp_addr = gatemp_get_shared_addr(
+ obj->gatemp_handle);
+#if 0
+ if (unlikely(obj->cache_enabled)) {
+ Cache_inv((void *)obj->attrs,
+ sizeof(struct listmp_attrs), Cache_Type_ALL,
+ true);
+ }
+#endif
+ if (obj->params.name != NULL) {
+ name_len = strlen(obj->params.name) + 1;
+ /* Copy the name */
+ obj->params.name = kmalloc(name_len, GFP_KERNEL);
+ if (obj->params.name == NULL) {
+				/* Memory allocation for the name failed */
+ status = -ENOMEM;
+ goto error;
+ }
+ strncpy(obj->params.name, params->name, name_len);
+ shared_shm_base = sharedregion_get_srptr((void *)
+ obj->attrs, obj->region_id);
+ WARN_ON(shared_shm_base == SHAREDREGION_INVALIDSRPTR);
+
+ /* Add list instance to name server */
+ obj->ns_key = nameserver_add_uint32(
+ listmp_module->ns_handle, params->name,
+ (u32)shared_shm_base);
+ if (unlikely(obj->ns_key == NULL)) {
+ status = -EFAULT;
+ goto error;
+ }
+ }
+ obj->attrs->status = LISTMP_CREATED;
+ }
+
+ /* Update owner and opener details */
+ if (create_flag == true) {
+ obj->owner.creator = true;
+ obj->owner.open_count = 1;
+ obj->owner.proc_id = multiproc_self();
+ } else {
+ obj->owner.creator = false;
+ obj->owner.open_count = 0;
+ obj->owner.proc_id = MULTIPROC_INVALIDID;
+ }
+ obj->top = obj;
+
+ /* Put in the module list */
+ /* Function is called already with mutex acquired. So, no need to lock
+ * here */
+ INIT_LIST_HEAD(&obj->list_elem);
+ list_add_tail((&obj->list_elem), &listmp_module->obj_list);
+ return 0;
+
+error:
+ if (status < 0) {
+ if (create_flag == true) {
+ if (obj->params.name != NULL) {
+ if (obj->ns_key != NULL) {
+ nameserver_remove_entry(
+ listmp_module->ns_handle,
+ obj->ns_key);
+ }
+ kfree(obj->params.name);
+ }
+ if (params->shared_addr == NULL) {
+ if (obj->attrs != NULL) {
+ sl_heap_free(sharedregion_get_heap(
+ obj->region_id),
+ (void *)obj->attrs,
+ obj->alloc_size);
+ }
+ }
+ }
+ kfree(obj);
+ obj = NULL;
+ }
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "_listmp_create failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/listmp_ioctl.c b/drivers/dsp/syslink/multicore_ipc/listmp_ioctl.c
new file mode 100644
index 000000000000..7b2b50e4200f
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/listmp_ioctl.c
@@ -0,0 +1,564 @@
+/*
+ * listmp_ioctl.c
+ *
+ * This file implements all the ioctl operations required on the
+ * listmp module.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Linux headers */
+#include <linux/uaccess.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+/* Module Headers */
+#include <listmp.h>
+#include <_listmp.h>
+#include <listmp_ioctl.h>
+#include <sharedregion.h>
+
+/* ioctl interface to listmp_get_config function */
+static inline int listmp_ioctl_get_config(struct listmp_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct listmp_config config;
+
+ listmp_get_config(&config);
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct listmp_config));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = 0;
+exit:
+ return retval;
+}
+
+/* ioctl interface to listmp_setup function */
+static inline int listmp_ioctl_setup(struct listmp_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct listmp_config config;
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct listmp_config));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = listmp_setup(&config);
+
+exit:
+ return retval;
+}
+
+/* ioctl interface to listmp_destroy function */
+static inline int listmp_ioctl_destroy(struct listmp_cmd_args *cargs)
+{
+ cargs->api_status = listmp_destroy();
+ return 0;
+}
+
+/* ioctl interface to listmp_params_init function */
+static inline int listmp_ioctl_params_init(struct listmp_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct listmp_params params;
+
+ size = copy_from_user(&params,
+ cargs->args.params_init.params,
+ sizeof(struct listmp_params));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ listmp_params_init(&params);
+ size = copy_to_user(cargs->args.params_init.params, &params,
+ sizeof(struct listmp_params));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = 0;
+
+exit:
+ return retval;
+}
+
+/* ioctl interface to listmp_create function */
+static inline int listmp_ioctl_create(struct listmp_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct listmp_params params;
+
+ size = copy_from_user(&params, cargs->args.create.params,
+ sizeof(struct listmp_params));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ /* Allocate memory for the name */
+ if (cargs->args.create.name_len > 0) {
+ params.name = kmalloc(cargs->args.create.name_len, GFP_KERNEL);
+ if (params.name == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+ /* Copy the name */
+ size = copy_from_user(params.name,
+ cargs->args.create.params->name,
+ cargs->args.create.name_len);
+ if (size) {
+ retval = -EFAULT;
+ goto free_name;
+ }
+ }
+
+ params.shared_addr = sharedregion_get_ptr(
+ (u32 *)cargs->args.create.shared_addr_srptr);
+
+ /* Update gate in params. */
+ params.gatemp_handle = cargs->args.create.knl_gate;
+ cargs->args.create.listmp_handle = listmp_create(&params);
+
+ size = copy_to_user(cargs->args.create.params, &params,
+ sizeof(struct listmp_params));
+ if (!size)
+ goto free_name;
+
+ /* Error copying, so delete the handle */
+ retval = -EFAULT;
+ if (cargs->args.create.listmp_handle)
+ listmp_delete(&cargs->args.create.listmp_handle);
+
+free_name:
+ if (cargs->args.create.name_len > 0)
+ kfree(params.name);
+
+ cargs->api_status = 0;
+exit:
+ return retval;
+}
+
+/* ioctl interface to listmp_delete function */
+static inline int listmp_ioctl_delete(struct listmp_cmd_args *cargs)
+{
+ cargs->api_status = listmp_delete(
+ &(cargs->args.delete_instance.listmp_handle));
+ return 0;
+}
+
+/* ioctl interface to listmp_open function */
+static inline int listmp_ioctl_open(struct listmp_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ char *name = NULL;
+ void *listmp_handle = NULL;
+
+ if (cargs->args.open.name_len > 0) {
+ name = kmalloc(cargs->args.open.name_len, GFP_KERNEL);
+ if (name == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+ /* Copy the name */
+ size = copy_from_user(name, cargs->args.open.name,
+ cargs->args.open.name_len);
+ if (size) {
+ retval = -EFAULT;
+ goto free_name;
+ }
+ }
+
+ /* Update gate in params. */
+ cargs->api_status = listmp_open(name, &listmp_handle);
+ cargs->args.open.listmp_handle = listmp_handle;
+
+free_name:
+ if (cargs->args.open.name_len > 0)
+ kfree(name);
+exit:
+ return retval;
+}
+
+/* ioctl interface to listmp_open_by_addr function */
+static inline int listmp_ioctl_open_by_addr(struct listmp_cmd_args *cargs)
+{
+ s32 retval = 0;
+ void *listmp_handle = NULL;
+ void *shared_addr = NULL;
+
+	/* For open_by_addr, the shared_addr_srptr may be invalid */
+ if (cargs->args.open_by_addr.shared_addr_srptr != \
+ (u32)SHAREDREGION_INVALIDSRPTR) {
+ shared_addr = sharedregion_get_ptr((u32 *)
+ cargs->args.open_by_addr.shared_addr_srptr);
+ }
+
+ /* Update gate in params. */
+ cargs->api_status = listmp_open_by_addr(shared_addr, &listmp_handle);
+ cargs->args.open_by_addr.listmp_handle = listmp_handle;
+
+ return retval;
+}
+
+/* ioctl interface to listmp_close function */
+static inline int listmp_ioctl_close(struct listmp_cmd_args *cargs)
+{
+ cargs->api_status = listmp_close(&cargs->args.close.listmp_handle);
+ return 0;
+}
+
+/* ioctl interface to listmp_empty function */
+static inline int listmp_ioctl_isempty(struct listmp_cmd_args *cargs)
+{
+ cargs->args.is_empty.is_empty = \
+ listmp_empty(cargs->args.is_empty.listmp_handle);
+ cargs->api_status = 0;
+ return 0;
+}
+
+/* ioctl interface to listmp_get_head function */
+static inline int listmp_ioctl_get_head(struct listmp_cmd_args *cargs)
+{
+ struct listmp_elem *elem;
+ u32 *elem_srptr = SHAREDREGION_INVALIDSRPTR;
+ int index;
+
+ cargs->api_status = LISTMP_E_FAIL;
+
+ elem = listmp_get_head(cargs->args.get_head.listmp_handle);
+ if (unlikely(elem == NULL))
+ goto exit;
+
+ index = sharedregion_get_id(elem);
+ if (unlikely(index < 0))
+ goto exit;
+
+ elem_srptr = sharedregion_get_srptr((void *)elem, index);
+ cargs->api_status = 0;
+
+exit:
+ cargs->args.get_head.elem_srptr = elem_srptr;
+ return 0;
+}
+
+/* ioctl interface to listmp_get_tail function */
+static inline int listmp_ioctl_get_tail(struct listmp_cmd_args *cargs)
+{
+ struct listmp_elem *elem;
+ u32 *elem_srptr = SHAREDREGION_INVALIDSRPTR;
+ int index;
+
+ cargs->api_status = LISTMP_E_FAIL;
+
+ elem = listmp_get_tail(cargs->args.get_tail.listmp_handle);
+ if (unlikely(elem == NULL))
+ goto exit;
+
+ index = sharedregion_get_id(elem);
+ if (unlikely(index < 0))
+ goto exit;
+
+ elem_srptr = sharedregion_get_srptr((void *)elem, index);
+ cargs->api_status = 0;
+
+exit:
+ cargs->args.get_tail.elem_srptr = elem_srptr;
+ return 0;
+}
+
+/* ioctl interface to listmp_put_head function */
+static inline int listmp_ioctl_put_head(struct listmp_cmd_args *cargs)
+{
+ struct listmp_elem *elem;
+
+ elem = (struct listmp_elem *) sharedregion_get_ptr(
+ cargs->args.put_head.elem_srptr);
+ cargs->api_status = listmp_put_head(
+ cargs->args.put_head.listmp_handle, elem);
+
+ return 0;
+}
+
+/* ioctl interface to listmp_put_tail function */
+static inline int listmp_ioctl_put_tail(struct listmp_cmd_args *cargs)
+{
+ struct listmp_elem *elem;
+
+ elem = (struct listmp_elem *) sharedregion_get_ptr(
+ cargs->args.put_tail.elem_srptr);
+ cargs->api_status = listmp_put_tail(
+			cargs->args.put_tail.listmp_handle, elem);
+
+ return 0;
+}
+
+/* ioctl interface to listmp_insert function */
+static inline int listmp_ioctl_insert(struct listmp_cmd_args *cargs)
+{
+ struct listmp_elem *new_elem;
+ struct listmp_elem *cur_elem;
+ int status = -1;
+
+ new_elem = (struct listmp_elem *) sharedregion_get_ptr(
+ cargs->args.insert.new_elem_srptr);
+ if (unlikely(new_elem == NULL))
+ goto exit;
+
+ cur_elem = (struct listmp_elem *) sharedregion_get_ptr(
+ cargs->args.insert.cur_elem_srptr);
+ if (unlikely(cur_elem == NULL))
+ goto exit;
+
+ status = listmp_insert(cargs->args.insert.listmp_handle, new_elem,
+ cur_elem);
+exit:
+ cargs->api_status = status;
+ return 0;
+}
+
+/* ioctl interface to listmp_remove function */
+static inline int listmp_ioctl_remove(struct listmp_cmd_args *cargs)
+{
+ struct listmp_elem *elem;
+
+ elem = (struct listmp_elem *) sharedregion_get_ptr(
+ cargs->args.remove.elem_srptr);
+ cargs->api_status = listmp_remove(
+			cargs->args.remove.listmp_handle, elem);
+
+ return 0;
+}
+
+/* ioctl interface to listmp_next function */
+static inline int listmp_ioctl_next(struct listmp_cmd_args *cargs)
+{
+ struct listmp_elem *elem = NULL;
+ struct listmp_elem *ret_elem = NULL;
+ u32 *next_elem_srptr = SHAREDREGION_INVALIDSRPTR;
+ int index;
+
+ if (cargs->args.next.elem_srptr != NULL) {
+ elem = (struct listmp_elem *) sharedregion_get_ptr(
+ cargs->args.next.elem_srptr);
+ }
+ ret_elem = (struct listmp_elem *) listmp_next(
+ cargs->args.next.listmp_handle, elem);
+ if (unlikely(ret_elem == NULL))
+ goto exit;
+
+ index = sharedregion_get_id(ret_elem);
+ if (unlikely(index < 0))
+ goto exit;
+
+ next_elem_srptr = sharedregion_get_srptr((void *)ret_elem, index);
+ cargs->api_status = 0;
+
+exit:
+ cargs->args.next.next_elem_srptr = next_elem_srptr;
+ return 0;
+}
+
+/* ioctl interface to listmp_prev function */
+static inline int listmp_ioctl_prev(struct listmp_cmd_args *cargs)
+{
+ struct listmp_elem *elem = NULL;
+ struct listmp_elem *ret_elem = NULL;
+ u32 *prev_elem_srptr = SHAREDREGION_INVALIDSRPTR;
+ int index;
+
+	if (cargs->args.prev.elem_srptr != NULL) {
+ elem = (struct listmp_elem *) sharedregion_get_ptr(
+ cargs->args.prev.elem_srptr);
+ }
+ ret_elem = (struct listmp_elem *) listmp_prev(
+ cargs->args.prev.listmp_handle, elem);
+ if (unlikely(ret_elem == NULL))
+ goto exit;
+
+ index = sharedregion_get_id(ret_elem);
+ if (unlikely(index < 0))
+ goto exit;
+
+ prev_elem_srptr = sharedregion_get_srptr((void *)ret_elem, index);
+ cargs->api_status = 0;
+
+exit:
+ cargs->args.prev.prev_elem_srptr = prev_elem_srptr;
+ return 0;
+}
+
+/* ioctl interface to listmp_shared_mem_req function */
+static inline int listmp_ioctl_shared_mem_req(struct listmp_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct listmp_params params;
+
+ size = copy_from_user(&params, cargs->args.shared_mem_req.params,
+ sizeof(struct listmp_params));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ params.shared_addr = sharedregion_get_ptr(
+ cargs->args.shared_mem_req.shared_addr_srptr);
+ cargs->args.shared_mem_req.bytes = listmp_shared_mem_req(&params);
+ cargs->api_status = 0;
+
+exit:
+ return retval;
+}
+
+/* ioctl interface function for listmp module */
+int listmp_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ int os_status = 0;
+ struct listmp_cmd_args __user *uarg =
+ (struct listmp_cmd_args __user *)args;
+ struct listmp_cmd_args cargs;
+ unsigned long size;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ os_status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ os_status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+ if (os_status) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg, sizeof(struct listmp_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_LISTMP_GETCONFIG:
+ os_status = listmp_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_LISTMP_SETUP:
+ os_status = listmp_ioctl_setup(&cargs);
+ break;
+
+ case CMD_LISTMP_DESTROY:
+ os_status = listmp_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_LISTMP_PARAMS_INIT:
+ os_status = listmp_ioctl_params_init(&cargs);
+ break;
+
+ case CMD_LISTMP_CREATE:
+ os_status = listmp_ioctl_create(&cargs);
+ break;
+
+ case CMD_LISTMP_DELETE:
+ os_status = listmp_ioctl_delete(&cargs);
+ break;
+
+ case CMD_LISTMP_OPEN:
+ os_status = listmp_ioctl_open(&cargs);
+ break;
+
+ case CMD_LISTMP_CLOSE:
+ os_status = listmp_ioctl_close(&cargs);
+ break;
+
+ case CMD_LISTMP_ISEMPTY:
+ os_status = listmp_ioctl_isempty(&cargs);
+ break;
+
+ case CMD_LISTMP_GETHEAD:
+ os_status = listmp_ioctl_get_head(&cargs);
+ break;
+
+ case CMD_LISTMP_GETTAIL:
+ os_status = listmp_ioctl_get_tail(&cargs);
+ break;
+
+ case CMD_LISTMP_PUTHEAD:
+ os_status = listmp_ioctl_put_head(&cargs);
+ break;
+
+ case CMD_LISTMP_PUTTAIL:
+ os_status = listmp_ioctl_put_tail(&cargs);
+ break;
+
+ case CMD_LISTMP_INSERT:
+ os_status = listmp_ioctl_insert(&cargs);
+ break;
+
+ case CMD_LISTMP_REMOVE:
+ os_status = listmp_ioctl_remove(&cargs);
+ break;
+
+ case CMD_LISTMP_NEXT:
+ os_status = listmp_ioctl_next(&cargs);
+ break;
+
+ case CMD_LISTMP_PREV:
+ os_status = listmp_ioctl_prev(&cargs);
+ break;
+
+ case CMD_LISTMP_SHAREDMEMREQ:
+ os_status = listmp_ioctl_shared_mem_req(&cargs);
+ break;
+
+ case CMD_LISTMP_OPENBYADDR:
+ os_status = listmp_ioctl_open_by_addr(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ os_status = -ENOTTY;
+ break;
+ }
+ if (os_status < 0)
+ goto exit;
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs, sizeof(struct listmp_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+ return os_status;
+
+exit:
+ printk(KERN_ERR "listmp_ioctl failed: status = 0x%x\n", os_status);
+ return os_status;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/messageq.c b/drivers/dsp/syslink/multicore_ipc/messageq.c
new file mode 100644
index 000000000000..38d91091447d
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/messageq.c
@@ -0,0 +1,1618 @@
+/*
+ * messageq.c
+ *
+ * The MessageQ module supports the structured sending and receiving of
+ * variable length messages. This module can be used for homogeneous or
+ * heterogeneous multi-processor messaging.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/*!
+ * MessageQ provides more sophisticated messaging than other modules. It is
+ * typically used for complex situations such as multi-processor messaging.
+ *
+ * The following are key features of the MessageQ module:
+ * -Writers and readers can be relocated to another processor with no
+ * runtime code changes.
+ * -Timeouts are allowed when receiving messages.
+ * -Readers can determine the writer and reply back.
+ * -Receiving a message is deterministic when the timeout is zero.
+ * -Messages can reside on any message queue.
+ * -Supports zero-copy transfers.
+ * -Can send and receive from any type of thread.
+ * -Notification mechanism is specified by application.
+ * -Allows QoS (quality of service) on message buffer pools. For example,
+ * using specific buffer pools for specific message queues.
+ *
+ * Messages are sent and received via a message queue. A reader is a thread
+ * that gets (reads) messages from a message queue. A writer is a thread that
+ * puts (writes) a message to a message queue. Each message queue has one
+ * reader and can have many writers. A thread may read from or write to
+ * multiple message queues.
+ *
+ * Conceptually, the reader thread owns a message queue: the reader
+ * thread creates the message queue. Writer threads open a created
+ * message queue to get access to it.
+ *
+ * Message queues are identified by a system-wide unique name. Internally,
+ * MessageQ uses the NameServer module for managing
+ * these names. The names are used for opening a message queue. Using
+ * names is not required.
+ *
+ * Messages must be allocated from the MessageQ module. Once a message is
+ * allocated, it can be sent on any message queue. Once a message is sent, the
+ * writer loses ownership of the message and should not attempt to modify the
+ * message. Once the reader receives the message, it owns the message. It
+ * may either free the message or re-use the message.
+ *
+ * Messages in a message queue can be of variable length. The only
+ * requirement is that the first field in the definition of a message must be a
+ * MsgHeader structure. For example:
+ * typedef struct MyMsg {
+ * messageq_MsgHeader header;
+ * ...
+ * } MyMsg;
+ *
+ * The MessageQ API uses the messageq_MsgHeader internally. Your application
+ * should not modify or directly access the fields in the messageq_MsgHeader.
+ *
+ * All messages sent via the MessageQ module must be allocated from a
+ * Heap implementation. The heap can be used for
+ * other memory allocation not related to MessageQ.
+ *
+ * An application can use multiple heaps. The purpose of having multiple
+ * heaps is to allow an application to regulate its message usage. For
+ * example, an application can allocate critical messages from one heap of fast
+ * on-chip memory and non-critical messages from another heap of slower
+ * external memory.
+ *
+ * MessageQ also supports the usage of messages that are not allocated
+ * via the alloc function. Please refer to the static_msg_init
+ * function description for more details.
+ *
+ * In a multi-processor system, MessageQ communicates with other
+ * processors via IMessageQ_transport instances. There must be one and
+ * only one IMessageQ_transport instance for each remote processor with
+ * which communication is desired. So on a four-processor system, each
+ * processor must have three IMessageQ_transport instances.
+ *
+ * The user only needs to create the IMessageQ_transport instances. The
+ * instances are responsible for registering themselves with MessageQ.
+ * This is accomplished via the register_transport function.
+ */
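+
+/*
+ * A minimal usage sketch of the reader/writer flow described above
+ * (illustrative only: the queue name, heap id and message type are
+ * hypothetical, and setup, heap registration and error handling are
+ * omitted):
+ *
+ *	// Reader side: create the queue and block for a message.
+ *	void *queue = messageq_create("myQ", NULL);
+ *	messageq_msg msg;
+ *	messageq_get(queue, &msg, MESSAGEQ_FOREVER);
+ *	messageq_free(msg);
+ *
+ *	// Writer side: open by name, allocate from heap 0, send.
+ *	u32 queue_id;
+ *	messageq_open("myQ", &queue_id);
+ *	msg = messageq_alloc(0, sizeof(struct my_msg));
+ *	messageq_put(queue_id, msg);
+ */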
+
+
+
+/* Standard headers */
+#include <linux/types.h>
+#include <linux/module.h>
+
+/* Utilities headers */
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+
+/* Syslink headers */
+#include <syslink/atomic_linux.h>
+
+/* Module level headers */
+#include <nameserver.h>
+#include <multiproc.h>
+#include <transportshm_setup_proxy.h>
+#include <heap.h>
+#include <messageq.h>
+#include <transportshm.h>
+
+
+/* Macro to make a correct module magic number with refCount */
+#define MESSAGEQ_MAKE_MAGICSTAMP(x) ((MESSAGEQ_MODULEID << 12u) | (x))
+
+/* =============================================================================
+ * Globals
+ * =============================================================================
+ */
+/*!
+ * @brief Name of the reserved NameServer used for MessageQ.
+ */
+#define MESSAGEQ_NAMESERVER "MessageQ"
+
+/*! Mask to extract priority setting */
+#define MESSAGEQ_TRANSPORTPRIORITYMASK 0x1
+
+/* =============================================================================
+ * Structures & Enums
+ * =============================================================================
+ */
+/* structure for MessageQ module state */
+struct messageq_module_object {
+ atomic_t ref_count;
+ /* Reference count */
+ void *ns_handle;
+ /* Handle to the local NameServer used for storing GP objects */
+ struct mutex *gate_handle;
+ /* Handle of gate to be used for local thread safety */
+ struct messageq_config cfg;
+ /* Current config values */
+ struct messageq_config default_cfg;
+ /* Default config values */
+ struct messageq_params default_inst_params;
+ /* Default instance creation parameters */
+ void *transports[MULTIPROC_MAXPROCESSORS][MESSAGEQ_NUM_PRIORITY_QUEUES];
+ /* Transport to be set in messageq_register_transport */
+	void **queues; /*messageq_handle *queues;*/
+	/* Array of created message queues */
+	void **heaps; /*Heap_Handle *heaps; */
+	/* Heaps to be set in messageq_register_heap */
+	u16 num_queues;
+	/* Number of queues */
+	u16 num_heaps;
+	/* Number of heaps */
+	bool can_free_queues;
+	/* Grow option */
+ u16 seq_num;
+ /* sequence number */
+};
+
+/* Structure for the Handle for the MessageQ. */
+struct messageq_object {
+ struct messageq_params params;
+ /*! Instance specific creation parameters */
+ u32 queue;
+ /* Unique id */
+ struct list_head normal_list;
+ /* Embedded List objects */
+ struct list_head high_list;
+ /* Embedded List objects */
+ void *ns_key;
+ /* NameServer key */
+ struct semaphore *synchronizer;
+ /* Semaphore used for synchronizing message events */
+};
+
+
+static struct messageq_module_object messageq_state = {
+ .ns_handle = NULL,
+ .gate_handle = NULL,
+ .queues = NULL,
+ .heaps = NULL,
+ .num_queues = 1,
+ .num_heaps = 1,
+ .can_free_queues = false,
+ .default_cfg.trace_flag = false,
+ .default_cfg.num_heaps = 1,
+ .default_cfg.max_runtime_entries = 32,
+ .default_cfg.max_name_len = 32,
+ .default_inst_params.synchronizer = NULL
+};
+
+/* Pointer to the MessageQ module state */
+static struct messageq_module_object *messageq_module = &messageq_state;
+
+/* =============================================================================
+ * Constants
+ * =============================================================================
+ */
+/* Used to denote a message that was initialized
+ * with the messageq_static_msg_init function. */
+#define MESSAGEQ_STATICMSG 0xFFFF
+
+
+/* =============================================================================
+ * Forward declarations of internal functions
+ * =============================================================================
+ */
+/* Grow the MessageQ table */
+static u16 _messageq_grow(struct messageq_object *obj);
+
+/* Initializes a message not obtained from MessageQ_alloc */
+static void messageq_msg_init(messageq_msg msg);
+
+/* =============================================================================
+ * APIS
+ * =============================================================================
+ */
+/*
+ * ======== messageq_get_config ========
+ * Purpose:
+ * Function to get the default configuration for the MessageQ
+ * module.
+ *
+ *      This function can be called by the application to get its
+ *      configuration parameters for messageq_setup filled in by the
+ *      MessageQ module with the default parameters. If the user does
+ *      not wish to change any of the default parameters, this API
+ *      need not be called.
+ */
+void messageq_get_config(struct messageq_config *cfg)
+{
+ if (WARN_ON(unlikely(cfg == NULL)))
+ goto exit;
+
+ if (likely(atomic_cmpmask_and_lt(&(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true)) {
+ /* (If setup has not yet been called) */
+ memcpy(cfg, &messageq_module->default_cfg,
+ sizeof(struct messageq_config));
+ } else {
+ memcpy(cfg, &messageq_module->cfg,
+ sizeof(struct messageq_config));
+ }
+ return;
+
+exit:
+ printk(KERN_ERR "messageq_get_config: Argument of type "
+ "(struct messageq_config *) passed is null!\n");
+}
+EXPORT_SYMBOL(messageq_get_config);
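+
+/*
+ * Typical call sequence for the pattern described above (a sketch;
+ * only max_runtime_entries is shown, but any field of struct
+ * messageq_config could be overridden the same way):
+ *
+ *	struct messageq_config cfg;
+ *
+ *	messageq_get_config(&cfg);
+ *	cfg.max_runtime_entries = 64;
+ *	messageq_setup(&cfg);
+ */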
+
+/*
+ * ======== messageq_setup ========
+ * Purpose:
+ * Function to setup the MessageQ module.
+ *
+ * This function sets up the MessageQ module. This function must
+ * be called before any other instance-level APIs can be invoked.
+ * Module-level configuration needs to be provided to this
+ *      parameters, messageq_get_config can be called to get the
+ *      configuration filled with the default values; after this, only
+ *      the required configuration values need to be changed. If the
+ *      user does not wish to change the default parameters, the
+ *      application can simply call messageq_setup with NULL parameters;
+ *      the default parameters are then used automatically.
+ */
+int messageq_setup(const struct messageq_config *cfg)
+{
+ int status = 0;
+ struct nameserver_params params;
+ struct messageq_config tmpcfg;
+
+	/* This initializes the ref_count variable if it is not yet
+	 * initialized; the upper 16 bits are written with the module id to
+	 * ensure correctness of the ref_count variable.
+	 */
+ atomic_cmpmask_and_set(&messageq_module->ref_count,
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(0));
+ if (unlikely(atomic_inc_return(&messageq_module->ref_count)
+ != MESSAGEQ_MAKE_MAGICSTAMP(1))) {
+ return 1;
+ }
+
+ if (unlikely(cfg == NULL)) {
+ messageq_get_config(&tmpcfg);
+ cfg = &tmpcfg;
+ }
+
+ if (WARN_ON(unlikely(cfg->max_name_len == 0))) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(cfg->max_runtime_entries == 0))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+	/* The user has not provided any gate handle, so create a default
+	 * handle for protecting the list objects */
+ messageq_module->gate_handle = kmalloc(sizeof(struct mutex),
+ GFP_KERNEL);
+ if (unlikely(messageq_module->gate_handle == NULL)) {
+		/*! @retval -ENOMEM Failed to create lock! */
+ printk(KERN_ERR "messageq_setup: Failed to create a "
+ "mutex.\n");
+ status = -ENOMEM;
+ goto exit;
+ }
+ mutex_init(messageq_module->gate_handle);
+
+ memcpy(&messageq_module->cfg, (void *) cfg,
+ sizeof(struct messageq_config));
+ /* Initialize the parameters */
+ nameserver_params_init(&params);
+ params.max_value_len = sizeof(u32);
+ params.max_name_len = cfg->max_name_len;
+ params.max_runtime_entries = cfg->max_runtime_entries;
+
+ messageq_module->seq_num = 0;
+
+ /* Create the nameserver for modules */
+ messageq_module->ns_handle = nameserver_create(MESSAGEQ_NAMESERVER,
+ &params);
+ if (unlikely(messageq_module->ns_handle == NULL)) {
+ /*! @retval MESSAGEQ_E_FAIL Failed to create the
+ * MessageQ nameserver*/
+ status = MESSAGEQ_E_FAIL;
+		printk(KERN_ERR "messageq_setup: Failed to create the messageq "
+ "nameserver!\n");
+ goto exit;
+ }
+
+ messageq_module->num_heaps = cfg->num_heaps;
+ messageq_module->heaps = kzalloc(sizeof(void *) * \
+ messageq_module->num_heaps, GFP_KERNEL);
+ if (unlikely(messageq_module->heaps == NULL)) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ messageq_module->num_queues = cfg->max_runtime_entries;
+ messageq_module->queues = kzalloc(sizeof(struct messageq_object *) * \
+ messageq_module->num_queues, GFP_KERNEL);
+ if (unlikely(messageq_module->queues == NULL)) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ memset(&(messageq_module->transports), 0, (sizeof(void *) * \
+ MULTIPROC_MAXPROCESSORS * \
+ MESSAGEQ_NUM_PRIORITY_QUEUES));
+ return status;
+
+exit:
+ if (status < 0) {
+ messageq_destroy();
+ printk(KERN_ERR "messageq_setup failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_setup);
+
+/* Function to destroy the MessageQ module. */
+int messageq_destroy(void)
+{
+ int status = 0;
+ int tmp_status = 0;
+ u32 i;
+
+ if (unlikely(atomic_cmpmask_and_lt(&(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (!(atomic_dec_return(&messageq_module->ref_count)
+ == MESSAGEQ_MAKE_MAGICSTAMP(0))) {
+ status = 1;
+ goto exit;
+ }
+
+ /* Temporarily increment the refcount */
+ atomic_set(&messageq_module->ref_count, MESSAGEQ_MAKE_MAGICSTAMP(1));
+
+ /* Delete any Message Queues that have not been deleted so far. */
+ for (i = 0; i < messageq_module->num_queues; i++) {
+ if (messageq_module->queues[i] != NULL) {
+ tmp_status = \
+ messageq_delete(&(messageq_module->queues[i]));
+ if (unlikely(tmp_status < 0 && status >= 0)) {
+ status = tmp_status;
+ printk(KERN_ERR "messageq_destroy: "
+ "messageq_delete failed for queue %d",
+ i);
+ }
+ }
+ }
+
+ if (likely(messageq_module->ns_handle != NULL)) {
+ /* Delete the nameserver for modules */
+ tmp_status = nameserver_delete(&messageq_module->ns_handle);
+ if (unlikely(tmp_status < 0 && status >= 0)) {
+ status = tmp_status;
+ printk(KERN_ERR "messageq_destroy: "
+ "nameserver_delete failed");
+ }
+ }
+
+ /* Delete the gate if created internally */
+ if (likely(messageq_module->gate_handle != NULL)) {
+ kfree(messageq_module->gate_handle);
+ messageq_module->gate_handle = NULL;
+ }
+
+ memset(&(messageq_module->transports), 0, (sizeof(void *) * \
+ MULTIPROC_MAXPROCESSORS * MESSAGEQ_NUM_PRIORITY_QUEUES));
+ if (likely(messageq_module->heaps != NULL)) {
+ kfree(messageq_module->heaps);
+ messageq_module->heaps = NULL;
+ }
+ if (likely(messageq_module->queues != NULL)) {
+ kfree(messageq_module->queues);
+ messageq_module->queues = NULL;
+ }
+
+ memset(&messageq_module->cfg, 0, sizeof(struct messageq_config));
+ messageq_module->num_queues = 0;
+ messageq_module->num_heaps = 1;
+ messageq_module->can_free_queues = true;
+ atomic_set(&messageq_module->ref_count, MESSAGEQ_MAKE_MAGICSTAMP(0));
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_destroy failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_destroy);
+
+/* Initialize this config-params structure with supplier-specified
+ * defaults before instance creation. */
+void messageq_params_init(struct messageq_params *params)
+{
+ if (unlikely(atomic_cmpmask_and_lt(&(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+ if (WARN_ON(unlikely(params == NULL))) {
+ printk(KERN_ERR "messageq_params_init failed:Argument of "
+ "type(messageq_params *) is NULL!\n");
+ goto exit;
+ }
+
+ memcpy(params, &(messageq_module->default_inst_params),
+ sizeof(struct messageq_params));
+
+exit:
+ return;
+}
+EXPORT_SYMBOL(messageq_params_init);
+
+/* Creates a new instance of MessageQ module. */
+void *messageq_create(char *name, const struct messageq_params *params)
+{
+ int status = 0;
+ struct messageq_object *obj = NULL;
+ bool found = false;
+ u16 count = 0;
+ int i;
+ u16 start;
+ u16 queueIndex = 0;
+
+ if (unlikely(atomic_cmpmask_and_lt(&(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ /* Create the generic obj */
+	obj = kzalloc(sizeof(struct messageq_object), GFP_KERNEL);
+ if (unlikely(obj == NULL)) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ status = mutex_lock_interruptible(messageq_module->gate_handle);
+ if (status)
+ goto exit;
+ start = 0; /* Statically allocated objects not supported */
+ count = messageq_module->num_queues;
+ /* Search the dynamic array for any holes */
+ for (i = start; i < count ; i++) {
+ if (messageq_module->queues[i] == NULL) {
+ messageq_module->queues[i] = (void *) obj;
+ queueIndex = i;
+ found = true;
+ break;
+ }
+ }
+ /*
+ * If no free slot was found:
+ * - if no growth allowed, raise an error
+ * - if growth is allowed, grow the array
+ */
+ if (unlikely(found == false)) {
+ /* Growth is always allowed */
+ queueIndex = _messageq_grow(obj);
+ if (unlikely(queueIndex == MESSAGEQ_INVALIDMESSAGEQ)) {
+ mutex_unlock(messageq_module->gate_handle);
+ status = MESSAGEQ_E_FAIL;
+ printk(KERN_ERR "messageq_create: Failed to grow the "
+ "queue array!");
+ goto exit;
+ }
+ }
+
+ if (params != NULL) {
+ /* Populate the params member */
+ memcpy((void *) &obj->params, (void *)params,
+ sizeof(struct messageq_params));
+ if (unlikely(params->synchronizer == NULL))
+ obj->synchronizer = \
+ kzalloc(sizeof(struct semaphore), GFP_KERNEL);
+ else
+ obj->synchronizer = params->synchronizer;
+ } else {
+ /*obj->synchronizer = OsalSemaphore_create(
+ OsalSemaphore_Type_Binary
+ | OsalSemaphore_IntType_Interruptible);*/
+ obj->synchronizer = kzalloc(sizeof(struct semaphore),
+ GFP_KERNEL);
+ }
+ if (unlikely(obj->synchronizer == NULL)) {
+ mutex_unlock(messageq_module->gate_handle);
+ status = MESSAGEQ_E_FAIL;
+ printk(KERN_ERR "messageq_create: Failed to create "
+ "synchronizer semaphore!\n");
+ goto exit;
+ } else {
+ sema_init(obj->synchronizer, 0);
+ }
+ mutex_unlock(messageq_module->gate_handle);
+
+ /* Construct the list object */
+ INIT_LIST_HEAD(&obj->normal_list);
+ INIT_LIST_HEAD(&obj->high_list);
+
+ /* Update processor information */
+ obj->queue = ((u32)(multiproc_self()) << 16) | queueIndex;
+ if (likely(name != NULL)) {
+ obj->ns_key = nameserver_add_uint32(messageq_module->ns_handle,
+ name, obj->queue);
+ if (unlikely(obj->ns_key == NULL)) {
+ status = MESSAGEQ_E_FAIL;
+ printk(KERN_ERR "messageq_create: Failed to add "
+ "the messageq name!\n");
+ }
+ }
+
+exit:
+ if (unlikely(status < 0)) {
+ messageq_delete((void **)&obj);
+ printk(KERN_ERR "messageq_create failed! status = 0x%x\n",
+ status);
+ }
+ return (void *) obj;
+}
+EXPORT_SYMBOL(messageq_create);
+
+/* Deletes an instance of the MessageQ module. */
+int messageq_delete(void **msg_handleptr)
+{
+ int status = 0;
+ int tmp_status = 0;
+ struct messageq_object *obj = NULL;
+ messageq_msg temp_msg;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(msg_handleptr == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(*msg_handleptr == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct messageq_object *) (*msg_handleptr);
+
+ /* Take the local lock */
+ status = mutex_lock_interruptible(messageq_module->gate_handle);
+ if (status)
+ goto exit;
+
+ if (unlikely(obj->ns_key != NULL)) {
+ /* remove from the name serve */
+ status = nameserver_remove_entry(messageq_module->ns_handle,
+ obj->ns_key);
+ if (unlikely(status < 0)) {
+ printk(KERN_ERR "messageq_delete: nameserver_remove_"
+ "entry failed! status = 0x%x", status);
+ }
+ }
+
+ /* Remove all the messages for the message queue's normal_list queue
+ * and free the list */
+	while (!list_empty(&obj->normal_list)) {
+		temp_msg = (messageq_msg) (obj->normal_list.next);
+		list_del_init(obj->normal_list.next);
+		tmp_status = messageq_free(temp_msg);
+		if (unlikely((tmp_status < 0) && (status >= 0))) {
+			status = tmp_status;
+			printk(KERN_ERR "messageq_delete: messageq_free failed"
+				" for normal_list!");
+		}
+	}
+ list_del(&obj->normal_list);
+
+	/* Remove all the messages for the message queue's high_list queue
+	 * and free the list */
+	while (!list_empty(&obj->high_list)) {
+		temp_msg = (messageq_msg) (obj->high_list.next);
+		list_del_init(obj->high_list.next);
+		tmp_status = messageq_free(temp_msg);
+		if (unlikely((tmp_status < 0) && (status >= 0))) {
+			status = tmp_status;
+			printk(KERN_ERR "messageq_delete: messageq_free failed"
+				" for high_list!");
+		}
+	}
+ list_del(&obj->high_list);
+
+ /*if (obj->synchronizer != NULL)
+ status = OsalSemaphore_delete(&obj->synchronizer);*/
+ if (obj->synchronizer != NULL) {
+ kfree(obj->synchronizer);
+ obj->synchronizer = NULL;
+ }
+ /* Clear the MessageQ obj from array. */
+ messageq_module->queues[obj->queue & 0xFFFF] = NULL;
+
+ /* Release the local lock */
+ mutex_unlock(messageq_module->gate_handle);
+
+ /* Now free the obj */
+ kfree(obj);
+ *msg_handleptr = NULL;
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_delete failed! status = 0x%x\n",
+			status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_delete);
+
+/* Opens a created instance of MessageQ module. */
+int messageq_open(char *name, u32 *queue_id)
+{
+ int status = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(name == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(queue_id == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* Initialize return queue ID to invalid. */
+ *queue_id = MESSAGEQ_INVALIDMESSAGEQ;
+ status = nameserver_get_uint32(messageq_module->ns_handle, name,
+ queue_id, NULL);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_open failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_open);
+
+/* Closes previously opened/created instance of MessageQ module. */
+int messageq_close(u32 *queue_id)
+{
+ s32 status = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(queue_id == NULL))) {
+ printk(KERN_ERR "messageq_close: queue_id passed is NULL!\n");
+ status = -EINVAL;
+ goto exit;
+ }
+
+ *queue_id = MESSAGEQ_INVALIDMESSAGEQ;
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_close failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_close);
+
+/* Retrieve a message */
+int messageq_get(void *messageq_handle, messageq_msg *msg,
+ u32 timeout)
+{
+ int status = 0;
+ struct messageq_object *obj = (struct messageq_object *)messageq_handle;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(msg == NULL))) {
+ status = -EINVAL;
+ goto exit;
+	}
+	*msg = NULL;
+ if (WARN_ON(unlikely(obj == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* Keep looping while there is no element in the list */
+ /* Take the local lock */
+ status = mutex_lock_interruptible(messageq_module->gate_handle);
+ if (status)
+ goto exit;
+ if (!list_empty(&obj->high_list)) {
+ *msg = (messageq_msg) (obj->high_list.next);
+ list_del_init(obj->high_list.next);
+ }
+ /* Leave the local lock */
+ mutex_unlock(messageq_module->gate_handle);
+ while (*msg == NULL) {
+ status = mutex_lock_interruptible(messageq_module->gate_handle);
+ if (status)
+ goto exit;
+ if (!list_empty(&obj->normal_list)) {
+ *msg = (messageq_msg) (obj->normal_list.next);
+ list_del_init(obj->normal_list.next);
+ }
+ mutex_unlock(messageq_module->gate_handle);
+
+ if (*msg == NULL) {
+ /*
+ * Block until notified. If pend times-out, no message
+ * should be returned to the caller
+ */
+ /*! @retval NULL timeout has occurred */
+ if (obj->synchronizer != NULL) {
+ /* TODO: cater to different timeout values */
+ /*status = OsalSemaphore_pend(
+ obj->synchronizer, timeout); */
+ if (timeout == MESSAGEQ_FOREVER) {
+ if (down_interruptible
+ (obj->synchronizer)) {
+ status = -ERESTARTSYS;
+ }
+ } else {
+ status = down_timeout(obj->synchronizer,
+ msecs_to_jiffies(timeout));
+ }
+ if (status < 0) {
+ *msg = NULL;
+ break;
+ }
+ }
+ status = mutex_lock_interruptible(
+ messageq_module->gate_handle);
+ if (status)
+ goto exit;
+ if (!list_empty(&obj->high_list)) {
+ *msg = (messageq_msg) (obj->high_list.next);
+ list_del_init(obj->high_list.next);
+ }
+ mutex_unlock(messageq_module->gate_handle);
+ }
+ }
+
+exit:
+	if (unlikely((msg != NULL) && (*msg != NULL) && \
+		((messageq_module->cfg.trace_flag == true) || \
+		(((*msg)->flags & MESSAGEQ_TRACEMASK) != 0)))) {
+ printk(KERN_INFO "messageq_get: *msg = 0x%x seq_num = 0x%x "
+ "src_proc = 0x%x obj = 0x%x\n", (uint)(*msg),
+ ((*msg)->seq_num), ((*msg)->src_proc), (uint)(obj));
+ }
+ if (status < 0 && status != -ETIME)
+ printk(KERN_ERR "messageq_get failed! status = 0x%x\n", status);
+ return status;
+}
+EXPORT_SYMBOL(messageq_get);
+
+/* Count the number of messages in the queue */
+int messageq_count(void *messageq_handle)
+{
+ struct messageq_object *obj = (struct messageq_object *)messageq_handle;
+ int count = 0;
+ struct list_head *elem;
+ int key;
+ s32 status = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(obj == NULL)) {
+ status = -EINVAL;
+ printk(KERN_ERR "messageq_count: obj passed is NULL!\n");
+ goto exit;
+ }
+
+ key = mutex_lock_interruptible(messageq_module->gate_handle);
+ if (key < 0)
+ return key;
+
+ list_for_each(elem, &obj->high_list) {
+ count++;
+ }
+ list_for_each(elem, &obj->normal_list) {
+ count++;
+ }
+ mutex_unlock(messageq_module->gate_handle);
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "messageq_count failed! status = 0x%x", status);
+ return count;
+}
+EXPORT_SYMBOL(messageq_count);
+
+/* Initialize a static message */
+void messageq_static_msg_init(messageq_msg msg, u32 size)
+{
+ s32 status = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(msg == NULL)) {
+ printk(KERN_ERR "messageq_static_msg_init: msg is invalid!\n");
+ goto exit;
+ }
+
+ /* Fill in the fields of the message */
+ messageq_msg_init(msg);
+ msg->heap_id = MESSAGEQ_STATICMSG;
+ msg->msg_size = size;
+
+ if (unlikely((messageq_module->cfg.trace_flag == true) || \
+ (((*msg).flags & MESSAGEQ_TRACEMASK) != 0))) {
+ printk(KERN_INFO "messageq_static_msg_init: msg = 0x%x "
+ "seq_num = 0x%x src_proc = 0x%x", (uint)(msg),
+ (msg)->seq_num, (msg)->src_proc);
+ }
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_static_msg_init failed! "
+ "status = 0x%x", status);
+ }
+ return;
+}
+EXPORT_SYMBOL(messageq_static_msg_init);
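+
+/*
+ * Illustrative sketch of initializing a statically allocated message
+ * (the MyMsg layout follows the module overview above; error handling
+ * omitted):
+ *
+ *	static MyMsg my_static_msg;
+ *
+ *	messageq_static_msg_init((messageq_msg)&my_static_msg,
+ *			sizeof(MyMsg));
+ *	// The message can now be sent with messageq_put() and must not
+ *	// be passed to messageq_free().
+ */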
+
+/* Allocate a message and initialize the needed fields (note: some
+ * of the fields in the header are set via other APIs or in the
+ * messageq_put function). */
+messageq_msg messageq_alloc(u16 heap_id, u32 size)
+{
+ int status = 0;
+ messageq_msg msg = NULL;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(heap_id >= messageq_module->num_heaps))) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(messageq_module->heaps[heap_id] == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* Allocate the message. No alignment requested */
+ msg = sl_heap_alloc(messageq_module->heaps[heap_id], size, 0);
+ if (msg == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ /* Fill in the fields of the message */
+ messageq_msg_init(msg);
+ msg->msg_size = size;
+ msg->heap_id = heap_id;
+
+ if (unlikely((messageq_module->cfg.trace_flag == true) || \
+ (((*msg).flags & MESSAGEQ_TRACEMASK) != 0))) {
+ printk(KERN_INFO "messageq_alloc: msg = 0x%x seq_num = 0x%x "
+ "src_proc = 0x%x", (uint)(msg), (msg)->seq_num,
+ (msg)->src_proc);
+ }
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "messageq_alloc failed! status = 0x%x", status);
+ return msg;
+}
+EXPORT_SYMBOL(messageq_alloc);
+
+/* Frees the message. */
+int messageq_free(messageq_msg msg)
+{
+	int status = 0;
+ void *heap = NULL;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(msg == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (unlikely(msg->heap_id == MESSAGEQ_STATICMSG)) {
+ status = MESSAGEQ_E_CANNOTFREESTATICMSG;
+ goto exit;
+ }
+ if (unlikely(msg->heap_id >= messageq_module->num_heaps)) {
+ status = MESSAGEQ_E_INVALIDHEAPID;
+ goto exit;
+ }
+ if (unlikely(messageq_module->heaps[msg->heap_id] == NULL)) {
+ status = MESSAGEQ_E_INVALIDHEAPID;
+ goto exit;
+ }
+
+ if (unlikely((messageq_module->cfg.trace_flag == true) || \
+ (((*msg).flags & MESSAGEQ_TRACEMASK) != 0))) {
+ printk(KERN_INFO "messageq_free: msg = 0x%x seq_num = 0x%x "
+ "src_proc = 0x%x", (uint)(msg), (msg)->seq_num,
+ (msg)->src_proc);
+ }
+ heap = messageq_module->heaps[msg->heap_id];
+ sl_heap_free(heap, msg, msg->msg_size);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_free failed! status = 0x%x\n",
+ status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_free);
+
+/* Put a message in the queue */
+int messageq_put(u32 queue_id, messageq_msg msg)
+{
+ int status = 0;
+ u16 dst_proc_id = (u16)(queue_id >> 16);
+ struct messageq_object *obj = NULL;
+ void *transport = NULL;
+ u32 priority;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(queue_id == MESSAGEQ_INVALIDMESSAGEQ))) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(msg == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ msg->dst_id = (u16)(queue_id);
+ msg->dst_proc = (u16)(queue_id >> 16);
+ if (likely(dst_proc_id != multiproc_self())) {
+ if (unlikely(dst_proc_id >= multiproc_get_num_processors())) {
+ /* Invalid destination processor id */
+ status = MESSAGEQ_E_INVALIDPROCID;
+ goto exit;
+ }
+
+ priority = (u32)((msg->flags) & MESSAGEQ_TRANSPORTPRIORITYMASK);
+ /* Call the transport associated with this message queue */
+ transport = messageq_module->transports[dst_proc_id][priority];
+ if (transport == NULL) {
+ /* Try the other transport */
+ priority = !priority;
+ transport =
+ messageq_module->transports[dst_proc_id][priority];
+ }
+
+ if (unlikely(transport == NULL)) {
+ status = -ENODEV;
+ goto exit;
+ }
+ status = transportshm_put(transport, msg);
+ if (unlikely(status < 0))
+ goto exit;
+ } else {
+ /* It is a local MessageQ */
+ obj = (struct messageq_object *)
+ (messageq_module->queues[(u16)(queue_id)]);
+ status = mutex_lock_interruptible(messageq_module->gate_handle);
+ if (status < 0)
+ goto exit;
+		if ((msg->flags & MESSAGEQ_PRIORITYMASK) ==
+				MESSAGEQ_URGENTPRI) {
+			list_add((struct list_head *) msg, &obj->high_list);
+		} else if ((msg->flags & MESSAGEQ_PRIORITYMASK) ==
+				MESSAGEQ_NORMALPRI) {
+			list_add_tail((struct list_head *) msg,
+					&obj->normal_list);
+		} else {
+			list_add_tail((struct list_head *) msg,
+					&obj->high_list);
+		}
+ mutex_unlock(messageq_module->gate_handle);
+
+ /* Notify the reader. */
+ if (obj->synchronizer != NULL) {
+ up(obj->synchronizer);
+ /*OsalSemaphore_post(obj->synchronizer);*/
+ }
+ }
+ if (unlikely((messageq_module->cfg.trace_flag == true) || \
+ (((*msg).flags & MESSAGEQ_TRACEMASK) != 0))) {
+ printk(KERN_INFO "messageq_put: msg = 0x%x seq_num = 0x%x "
+ "src_proc = 0x%x dst_proc_id = 0x%x\n", (uint)(msg),
+ (msg)->seq_num, (msg)->src_proc, (msg)->dst_proc);
+ }
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "messageq_put failed! status = 0x%x\n", status);
+ return status;
+}
+EXPORT_SYMBOL(messageq_put);
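+
+/* Illustrative send-side sketch (not part of this driver): the 32-bit
+ * queue_id packs the destination processor into the upper 16 bits and
+ * the queue index into the lower 16, so a sender only needs the id
+ * returned by messageq_open(). Heap id 0 and the queue name are
+ * assumptions; error handling is elided.
+ *
+ *	messageq_msg msg;
+ *	u32 queue_id;
+ *
+ *	messageq_open("remoteQ", &queue_id);
+ *	msg = messageq_alloc(0, 64);
+ *	messageq_set_msg_id(msg, 1);
+ *	messageq_put(queue_id, msg);
+ *
+ * For a remote destination messageq_put() picks the transport that
+ * matches the message priority and falls back to the other priority's
+ * transport when only one is registered.
+ */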
+
+/* Register a heap */
+int messageq_register_heap(void *heap_handle, u16 heap_id)
+{
+ int status = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(heap_handle == NULL))) {
+		/*! @retval -EINVAL Invalid heap_handle */
+ status = -EINVAL;
+ goto exit;
+ }
+ /* Make sure the heap_id is valid */
+ if (WARN_ON(unlikely(heap_id >= messageq_module->num_heaps))) {
+ /*! @retval -EINVAL Invalid heap_id */
+ status = -EINVAL;
+ goto exit;
+ }
+
+ status = mutex_lock_interruptible(messageq_module->gate_handle);
+ if (status)
+ goto exit;
+ if (messageq_module->heaps[heap_id] == NULL)
+ messageq_module->heaps[heap_id] = heap_handle;
+ else {
+ /*! @retval MESSAGEQ_E_ALREADYEXISTS Specified heap is
+ already registered. */
+ status = MESSAGEQ_E_ALREADYEXISTS;
+ }
+ mutex_unlock(messageq_module->gate_handle);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_register_heap failed! "
+ "status = 0x%x\n", status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_register_heap);
+
+/* Unregister a heap */
+int messageq_unregister_heap(u16 heap_id)
+{
+ int status = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ /* Make sure the heap_id is valid */
+ if (WARN_ON(unlikely(heap_id >= messageq_module->num_heaps))) {
+ /*! @retval -EINVAL Invalid heap_id */
+ status = -EINVAL;
+ goto exit;
+ }
+
+ status = mutex_lock_interruptible(messageq_module->gate_handle);
+ if (status)
+ goto exit;
+ if (messageq_module->heaps != NULL)
+ messageq_module->heaps[heap_id] = NULL;
+ mutex_unlock(messageq_module->gate_handle);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_unregister_heap failed! "
+ "status = 0x%x\n", status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_unregister_heap);
+
+/* Register a transport */
+int messageq_register_transport(void *messageq_transportshm_handle,
+ u16 proc_id, u32 priority)
+{
+ int status = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(messageq_transportshm_handle == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(proc_id >= multiproc_get_num_processors()))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ status = mutex_lock_interruptible(messageq_module->gate_handle);
+ if (status)
+ goto exit;
+ if (messageq_module->transports[proc_id][priority] == NULL) {
+ messageq_module->transports[proc_id][priority] = \
+ messageq_transportshm_handle;
+ } else {
+ /*! @retval MESSAGEQ_E_ALREADYEXISTS Specified transport is
+ already registered. */
+ status = MESSAGEQ_E_ALREADYEXISTS;
+ }
+ mutex_unlock(messageq_module->gate_handle);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_register_transport failed! "
+ "status = 0x%x\n", status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(messageq_register_transport);
+
+/* Unregister a transport */
+void messageq_unregister_transport(u16 proc_id, u32 priority)
+{
+ int status = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(proc_id >= multiproc_get_num_processors())) {
+		/*! @retval -EINVAL Invalid proc_id */
+ status = -EINVAL;
+ goto exit;
+ }
+
+ status = mutex_lock_interruptible(messageq_module->gate_handle);
+ if (status)
+ goto exit;
+ if (messageq_module->transports[proc_id][priority] != NULL)
+ messageq_module->transports[proc_id][priority] = NULL;
+ mutex_unlock(messageq_module->gate_handle);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "messageq_unregister_transport failed! "
+ "status = 0x%x\n", status);
+ }
+ return;
+}
+EXPORT_SYMBOL(messageq_unregister_transport);
+
+/* Set the reply queue of the message. */
+void messageq_set_reply_queue(void *messageq_handle, messageq_msg msg)
+{
+ s32 status = 0;
+
+ struct messageq_object *obj = \
+ (struct messageq_object *) messageq_handle;
+
+ if (WARN_ON(unlikely(messageq_handle == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(msg == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ msg->reply_id = (u16)(obj->queue);
+ msg->reply_proc = (u16)(obj->queue >> 16);
+ return;
+
+exit:
+ printk(KERN_ERR "messageq_set_reply_queue failed: status = 0x%x",
+ status);
+ return;
+}
+EXPORT_SYMBOL(messageq_set_reply_queue);
+
+/* Get the queue id of the message queue. */
+u32 messageq_get_queue_id(void *messageq_handle)
+{
+ struct messageq_object *obj = \
+ (struct messageq_object *) messageq_handle;
+ u32 queue_id = MESSAGEQ_INVALIDMESSAGEQ;
+
+ if (WARN_ON(unlikely(obj == NULL))) {
+ printk(KERN_ERR "messageq_get_queue_id: obj passed is NULL!\n");
+ goto exit;
+ }
+
+ queue_id = (obj->queue);
+
+exit:
+ return queue_id;
+}
+EXPORT_SYMBOL(messageq_get_queue_id);
+
+/* Get the processor id of the message queue. */
+u16 messageq_get_proc_id(void *messageq_handle)
+{
+ struct messageq_object *obj = \
+ (struct messageq_object *) messageq_handle;
+ u16 proc_id = MULTIPROC_INVALIDID;
+
+ if (WARN_ON(unlikely(obj == NULL))) {
+ printk(KERN_ERR "messageq_get_proc_id: obj passed is NULL!\n");
+ goto exit;
+ }
+
+ proc_id = (u16)(obj->queue >> 16);
+
+exit:
+ return proc_id;
+}
+EXPORT_SYMBOL(messageq_get_proc_id);
+
+/* Get the destination queue of the message. */
+u32 messageq_get_dst_queue(messageq_msg msg)
+{
+ u32 queue_id = MESSAGEQ_INVALIDMESSAGEQ;
+
+ if (WARN_ON(unlikely(msg == NULL))) {
+ printk(KERN_ERR "messageq_get_dst_queue: msg passed is "
+ "NULL!\n");
+ goto exit;
+ }
+
+ /*construct queue value */
+ if (msg->dst_id != (u32)MESSAGEQ_INVALIDMESSAGEQ)
+ queue_id = ((u32) multiproc_self() << 16) | msg->dst_id;
+
+exit:
+ return queue_id;
+}
+EXPORT_SYMBOL(messageq_get_dst_queue);
+
+/* Get the message id of the message. */
+u16 messageq_get_msg_id(messageq_msg msg)
+{
+ u16 id = MESSAGEQ_INVALIDMSGID;
+
+ if (WARN_ON(unlikely(msg == NULL))) {
+ printk(KERN_ERR "messageq_get_msg_id: msg passed is NULL!\n");
+ goto exit;
+ }
+
+ id = msg->msg_id;
+
+exit:
+ return id;
+}
+EXPORT_SYMBOL(messageq_get_msg_id);
+
+/* Get the message size of the message. */
+u32 messageq_get_msg_size(messageq_msg msg)
+{
+ u32 size = 0;
+
+ if (WARN_ON(unlikely(msg == NULL))) {
+ printk(KERN_ERR "messageq_get_msg_size: msg passed is NULL!\n");
+ goto exit;
+ }
+
+ size = msg->msg_size;
+
+exit:
+ return size;
+}
+EXPORT_SYMBOL(messageq_get_msg_size);
+
+/* Get the message priority of the message. */
+u32 messageq_get_msg_pri(messageq_msg msg)
+{
+ u32 priority = MESSAGEQ_NORMALPRI;
+
+ if (WARN_ON(unlikely(msg == NULL))) {
+ printk(KERN_ERR "messageq_get_msg_pri: msg passed is NULL!\n");
+ goto exit;
+ }
+
+ priority = ((u32)(msg->flags & MESSAGEQ_PRIORITYMASK));
+
+exit:
+ return priority;
+}
+EXPORT_SYMBOL(messageq_get_msg_pri);
+
+/* Get the embedded source message queue out of the message. */
+u32 messageq_get_reply_queue(messageq_msg msg)
+{
+ u32 queue = MESSAGEQ_INVALIDMESSAGEQ;
+
+ if (WARN_ON(unlikely(msg == NULL))) {
+ printk(KERN_ERR "messageq_get_reply_queue: msg passed is "
+ "NULL!\n");
+ goto exit;
+ }
+
+ if (msg->reply_id != (u16)MESSAGEQ_INVALIDMESSAGEQ)
+ queue = ((u32)(msg->reply_proc) << 16) | msg->reply_id;
+
+exit:
+ return queue;
+}
+EXPORT_SYMBOL(messageq_get_reply_queue);
+
+/* Set the message id of the message. */
+void messageq_set_msg_id(messageq_msg msg, u16 msg_id)
+{
+ if (WARN_ON(unlikely(msg == NULL))) {
+ printk(KERN_ERR "messageq_set_msg_id: msg passed is NULL!\n");
+ goto exit;
+ }
+
+ msg->msg_id = msg_id;
+
+exit:
+ return;
+}
+EXPORT_SYMBOL(messageq_set_msg_id);
+
+/* Set the priority of the message. */
+void messageq_set_msg_pri(messageq_msg msg, u32 priority)
+{
+ if (WARN_ON(unlikely(msg == NULL))) {
+ printk(KERN_ERR "messageq_set_msg_pri: msg passed is NULL!\n");
+ goto exit;
+ }
+
+	/* Only replace the priority bits; preserve the header version
+	 * and trace bits */
+	msg->flags = (msg->flags & ~MESSAGEQ_PRIORITYMASK) |
+			(priority & MESSAGEQ_PRIORITYMASK);
+
+exit:
+ return;
+}
+EXPORT_SYMBOL(messageq_set_msg_pri);
+
+/* Sets the tracing of a message */
+void messageq_set_msg_trace(messageq_msg msg, bool trace_flag)
+{
+ if (WARN_ON(unlikely(msg == NULL))) {
+ printk(KERN_ERR "messageq_set_msg_trace: msg passed is "
+ "NULL!\n");
+ goto exit;
+ }
+
+ msg->flags = (msg->flags & ~MESSAGEQ_TRACEMASK) | \
+ (trace_flag << MESSAGEQ_TRACESHIFT);
+
+	printk(KERN_INFO "messageq_set_msg_trace: msg = 0x%x, seq_num = 0x%x "
+		"src_proc = 0x%x trace_flag = 0x%x\n", (uint)msg,
+		msg->seq_num, msg->src_proc, trace_flag);
+exit:
+ return;
+}
+
+/* Returns the amount of shared memory used by one transport instance.
+ *
+ * The MessageQ module itself does not use any shared memory but the
+ * underlying transport may use some shared memory.
+ */
+uint messageq_shared_mem_req(void *shared_addr)
+{
+ uint mem_req;
+
+ if (likely(multiproc_get_num_processors() > 1)) {
+ /* Determine device-specific shared memory requirements */
+ mem_req = messageq_setup_transport_proxy_shared_mem_req(
+ shared_addr);
+ } else {
+ /* Only 1 processor: no shared memory needed */
+ mem_req = 0;
+ }
+
+ return mem_req;
+}
+EXPORT_SYMBOL(messageq_shared_mem_req);
+
+/* Calls the SetupProxy to setup the MessageQ transports. */
+int messageq_attach(u16 remote_proc_id, void *shared_addr)
+{
+ int status = MESSAGEQ_S_SUCCESS;
+
+ if (likely(multiproc_get_num_processors() > 1)) {
+ /* Use the messageq_setup_transport_proxy to attach
+ * transports */
+ status = messageq_setup_transport_proxy_attach(
+ remote_proc_id, shared_addr);
+ if (status < 0) {
+			printk(KERN_ERR "messageq_attach failed in transport "
+				"setup, status = 0x%x\n", status);
+ }
+ }
+
+ /*! @retval MESSAGEQ_S_SUCCESS Operation successfully completed! */
+ return status;
+}
+EXPORT_SYMBOL(messageq_attach);
+
+/* Calls the SetupProxy to detach the MessageQ transports. */
+int messageq_detach(u16 remote_proc_id)
+{
+ int status = MESSAGEQ_S_SUCCESS;
+
+ if (likely(multiproc_get_num_processors() > 1)) {
+ /* Use the messageq_setup_transport_proxy to detach
+ * transports */
+ status = messageq_setup_transport_proxy_detach(remote_proc_id);
+ if (unlikely(status < 0)) {
+			printk(KERN_ERR "messageq_detach failed in transport "
+				"detach, status = 0x%x\n", status);
+ }
+ }
+
+ /*! @retval MESSAGEQ_S_SUCCESS Operation successfully completed! */
+ return status;
+}
+EXPORT_SYMBOL(messageq_detach);
+
+/* =============================================================================
+ * Internal functions
+ * =============================================================================
+ */
+/* Grow the MessageQ table */
+u16 _messageq_grow(struct messageq_object *obj)
+{
+ u16 queue_index = messageq_module->num_queues;
+ int old_size;
+ void **queues;
+ void **oldqueues;
+
+ /* No parameter validation required since this is an internal func. */
+ old_size = (messageq_module->num_queues) * \
+ sizeof(struct messageq_object *);
+ queues = kmalloc(old_size + sizeof(struct messageq_object *),
+ GFP_KERNEL);
+ if (queues == NULL) {
+ printk(KERN_ERR "_messageq_grow: Growing the messageq "
+ "failed!\n");
+ goto exit;
+ }
+
+ /* Copy contents into new table */
+ memcpy(queues, messageq_module->queues, old_size);
+ /* Fill in the new entry */
+ queues[queue_index] = (void *)obj;
+ /* Hook-up new table */
+ oldqueues = messageq_module->queues;
+ messageq_module->queues = queues;
+ messageq_module->num_queues++;
+
+	/* Delete old table if not statically defined */
+ if (messageq_module->can_free_queues == true)
+ kfree(oldqueues);
+ else
+ messageq_module->can_free_queues = true;
+
+exit:
+ return queue_index;
+}
+
+/* This is a helper function to initialize a message. */
+static void messageq_msg_init(messageq_msg msg)
+{
+ s32 status = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(messageq_module->ref_count),
+ MESSAGEQ_MAKE_MAGICSTAMP(0),
+ MESSAGEQ_MAKE_MAGICSTAMP(1)) == true))) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(unlikely(msg == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ msg->reply_id = (u16) MESSAGEQ_INVALIDMESSAGEQ;
+ msg->msg_id = MESSAGEQ_INVALIDMSGID;
+ msg->dst_id = (u16) MESSAGEQ_INVALIDMESSAGEQ;
+ msg->flags = MESSAGEQ_HEADERVERSION | MESSAGEQ_NORMALPRI;
+ msg->src_proc = multiproc_self();
+
+ status = mutex_lock_interruptible(messageq_module->gate_handle);
+ if (status < 0)
+ goto exit;
+ msg->seq_num = messageq_module->seq_num++;
+ mutex_unlock(messageq_module->gate_handle);
+
+exit:
+ if (status < 0) {
+		printk(KERN_ERR "messageq_msg_init failed! "
+			"status = 0x%x\n", status);
+ }
+ return;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/messageq_ioctl.c b/drivers/dsp/syslink/multicore_ipc/messageq_ioctl.c
new file mode 100644
index 000000000000..bd4cc527557e
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/messageq_ioctl.c
@@ -0,0 +1,566 @@
+/*
+ * messageq_ioctl.c
+ *
+ * This file implements all the ioctl operations required on the messageq
+ * module.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Linux headers */
+#include <linux/uaccess.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+/* Module Headers */
+#include <messageq.h>
+#include <messageq_ioctl.h>
+#include <sharedregion.h>
+
+/*
+ * ======== messageq_ioctl_put ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_put function
+ */
+static inline int messageq_ioctl_put(struct messageq_cmd_args *cargs)
+{
+ int status = 0;
+ messageq_msg msg;
+
+ msg = (messageq_msg) sharedregion_get_ptr(cargs->args.put.msg_srptr);
+ if (unlikely(msg == NULL))
+ goto exit;
+
+ status = messageq_put(cargs->args.put.queue_id, msg);
+
+ cargs->api_status = status;
+exit:
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_get ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_get function
+ */
+static inline int messageq_ioctl_get(struct messageq_cmd_args *cargs)
+{
+ messageq_msg msg = NULL;
+ u32 *msg_srptr = SHAREDREGION_INVALIDSRPTR;
+	s32 index;
+
+ cargs->api_status = messageq_get(cargs->args.get.messageq_handle,
+ &msg,
+ cargs->args.get.timeout);
+ if (unlikely(cargs->api_status < 0))
+ goto exit;
+
+ index = sharedregion_get_id(msg);
+ if (unlikely(index < 0)) {
+ cargs->api_status = index;
+ goto exit;
+ }
+
+ msg_srptr = sharedregion_get_srptr(msg, index);
+
+exit:
+ cargs->args.get.msg_srptr = msg_srptr;
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_count ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_count function
+ */
+static inline int messageq_ioctl_count(struct messageq_cmd_args *cargs)
+{
+ int result = messageq_count(cargs->args.count.messageq_handle);
+ if (result < 0)
+ cargs->api_status = result;
+ else
+ cargs->args.count.count = result;
+
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_alloc ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_alloc function
+ */
+static inline int messageq_ioctl_alloc(struct messageq_cmd_args *cargs)
+{
+ messageq_msg msg;
+ u32 *msg_srptr = SHAREDREGION_INVALIDSRPTR;
+	s32 index;
+
+ msg = messageq_alloc(cargs->args.alloc.heap_id, cargs->args.alloc.size);
+ if (unlikely(msg == NULL))
+ goto exit;
+
+ index = sharedregion_get_id(msg);
+ if (unlikely(index < 0))
+ goto exit;
+
+ msg_srptr = sharedregion_get_srptr(msg, index);
+
+ cargs->api_status = 0;
+exit:
+ cargs->args.alloc.msg_srptr = msg_srptr;
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_free ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_free function
+ */
+static inline int messageq_ioctl_free(struct messageq_cmd_args *cargs)
+{
+ int status = 0;
+ messageq_msg msg;
+
+ msg = sharedregion_get_ptr(cargs->args.free.msg_srptr);
+ if (unlikely(msg == NULL))
+ goto exit;
+ status = messageq_free(msg);
+
+ cargs->api_status = status;
+exit:
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_params_init ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_params_init function
+ */
+static inline int messageq_ioctl_params_init(struct messageq_cmd_args *cargs)
+{
+ s32 retval = 0;
+ int status = 0;
+ unsigned long size;
+ struct messageq_params params;
+
+ messageq_params_init(&params);
+ size = copy_to_user(cargs->args.params_init.params, &params,
+ sizeof(struct messageq_params));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = status;
+exit:
+ return retval;
+}
+
+/*
+ * ======== messageq_ioctl_create ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_create function
+ */
+static inline int messageq_ioctl_create(struct messageq_cmd_args *cargs)
+{
+ s32 retval = 0;
+ int status = 0;
+ unsigned long size;
+ struct messageq_params params;
+ char *name = NULL;
+
+ if (cargs->args.create.params != NULL) {
+ size = copy_from_user(&params, cargs->args.create.params,
+ sizeof(struct messageq_params));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+ }
+
+ /* Allocate memory for the name */
+ if (cargs->args.create.name_len > 0) {
+ name = kmalloc(cargs->args.create.name_len, GFP_KERNEL);
+ if (name == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+ size = copy_from_user(name, cargs->args.create.name,
+ cargs->args.create.name_len);
+ if (size) {
+ retval = -EFAULT;
+ goto free_name;
+ }
+ }
+
+ if (cargs->args.create.params != NULL) {
+ cargs->args.create.messageq_handle = \
+ messageq_create(name, &params);
+ } else {
+ cargs->args.create.messageq_handle = \
+ messageq_create(name, NULL);
+ }
+
+	if (cargs->args.create.messageq_handle != NULL) {
+		cargs->args.create.queue_id = messageq_get_queue_id(
+			cargs->args.create.messageq_handle);
+	} else {
+		/* Flag create failure in api_status; -1 follows the
+		 * convention of the other handlers in this file */
+		status = -1;
+	}
+
+free_name:
+ if (cargs->args.create.name_len > 0)
+ kfree(name);
+
+ cargs->api_status = status;
+exit:
+ return retval;
+}
+
+/*
+ * ======== messageq_ioctl_delete ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_delete function
+ */
+static inline int messageq_ioctl_delete(struct messageq_cmd_args *cargs)
+{
+ cargs->api_status =
+ messageq_delete(&(cargs->args.delete_messageq.messageq_handle));
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_open ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_open function
+ */
+static inline int messageq_ioctl_open(struct messageq_cmd_args *cargs)
+{
+ s32 retval = 0;
+ int status = 0;
+ unsigned long size;
+ char *name = NULL;
+ u32 queue_id = MESSAGEQ_INVALIDMESSAGEQ;
+
+ /* Allocate memory for the name */
+ if (cargs->args.open.name_len > 0) {
+ name = kmalloc(cargs->args.open.name_len, GFP_KERNEL);
+ if (name == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+ size = copy_from_user(name, cargs->args.open.name,
+ cargs->args.open.name_len);
+ if (size) {
+ retval = -EFAULT;
+ goto free_name;
+ }
+ }
+
+ status = messageq_open(name, &queue_id);
+ cargs->args.open.queue_id = queue_id;
+
+free_name:
+ if (cargs->args.open.name_len > 0)
+ kfree(name);
+
+ cargs->api_status = status;
+exit:
+ return retval;
+}
+
+/*
+ * ======== messageq_ioctl_close ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_close function
+ */
+static inline int messageq_ioctl_close(struct messageq_cmd_args *cargs)
+{
+ u32 queue_id = cargs->args.close.queue_id;
+ messageq_close(&queue_id);
+ cargs->args.close.queue_id = queue_id;
+
+ cargs->api_status = 0;
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_get_config ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_get_config function
+ */
+static inline int messageq_ioctl_get_config(struct messageq_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct messageq_config config;
+
+ messageq_get_config(&config);
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct messageq_config));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = 0;
+exit:
+ return retval;
+}
+
+/*
+ * ======== messageq_ioctl_setup ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_setup function
+ */
+static inline int messageq_ioctl_setup(struct messageq_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct messageq_config config;
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct messageq_config));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = messageq_setup(&config);
+
+exit:
+ return retval;
+}
+
+/*
+ * ======== messageq_ioctl_destroy ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_destroy function
+ */
+static inline int messageq_ioctl_destroy(struct messageq_cmd_args *cargs)
+{
+ cargs->api_status = messageq_destroy();
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_register_heap ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_register_heap function
+ */
+static inline int messageq_ioctl_register_heap(struct messageq_cmd_args *cargs)
+{
+ cargs->api_status = \
+ messageq_register_heap(cargs->args.register_heap.heap_handle,
+ cargs->args.register_heap.heap_id);
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_unregister_heap ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_unregister_heap function
+ */
+static inline int messageq_ioctl_unregister_heap(
+ struct messageq_cmd_args *cargs)
+{
+ cargs->api_status = messageq_unregister_heap(
+ cargs->args.unregister_heap.heap_id);
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_attach ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_attach function
+ */
+static inline int messageq_ioctl_attach(struct messageq_cmd_args *cargs)
+{
+ void *shared_addr;
+
+ shared_addr = sharedregion_get_ptr(
+ cargs->args.attach.shared_addr_srptr);
+ if (unlikely(shared_addr == NULL)) {
+ cargs->api_status = -1;
+ goto exit;
+ }
+ cargs->api_status = messageq_attach(cargs->args.attach.remote_proc_id,
+ shared_addr);
+
+exit:
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_detach ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_detach function
+ */
+static inline int messageq_ioctl_detach(struct messageq_cmd_args *cargs)
+{
+ cargs->api_status = messageq_detach(cargs->args.detach.remote_proc_id);
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl_shared_mem_req ========
+ * Purpose:
+ *	This is the ioctl interface to the messageq_shared_mem_req function
+ */
+static inline int messageq_ioctl_shared_mem_req(struct messageq_cmd_args *cargs)
+{
+ void *shared_addr;
+
+ shared_addr = sharedregion_get_ptr(
+ cargs->args.shared_mem_req.shared_addr_srptr);
+ if (unlikely(shared_addr == NULL)) {
+ cargs->api_status = -1;
+ goto exit;
+ }
+ cargs->args.shared_mem_req.mem_req = \
+ messageq_shared_mem_req(shared_addr);
+ cargs->api_status = 0;
+
+exit:
+ return 0;
+}
+
+/*
+ * ======== messageq_ioctl ========
+ * Purpose:
+ *	The ioctl interface function for the messageq module
+ */
+int messageq_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ int os_status = 0;
+ struct messageq_cmd_args __user *uarg =
+ (struct messageq_cmd_args __user *)args;
+ struct messageq_cmd_args cargs;
+ unsigned long size;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ os_status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ os_status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+ if (os_status) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg, sizeof(struct messageq_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_MESSAGEQ_PUT:
+ os_status = messageq_ioctl_put(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_GET:
+ os_status = messageq_ioctl_get(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_COUNT:
+ os_status = messageq_ioctl_count(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_ALLOC:
+ os_status = messageq_ioctl_alloc(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_FREE:
+ os_status = messageq_ioctl_free(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_PARAMS_INIT:
+ os_status = messageq_ioctl_params_init(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_CREATE:
+ os_status = messageq_ioctl_create(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_DELETE:
+ os_status = messageq_ioctl_delete(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_OPEN:
+ os_status = messageq_ioctl_open(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_CLOSE:
+ os_status = messageq_ioctl_close(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_GETCONFIG:
+ os_status = messageq_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_SETUP:
+ os_status = messageq_ioctl_setup(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_DESTROY:
+ os_status = messageq_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_REGISTERHEAP:
+ os_status = messageq_ioctl_register_heap(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_UNREGISTERHEAP:
+ os_status = messageq_ioctl_unregister_heap(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_ATTACH:
+ os_status = messageq_ioctl_attach(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_DETACH:
+ os_status = messageq_ioctl_detach(&cargs);
+ break;
+
+ case CMD_MESSAGEQ_SHAREDMEMREQ:
+ os_status = messageq_ioctl_shared_mem_req(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ os_status = -ENOTTY;
+ break;
+ }
+
+ if ((cargs.api_status == -ERESTARTSYS) || (cargs.api_status == -EINTR))
+ os_status = -ERESTARTSYS;
+
+ if (os_status < 0)
+ goto exit;
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs, sizeof(struct messageq_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+ return os_status;
+
+exit:
+ printk(KERN_ERR "messageq_ioctl failed: status = 0x%x\n", os_status);
+ return os_status;
+}
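+
+/* Illustrative user-side sketch (not part of this driver): each
+ * command round-trips one struct messageq_cmd_args; the module-level
+ * result comes back in api_status, while the ioctl return value only
+ * reports marshalling failures. fd is assumed to be an open handle to
+ * the syslink ipc device node.
+ *
+ *	struct messageq_cmd_args args = {0};
+ *	u32 *srptr;
+ *
+ *	args.args.alloc.heap_id = 0;
+ *	args.args.alloc.size = 64;
+ *	if (ioctl(fd, CMD_MESSAGEQ_ALLOC, &args) == 0 &&
+ *			args.api_status == 0)
+ *		srptr = args.args.alloc.msg_srptr;
+ */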
diff --git a/drivers/dsp/syslink/multicore_ipc/multiproc.c b/drivers/dsp/syslink/multicore_ipc/multiproc.c
new file mode 100644
index 000000000000..df9ddf57e0a6
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/multiproc.c
@@ -0,0 +1,301 @@
+/*
+* multiproc.c
+*
+* Many multi-processor modules have the concept of processor id. MultiProc
+* centralizes the processor id management.
+*
+* Copyright (C) 2008-2009 Texas Instruments, Inc.
+*
+* This package is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+* PURPOSE.
+*/
+
+/*
+ * ======== multiproc.c ========
+ * Notes:
+ *  Processor ids start at 0 and ascend without skipping values, up to
+ *  the maximum number of processors minus 1.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+#include <linux/module.h>
+#include <syslink/atomic_linux.h>
+/* Utilities headers */
+#include <linux/string.h>
+
+/* Module level headers */
+#include <multiproc.h>
+
+/* Macro to make a correct module magic number with ref_count */
+#define MULTIPROC_MAKE_MAGICSTAMP(x) ((MULTIPROC_MODULEID << 12u) | (x))
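+
+/* For example, with a hypothetical module id of 0x0a,
+ * MULTIPROC_MAKE_MAGICSTAMP(0) is 0xa000 and the first reference
+ * yields 0xa001: the module id sits above bit 11 and the reference
+ * count occupies the low 12 bits, so an uninitialized module can be
+ * told apart from a plain zero count.
+ */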
+
+/*
+ * multiproc module state object
+ */
+struct multiproc_module_object {
+ struct multiproc_config cfg; /* Module configuration structure */
+ struct multiproc_config def_cfg; /* Default module configuration */
+ atomic_t ref_count; /* Reference count */
+ u16 id; /* Local processor ID */
+};
+
+static struct multiproc_module_object multiproc_state = {
+ .def_cfg.num_processors = 4,
+ .def_cfg.name_list[0][0] = "Tesla",
+ .def_cfg.name_list[1][0] = "AppM3",
+ .def_cfg.name_list[2][0] = "SysM3",
+ .def_cfg.name_list[3][0] = "MPU",
+ .def_cfg.id = 3,
+ .id = MULTIPROC_INVALIDID
+};
+
+/*
+ * ========= multiproc_module =========
+ * Pointer to the MultiProc module state.
+ */
+static struct multiproc_module_object *multiproc_module = &multiproc_state;
+
+
+/*
+ * ======== multiproc_get_config ========
+ * Purpose:
+ * This will get the default configuration for the multiproc module
+ */
+void multiproc_get_config(struct multiproc_config *cfg)
+{
+ BUG_ON(cfg == NULL);
+ if (atomic_cmpmask_and_lt(
+ &(multiproc_module->ref_count),
+ MULTIPROC_MAKE_MAGICSTAMP(0),
+ MULTIPROC_MAKE_MAGICSTAMP(1)) == true) {
+ /* (If setup has not yet been called) */
+ memcpy(cfg, &multiproc_module->def_cfg,
+ sizeof(struct multiproc_config));
+ } else {
+ memcpy(cfg, &multiproc_module->cfg,
+ sizeof(struct multiproc_config));
+ }
+}
+EXPORT_SYMBOL(multiproc_get_config);
+
+/*
+ * ======== multiproc_setup ========
+ * Purpose:
+ * This function sets up the multiproc module. This function
+ * must be called before any other instance-level APIs can be
+ * invoked
+ */
+s32 multiproc_setup(struct multiproc_config *cfg)
+{
+ s32 status = 0;
+ struct multiproc_config tmp_cfg;
+
+	/* This sets the ref_count variable if it is not initialized; the
+	 * upper bits hold the module id to ensure correctness of the
+	 * ref_count variable.
+	 */
+ atomic_cmpmask_and_set(&multiproc_module->ref_count,
+ MULTIPROC_MAKE_MAGICSTAMP(0),
+ MULTIPROC_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&multiproc_module->ref_count)
+ != MULTIPROC_MAKE_MAGICSTAMP(1u)) {
+ status = 1;
+ } else {
+ if (cfg == NULL) {
+ multiproc_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+
+ memcpy(&multiproc_module->cfg, cfg,
+ sizeof(struct multiproc_config));
+ multiproc_module->id = cfg->id;
+ }
+
+ return status;
+}
+EXPORT_SYMBOL(multiproc_setup);
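+
+/* Illustrative usage sketch (not part of this driver): a platform
+ * init path would normally take the defaults and override only the
+ * local id before committing the configuration. The value 3 matches
+ * the MPU slot in the default table above.
+ *
+ *	struct multiproc_config cfg;
+ *
+ *	multiproc_get_config(&cfg);
+ *	cfg.id = 3;
+ *	multiproc_setup(&cfg);
+ */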
+
+/*
+ * ======== multiproc_destroy ========
+ * Purpose:
+ *  This function destroys the multiproc module.
+ * Once this function is called, other multiproc module APIs,
+ * except for the multiproc_get_config API cannot be called
+ * anymore.
+ */
+s32 multiproc_destroy(void)
+{
+ int status = 0;
+
+ if (atomic_cmpmask_and_lt(
+ &(multiproc_module->ref_count),
+ MULTIPROC_MAKE_MAGICSTAMP(0),
+ MULTIPROC_MAKE_MAGICSTAMP(1)) == true) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ atomic_dec_return(&multiproc_module->ref_count);
+
+exit:
+ return status;
+}
+EXPORT_SYMBOL(multiproc_destroy);
+
+/*
+ * ======== multiproc_set_local_id ========
+ * Purpose:
+ *  This will set the processor id of the local processor at run time
+ */
+int multiproc_set_local_id(u16 proc_id)
+{
+ int status = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(multiproc_module->ref_count),
+ MULTIPROC_MAKE_MAGICSTAMP(0),
+ MULTIPROC_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(proc_id >= MULTIPROC_MAXPROCESSORS)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ multiproc_module->cfg.id = proc_id;
+
+exit:
+ return status;
+}
+EXPORT_SYMBOL(multiproc_set_local_id);
+
+/*
+ * ======== multiproc_get_id ========
+ * Purpose:
+ *  This will get the processor id from the processor name
+ */
+u16 multiproc_get_id(const char *proc_name)
+{
+ s32 i;
+ u16 proc_id = MULTIPROC_INVALIDID;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(multiproc_module->ref_count),
+ MULTIPROC_MAKE_MAGICSTAMP(0),
+ MULTIPROC_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ /* If the name is NULL, just return the local id */
+ if (proc_name == NULL)
+ proc_id = multiproc_module->cfg.id;
+ else {
+ for (i = 0; i < multiproc_module->cfg.num_processors ; i++) {
+ if (strcmp(proc_name,
+ &multiproc_module->cfg.name_list[i][0]) == 0) {
+ proc_id = i;
+ break;
+ }
+ }
+ }
+
+exit:
+ return proc_id;
+}
+EXPORT_SYMBOL(multiproc_get_id);
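+
+/* With the default name table above, for example,
+ * multiproc_get_id("SysM3") returns 2, while multiproc_get_id(NULL)
+ * returns the local processor id.
+ */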
+
+/*
+ * ======== multiproc_get_name ========
+ * Purpose:
+ *  This will get the processor name from the processor id
+ */
+char *multiproc_get_name(u16 proc_id)
+{
+ char *proc_name = NULL;
+
+ /* On error condition return NULL pointer, else entry from name list */
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(multiproc_module->ref_count),
+ MULTIPROC_MAKE_MAGICSTAMP(0),
+ MULTIPROC_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (WARN_ON(proc_id >= MULTIPROC_MAXPROCESSORS))
+ goto exit;
+
+ proc_name = multiproc_module->cfg.name_list[proc_id];
+
+exit:
+ return proc_name;
+}
+EXPORT_SYMBOL(multiproc_get_name);
+
+/*
+ * ======== multiproc_get_num_processors ========
+ * Purpose:
+ * This will get the number of processors in the system
+ */
+u16 multiproc_get_num_processors(void)
+{
+ return multiproc_module->cfg.num_processors;
+}
+EXPORT_SYMBOL(multiproc_get_num_processors);
+
+/*
+ * ======== multiproc_self ========
+ * Purpose:
+ * Return Id of current processor
+ */
+u16 multiproc_self(void)
+{
+ return multiproc_module->id;
+}
+EXPORT_SYMBOL(multiproc_self);
+
+/*
+ * ======== multiproc_get_slot ========
+ * Determines the offset for any two processors.
+ */
+u32 multiproc_get_slot(u16 remote_proc_id)
+{
+ u32 slot = 0u;
+ u32 i;
+ u32 j;
+ u32 small_id;
+ u32 large_id;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(multiproc_module->ref_count),
+ MULTIPROC_MAKE_MAGICSTAMP(0),
+ MULTIPROC_MAKE_MAGICSTAMP(1)) == true))
+ goto exit;
+
+ if (remote_proc_id > multiproc_self()) {
+ small_id = multiproc_self();
+ large_id = remote_proc_id;
+ } else {
+ large_id = multiproc_self();
+ small_id = remote_proc_id;
+ }
+
+	/* determine what offset to create for the remote Proc Id */
+	for (i = 0; i < multiproc_module->cfg.num_processors; i++) {
+		for (j = i + 1; j < multiproc_module->cfg.num_processors; j++) {
+			/* Leave both loops once the pair is found; a bare
+			 * break would only exit the inner loop and slot
+			 * would keep incrementing */
+			if ((small_id == i) && (large_id == j))
+				goto exit;
+			slot++;
+		}
+	}
+
+exit:
+ return slot;
+}
+EXPORT_SYMBOL(multiproc_get_slot);
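+
+/* Worked example: with num_processors = 4 the loops enumerate the
+ * pairs (0,1) (0,2) (0,3) (1,2) (1,3) (2,3) in that order, so the
+ * pair {1,3} maps to slot 4 regardless of which side is the local
+ * processor; every unordered pair gets a unique slot.
+ */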
diff --git a/drivers/dsp/syslink/multicore_ipc/multiproc_ioctl.c b/drivers/dsp/syslink/multicore_ipc/multiproc_ioctl.c
new file mode 100644
index 000000000000..8f36304f3397
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/multiproc_ioctl.c
@@ -0,0 +1,171 @@
+/*
+* multiproc_ioctl.c
+*
+* This provides the ioctl interface for multiproc module
+*
+* Copyright (C) 2008-2009 Texas Instruments, Inc.
+*
+* This package is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+* PURPOSE.
+*/
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <multiproc.h>
+#include <multiproc_ioctl.h>
+
+/*
+ * ======== mproc_ioctl_setup ========
+ * Purpose:
+ *	This wrapper function will call the multiproc function
+ *	to set up the module
+ */
+static int mproc_ioctl_setup(struct multiproc_cmd_args *cargs)
+{
+ struct multiproc_config config;
+ s32 status = 0;
+ ulong size;
+
+ size = copy_from_user(&config,
+ cargs->args.setup.config,
+ sizeof(struct multiproc_config));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = multiproc_setup(&config);
+
+exit:
+ return status;
+}
+
+/*
+ * ======== mproc_ioctl_destroy ========
+ * Purpose:
+ *	This wrapper function will call the multiproc function
+ * to destroy the module
+ */
+static int mproc_ioctl_destroy(struct multiproc_cmd_args *cargs)
+{
+ cargs->api_status = multiproc_destroy();
+ return 0;
+}
+
+/*
+ * ======== mproc_ioctl_get_config ========
+ * Purpose:
+ *	This wrapper function will call the multiproc function
+ *	to get the default configuration of the module
+ */
+static int mproc_ioctl_get_config(struct multiproc_cmd_args *cargs)
+{
+ struct multiproc_config config;
+ u32 size;
+
+ multiproc_get_config(&config);
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct multiproc_config));
+ if (size) {
+ cargs->api_status = -EFAULT;
+ return 0;
+ }
+ cargs->api_status = 0;
+ return 0;
+}
+
+/*
+ * ======== multiproc_ioctl_set_local_id ========
+ * Purpose:
+ *	This wrapper function will call the multiproc function
+ *	to set the local processor id
+ */
+static int multiproc_ioctl_set_local_id(struct multiproc_cmd_args *cargs)
+{
+ cargs->api_status = multiproc_set_local_id(cargs->args.set_local_id.id);
+ return 0;
+}
+
+/*
+ * ======== multiproc_ioctl ========
+ * Purpose:
+ *	This is the ioctl interface for the multiproc module
+ */
+int multiproc_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ s32 status = 0;
+ s32 size = 0;
+ struct multiproc_cmd_args __user *uarg =
+ (struct multiproc_cmd_args __user *)args;
+ struct multiproc_cmd_args cargs;
+
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+ if (status) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg,
+ sizeof(struct multiproc_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_MULTIPROC_SETUP:
+ status = mproc_ioctl_setup(&cargs);
+ break;
+
+ case CMD_MULTIPROC_DESTROY:
+ status = mproc_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_MULTIPROC_GETCONFIG:
+ status = mproc_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_MULTIPROC_SETLOCALID:
+ status = multiproc_ioctl_set_local_id(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ status = -ENOTTY;
+ break;
+ }
+
+ if ((cargs.api_status == -ERESTARTSYS) || (cargs.api_status == -EINTR))
+ status = -ERESTARTSYS;
+
+ if (status < 0)
+ goto exit;
+
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs, sizeof(struct multiproc_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ return status;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/nameserver.c b/drivers/dsp/syslink/multicore_ipc/nameserver.c
new file mode 100644
index 000000000000..4278da5eca34
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/nameserver.c
@@ -0,0 +1,1540 @@
+/*
+ * nameserver.c
+ *
+ * The nameserver module manages local name/value pairs that
+ * enable an application and other modules to store and retrieve
+ * values based on a name.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <syslink/atomic_linux.h>
+
+#include <nameserver.h>
+#include <multiproc.h>
+#include <nameserver_remote.h>
+
+#define NS_MAX_NAME_LEN 32
+#define NS_MAX_RUNTIME_ENTRY (~0)
+#define NS_MAX_VALUE_LEN 4
+
+/*
+ * The dynamic name/value table looks like the following. This approach allows
+ * each instance table to have different value and different name lengths.
+ * The names block is allocated on create. The size of that block is
+ * (max_runtime_entries * max_name_len). That block is sliced and diced up and
+ * given to each table entry.
+ * The same thing is done for the values block.
+ *
+ * names table values
+ * ------------- ------------- -------------
+ * | |<-\ | elem | /----->| |
+ * | | \-------| name | / | |
+ * | | | value |-/ | |
+ * | | | len | | |
+ * | |<-\ |-----------| | |
+ * | | \ | elem | | |
+ * | | \------| name | /------>| |
+ * | | | value |-/ | |
+ * ------------- | len | | |
+ * ------------- | |
+ * | |
+ * | |
+ * -------------
+ *
+ * There is an optimization for small values (e.g. <= sizeof(u32)).
+ * In this case, there is no values block allocated. Instead the value
+ * field is used directly. This optimization occurs and is managed when
+ * obj->max_value_len <= sizeof(u32).
+ *
+ * The static create is a little different. The static entries point directly
+ * to a name string (and value). Since it points directly to static items,
+ * these entries cannot be removed.
+ * If max_runtime_entries is non-zero, a names and values block is created.
+ * Here is an example of a table with 1 static entry and 2 dynamic entries
+ *
+ * ------------
+ * | elem |
+ * "myName" <-----------| name |----------> someValue
+ * | value |
+ * names | len | values
+ * ------------- ------------- -------------
+ * | |<-\ | elem | /----->| |
+ * | | \-------| name | / | |
+ * | | | value |-/ | |
+ * | | | len | | |
+ * | |<-\ |-----------| | |
+ * | | \ | elem | | |
+ * | | \------| name | /------>| |
+ * | | | value |-/ | |
+ * ------------- | len | | |
+ * ------------- | |
+ * | |
+ * | |
+ * -------------
+ *
+ * The nameserver uses a freeList and namelist to maintain the empty
+ * and filled-in entries. So when a name/value pair is added, an entry
+ * is pulled off the freeList, filled-in and placed on the namelist.
+ * The reverse happens on a remove.
+ *
+ * For static adds, the entries are placed on the namelist statically.
+ *
+ * For dynamic creates, the freeList is populated in postInit and there are no
+ * entries placed on the namelist (this happens when the add is called).
+ *
+ */
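+
+/* Sizing example (illustrative): with max_runtime_entries = 8,
+ * max_name_len = 16 and max_value_len = 32, the names block is
+ * 8 * 16 = 128 bytes and the values block is 8 * 32 = 256 bytes;
+ * when max_value_len <= sizeof(u32) no values block is allocated at
+ * all.
+ */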
+
+/* Macro to make a correct module magic number with refCount */
+#define NAMESERVER_MAKE_MAGICSTAMP(x) ((NAMESERVER_MODULEID << 12u) | (x))
+
+/*
+ * A name/value table entry
+ */
+struct nameserver_table_entry {
+ struct list_head elem; /* List element */
+ u32 hash; /* Hash value */
+ char *name; /* Name portion of name/value pair */
+ u32 len; /* Length of the value field. */
+ void *buf; /* Value portion of name/value entry */
+	bool collide; /* Does the hash collide? */
+ struct nameserver_table_entry *next; /* Pointer to the next entry,
+					used in case of collision only */
+};
+
+/*
+ * A nameserver instance object
+ */
+struct nameserver_object {
+ struct list_head elem;
+ char *name; /* Name of the instance */
+ struct list_head name_list; /* Filled entries list */
+ struct mutex *gate_handle; /* Gate for critical regions */
+ struct nameserver_params params; /* The parameter structure */
+ u32 count; /* Counter for entries */
+};
+
+
+/* nameserver module state object */
+struct nameserver_module_object {
+ struct list_head obj_list; /* List holding created objects */
+ struct mutex *mod_gate_handle; /* Handle to module gate */
+ struct nameserver_remote_object **remote_handle_list;
+ /* List of Remote driver handles for processors */
+ atomic_t ref_count; /* Reference count */
+ struct nameserver_params def_inst_params;
+	/* Default instance parameters */
+ struct nameserver_config def_cfg; /* Default module configuration */
+ struct nameserver_config cfg; /* Module configuration */
+
+};
+
+/*
+ * Variable for holding state of the nameserver module.
+ */
+static struct nameserver_module_object nameserver_state = {
+ .def_cfg.reserved = 0x0,
+ .def_inst_params.max_runtime_entries = 0u,
+ .def_inst_params.table_heap = NULL,
+ .def_inst_params.check_existing = true,
+ .def_inst_params.max_value_len = 0u,
+ .def_inst_params.max_name_len = 16u,
+ .mod_gate_handle = NULL,
+ .remote_handle_list = NULL,
+};
+
+/*
+ * Pointer to the nameserver module state
+ */
+static struct nameserver_module_object *nameserver_module = &(nameserver_state);
+
+/*
+ * Lookup table for CRC calculation.
+ */
+static const u32 nameserver_crc_table[256u] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
+ 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
+ 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
+ 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
+ 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
+ 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
+ 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
+ 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
+ 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
+ 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
+ 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
+ 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
+ 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
+ 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
+ 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
+ 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
+ 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
+ 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
+ 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
+ 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
+ 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
+ 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,
+};
+
+/* Function to calculate hash for a string */
+static u32 _nameserver_string_hash(const char *string);
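+
+/* The table above is the standard CRC-32 lookup table for the
+ * reflected polynomial 0xedb88320. A byte-at-a-time string hash over
+ * it would look like the sketch below; this is only an illustration,
+ * the actual _nameserver_string_hash body is defined later in this
+ * file and may differ (e.g. in its initial value).
+ *
+ *	u32 hash = 0;
+ *
+ *	while (*string)
+ *		hash = (hash >> 8) ^
+ *			nameserver_crc_table[(hash ^ *string++) & 0xff];
+ */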
+
+#if 0
+/* This will return true if the entry is found in the table */
+static bool _nameserver_is_entry_found(const char *name, u32 hash,
+ struct list_head *list,
+ struct nameserver_table_entry **entry);
+#endif
+
+/* This will return true if the hash is found in the table */
+static bool _nameserver_is_hash_found(const char *name, u32 hash,
+ struct list_head *list,
+ struct nameserver_table_entry **entry);
+
+/* This will return true if entry is found in the hash collide list */
+static bool _nameserver_check_for_entry(const char *name,
+ struct nameserver_table_entry **entry);
+
+/* Function to get the default configuration for the NameServer module. */
+void nameserver_get_config(struct nameserver_config *cfg)
+{
+ s32 retval = 0;
+
+ if (WARN_ON(cfg == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (atomic_cmpmask_and_lt(&(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true) {
+ /* (If setup has not yet been called) */
+ memcpy(cfg, &nameserver_module->def_cfg,
+ sizeof(struct nameserver_config));
+ } else {
+ memcpy(cfg, &nameserver_module->cfg,
+ sizeof(struct nameserver_config));
+ }
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "nameserver_get_config failed! retval = 0x%x",
+ retval);
+ }
+ return;
+}
+EXPORT_SYMBOL(nameserver_get_config);
+
+/* This will setup the nameserver module */
+int nameserver_setup(void)
+{
+ struct nameserver_remote_object **list = NULL;
+ s32 retval = 0;
+ u16 nr_procs = 0;
+
+	/* This sets the ref_count variable if it is not initialized; the
+	 * upper bits hold the module id to ensure correctness of the
+	 * ref_count variable
+	 */
+ atomic_cmpmask_and_set(&nameserver_module->ref_count,
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&nameserver_module->ref_count)
+ != NAMESERVER_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+	INIT_LIST_HEAD(&nameserver_state.obj_list);
+
+ nameserver_module->mod_gate_handle = kmalloc(sizeof(struct mutex),
+ GFP_KERNEL);
+ if (nameserver_module->mod_gate_handle == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+ /* mutex is initialized with state = UNLOCKED */
+ mutex_init(nameserver_module->mod_gate_handle);
+
+ nr_procs = multiproc_get_num_processors();
+ list = kmalloc(nr_procs * sizeof(struct nameserver_remote_object *),
+ GFP_KERNEL);
+ if (list == NULL) {
+ retval = -ENOMEM;
+ goto remote_alloc_fail;
+ }
+ memset(list, 0, nr_procs * sizeof(struct nameserver_remote_object *));
+ nameserver_module->remote_handle_list = list;
+
+ return 0;
+
+remote_alloc_fail:
+ kfree(nameserver_module->mod_gate_handle);
+exit:
+ printk(KERN_ERR "nameserver_setup failed, retval: %x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_setup);
+
+/* This will destroy the nameserver module */
+int nameserver_destroy(void)
+{
+ s32 retval = 0;
+ struct mutex *lock = NULL;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (!(atomic_dec_return(&nameserver_module->ref_count)
+ == NAMESERVER_MAKE_MAGICSTAMP(0))) {
+ retval = 1;
+ goto exit;
+ }
+
+ if (WARN_ON(nameserver_module->mod_gate_handle == NULL)) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+	/* If a nameserver instance exists, do not proceed */
+ if (!list_empty(&nameserver_module->obj_list)) {
+ retval = -EBUSY;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(nameserver_module->mod_gate_handle);
+ if (retval)
+ goto exit;
+
+ lock = nameserver_module->mod_gate_handle;
+ nameserver_module->mod_gate_handle = NULL;
+ mutex_unlock(lock);
+ kfree(lock);
+ kfree(nameserver_module->remote_handle_list);
+ nameserver_module->remote_handle_list = NULL;
+ return 0;
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "nameserver_destroy failed, retval: %x\n",
+ retval);
+ }
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_destroy);
+
+/* Initialize this config-params structure with supplier-specified
+ * defaults before instance creation. */
+void nameserver_params_init(struct nameserver_params *params)
+{
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(params == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ memcpy(params, &nameserver_module->def_inst_params,
+ sizeof(struct nameserver_params));
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "nameserver_params_init failed! status = 0x%x",
+ retval);
+ }
+ return;
+}
+EXPORT_SYMBOL(nameserver_params_init);
+
+/* This will create a name server instance */
+void *nameserver_create(const char *name,
+ const struct nameserver_params *params)
+{
+ struct nameserver_object *new_obj = NULL;
+ u32 name_len;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(params == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(name == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ name_len = strlen(name) + 1;
+ if (name_len > params->max_name_len) {
+ retval = -E2BIG;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(nameserver_module->mod_gate_handle);
+ if (retval)
+ goto exit;
+
+ /* check if the name is already registered or not */
+ new_obj = nameserver_get_handle(name);
+ if (new_obj != NULL) {
+ retval = -EEXIST;
+ goto error_handle;
+ }
+
+ new_obj = kmalloc(sizeof(struct nameserver_object), GFP_KERNEL);
+ if (new_obj == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ new_obj->name = kmalloc(name_len, GFP_ATOMIC);
+ if (new_obj->name == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ strncpy(new_obj->name, name, name_len);
+ memcpy(&new_obj->params, params, sizeof(struct nameserver_params));
+ if (params->max_value_len < sizeof(u32))
+ new_obj->params.max_value_len = sizeof(u32);
+ else
+ new_obj->params.max_value_len = params->max_value_len;
+
+ new_obj->gate_handle = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (new_obj->gate_handle == NULL) {
+ retval = -ENOMEM;
+ goto error_mutex;
+ }
+
+ mutex_init(new_obj->gate_handle);
+ new_obj->count = 0;
+ /* Put in the nameserver instance to local list */
+ INIT_LIST_HEAD(&new_obj->name_list);
+ INIT_LIST_HEAD(&new_obj->elem);
+ list_add(&new_obj->elem, &nameserver_module->obj_list);
+ mutex_unlock(nameserver_module->mod_gate_handle);
+ return (void *)new_obj;
+
+error_mutex:
+ kfree(new_obj->name);
+error:
+ kfree(new_obj);
+error_handle:
+ mutex_unlock(nameserver_module->mod_gate_handle);
+exit:
+ printk(KERN_ERR "nameserver_create failed retval:%x\n", retval);
+ return NULL;
+}
+EXPORT_SYMBOL(nameserver_create);
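+
+/* Illustrative usage sketch (not part of this driver): a typical
+ * creator sizes the instance for a handful of word-sized values. The
+ * instance name "myTable" is an assumption.
+ *
+ *	struct nameserver_params params;
+ *	void *handle;
+ *
+ *	nameserver_params_init(&params);
+ *	params.max_runtime_entries = 8;
+ *	params.max_name_len = NS_MAX_NAME_LEN;
+ *	params.max_value_len = sizeof(u32);
+ *	handle = nameserver_create("myTable", &params);
+ */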
+
+/* Function to construct a name server. */
+void nameserver_construct(void *handle, const char *name,
+ const struct nameserver_params *params)
+{
+ struct nameserver_object *obj = NULL;
+ u32 name_len = 0;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(params == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(name == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(params->table_heap == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ /* check if the name is already registered or not */
+ if (nameserver_get_handle(name)) {
+ retval = -EEXIST; /* NameServer_E_ALREADYEXISTS */
+ goto exit;
+ }
+ name_len = strlen(name) + 1;
+ if (name_len > params->max_name_len) {
+ retval = -E2BIG;
+ goto exit;
+ }
+
+ obj = (struct nameserver_object *) handle;
+ /* Allocate memory for the name */
+	obj->name = kmalloc(name_len, GFP_KERNEL);
+ if (obj->name == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ /* Copy the name */
+	strncpy(obj->name, name, name_len);
+ /* Copy the params */
+ memcpy((void *) &obj->params, (void *) params,
+ sizeof(struct nameserver_params));
+
+ if (params->max_value_len < sizeof(u32))
+ obj->params.max_value_len = sizeof(u32);
+ else
+ obj->params.max_value_len = params->max_value_len;
+
+ /* Construct the list */
+ INIT_LIST_HEAD(&obj->name_list);
+
+ obj->gate_handle = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (obj->gate_handle == NULL) {
+ retval = -ENOMEM;
+		goto error_name;
+ }
+ mutex_init(obj->gate_handle);
+
+ /* Initialize the count */
+ obj->count = 0u;
+
+ /* Put in the local list */
+	retval = mutex_lock_interruptible(nameserver_module->mod_gate_handle);
+	if (retval)
+		goto error_gate;
+	INIT_LIST_HEAD(&obj->elem);
+	list_add(&obj->elem, &nameserver_module->obj_list);
+	mutex_unlock(nameserver_module->mod_gate_handle);
+	return;
+
+error_gate:
+	kfree(obj->gate_handle);
+	obj->gate_handle = NULL;
+error_name:
+	kfree(obj->name);
+	obj->name = NULL;
+exit:
+ if (retval < 0) {
+		printk(KERN_ERR "nameserver_construct failed! retval = 0x%x\n",
+ retval);
+ }
+ return;
+}
+
+/* This will delete a name server instance */
+int nameserver_delete(void **handle)
+{
+ struct nameserver_object *temp_obj = NULL;
+ struct mutex *gate_handle = NULL;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(*handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ temp_obj = (struct nameserver_object *) (*handle);
+	if (WARN_ON(unlikely((temp_obj->name == NULL) ||
+			(nameserver_get_handle(temp_obj->name) == NULL)))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ gate_handle = temp_obj->gate_handle;
+ retval = mutex_lock_interruptible(gate_handle);
+ if (retval)
+ goto exit;
+
+	/* Do not proceed if an entry is still in the table */
+ if (temp_obj->count != 0) {
+ retval = -EBUSY;
+ goto error;
+ }
+
+ retval = mutex_lock_interruptible(nameserver_module->mod_gate_handle);
+ if (retval)
+ goto error;
+ list_del(&temp_obj->elem);
+ mutex_unlock(nameserver_module->mod_gate_handle);
+
+ /* free the memory allocated for instance name */
+ kfree(temp_obj->name);
+ temp_obj->name = NULL;
+
+ /* Free the memory used for handle */
+ INIT_LIST_HEAD(&temp_obj->name_list);
+ kfree(temp_obj);
+ *handle = NULL;
+ mutex_unlock(gate_handle);
+ kfree(gate_handle);
+ return 0;
+
+error:
+ mutex_unlock(gate_handle);
+exit:
+ printk(KERN_ERR "nameserver_delete failed retval:%x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_delete);
+
+/* Function to destroy a name server. */
+void nameserver_destruct(void *handle)
+{
+ struct nameserver_object *obj = NULL;
+ struct mutex *gate_handle = NULL;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct nameserver_object *) handle;
+ if (nameserver_get_handle(obj->name) == NULL) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ /* enter the critical section */
+ gate_handle = obj->gate_handle;
+ retval = mutex_lock_interruptible(gate_handle);
+ if (retval)
+ goto exit;
+	/* Do not proceed if an entry is still in the table */
+ if (obj->count != 0) {
+ retval = -EBUSY;
+ goto error;
+ }
+
+ retval = mutex_lock_interruptible(nameserver_module->mod_gate_handle);
+ if (retval)
+ goto error;
+ list_del(&obj->elem);
+ mutex_unlock(nameserver_module->mod_gate_handle);
+
+ /* free the memory allocated for the name */
+ kfree(obj->name);
+ obj->name = NULL;
+
+ /* Destruct the list */
+ INIT_LIST_HEAD(&obj->name_list);
+
+ /* Free the memory used for obj */
+ memset(obj, 0, sizeof(struct nameserver_object));
+
+ /* leave the critical section */
+ mutex_unlock(gate_handle);
+ kfree(gate_handle);
+ return;
+
+error:
+ /* leave the critical section */
+ mutex_unlock(obj->gate_handle);
+
+exit:
+	printk(KERN_ERR "nameserver_destruct failed! status = 0x%x\n",
+		retval);
+ return;
+}
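+
+/* Note: nameserver_create()/nameserver_delete() allocate and free the
+ * object themselves, while nameserver_construct()/nameserver_destruct()
+ * operate on caller-provided storage; destruct therefore only clears
+ * the object with memset() instead of freeing it.
+ */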
+
+/* This will add an entry into a nameserver instance */
+void *nameserver_add(void *handle, const char *name,
+ void *buf, u32 len)
+{
+ struct nameserver_table_entry *node = NULL;
+ struct nameserver_table_entry *new_node = NULL;
+ struct nameserver_object *temp_obj = NULL;
+ bool found = false;
+ bool exact_entry = false;
+ u32 hash;
+ u32 name_len;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(name == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(buf == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(len == 0))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ temp_obj = (struct nameserver_object *)handle;
+ retval = mutex_lock_interruptible(temp_obj->gate_handle);
+ if (retval)
+ goto exit;
+ if (temp_obj->count >= temp_obj->params.max_runtime_entries) {
+ retval = -ENOSPC;
+ goto error;
+ }
+
+	/* take the terminating null char into account */
+ name_len = strlen(name) + 1;
+ if (name_len > temp_obj->params.max_name_len) {
+ retval = -E2BIG;
+ goto error;
+ }
+
+ /* TODO : hash and collide ?? */
+ hash = _nameserver_string_hash(name);
+ found = _nameserver_is_hash_found(name, hash,
+ &temp_obj->name_list, &node);
+ if (found == true)
+ exact_entry = _nameserver_check_for_entry(name, &node);
+
+ if (exact_entry == true && temp_obj->params.check_existing == true) {
+ retval = -EEXIST;
+ goto error;
+ }
+
+ new_node = kmalloc(sizeof(struct nameserver_table_entry), GFP_KERNEL);
+ if (new_node == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ new_node->hash = hash;
+ new_node->collide = found;
+ new_node->len = len;
+ new_node->next = NULL;
+ new_node->name = kmalloc(name_len, GFP_KERNEL);
+ if (new_node->name == NULL) {
+ retval = -ENOMEM;
+ goto error_name;
+ }
+ new_node->buf = kmalloc(len, GFP_KERNEL);
+ if (new_node->buf == NULL) {
+ retval = -ENOMEM;
+ goto error_buf;
+ }
+
+ strncpy(new_node->name, name, name_len);
+ memcpy(new_node->buf, buf, len);
+ if (found == true) {
+ /* If hash is found, need to stitch the list to link the
+ * new node to the existing node with the same hash. */
+ new_node->next = node->next;
+ node->next = new_node;
+ node->collide = found;
+ } else
+ list_add(&new_node->elem, &temp_obj->name_list);
+ temp_obj->count++;
+ mutex_unlock(temp_obj->gate_handle);
+ return new_node;
+
+error_buf:
+ kfree(new_node->name);
+error_name:
+ kfree(new_node);
+error:
+ mutex_unlock(temp_obj->gate_handle);
+exit:
+ printk(KERN_ERR "nameserver_add failed status: %x\n", retval);
+ return NULL;
+}
+EXPORT_SYMBOL(nameserver_add);
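+
+/* Usage sketch for adding a raw entry (illustrative; the entry name
+ * "cfg" and the blob are hypothetical). Both the name and the buffer
+ * are copied into the table, so the caller keeps ownership of its own
+ * copies:
+ *
+ *	u8 blob[16];
+ *	void *entry;
+ *
+ *	entry = nameserver_add(ns, "cfg", blob, sizeof(blob));
+ *	if (entry == NULL)
+ *		;	/* handle failure */
+ */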
+
+/* This will add a Uint32 value into a nameserver instance */
+void *nameserver_add_uint32(void *handle, const char *name,
+ u32 value)
+{
+ s32 retval = 0;
+ struct nameserver_table_entry *new_node = NULL;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(name == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ new_node = nameserver_add(handle, name, &value, sizeof(u32));
+
+exit:
+ if (retval < 0 || new_node == NULL) {
+ printk(KERN_ERR "nameserver_add_uint32 failed! status = 0x%x "
+			"new_node = 0x%x\n", retval, (u32)new_node);
+ }
+ return new_node;
+}
+EXPORT_SYMBOL(nameserver_add_uint32);
+
+/* This will remove a name/value pair from a name server */
+int nameserver_remove(void *handle, const char *name)
+{
+ struct nameserver_object *temp_obj = NULL;
+ struct nameserver_table_entry *entry = NULL;
+ struct nameserver_table_entry *prev = NULL;
+ bool found = false;
+ u32 hash;
+ u32 name_len;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(name == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ temp_obj = (struct nameserver_object *)handle;
+ name_len = strlen(name) + 1;
+ if (name_len > temp_obj->params.max_name_len) {
+ retval = -E2BIG;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(temp_obj->gate_handle);
+ if (retval)
+ goto exit;
+
+ hash = _nameserver_string_hash(name);
+ found = _nameserver_is_hash_found(name, hash,
+ &temp_obj->name_list, &entry);
+ if (found == false) {
+ retval = -ENOENT;
+ goto error;
+ }
+
+ if (entry->collide == true) {
+		if (strcmp(entry->name, name) == 0u) {
+			struct nameserver_table_entry *head_next =
+							entry->next;
+
+			kfree(entry->buf);
+			kfree(entry->name);
+			/* Pull the second node of the chain into the list
+			 * head, then free that node so the head's list
+			 * linkage stays intact */
+			entry->hash = head_next->hash;
+			entry->name = head_next->name;
+			entry->len = head_next->len;
+			entry->buf = head_next->buf;
+			entry->collide = head_next->collide;
+			entry->next = head_next->next;
+			kfree(head_next);
+			temp_obj->count--;
+ } else {
+ found = false;
+ prev = entry;
+ entry = entry->next;
+ while (entry) {
+ if (strcmp(entry->name, name) == 0u) {
+ kfree(entry->buf);
+ kfree(entry->name);
+ prev->next = entry->next;
+ kfree(entry);
+ temp_obj->count--;
+ found = true;
+ break;
+ }
+ prev = entry;
+ entry = entry->next;
+ }
+ if (found == false) {
+ retval = -ENOENT;
+ goto error;
+ }
+ }
+ } else {
+ kfree(entry->buf);
+ kfree(entry->name);
+ list_del(&entry->elem);
+ kfree(entry);
+ temp_obj->count--;
+ }
+
+ mutex_unlock(temp_obj->gate_handle);
+ return 0;
+
+error:
+ mutex_unlock(temp_obj->gate_handle);
+exit:
+ printk(KERN_ERR "nameserver_remove failed status:%x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_remove);
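+
+/* Note on the table layout assumed above: entries with distinct hashes
+ * sit directly on the instance's name_list, while entries that collide
+ * on a hash hang off the first such entry through the singly-linked
+ * 'next' field. Removing the head of a collision chain copies the
+ * second node's fields into the head and frees the second node, so the
+ * head's list_head linkage stays valid.
+ */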
+
+/* This will remove a name/value pair from a name server */
+int nameserver_remove_entry(void *nshandle, void *nsentry)
+{
+ struct nameserver_table_entry *node = NULL;
+ struct nameserver_object *obj = NULL;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(nshandle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(nsentry == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct nameserver_object *)nshandle;
+ node = (struct nameserver_table_entry *)nsentry;
+ retval = mutex_lock_interruptible(obj->gate_handle);
+ if (retval)
+ goto exit;
+
+ kfree(node->buf);
+ kfree(node->name);
+ list_del(&node->elem);
+ kfree(node);
+ obj->count--;
+ mutex_unlock(obj->gate_handle);
+ return 0;
+
+exit:
+ printk(KERN_ERR "nameserver_remove_entry failed status:%x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_remove_entry);
+
+
+/* This will retrieve the value portion of a name/value
+ * pair from local table */
+int nameserver_get_local(void *handle, const char *name,
+ void *value, u32 *len)
+{
+ struct nameserver_object *temp_obj = NULL;
+ struct nameserver_table_entry *entry = NULL;
+ bool found = false;
+ u32 hash;
+ u32 length = 0;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(name == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(value == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+	if (WARN_ON(unlikely(len == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(*len == 0))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ length = *len;
+ temp_obj = (struct nameserver_object *)handle;
+ retval = mutex_lock_interruptible(temp_obj->gate_handle);
+ if (retval)
+ goto exit;
+
+ hash = _nameserver_string_hash(name);
+ found = _nameserver_is_hash_found(name, hash,
+ &temp_obj->name_list, &entry);
+ if (found == false) {
+ retval = -ENOENT;
+ goto error;
+ }
+
+ if (entry->collide == true) {
+ found = _nameserver_check_for_entry(name, &entry);
+ if (found == false) {
+ retval = -ENOENT;
+ goto error;
+ }
+ }
+
+ if (entry->len >= length) {
+ memcpy(value, entry->buf, length);
+ *len = length;
+ } else {
+ memcpy(value, entry->buf, entry->len);
+ *len = entry->len;
+ }
+
+error:
+ mutex_unlock(temp_obj->gate_handle);
+
+exit:
+ if (retval < 0)
+ printk(KERN_ERR "nameserver_get_local entry not found!\n");
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_get_local);
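+
+/* Usage sketch for a local lookup (illustrative; the entry name is
+ * hypothetical). On success *len is updated to the number of bytes
+ * actually copied, which may be less than the buffer size supplied:
+ *
+ *	u8 buf[16];
+ *	u32 len = sizeof(buf);
+ *	int status;
+ *
+ *	status = nameserver_get_local(ns, "cfg", buf, &len);
+ *	if (status == -ENOENT)
+ *		;	/* entry not present in the local table */
+ */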
+
+/* This will retrieve the value portion of a name/value
+ * pair from local table */
+int nameserver_get(void *handle, const char *name,
+ void *value, u32 *len, u16 proc_id[])
+{
+ struct nameserver_object *temp_obj = NULL;
+ u16 max_proc_id;
+ u16 local_proc_id;
+ s32 retval = -ENOENT;
+ u32 i;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(name == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(value == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+	if (WARN_ON(unlikely(len == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(*len == 0))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ temp_obj = (struct nameserver_object *)handle;
+ max_proc_id = multiproc_get_num_processors();
+ local_proc_id = multiproc_self();
+ if (proc_id == NULL) {
+ retval = nameserver_get_local(temp_obj, name, value, len);
+ if (retval == -ENOENT) {
+ for (i = 0; i < max_proc_id; i++) {
+ /* Skip current processor */
+ if (i == local_proc_id)
+ continue;
+
+ if (nameserver_module->remote_handle_list[i] \
+ == NULL)
+ continue;
+
+ retval = nameserver_remote_get(
+ nameserver_module->
+ remote_handle_list[i],
+ temp_obj->name, name, value,
+ len, NULL);
+ if (retval >= 0 || ((retval < 0) &&
+ (retval != -ENOENT)))
+ break;
+ }
+ }
+ goto exit;
+ }
+
+ for (i = 0; i < max_proc_id; i++) {
+ /* Skip processor with invalid id */
+ if (proc_id[i] == MULTIPROC_INVALIDID)
+ continue;
+
+		if (proc_id[i] == local_proc_id) {
+ retval = nameserver_get_local(temp_obj,
+ name, value, len);
+ } else {
+ retval = nameserver_remote_get(
+ nameserver_module->
+ remote_handle_list[proc_id[i]],
+ temp_obj->name, name, value, len, NULL);
+ }
+ if (retval >= 0 || ((retval < 0) && (retval != -ENOENT)))
+ break;
+ }
+
+exit:
+ if (retval < 0)
+ printk(KERN_ERR "nameserver_get failed: status=%x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_get);
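+
+/* Usage sketch (illustrative; the entry name is hypothetical). A NULL
+ * proc_id searches the local table first and then every registered
+ * remote driver; an explicit list restricts the search to the listed
+ * processors, skipping MULTIPROC_INVALIDID entries and stopping at the
+ * first hit:
+ *
+ *	u32 value;
+ *	u32 len = sizeof(value);
+ *	int status;
+ *
+ *	/* search locally first, then all registered remote drivers */
+ *	status = nameserver_get(ns, "entry0", &value, &len, NULL);
+ */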
+
+/* Gets a 32-bit value by name */
+int nameserver_get_uint32(void *handle, const char *name, void *value,
+ u16 proc_id[])
+{
+ /* Initialize retval to not found */
+ int retval = -ENOENT;
+ u32 len = sizeof(u32);
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(name == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(value == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ retval = nameserver_get(handle, name, value, &len, proc_id);
+
+exit:
+ /* -ENOENT is a valid run-time failure. */
+ if ((retval < 0) && (retval != -ENOENT)) {
+		printk(KERN_ERR "nameserver_get_uint32 failed! status = 0x%x\n",
+ retval);
+ }
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_get_uint32);
+
+/* Gets a 32-bit value by name from the local table
+ *
+ * If the name is found, the 32-bit value is copied into the value
+ * argument and a success retval is returned.
+ *
+ * If the name is not found, zero is returned in len and the contents
+ * of value are not modified. Not finding a name is not considered
+ * an error.
+ *
+ * This function only searches the local name/value table. */
+int nameserver_get_local_uint32(void *handle, const char *name, void *value)
+{
+ int retval = 0;
+ u32 len = sizeof(u32);
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(name == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(value == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ retval = nameserver_get_local(handle, name, value, &len);
+
+exit:
+ /* -ENOENT is a valid run-time failure. */
+ if ((retval < 0) && (retval != -ENOENT)) {
+ printk(KERN_ERR "nameserver_get_local_uint32 failed! "
+			"status = 0x%x\n", retval);
+ }
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_get_local_uint32);
+
+/* This will retrieve the value portion of a name/value
+ * pair from the local table and return the number of characters
+ * that matched an entry. So if "abc" is an entry and match is
+ * called with "abcd", the "abc" entry is matched and 3 is
+ * returned, since three characters matched */
+int nameserver_match(void *handle, const char *name, u32 *value)
+{
+ struct nameserver_object *temp_obj = NULL;
+ struct nameserver_table_entry *node = NULL;
+ struct nameserver_table_entry *temp = NULL;
+ u32 len = 0;
+ u32 found_len = 0;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(name == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(value == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ temp_obj = (struct nameserver_object *)handle;
+ retval = mutex_lock_interruptible(temp_obj->gate_handle);
+ if (retval)
+ goto exit;
+ list_for_each_entry(node, &temp_obj->name_list, elem) {
+ temp = node;
+ while (temp) {
+ len = strlen(temp->name);
+ if (len > found_len) {
+ if (strncmp(temp->name, name, len) == 0u) {
+ *value = (u32)temp->buf;
+ found_len = len;
+ }
+ }
+ temp = temp->next;
+ }
+ }
+ mutex_unlock(temp_obj->gate_handle);
+
+exit:
+ if (retval < 0)
+ printk(KERN_ERR "nameserver_match failed status:%x\n", retval);
+ return found_len;
+}
+EXPORT_SYMBOL(nameserver_match);
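+
+/* Example of the longest-match semantics (illustrative): with entries
+ * "ab" and "abc" in the table, nameserver_match(ns, "abcd", &val)
+ * stores the buffer address of the "abc" entry in val and returns 3,
+ * since "abc" is the longest entry prefixing "abcd"; a return value of
+ * 0 means nothing matched.
+ */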
+
+/* This will get the handle of a nameserver instance from name */
+void *nameserver_get_handle(const char *name)
+{
+ struct nameserver_object *obj = NULL;
+ bool found = false;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(name == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ list_for_each_entry(obj, &nameserver_module->obj_list, elem) {
+ if (strcmp(obj->name, name) == 0) {
+ found = true;
+ break;
+ }
+ }
+ if (found == false) {
+ retval = -ENOENT;
+ goto exit;
+ }
+ return (void *)obj;
+
+exit:
+	printk(KERN_ERR "nameserver_get_handle failed! status = 0x%x\n",
+		retval);
+ return (void *)NULL;
+}
+EXPORT_SYMBOL(nameserver_get_handle);
+
+/* =============================================================================
+ * Internal functions
+ * =============================================================================
+ */
+/* Function to register a remote driver for a processor */
+int nameserver_register_remote_driver(void *handle, u16 proc_id)
+{
+ s32 retval = 0;
+ u16 proc_count;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ proc_count = multiproc_get_num_processors();
+ if (WARN_ON(unlikely(proc_id >= proc_count))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ nameserver_module->remote_handle_list[proc_id] = \
+ (struct nameserver_remote_object *)handle;
+ return 0;
+
+exit:
+ printk(KERN_ERR "nameserver_register_remote_driver failed! "
+ "status:%x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_register_remote_driver);
+
+/* Function to unregister a remote driver. */
+int nameserver_unregister_remote_driver(u16 proc_id)
+{
+ s32 retval = 0;
+ u16 proc_count;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ proc_count = multiproc_get_num_processors();
+ if (WARN_ON(proc_id >= proc_count)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ nameserver_module->remote_handle_list[proc_id] = NULL;
+ return 0;
+
+exit:
+ printk(KERN_ERR "nameserver_unregister_remote_driver failed! "
+ "status:%x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_unregister_remote_driver);
+
+/* Determines if a remote driver is registered for the specified id. */
+bool nameserver_is_registered(u16 proc_id)
+{
+ bool registered = false;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_module->ref_count),
+ NAMESERVER_MAKE_MAGICSTAMP(0),
+ NAMESERVER_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(proc_id >= multiproc_get_num_processors()))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ registered = (nameserver_module->remote_handle_list[proc_id] != NULL);
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "nameserver_is_registered failed! "
+			"status = 0x%x\n", retval);
+ }
+ return registered;
+}
+EXPORT_SYMBOL(nameserver_is_registered);
+
+/* Function to calculate hash for a string */
+static u32 _nameserver_string_hash(const char *string)
+{
+ u32 i;
+	u32 hash;
+ u32 len = strlen(string);
+
+ for (i = 0, hash = len; i < len; i++)
+ hash = (hash >> 8) ^
+ nameserver_crc_table[(hash & 0xff)] ^ string[i];
+
+ return hash;
+}
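+
+/* The hash above is a table-driven CRC folded over the string and
+ * seeded with its length: for "ab" it starts from hash = 2 and then
+ * computes hash = (hash >> 8) ^ nameserver_crc_table[hash & 0xff] ^ 'a',
+ * repeating the fold with 'b'. Distinct strings can still collide,
+ * which is what the collide/next chaining of the table entries handles.
+ */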
+
+#if 0
+/* This will return true if the entry is found in the table */
+static bool _nameserver_is_entry_found(const char *name, u32 hash,
+ struct list_head *list,
+ struct nameserver_table_entry **entry)
+{
+ struct nameserver_table_entry *node = NULL;
+ bool hash_match = false;
+ bool name_match = false;
+
+ list_for_each_entry(node, list, elem) {
+		/* If the hash does not match, take the next node */
+		if (node->hash == hash)
+			hash_match = true;
+		else
+			continue;
+		/* Check the name as well, in case the hash is a duplicate */
+ if (strcmp(node->name, name) == 0)
+ name_match = true;
+
+ if (hash_match && name_match) {
+ if (entry != NULL)
+ *entry = node;
+ return true;
+ }
+
+ hash_match = false;
+ name_match = false;
+ }
+ return false;
+}
+#endif
+
+/* This will return true if the hash is found in the table */
+static bool _nameserver_is_hash_found(const char *name, u32 hash,
+ struct list_head *list,
+ struct nameserver_table_entry **entry)
+{
+ struct nameserver_table_entry *node = NULL;
+
+ /* No parameter checking as function is internal */
+
+ list_for_each_entry(node, list, elem) {
+		/* Return the first node whose hash matches */
+ if (node->hash == hash) {
+ *entry = node;
+ return true;
+ }
+ }
+ return false;
+}
+
+/* This will return true if entry is found in the hash collide list */
+static bool _nameserver_check_for_entry(const char *name,
+ struct nameserver_table_entry **entry)
+{
+ struct nameserver_table_entry *temp = NULL;
+ bool found = false;
+
+ /* No parameter checking as function is internal */
+
+ temp = *entry;
+ while (temp) {
+ if (strcmp(((struct nameserver_table_entry *)temp)->name,
+ name) == 0u) {
+ *entry = temp;
+ found = true;
+ break;
+ }
+ temp = temp->next;
+ }
+
+ return found;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/nameserver_ioctl.c b/drivers/dsp/syslink/multicore_ipc/nameserver_ioctl.c
new file mode 100644
index 000000000000..c4cc75e40cb6
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/nameserver_ioctl.c
@@ -0,0 +1,593 @@
+/*
+* nameserver_ioctl.c
+*
+* This provides the ioctl interface for nameserver module
+*
+* Copyright (C) 2008-2009 Texas Instruments, Inc.
+*
+* This package is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+* PURPOSE.
+*/
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <nameserver.h>
+#include <nameserver_ioctl.h>
+
+/*
+ * FUNCTIONS NEED TO BE REVIEWED AND OPTIMIZED!
+ */
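+
+/* All wrappers below follow one marshalling pattern: nameserver_ioctl()
+ * copies the whole nameserver_cmd_args structure in from user space,
+ * the per-command wrapper copies in any embedded user pointers (names,
+ * value buffers), calls the kernel-side nameserver API and records its
+ * result in cargs->api_status, and nameserver_ioctl() finally copies
+ * the arguments back out to user space.
+ */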
+
+/*
+ * ======== nameserver_ioctl_setup ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * setup nameserver module
+ */
+static int nameserver_ioctl_setup(
+ struct nameserver_cmd_args *cargs)
+{
+ cargs->api_status = nameserver_setup();
+ return 0;
+}
+
+/*
+ * ======== nameserver_ioctl_destroy ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * destroy nameserver module
+ */
+static int nameserver_ioctl_destroy(
+ struct nameserver_cmd_args *cargs)
+{
+ cargs->api_status = nameserver_destroy();
+ return 0;
+}
+
+/*
+ * ======== nameserver_ioctl_params_init ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * get the default configuration of a nameserver instance
+ */
+static int nameserver_ioctl_params_init(struct nameserver_cmd_args *cargs)
+{
+ struct nameserver_params params;
+ s32 status = 0;
+ ulong size;
+
+ nameserver_params_init(&params);
+ size = copy_to_user(cargs->args.params_init.params, &params,
+ sizeof(struct nameserver_params));
+ if (size)
+ status = -EFAULT;
+ cargs->api_status = 0;
+ return status;
+}
+
+/*
+ * ======== nameserver_ioctl_get_handle ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * get the handle of a nameserver instance from name
+ */
+static int nameserver_ioctl_get_handle(struct nameserver_cmd_args *cargs)
+{
+ void *handle = NULL;
+ char *name = NULL;
+ s32 status = 0;
+ ulong size;
+
+	/* Allocate one extra byte for the forced string terminator */
+	name = kmalloc(cargs->args.get_handle.name_len + 1, GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ name[cargs->args.get_handle.name_len] = '\0';
+ size = copy_from_user(name, cargs->args.get_handle.name,
+ cargs->args.get_handle.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+
+ handle = nameserver_get_handle(name);
+ cargs->args.get_handle.handle = handle;
+ cargs->api_status = 0;
+
+name_from_usr_error:
+ kfree(name);
+
+exit:
+ return status;
+}
+
+/*
+ * ======== nameserver_ioctl_create ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * create a name server instance
+ */
+static int nameserver_ioctl_create(struct nameserver_cmd_args *cargs)
+{
+ struct nameserver_params params;
+ void *handle = NULL;
+ char *name = NULL;
+ s32 status = 0;
+ ulong size;
+
+	name = kmalloc(cargs->args.create.name_len + 1, GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+	name[cargs->args.create.name_len] = '\0';
+ size = copy_from_user(name, cargs->args.create.name,
+ cargs->args.create.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto copy_from_usr_error;
+ }
+
+ size = copy_from_user(&params, cargs->args.create.params,
+ sizeof(struct nameserver_params));
+ if (size) {
+ status = -EFAULT;
+ goto copy_from_usr_error;
+ }
+
+ handle = nameserver_create(name, &params);
+ cargs->args.create.handle = handle;
+ cargs->api_status = 0;
+
+copy_from_usr_error:
+ kfree(name);
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== nameserver_ioctl_delete ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * delete a name server instance
+ */
+static int nameserver_ioctl_delete(struct nameserver_cmd_args *cargs)
+{
+ cargs->api_status =
+ nameserver_delete(&cargs->args.delete_instance.handle);
+ return 0;
+}
+
+/*
+ * ======== nameserver_ioctl_add ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * add an entry into a nameserver instance
+ */
+static int nameserver_ioctl_add(struct nameserver_cmd_args *cargs)
+{
+ char *name = NULL;
+ char *buf = NULL;
+ void *entry;
+ s32 status = 0;
+ ulong size;
+
+	name = kmalloc(cargs->args.add.name_len + 1, GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+	name[cargs->args.add.name_len] = '\0';
+ size = copy_from_user(name, cargs->args.add.name,
+ cargs->args.add.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+
+ buf = kmalloc(cargs->args.add.len, GFP_KERNEL);
+ if (buf == NULL) {
+ status = -ENOMEM;
+ goto buf_alloc_error;
+ }
+
+ size = copy_from_user(buf, cargs->args.add.buf,
+ cargs->args.add.len);
+ if (size) {
+ status = -EFAULT;
+ goto buf_from_usr_error;
+ }
+
+ entry = nameserver_add(cargs->args.add.handle, name, buf,
+ cargs->args.add.len);
+ cargs->args.add.entry = entry;
+ cargs->args.add.node = entry;
+ cargs->api_status = 0;
+
+buf_from_usr_error:
+ kfree(buf);
+
+buf_alloc_error: /* Fall through */
+name_from_usr_error:
+ kfree(name);
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== nameserver_ioctl_add_uint32 ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * add a Uint32 entry into a nameserver instance
+ */
+static int nameserver_ioctl_add_uint32(struct nameserver_cmd_args *cargs)
+{
+ char *name = NULL;
+ void *entry;
+ s32 status = 0;
+ ulong size;
+
+	name = kmalloc(cargs->args.addu32.name_len + 1, GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+	name[cargs->args.addu32.name_len] = '\0';
+ size = copy_from_user(name, cargs->args.addu32.name,
+ cargs->args.addu32.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+
+ entry = nameserver_add_uint32(cargs->args.addu32.handle, name,
+ cargs->args.addu32.value);
+ cargs->args.addu32.entry = entry;
+ cargs->api_status = 0;
+
+name_from_usr_error:
+ kfree(name);
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== nameserver_ioctl_match ========
+ * Purpose:
+ * This wrapper function will call the nameserver function
+ * to retrieve the value portion of a name/value
+ * pair from local table
+ */
+static int nameserver_ioctl_match(struct nameserver_cmd_args *cargs)
+{
+ char *name = NULL;
+ s32 status = 0;
+ ulong size;
+
+	name = kmalloc(cargs->args.match.name_len + 1, GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+	name[cargs->args.match.name_len] = '\0';
+ size = copy_from_user(name, cargs->args.match.name,
+ cargs->args.match.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+
+ cargs->args.match.count = nameserver_match(cargs->args.match.handle,
+ name, &cargs->args.match.value);
+ cargs->api_status = 0;
+
+name_from_usr_error: /* Fall through */
+ kfree(name);
+
+exit:
+ return status;
+}
+
+/*
+ * ======== nameserver_ioctl_remove ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * remove a name/value pair from a name server
+ */
+static int nameserver_ioctl_remove(struct nameserver_cmd_args *cargs)
+{
+ char *name = NULL;
+ s32 status = 0;
+ ulong size;
+
+	name = kmalloc(cargs->args.remove.name_len + 1, GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+	name[cargs->args.remove.name_len] = '\0';
+ size = copy_from_user(name, cargs->args.remove.name,
+ cargs->args.remove.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+
+ cargs->api_status =
+ nameserver_remove(cargs->args.remove.handle, name);
+
+name_from_usr_error:
+ kfree(name);
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== nameserver_ioctl_remove_entry ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * remove an entry from a name server
+ */
+static int nameserver_ioctl_remove_entry(struct nameserver_cmd_args *cargs)
+{
+ cargs->api_status = nameserver_remove_entry(
+ cargs->args.remove_entry.handle,
+ cargs->args.remove_entry.entry);
+ return 0;
+}
+
+/*
+ * ======== nameserver_ioctl_get_local ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * retrieve the value portion of a name/value pair from local table
+ */
+static int nameserver_ioctl_get_local(struct nameserver_cmd_args *cargs)
+{
+ char *name = NULL;
+ char *value = NULL;
+ s32 status = 0;
+ ulong size;
+
+	name = kmalloc(cargs->args.get_local.name_len + 1, GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+	name[cargs->args.get_local.name_len] = '\0';
+ value = kmalloc(cargs->args.get_local.len, GFP_KERNEL);
+ if (value == NULL) {
+ status = -ENOMEM;
+ goto value_alloc_error;
+ }
+
+ size = copy_from_user(name, cargs->args.get_local.name,
+ cargs->args.get_local.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+
+ cargs->api_status = nameserver_get_local(
+ cargs->args.get_local.handle, name,
+ value, &cargs->args.get_local.len);
+ size = copy_to_user(cargs->args.get_local.value, value,
+ cargs->args.get_local.len);
+ if (size)
+ status = -EFAULT;
+
+name_from_usr_error:
+ kfree(value);
+
+value_alloc_error:
+ kfree(name);
+
+exit:
+ return status;
+}
+
+
+/*
+ * ======== nameserver_ioctl_get ========
+ * Purpose:
+ * This wrapper function will call the nameserver function to
+ * retrieve the value portion of a name/value pair from table
+ */
+static int nameserver_ioctl_get(struct nameserver_cmd_args *cargs)
+{
+ char *name = NULL;
+ char *value = NULL;
+ u16 *proc_id = NULL;
+ s32 status = 0;
+ ulong size;
+
+	name = kmalloc(cargs->args.get.name_len + 1, GFP_KERNEL);
+ if (name == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+	name[cargs->args.get.name_len] = '\0';
+ value = kmalloc(cargs->args.get.len, GFP_KERNEL);
+ if (value == NULL) {
+ status = -ENOMEM;
+ goto value_alloc_error;
+ }
+
+ if (cargs->args.get.proc_len > 0) {
+ proc_id = kmalloc(cargs->args.get.proc_len, GFP_KERNEL);
+ if (proc_id == NULL) {
+ status = -ENOMEM;
+ goto proc_alloc_error;
+ }
+ }
+
+ size = copy_from_user(name, cargs->args.get.name,
+ cargs->args.get.name_len);
+ if (size) {
+ status = -EFAULT;
+ goto name_from_usr_error;
+ }
+
+	if (proc_id != NULL) {
+		size = copy_from_user(proc_id, cargs->args.get.proc_id,
+				cargs->args.get.proc_len);
+		if (size) {
+			status = -EFAULT;
+			goto proc_from_usr_error;
+		}
+	}
+
+ cargs->api_status = nameserver_get(cargs->args.get.handle, name, value,
+ &cargs->args.get.len, proc_id);
+ size = copy_to_user(cargs->args.get.value, value,
+ cargs->args.get.len);
+ if (size)
+ status = -EFAULT;
+
+proc_from_usr_error: /* Fall through */
+name_from_usr_error:
+ kfree(proc_id);
+
+proc_alloc_error:
+ kfree(value);
+
+value_alloc_error:
+ kfree(name);
+
+exit:
+ return status;
+}
+
+/*
+ * ======== nameserver_ioctl ========
+ * Purpose:
+ * This ioctl interface for nameserver module
+ */
+int nameserver_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ s32 status = 0;
+ s32 size = 0;
+ struct nameserver_cmd_args __user *uarg =
+ (struct nameserver_cmd_args __user *)args;
+ struct nameserver_cmd_args cargs;
+
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+ if (status) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg,
+ sizeof(struct nameserver_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_NAMESERVER_ADD:
+ status = nameserver_ioctl_add(&cargs);
+ break;
+
+ case CMD_NAMESERVER_ADDUINT32:
+ status = nameserver_ioctl_add_uint32(&cargs);
+ break;
+
+ case CMD_NAMESERVER_GET:
+ status = nameserver_ioctl_get(&cargs);
+ break;
+
+ case CMD_NAMESERVER_GETLOCAL:
+ status = nameserver_ioctl_get_local(&cargs);
+ break;
+
+ case CMD_NAMESERVER_MATCH:
+ status = nameserver_ioctl_match(&cargs);
+ break;
+
+ case CMD_NAMESERVER_REMOVE:
+ status = nameserver_ioctl_remove(&cargs);
+ break;
+
+ case CMD_NAMESERVER_REMOVEENTRY:
+ status = nameserver_ioctl_remove_entry(&cargs);
+ break;
+
+ case CMD_NAMESERVER_PARAMS_INIT:
+ status = nameserver_ioctl_params_init(&cargs);
+ break;
+
+ case CMD_NAMESERVER_CREATE:
+ status = nameserver_ioctl_create(&cargs);
+ break;
+
+ case CMD_NAMESERVER_DELETE:
+ status = nameserver_ioctl_delete(&cargs);
+ break;
+
+ case CMD_NAMESERVER_GETHANDLE:
+ status = nameserver_ioctl_get_handle(&cargs);
+ break;
+
+ case CMD_NAMESERVER_SETUP:
+ status = nameserver_ioctl_setup(&cargs);
+ break;
+
+ case CMD_NAMESERVER_DESTROY:
+ status = nameserver_ioctl_destroy(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ status = -ENOTTY;
+ break;
+ }
+
+ if ((cargs.api_status == -ERESTARTSYS) || (cargs.api_status == -EINTR))
+ status = -ERESTARTSYS;
+
+ if (status < 0)
+ goto exit;
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs, sizeof(struct nameserver_cmd_args));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ return status;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/nameserver_remote.c b/drivers/dsp/syslink/multicore_ipc/nameserver_remote.c
new file mode 100644
index 000000000000..adc949927c9f
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/nameserver_remote.c
@@ -0,0 +1,48 @@
+/*
+ * nameserver_remote.c
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include <nameserver_remote.h>
+
+/* This will get data from remote name server */
+int nameserver_remote_get(const struct nameserver_remote_object *handle,
+ const char *instance_name, const char *name,
+ void *value, u32 *value_len, void *reserved)
+{
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(handle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (WARN_ON(unlikely(((instance_name == NULL) || (name == NULL)
+ || (value == NULL) || (value_len == NULL))))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ retval = handle->get(handle, instance_name,
+ name, value, value_len, NULL);
+
+exit:
+ if (retval < 0) {
+		printk(KERN_ERR "nameserver_remote_get failed! status = 0x%x\n",
+ retval);
+ }
+ return retval;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/nameserver_remotenotify.c b/drivers/dsp/syslink/multicore_ipc/nameserver_remotenotify.c
new file mode 100644
index 000000000000..9dd60557a5f4
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/nameserver_remotenotify.c
@@ -0,0 +1,824 @@
+/*
+ * nameserver_remotenotify.c
+ *
+ * The nameserver_remotenotify module provides functionality to get a
+ * name/value pair from a remote nameserver.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Utilities headers */
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+
+/* Syslink headers */
+#include <syslink/atomic_linux.h>
+
+/* Module level headers */
+#include <multiproc.h>
+#include <sharedregion.h>
+#include <gate_remote.h>
+#include <gatemp.h>
+#include <nameserver.h>
+#include <nameserver_remotenotify.h>
+#include <notify.h>
+
+
+/*
+ * Macro to make a correct module magic number with refCount
+ */
+#define NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(x) \
+ ((NAMESERVERREMOTENOTIFY_MODULEID << 12u) | (x))
+
+/*
+ * Cache line length
+ * TODO: Short-term hack. Make parameter or figure out some other way!
+ */
+#define NAMESERVERREMOTENOTIFY_CACHESIZE 128
+
+/*
+ * Maximum length of value buffer that can be stored in the NameServer
+ * managed by this NameServerRemoteNotify instance. Value in 4-byte words
+ */
+#define NAMESERVERREMOTENOTIFY_MAXVALUEBUFLEN 75
+
+/* Defines the nameserver_remotenotify state object, which contains all the
+ * module specific information
+ */
+struct nameserver_remotenotify_module_object {
+ struct nameserver_remotenotify_config cfg;
+ struct nameserver_remotenotify_config def_cfg;
+ struct nameserver_remotenotify_params def_inst_params;
+ bool is_setup;
+ void *gate_handle;
+ atomic_t ref_count;
+ void *nsr_handles[MULTIPROC_MAXPROCESSORS];
+};
+
+/*
+ * NameServer remote transport packet definition
+ */
+struct nameserver_remotenotify_message {
+ u32 request; /* If this is a request set to 1 */
+ u32 response; /* If this is a response set to 1 */
+	u32 request_status; /* If the request was successful, set to 1 */
+ u32 value; /* Holds value if len <= 4 */
+ u32 value_len; /* Len of value */
+ u32 instance_name[8]; /* Name of NameServer instance */
+ u32 name[8]; /* Size is 8 to align to 128 cache line boundary */
+ u32 value_buf[NAMESERVERREMOTENOTIFY_MAXVALUEBUFLEN];
+ /* Supports up to 300-byte value */
+};
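+
+/* The shared region of an instance holds two such messages laid out
+ * back to back, each padded to the 128-byte cache line: the
+ * lower-numbered processor requests through msg[0] and the
+ * higher-numbered one through msg[1] (see the 'offset' computation in
+ * the callback and in nameserver_remotenotify_get). Responses are
+ * written back into the slot the request arrived in.
+ */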
+
+/*
+ * NameServer remote transport state object definition
+ */
+struct nameserver_remotenotify_obj {
+ struct nameserver_remotenotify_message *msg[2];
+ struct nameserver_remotenotify_params params;
+ u16 region_id;
+ u16 remote_proc_id;
+ bool cache_enable;
+ struct mutex *local_gate;
+ void *gatemp;
+ struct semaphore *sem_handle; /* Binary semaphore */
+ u16 notify_event_id;
+};
+
+/*
+ * NameServer remote transport state object definition
+ */
+struct nameserver_remotenotify_object {
+ int (*get)(void *,
+ const char *instance_name, const char *name,
+ void *value, u32 value_len, void *reserved);
+ void *obj; /* Implementation specific object */
+};
+
+/*
+ * nameserver_remotenotify state object variable
+ */
+static struct nameserver_remotenotify_module_object
+ nameserver_remotenotify_state = {
+ .is_setup = false,
+ .gate_handle = NULL,
+ .def_cfg.notify_event_id = 1u,
+ .def_inst_params.gatemp = NULL,
+ .def_inst_params.shared_addr = 0x0,
+};
+
+static void _nameserver_remotenotify_callback(u16 proc_id, u16 line_id,
+ u32 event_id, uint *arg, u32 payload);
+
+/*
+ * This will get the default configuration for the nameserver remote
+ * module. This function can be called by the application to get their
+ * configuration parameter to nameserver_remotenotify_setup filled
+ * in by the nameserver_remotenotify module with the default
+ * parameters. If the user does not wish to make any change in the
+ * default parameters, this API is not required to be called
+ */
+void nameserver_remotenotify_get_config(
+ struct nameserver_remotenotify_config *cfg)
+{
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(cfg == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (nameserver_remotenotify_state.is_setup == false)
+ memcpy(cfg, &(nameserver_remotenotify_state.def_cfg),
+ sizeof(struct nameserver_remotenotify_config));
+ else
+ memcpy(cfg, &(nameserver_remotenotify_state.cfg),
+ sizeof(struct nameserver_remotenotify_config));
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "nameserver_remotenotify_get_config failed!"
+			" retval = 0x%x\n", retval);
+ }
+ return;
+}
+EXPORT_SYMBOL(nameserver_remotenotify_get_config);
+
+/*
+ * This will setup the nameserver_remotenotify module
+ * This function sets up the nameserver_remotenotify module. This
+ * function must be called before any other instance-level APIs can
+ * be invoked
+ * Module-level configuration needs to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, then nameserver_remotenotify_get_config can be called
+ * to get the configuration filled with the default values. After
+ * this, only the required configuration values can be changed. If
+ * the user does not wish to make any change in the default
+ * parameters, the application can simply call
+ * nameserver_remotenotify_setup with NULL parameters. The default
+ * parameters would get automatically used
+ */
+int nameserver_remotenotify_setup(struct nameserver_remotenotify_config *cfg)
+{
+ struct nameserver_remotenotify_config tmp_cfg;
+ s32 retval = 0;
+ struct mutex *lock = NULL;
+
+	/* This initializes the ref_count if it is not already initialized;
+	 * the upper 16 bits are written with the module ID to ensure
+	 * correctness of the ref_count variable.
+	 */
+ atomic_cmpmask_and_set(&nameserver_remotenotify_state.ref_count,
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0));
+ if (atomic_inc_return(&nameserver_remotenotify_state.ref_count)
+ != NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ if (cfg == NULL) {
+ nameserver_remotenotify_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+
+ /* Create a default gate handle for local module protection */
+ lock = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (lock == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+ mutex_init(lock);
+ nameserver_remotenotify_state.gate_handle = lock;
+
+ memcpy(&nameserver_remotenotify_state.cfg, cfg,
+ sizeof(struct nameserver_remotenotify_config));
+ memset(&nameserver_remotenotify_state.nsr_handles, 0,
+ (sizeof(void *) * MULTIPROC_MAXPROCESSORS));
+ nameserver_remotenotify_state.is_setup = true;
+ return 0;
+
+exit:
+	printk(KERN_ERR "nameserver_remotenotify_setup failed! retval = 0x%x\n",
+ retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_remotenotify_setup);
+
+/*
+ * This will destroy the nameserver_remotenotify module.
+ * Once this function is called, other nameserver_remotenotify
+ * module APIs, except for the nameserver_remotenotify_get_config
+ * API cannot be called anymore.
+ */
+int nameserver_remotenotify_destroy(void)
+{
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_remotenotify_state.ref_count),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (!(atomic_dec_return(&nameserver_remotenotify_state.ref_count)
+ == NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0))) {
+ retval = 1;
+ goto exit;
+ }
+
+ kfree(nameserver_remotenotify_state.gate_handle);
+
+ nameserver_remotenotify_state.is_setup = false;
+ return 0;
+
+exit:
+	printk(KERN_ERR "nameserver_remotenotify_destroy failed! retval = 0x%x\n",
+ retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_remotenotify_destroy);
+
+/* This will get the current configuration values */
+void nameserver_remotenotify_params_init(
+ struct nameserver_remotenotify_params *params)
+{
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_remotenotify_state.ref_count),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ printk(KERN_ERR "nameserver_remotenotify_params_init failed: "
+ "Module is not initialized!\n");
+ return;
+ }
+
+ if (WARN_ON(unlikely(params == NULL))) {
+ printk(KERN_ERR "nameserver_remotenotify_params_init failed: "
+ "Argument of type(nameserver_remotenotify_params *) "
+ "is NULL!\n");
+ return;
+ }
+
+ memcpy(params, &(nameserver_remotenotify_state.def_inst_params),
+ sizeof(struct nameserver_remotenotify_params));
+
+}
+EXPORT_SYMBOL(nameserver_remotenotify_params_init);
+
+/* This will be called when a notify event is received */
+static void _nameserver_remotenotify_callback(u16 proc_id, u16 line_id,
+ u32 event_id, uint *arg, u32 payload)
+{
+ struct nameserver_remotenotify_obj *handle = NULL;
+ u16 offset = 0;
+ void *nshandle = NULL;
+ u32 value_len;
+ int *key;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_remotenotify_state.ref_count),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(arg == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(proc_id >= multiproc_get_num_processors()))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ handle = (struct nameserver_remotenotify_obj *)arg;
+	if (multiproc_self() > proc_id)
+ offset = 1;
+
+#if 0
+ if (handle->cache_enable) {
+ /* write back shared memory that was modified */
+ Cache_wbInv(handle->msg[0],
+ sizeof(struct nameserver_remotenotify_message) << 1,
+ Cache_Type_ALL, TRUE);
+ }
+#endif
+
+ if (handle->msg[1 - offset]->request != true)
+ goto signal_response;
+
+ /* This is a request */
+ value_len = handle->msg[1 - offset]->value_len;
+ nshandle = nameserver_get_handle((const char *)
+ handle->msg[1 - offset]->instance_name);
+ if (nshandle != NULL) {
+ /* Search for the NameServer entry */
+ if (value_len == sizeof(u32)) {
+ retval = nameserver_get_local_uint32(nshandle,
+ (const char *) handle->msg[1 - offset]->
+ name, &handle->msg[1 - offset]->value);
+ } else {
+ retval = nameserver_get_local(nshandle,
+ (const char *) handle->msg[1 - offset]->
+ name,
+ &handle->msg[1 - offset]->value_buf,
+ &value_len);
+ }
+ }
+ BUG_ON(retval != 0 && retval != -ENOENT);
+
+ key = gatemp_enter(handle->gatemp);
+ if (retval == 0) {
+ handle->msg[1 - offset]->request_status = true;
+ handle->msg[1 - offset]->value_len = value_len;
+ }
+ /* Send a response back */
+ handle->msg[1 - offset]->response = true;
+ handle->msg[1 - offset]->request = false;
+
+#if 0
+ if (handle->cache_enable) {
+ /* write back shared memory that was modified */
+ Cache_wbInv(handle->msg[1 - offset],
+ sizeof(struct nameserver_remotenotify_message),
+ Cache_Type_ALL, TRUE);
+ }
+#endif
+ /* now we can leave the gate */
+ gatemp_leave(handle->gatemp, key);
+
+ /*
+	 * The NotifyDriver handle must exist at this point,
+	 * otherwise the notify_send_event would have failed
+ */
+ retval = notify_send_event(handle->remote_proc_id, 0,
+ (handle->notify_event_id | (NOTIFY_SYSTEMKEY << 16)),
+ 0xCBC7, false);
+
+signal_response:
+ if (handle->msg[offset]->response == true)
+ up(handle->sem_handle);
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "nameserver_remotenotify_callback failed! "
+ "status = 0x%x\n", retval);
+ }
+ return;
+}
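+
+/* Sketch of the request/response handshake handled above, assuming
+ * processors A (requester) and B (responder):
+ *
+ *	A: fills its own msg slot (request = 1) and sends a notify
+ *	   event to B
+ *	B: this callback looks the name up in its local NameServer,
+ *	   writes value/request_status/response into A's slot and
+ *	   notifies A back
+ *	A: this callback sees response == true in A's slot and posts
+ *	   sem_handle, waking the blocked nameserver_remotenotify_get()
+ */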
+
+/* This will get a remote name value pair */
+int nameserver_remotenotify_get(void *rhandle, const char *instance_name,
+ const char *name, void *value, u32 *value_len,
+ void *reserved)
+{
+ struct nameserver_remotenotify_object *handle = NULL;
+ struct nameserver_remotenotify_obj *obj = NULL;
+ s32 offset = 0;
+ s32 len;
+ int *key;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_remotenotify_state.ref_count),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(rhandle == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(instance_name == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(name == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(value == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+	if (WARN_ON(unlikely(value_len == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely((*value_len == 0) || \
+ (*value_len > NAMESERVERREMOTENOTIFY_MAXVALUEBUFLEN)))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ handle = (struct nameserver_remotenotify_object *)rhandle;
+ obj = (struct nameserver_remotenotify_obj *)handle->obj;
+ if (multiproc_self() > obj->remote_proc_id)
+ offset = 1;
+
+#if 0
+ if (obj->cache_enable) {
+ /* write back shared memory that was modified */
+ Cache_wbInv(obj->msg[offset],
+ sizeof(struct nameserver_remotenotify_message),
+ Cache_Type_ALL, TRUE);
+ }
+#endif
+	/* Allow only one request to be processed at a time */
+ retval = mutex_lock_interruptible(obj->local_gate);
+ if (retval)
+ goto exit;
+
+ key = gatemp_enter(obj->gatemp);
+ /* This is a request message */
+ obj->msg[offset]->request = 1;
+ obj->msg[offset]->response = 0;
+ obj->msg[offset]->request_status = 0;
+ obj->msg[offset]->value_len = *value_len;
+	len = strlen(instance_name) + 1; /* Count the terminating null */
+ if (len >= 32) {
+ retval = -EINVAL;
+ goto inval_len_error;
+ }
+ strncpy((char *)obj->msg[offset]->instance_name, instance_name, len);
+ len = strlen(name) + 1;
+ if (len >= 32) {
+ retval = -EINVAL;
+ goto inval_len_error;
+ }
+ strncpy((char *)obj->msg[offset]->name, name, len);
+
+ /* Send the notification to remote processor */
+ retval = notify_send_event(obj->remote_proc_id, 0,
+ (obj->notify_event_id | (NOTIFY_SYSTEMKEY << 16)),
+ 0x8307, /* Payload */
+			false); /* Do not wait for the event to be cleared */
+ if (retval < 0) {
+ /* Undo previous operations */
+ obj->msg[offset]->request = 0;
+ obj->msg[offset]->value_len = 0;
+ goto notify_error;
+ }
+ gatemp_leave(obj->gatemp, key);
+
+ /* Pend on the semaphore */
+ retval = down_interruptible(obj->sem_handle);
+ if (retval)
+		goto unlock;
+
+ key = gatemp_enter(obj->gatemp);
+
+ if (obj->cache_enable) {
+#if 0
+ /* write back shared memory that was modified */
+ Cache_wbInv(obj->msg[offset],
+ sizeof(struct nameserver_remotenotify_message),
+ Cache_Type_ALL, TRUE);
+#endif
+ }
+ if (obj->msg[offset]->request_status != true) {
+ retval = -ENOENT;
+ goto request_error;
+ }
+
+ if (obj->msg[offset]->value_len == sizeof(u32))
+ memcpy((void *)value, (void *) &(obj->msg[offset]->value),
+ sizeof(u32));
+ else
+ memcpy((void *)value, (void *)&(obj->msg[offset]->value_buf),
+ obj->msg[offset]->value_len);
+ *value_len = obj->msg[offset]->value_len;
+
+ obj->msg[offset]->request_status = false;
+ retval = 0;
+
+inval_len_error:
+notify_error:
+request_error:
+ obj->msg[offset]->request = 0;
+ obj->msg[offset]->response = 0;
+ gatemp_leave(obj->gatemp, key);
+unlock:
+	/* Unblock so that subsequent requests can be honored */
+	mutex_unlock(obj->local_gate);
+exit:
+
+ if (retval < 0)
+ printk(KERN_ERR "nameserver_remotenotify_get failed! "
+			"status = 0x%x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_remotenotify_get);
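+
+/* Note: the call above is fully synchronous; local_gate serializes
+ * requesters on the local processor and the caller then sleeps on
+ * sem_handle until the remote side responds, so it must not be
+ * invoked from atomic context.
+ */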
+
+/* This will setup the nameserver remote module */
+void *nameserver_remotenotify_create(u16 remote_proc_id,
+ const struct nameserver_remotenotify_params *params)
+{
+ struct nameserver_remotenotify_object *handle = NULL;
+ struct nameserver_remotenotify_obj *obj = NULL;
+ u32 offset = 0;
+ s32 retval = 0;
+ s32 retval1 = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_remotenotify_state.ref_count),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+	if (WARN_ON(unlikely((remote_proc_id == multiproc_self()) ||
+			(remote_proc_id >= multiproc_get_num_processors())))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(params == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(params->shared_addr == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ obj = kzalloc(sizeof(struct nameserver_remotenotify_obj), GFP_KERNEL);
+ handle = kmalloc(sizeof(struct nameserver_remotenotify_object),
+ GFP_KERNEL);
+ if (obj == NULL || handle == NULL) {
+ retval = -ENOMEM;
+ goto mem_error;
+ }
+
+ handle->get = (void *)&nameserver_remotenotify_get;
+ handle->obj = (void *)obj;
+ obj->local_gate = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (obj->local_gate == NULL) {
+ retval = -ENOMEM;
+ goto mem_error;
+ }
+
+ obj->remote_proc_id = remote_proc_id;
+ if (multiproc_self() > remote_proc_id)
+ offset = 1;
+
+ obj->region_id = sharedregion_get_id(params->shared_addr);
+ if (((u32) params->shared_addr % sharedregion_get_cache_line_size(
+ obj->region_id)) != 0) {
+ retval = -EFAULT;
+ goto notify_error;
+ }
+
+ obj->msg[0] = (struct nameserver_remotenotify_message *)
+ (params->shared_addr);
+ obj->msg[1] = (struct nameserver_remotenotify_message *)
+ ((u32)obj->msg[0] +
+ sizeof(struct nameserver_remotenotify_message));
+ obj->gatemp = params->gatemp;
+ obj->remote_proc_id = remote_proc_id;
+ obj->notify_event_id = \
+ nameserver_remotenotify_state.cfg.notify_event_id;
+ /* Clear out self shared structures */
+ memset(obj->msg[offset], 0,
+ sizeof(struct nameserver_remotenotify_message));
+ memcpy((void *)&obj->params, (void *)params,
+ sizeof(struct nameserver_remotenotify_params));
+
+ /* determine cacheability of the object from the regionId */
+ obj->cache_enable = sharedregion_is_cache_enabled(obj->region_id);
+ if (obj->cache_enable) {
+#if 0
+ /* write back shared memory that was modified */
+ Cache_wbInv(obj->msg[offset],
+ sizeof(struct nameserver_remotenotify_message),
+ Cache_Type_ALL, TRUE);
+#endif
+ }
+
+ retval = notify_register_event_single(remote_proc_id,
+ 0, /* TBD: Interrupt line id */
+ (obj->notify_event_id | \
+ (NOTIFY_SYSTEMKEY << 16)),
+ _nameserver_remotenotify_callback,
+ (void *)obj);
+ if (retval < 0)
+ goto notify_error;
+
+ retval = nameserver_register_remote_driver((void *)handle,
+ remote_proc_id);
+ if (retval < 0)
+ goto register_error;
+
+ obj->sem_handle = kzalloc(sizeof(struct semaphore), GFP_KERNEL);
+ if (obj->sem_handle == NULL) {
+ retval = -ENOMEM;
+ goto sem_alloc_error;
+ }
+
+ sema_init(obj->sem_handle, 0);
+ /* Initialized last since the mutex starts out in the unlocked state */
+ mutex_init(obj->local_gate);
+ return (void *)handle;
+
+sem_alloc_error:
+ nameserver_unregister_remote_driver(remote_proc_id);
+
+register_error:
+ /* Result is intentionally ignored on this failure path */
+ retval1 = notify_unregister_event_single(obj->remote_proc_id, 0,
+ (obj->notify_event_id | (NOTIFY_SYSTEMKEY << 16)));
+
+notify_error:
+ kfree(obj->local_gate);
+
+mem_error:
+ kfree(obj);
+ kfree(handle);
+
+exit:
+ printk(KERN_ERR "nameserver_remotenotify_create failed! "
+ "status = 0x%x\n", retval);
+ return NULL;
+}
+EXPORT_SYMBOL(nameserver_remotenotify_create);
+
+/* This will delete the nameserver remote transport instance */
+int nameserver_remotenotify_delete(void **rhandle)
+{
+ struct nameserver_remotenotify_object *handle = NULL;
+ struct nameserver_remotenotify_obj *obj = NULL;
+ s32 retval = 0;
+ s32 retval1 = 0;
+ struct mutex *gate = NULL;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_remotenotify_state.ref_count),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely((rhandle == NULL) || (*rhandle == NULL)))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ handle = (struct nameserver_remotenotify_object *)(*rhandle);
+ obj = (struct nameserver_remotenotify_obj *)handle->obj;
+ if (obj == NULL) {
+ retval = -EINVAL;
+ goto free_handle;
+ }
+
+ gate = obj->local_gate;
+ retval = mutex_lock_interruptible(gate);
+ if (retval)
+ goto free_handle;
+
+ kfree(obj->sem_handle);
+ obj->sem_handle = NULL;
+
+ retval1 = nameserver_unregister_remote_driver(obj->remote_proc_id);
+ /* Should this bug_on/warn_on instead of just returning the error? */
+ if (retval1 < 0 && retval >= 0)
+ retval = retval1;
+
+ /* Unregister the event from Notify */
+ retval1 = notify_unregister_event_single(obj->remote_proc_id, 0,
+ (obj->notify_event_id | (NOTIFY_SYSTEMKEY << 16)));
+ if (retval1 < 0 && retval >= 0)
+ retval = retval1;
+ kfree(obj);
+ mutex_unlock(gate);
+ kfree(gate);
+
+free_handle:
+ kfree(handle);
+ *rhandle = NULL;
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "nameserver_remotenotify_delete failed! "
+ "status = 0x%x\n", retval);
+ }
+ return retval;
+}
+EXPORT_SYMBOL(nameserver_remotenotify_delete);
+
+/* This will give shared memory requirements for the
+ * nameserver remote transport instance */
+uint nameserver_remotenotify_shared_mem_req(const
+ struct nameserver_remotenotify_params *params)
+{
+ uint total_size = 0;
+
+ /* params is not used - suppress the unused-parameter warning. */
+ (void)params;
+
+ /* Two Message structs are required. One for sending request and
+ * another one for sending response. */
+ if (multiproc_get_num_processors() > 1)
+ total_size = \
+ (2 * sizeof(struct nameserver_remotenotify_message));
+
+ return total_size;
+}
+EXPORT_SYMBOL(nameserver_remotenotify_shared_mem_req);
+
+int nameserver_remotenotify_attach(u16 remote_proc_id, void *shared_addr)
+{
+ struct nameserver_remotenotify_params nsr_params;
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_remotenotify_state.ref_count),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(shared_addr == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(gatemp_get_default_remote() == NULL))) {
+ retval = -1;
+ goto exit;
+ }
+
+ /* Use default GateMP */
+ nameserver_remotenotify_params_init(&nsr_params);
+ nsr_params.gatemp = gatemp_get_default_remote();
+ nsr_params.shared_addr = shared_addr;
+
+ /* create only if notify driver has been created to remote proc */
+ if (!notify_is_registered(remote_proc_id, 0)) {
+ retval = -1;
+ goto exit;
+ }
+
+ nameserver_remotenotify_state.nsr_handles[remote_proc_id] =
+ nameserver_remotenotify_create(remote_proc_id, &nsr_params);
+ if (nameserver_remotenotify_state.nsr_handles[remote_proc_id] == NULL) {
+ retval = -1;
+ goto exit;
+ }
+ return 0;
+
+exit:
+ printk(KERN_ERR "nameserver_remotenotify_attach failed! status = 0x%x",
+ retval);
+ return retval;
+}
+
+void *_nameserver_remotenotify_get_handle(u16 remote_proc_id)
+{
+ void *handle = NULL;
+
+ if (remote_proc_id < multiproc_get_num_processors()) {
+ handle = \
+ nameserver_remotenotify_state.nsr_handles[remote_proc_id];
+ }
+
+ return handle;
+}
+
+
+int nameserver_remotenotify_detach(u16 remote_proc_id)
+{
+ void *handle = NULL;
+ int retval = 0;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(nameserver_remotenotify_state.ref_count),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(0),
+ NAMESERVERREMOTENOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ handle = _nameserver_remotenotify_get_handle(remote_proc_id);
+ if (handle == NULL) {
+ retval = -1;
+ goto exit;
+ }
+
+ nameserver_remotenotify_delete(&handle);
+ nameserver_remotenotify_state.nsr_handles[remote_proc_id] = NULL;
+ return 0;
+
+exit:
+ printk(KERN_ERR "nameserver_remotenotify_detach failed! status = 0x%x",
+ retval);
+ return retval;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/platform.c b/drivers/dsp/syslink/multicore_ipc/platform.c
new file mode 100644
index 000000000000..e58b7ddb9944
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/platform.c
@@ -0,0 +1,1877 @@
+/*
+ * platform.c
+ *
+ * Implementation of platform initialization logic for Syslink IPC.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+
+/* Standard header files */
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+/* SysLink device specific headers */
+#include "../procmgr/proc4430/proc4430.h"
+
+/* Ipu Power Management Header (ipu_pm) */
+#include "../ipu_pm/ipu_pm.h"
+/* Module level headers */
+#include <multiproc.h>
+#include <platform.h>
+#include <gatemp.h>
+#include <gatepeterson.h>
+#include <gatehwspinlock.h>
+#include <sharedregion.h>
+#include <listmp.h>
+#include <_listmp.h>
+#include <heap.h>
+#include <heapbufmp.h>
+#include <heapmemmp.h>
+#include <messageq.h>
+#include <transportshm.h>
+#include <notify.h>
+#include <ipc.h>
+
+#include <notify_ducatidriver.h>
+#include <nameserver.h>
+#include <nameserver_remote.h>
+#include <nameserver_remotenotify.h>
+#include <procmgr.h>
+
+#include <platform_mem.h>
+
+
+/** ============================================================================
+ * Macros.
+ * ============================================================================
+ */
+#define RESETVECTOR_SYMBOL "_Ipc_ResetVector"
+
+/** ============================================================================
+ * Application specific configuration, please change these values according to
+ * your application's need.
+ * ============================================================================
+ */
+/*! @brief Start of IPC shared memory */
+#define SHAREDMEMORY_PHY_BASEADDR CONFIG_DUCATI_BASEIMAGE_PHYS_ADDR
+#define SHAREDMEMORY_PHY_BASESIZE 0x00100000
+
+/*! @brief Start of IPC shared memory for SysM3 */
+#define SHAREDMEMORY_PHY_BASEADDR_SYSM3 SHAREDMEMORY_PHY_BASEADDR
+#define SHAREDMEMORY_PHY_BASESIZE_SYSM3 0x00054000
+
+/*! @brief Start of IPC shared memory AppM3 */
+#define SHAREDMEMORY_PHY_BASEADDR_APPM3 (SHAREDMEMORY_PHY_BASEADDR + 0x54000)
+#define SHAREDMEMORY_PHY_BASESIZE_APPM3 0x000AC000
+
+/*! @brief Start of IPC SHM for SysM3 */
+#define SHAREDMEMORY_SLV_VRT_BASEADDR_SYSM3 0xA0000000
+#define SHAREDMEMORY_SLV_VRT_BASESIZE_SYSM3 0x00054000
+
+/*! @brief Start of IPC SHM for AppM3 */
+#define SHAREDMEMORY_SLV_VRT_BASEADDR_APPM3 0xA0054000
+#define SHAREDMEMORY_SLV_VRT_BASESIZE_APPM3 0x000AC000
+
+/*! @brief Start of Code memory for SysM3 */
+#define SHAREDMEMORY_SLV_VRT_CODE0_BASEADDR 0x00000000
+#define SHAREDMEMORY_SLV_VRT_CODE0_BASESIZE 0x00200000
+
+/*! @brief Start of Code section for SysM3 */
+#define SHAREDMEMORY_PHY_CODE0_BASEADDR (SHAREDMEMORY_PHY_BASEADDR + 0x100000)
+#define SHAREDMEMORY_PHY_CODE0_BASESIZE 0x00200000
+
+/*! @brief Start of Code memory for AppM3 */
+#define SHAREDMEMORY_SLV_VRT_CODE1_BASEADDR 0x00800000
+#define SHAREDMEMORY_SLV_VRT_CODE1_BASESIZE 0x00200000
+
+/*! @brief Start of Code section for AppM3 */
+#define SHAREDMEMORY_PHY_CODE1_BASEADDR (SHAREDMEMORY_PHY_CODE0_BASEADDR + \
+ SHAREDMEMORY_SLV_VRT_CODE1_BASEADDR)
+#define SHAREDMEMORY_PHY_CODE1_BASESIZE 0x00200000
+
+/*! @brief Start of Const section for SysM3 */
+#define SHAREDMEMORY_PHY_CONST0_BASEADDR (SHAREDMEMORY_PHY_CODE0_BASEADDR + \
+ 0x1000000)
+#define SHAREDMEMORY_PHY_CONST0_BASESIZE 0x00100000
+
+/*! @brief Start of Const section for SysM3 */
+#define SHAREDMEMORY_SLV_VRT_CONST0_BASEADDR 0x80000000
+#define SHAREDMEMORY_SLV_VRT_CONST0_BASESIZE 0x00100000
+
+/*! @brief Start of Const section for AppM3 */
+#define SHAREDMEMORY_PHY_CONST1_BASEADDR (SHAREDMEMORY_PHY_CONST0_BASEADDR + \
+ SHAREDMEMORY_SLV_VRT_CONST0_BASESIZE)
+#define SHAREDMEMORY_PHY_CONST1_BASESIZE 0x00100000
+
+/*! @brief Start of Const section for AppM3 */
+#define SHAREDMEMORY_SLV_VRT_CONST1_BASEADDR 0x80100000
+#define SHAREDMEMORY_SLV_VRT_CONST1_BASESIZE 0x00100000
+
+/*! @brief Start of SW DMM shared memory */
+#define SHAREDMEMORY_SWDMM_PHY_BASEADDR (SHAREDMEMORY_PHY_BASEADDR + 0x2400000)
+#define SHAREDMEMORY_SWDMM_PHY_BASESIZE 0x00C00000
+
+/*! @brief Start of SW DMM SHM for Ducati */
+#define SHAREDMEMORY_SWDMM_SLV_VRT_BASEADDR 0x81300000
+#define SHAREDMEMORY_SWDMM_SLV_VRT_BASESIZE 0x00C00000
+
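+/* Resulting physical layout relative to SHAREDMEMORY_PHY_BASEADDR,
+ * derived from the macros above:
+ * 0x0000000 - 0x0053FFF : IPC shared memory, SysM3
+ * 0x0054000 - 0x00FFFFF : IPC shared memory, AppM3
+ * 0x0100000 - 0x02FFFFF : Code, SysM3
+ * 0x0900000 - 0x0AFFFFF : Code, AppM3
+ * 0x1100000 - 0x11FFFFF : Const, SysM3
+ * 0x1200000 - 0x12FFFFF : Const, AppM3
+ * 0x2400000 - 0x2FFFFFF : SW DMM
+ */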
+#define USE_NEW_PROCMGR 0
+
+/** ============================================================================
+ * Struct & Enums.
+ * ============================================================================
+ */
+
+/* Struct for reading platform specific GatePeterson configuration values */
+struct platform_gatepeterson_params {
+ u32 shared_mem_addr; /* Shared memory address */
+ u32 shared_mem_size; /* Shared memory size */
+ u32 remote_proc_id; /* Remote processor identifier */
+};
+
+struct platform_notify_ducatidrv_params {
+ u32 shared_mem_addr; /* Shared memory address */
+ u32 shared_mem_size; /* Shared memory size */
+ u16 remote_proc_id; /* Remote processor identifier */
+};
+
+struct platform_nameserver_remotenotify_params {
+ u32 shared_mem_addr; /* Shared memory address */
+ u32 shared_mem_size; /* Shared memory size */
+ u32 notify_event_no; /* Notify Event number to be used */
+};
+
+struct platform_heapbuf_params {
+ u32 shared_mem_addr; /* Shared memory address */
+ u32 shared_mem_size; /* Shared memory size */
+ u32 shared_buf_addr; /* Shared buffer address */
+ u32 shared_buf_size; /* Shared buffer size */
+ u32 num_blocks;
+ u32 block_size;
+};
+
+struct platform_transportshm_params {
+ u32 shared_mem_addr; /* Shared memory address */
+ u32 shared_mem_size; /* Shared memory size */
+ u32 notify_event_no; /* Notify Event number */
+};
+
+/** ============================================================================
+ * Application specific configuration, please change these values according to
+ * your application's need.
+ * ============================================================================
+ */
+/*
+ * Structure defining config parameters for overall System.
+ */
+struct platform_config {
+ struct multiproc_config multiproc_config;
+ /* multiproc_config parameter */
+
+ struct gatemp_config gatemp_config;
+ /* gatemp_config parameter */
+
+ struct gatepeterson_config gatepeterson_config;
+ /* gatepeterson_config parameter */
+
+ struct gatehwspinlock_config gatehwspinlock_config;
+ /* gatehwspinlock parameter */
+
+ struct sharedregion_config sharedregion_config;
+ /* sharedregion_config parameter */
+
+ struct messageq_config messageq_config;
+ /* messageq_config parameter */
+
+ struct notify_config notify_config;
+ /* notify config parameter */
+ struct ipu_pm_config ipu_pm_config;
+ /* ipu_pm config parameter */
+
+ struct proc_mgr_config proc_mgr_config;
+ /* processor manager config parameter */
+
+ struct heapbufmp_config heapbufmp_config;
+ /* heapbufmp_config parameter */
+
+ struct heapmemmp_config heapmemmp_config;
+ /* heapmemmp_config parameter */
+#if 0
+
+ struct heapmultibuf_config heapmultibuf_config;
+ /* heapmultibuf_config parameter */
+#endif
+ struct listmp_config listmp_config;
+ /* listmp_config parameter */
+
+ struct transportshm_config transportshm_config;
+ /* transportshm_config parameter */
+#if 0
+ struct ringio_config ringio_config;
+ /* ringio_config parameter */
+
+ struct ringiotransportshm_config ringiotransportshm_config;
+ /* ringiotransportshm_config parameter */
+#endif
+ struct notify_ducatidrv_config notify_ducatidrv_config;
+ /* notify_ducatidrv_config parameter */
+
+ struct nameserver_remotenotify_config nameserver_remotenotify_config;
+ /* nameserver_remotenotify_config parameter */
+#if 0
+ struct clientnotifymgr_config clinotifymgr_config_params;
+ /* clientnotifymgr_config parameter */
+
+ struct frameqbufmgr_config frameqbufmgr_config_params;
+ /* frameqbufmgr_config parameter */
+
+ struct frameq_config frameq_config_params;
+ /* frameq_config parameter */
+#endif
+};
+
+
+/* Configuration structure embedded in the slave binary; it is read out of
+ * slave memory in platform_load_callback() */
+struct platform_slave_config {
+ u32 cache_line_size;
+ u32 br_offset;
+ u32 sr0_memory_setup;
+ u32 setup_messageq;
+ u32 setup_notify;
+ u32 setup_ipu_pm;
+ u32 proc_sync;
+ u32 num_srs;
+};
+
+struct platform_proc_config_params {
+ u32 use_notify;
+ u32 use_messageq;
+ u32 use_heapbuf;
+ u32 use_frameq;
+ u32 use_ring_io;
+ u32 use_listmp;
+ u32 use_nameserver;
+};
+
+/* shared region configuration */
+struct platform_slave_sr_config {
+ u32 entry_base;
+ u32 entry_len;
+ u32 owner_proc_id;
+ u32 id;
+ u32 create_heap;
+ u32 cache_line_size;
+};
+
+/* Shared region configuration information for host side. */
+struct platform_host_sr_config {
+ u16 ref_count;
+};
+
+/* structure for platform instance */
+struct platform_object {
+ void *pm_handle;
+ /* handle to the proc_mgr instance used */
+ void *phandle;
+ /* handle to the processor instance used */
+ struct platform_slave_config slave_config;
+ /* slave embedded config */
+ struct platform_slave_sr_config *slave_sr_config;
+ /* shared region details from slave */
+};
+
+
+/* structure for platform instance */
+struct platform_module_state {
+ bool multiproc_init_flag;
+ /* multiproc initialize flag */
+ bool gatemp_init_flag;
+ /* gatemp initialize flag */
+ bool gatepeterson_init_flag;
+ /* gatepeterson initialize flag */
+ bool gatehwspinlock_init_flag;
+ /* gatehwspinlock initialize flag */
+ bool sharedregion_init_flag;
+ /* sharedregion initialize flag */
+ bool listmp_init_flag;
+ /* listmp initialize flag */
+ bool messageq_init_flag;
+ /* messageq initialize flag */
+ bool ringio_init_flag;
+ /* ringio initialize flag */
+ bool notify_init_flag;
+ /* notify initialize flag */
+ bool ipu_pm_init_flag;
+ /* ipu_pm initialize flag */
+ bool proc_mgr_init_flag;
+ /* processor manager initialize flag */
+ bool heapbufmp_init_flag;
+ /* heapbufmp initialize flag */
+ bool heapmemmp_init_flag;
+ /* heapmemmp initialize flag */
+ bool heapmultibuf_init_flag;
+ /* heapmultibuf initialize flag */
+ bool nameserver_init_flag;
+ /* nameserver initialize flag */
+ bool transportshm_init_flag;
+ /* transportshm initialize flag */
+ bool ringiotransportshm_init_flag;
+ /* ringiotransportshm initialize flag */
+ bool notify_ducatidrv_init_flag;
+ /* notify_ducatidrv initialize flag */
+ bool nameserver_remotenotify_init_flag;
+ /* nameserver_remotenotify initialize flag */
+ bool clientnotifymgr_init_flag;
+ /* clientnotifymgr initialize flag */
+ bool frameqbufmgr_init_flag;
+ /* frameqbufmgr initialize flag */
+ bool frameq_init_flag;
+ /* frameq initialize flag */
+ bool platform_init_flag;
+ /* flag to indicate platform initialization status */
+ bool platform_mem_init_flag;
+ /* Platform memory manager initialize flag */
+};
+
+
+/* =============================================================================
+ * GLOBALS
+ * =============================================================================
+ */
+static struct platform_object platform_objects[MULTIPROC_MAXPROCESSORS];
+static struct platform_module_state platform_module_state;
+static struct platform_module_state *platform_module = &platform_module_state;
+static u16 platform_num_srs_unmapped;
+static struct platform_host_sr_config *platform_host_sr_config;
+
+/* ============================================================================
+ * Forward declarations of internal functions.
+ * ============================================================================
+ */
+static int _platform_setup(void);
+static int _platform_destroy(void);
+
+/* function to read slave memory */
+static int
+_platform_read_slave_memory(u16 proc_id,
+ u32 addr,
+ void *value,
+ u32 *num_bytes);
+
+/* function to write slave memory */
+static int
+_platform_write_slave_memory(u16 proc_id,
+ u32 addr,
+ void *value,
+ u32 *num_bytes);
+
+
+/** ============================================================================
+ * Macros and types
+ * ============================================================================
+ */
+/*!
+ * @brief Number of slave memory entries for OMAP4430.
+ */
+#define NUM_MEM_ENTRIES 6
+
+/*!
+ * @brief Position of reset vector memory region in the memEntries array.
+ */
+#define RESET_VECTOR_ENTRY_ID 0
+
+
+/** ============================================================================
+ * Globals
+ * ============================================================================
+ */
+/*!
+ * @brief Array of memory entries for OMAP4430
+ */
+static struct proc4430_mem_entry mem_entries[NUM_MEM_ENTRIES] = {
+ {
+ "DUCATI_CODE_SYSM3", /* NAME : Name of the memory region */
+ SHAREDMEMORY_PHY_CODE0_BASEADDR,
+ /* PHYSADDR : Physical address */
+ SHAREDMEMORY_SLV_VRT_CODE0_BASEADDR,
+ /* SLAVEVIRTADDR : Slave virtual address */
+ (u32) -1u,
+ /* MASTERVIRTADDR : Master virtual address (if known) */
+ SHAREDMEMORY_SLV_VRT_CODE0_BASESIZE,
+ /* SIZE : Size of the memory region */
+ true, /* SHARE : Shared access memory? */
+ },
+ {
+ "DUCATI_CODE_APPM3", /* NAME : Name of the memory region */
+ SHAREDMEMORY_PHY_CODE1_BASEADDR,
+ /* PHYSADDR : Physical address */
+ SHAREDMEMORY_SLV_VRT_CODE1_BASEADDR,
+ /* SLAVEVIRTADDR : Slave virtual address */
+ (u32) -1u,
+ /* MASTERVIRTADDR : Master virtual address (if known) */
+ SHAREDMEMORY_SLV_VRT_CODE1_BASESIZE,
+ /* SIZE : Size of the memory region */
+ true, /* SHARE : Shared access memory? */
+ },
+ {
+ "DUCATI_SHM_SYSM3", /* NAME : Name of the memory region */
+ SHAREDMEMORY_PHY_BASEADDR_SYSM3,
+ /* PHYSADDR : Physical address */
+ SHAREDMEMORY_SLV_VRT_BASEADDR_SYSM3,
+ /* SLAVEVIRTADDR : Slave virtual address */
+ (u32) -1u,
+ /* MASTERVIRTADDR : Master virtual address (if known) */
+ SHAREDMEMORY_SLV_VRT_BASESIZE_SYSM3,
+ /* SIZE : Size of the memory region */
+ true, /* SHARE : Shared access memory? */
+ },
+ {
+ "DUCATI_SHM_APPM3", /* NAME : Name of the memory region */
+ SHAREDMEMORY_PHY_BASEADDR_APPM3,
+ /* PHYSADDR : Physical address */
+ SHAREDMEMORY_SLV_VRT_BASEADDR_APPM3,
+ /* SLAVEVIRTADDR : Slave virtual address */
+ (u32) -1u,
+ /* MASTERVIRTADDR : Master virtual address (if known) */
+ SHAREDMEMORY_SLV_VRT_BASESIZE_APPM3,
+ /* SIZE : Size of the memory region */
+ true, /* SHARE : Shared access memory? */
+ },
+ {
+ "DUCATI_CONST_SYSM3", /* NAME : Name of the memory region */
+ SHAREDMEMORY_PHY_CONST0_BASEADDR,
+ /* PHYSADDR : Physical address */
+ SHAREDMEMORY_SLV_VRT_CONST0_BASEADDR,
+ /* SLAVEVIRTADDR : Slave virtual address */
+ (u32) -1u,
+ /* MASTERVIRTADDR : Master virtual address (if known) */
+ SHAREDMEMORY_SLV_VRT_CONST0_BASESIZE,
+ /* SIZE : Size of the memory region */
+ true, /* SHARE : Shared access memory? */
+ },
+ {
+ "DUCATI_CONST_APPM3", /* NAME : Name of the memory region */
+ SHAREDMEMORY_PHY_CONST1_BASEADDR,
+ /* PHYSADDR : Physical address */
+ SHAREDMEMORY_SLV_VRT_CONST1_BASEADDR,
+ /* SLAVEVIRTADDR : Slave virtual address */
+ (u32) -1u,
+ /* MASTERVIRTADDR : Master virtual address (if known) */
+ SHAREDMEMORY_SLV_VRT_CONST1_BASESIZE,
+ /* SIZE : Size of the memory region */
+ true, /* SHARE : Shared access memory? */
+ }
+
+};
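+/* This table is handed to proc4430_create() through
+ * proc4430_params.mem_entries in _platform_setup() below */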
+
+
+
+
+
+/* =============================================================================
+ * APIS
+ * =============================================================================
+ */
+
+/*
+ * ======== platform_get_config ========
+ * Function to get the default values for configurations.
+ */
+void
+platform_get_config(struct platform_config *config)
+{
+ int status = PLATFORM_S_SUCCESS;
+
+ BUG_ON(config == NULL);
+ if (config == NULL) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* get the multiproc default config */
+ multiproc_get_config(&config->multiproc_config);
+
+ /* get the gatemp default config */
+ gatemp_get_config(&config->gatemp_config);
+
+ /* get the gatepeterson default config */
+ gatepeterson_get_config(&config->gatepeterson_config);
+
+ /* get the gatehwspinlock default config */
+ gatehwspinlock_get_config(&config->gatehwspinlock_config);
+
+ /* get the sharedregion default config */
+ sharedregion_get_config(&config->sharedregion_config);
+
+ /* get the messageq default config */
+ messageq_get_config(&config->messageq_config);
+
+ /* get the notify default config */
+ notify_get_config(&config->notify_config);
+ /* get the ipu_pm default config */
+ ipu_pm_get_config(&config->ipu_pm_config);
+
+ /* get the procmgr default config */
+ proc_mgr_get_config(&config->proc_mgr_config);
+
+ /* get the heapbufmp default config */
+ heapbufmp_get_config(&config->heapbufmp_config);
+
+ /* get the heapmemmp default config */
+ heapmemmp_get_config(&config->heapmemmp_config);
+#if 0
+ /* get the heapmultibuf default config */
+ heapmultibuf_get_config(&config->heapmultibuf_config);
+#endif
+ /* get the listmp default config */
+ listmp_get_config(&config->listmp_config);
+
+ /* get the transportshm default config */
+ transportshm_get_config(&config->transportshm_config);
+ /* get the notifyshmdriver default config */
+ notify_ducatidrv_get_config(&config->notify_ducatidrv_config);
+
+ /* get the nameserver_remotenotify default config */
+ nameserver_remotenotify_get_config(&config->
+ nameserver_remotenotify_config);
+#if 0
+ /* get the clientnotifymgr default config */
+ clientnotifymgr_get_config(&config->clinotifymgr_config_params);
+
+ /* get the frameqbufmgr default config */
+ frameqbufmgr_get_config(&config->frameqbufmgr_config_params);
+ /* get the frameq default config */
+ frameq_get_config(&config->frameq_config_params);
+
+ /* get the ringio default config */
+ ringio_get_config(&config->ringio_config);
+
+ /* get the ringiotransportshm default config */
+ ringiotransportshm_get_config(&config->ringiotransportshm_config);
+#endif
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "platform_get_config failed! status = 0x%x\n",
+ status);
+ return;
+}
+
+
+/*
+ * ======== platform_override_config ========
+ * Function to override the default configuration values.
+ *
+ */
+int
+platform_override_config(struct platform_config *config)
+{
+ int status = PLATFORM_S_SUCCESS;
+
+ BUG_ON(config == NULL);
+
+ if (config == NULL) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* Override the multiproc_config default config */
+ config->multiproc_config.num_processors = 4;
+ config->multiproc_config.id = 3;
+ strcpy(config->multiproc_config.name_list[0], "Tesla");
+ strcpy(config->multiproc_config.name_list[1], "AppM3");
+ strcpy(config->multiproc_config.name_list[2], "SysM3");
+ strcpy(config->multiproc_config.name_list[3], "MPU");
+
+ /* Override the gatemp default config */
+ config->gatemp_config.num_resources = 64;
+
+ /* Override the Sharedregion default config */
+ config->sharedregion_config.cache_line_size = 128;
+
+ /* Override the LISTMP default config */
+
+ /* Override the MESSAGEQ default config */
+ config->messageq_config.num_heaps = 2;
+
+ /* Override the NOTIFY default config */
+
+ /* Override the PROCMGR default config */
+
+ /* Override the HeapBuf default config */
+
+ /* Override the LISTMPSHAREDMEMORY default config */
+
+ /* Override the MESSAGEQTRANSPORTSHM default config */
+
+ /* Override the NOTIFYSHMDRIVER default config */
+
+ /* Override the NAMESERVERREMOTENOTIFY default config */
+
+ /* Override the ClientNotifyMgr default config */
+ /* Override the FrameQBufMgr default config */
+
+ /* Override the FrameQ default config */
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "platform_override_config failed! status "
+ "= 0x%x\n", status);
+ return status;
+}
+
+/*
+ * ======== platform_setup ========
+ * Function to set up the platform.
+ * TBD: logic would change completely in the final system.
+ */
+int
+platform_setup(void)
+{
+ int status = PLATFORM_S_SUCCESS;
+ struct platform_config _config;
+ struct platform_config *config;
+ struct platform_mem_map_info m_info;
+
+ platform_get_config(&_config);
+ config = &_config;
+
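+ /* Bring the IPC stack up bottom-to-top: platform memory, MultiProc,
+ * ProcMgr, SharedRegion, Notify, NameServer, the gates, MessageQ,
+ * the heaps, ListMP and the transports. platform_destroy() undoes
+ * this in roughly reverse order. */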
+ /* Initialize PlatformMem */
+ status = platform_mem_setup();
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : platform_mem_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "platform_mem_setup : status [0x%x]\n" ,
+ status);
+ platform_module->platform_mem_init_flag = true;
+ }
+
+ platform_override_config(config);
+
+ status = multiproc_setup(&(config->multiproc_config));
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : multiproc_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "platform_setup : status [0x%x]\n", status);
+ platform_module->multiproc_init_flag = true;
+ }
+
+ /* Initialize ProcMgr */
+ if (status >= 0) {
+ status = proc_mgr_setup(&(config->proc_mgr_config));
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : proc_mgr_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "proc_mgr_setup : status [0x%x]\n",
+ status);
+ platform_module->proc_mgr_init_flag = true;
+ }
+ }
+
+ /* Initialize SharedRegion */
+ if (status >= 0) {
+ status = sharedregion_setup(&config->sharedregion_config);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : sharedregion_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "sharedregion_setup : status [0x%x]\n",
+ status);
+ platform_module->sharedregion_init_flag = true;
+ }
+ }
+
+ /* Initialize Notify DucatiDriver */
+ if (status >= 0) {
+ status = notify_ducatidrv_setup(&config->
+ notify_ducatidrv_config);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : "
+ "notify_ducatidrv_setup failed [0x%x]\n",
+ status);
+ } else {
+ printk(KERN_ERR "notify_ducatidrv_setup : "
+ "status [0x%x]\n", status);
+ platform_module->notify_ducatidrv_init_flag = true;
+ }
+ }
+
+ /* Initialize Notify */
+ if (status >= 0) {
+ status = notify_setup(&config->notify_config);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : notify_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "notify_setup : status [0x%x]\n",
+ status);
+ platform_module->notify_init_flag = true;
+ }
+ }
+
+ /* Initialize ipu_pm */
+ if (status >= 0) {
+ status = ipu_pm_setup(&config->ipu_pm_config);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : ipu_pm_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "ipu_pm_setup : status [0x%x]\n",
+ status);
+ platform_module->ipu_pm_init_flag = true;
+ }
+ }
+ /* Initialize NameServer */
+ if (status >= 0) {
+ status = nameserver_setup();
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : nameserver_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "nameserver_setup : status [0x%x]\n",
+ status);
+ platform_module->nameserver_init_flag = true;
+ }
+ }
+
+ /* Initialize GateMP */
+ if (status >= 0) {
+ status = gatemp_setup(&config->gatemp_config);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : gatemp_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "gatemp_setup : status [0x%x]\n",
+ status);
+ platform_module->gatemp_init_flag = true;
+ }
+ }
+
+ /* Initialize GatePeterson */
+ if (status >= 0) {
+ status = gatepeterson_setup(&config->gatepeterson_config);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : gatepeterson_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "gatepeterson_setup : status [0x%x]\n",
+ status);
+ platform_module->gatepeterson_init_flag = true;
+ }
+ }
+
+ /* Initialize GateHWSpinlock */
+ if (status >= 0) {
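+ /* Map 4 KB at 0x4A0F6000, presumably the OMAP4 hardware
+ * spinlock module; the lock registers start at offset 0x800 */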
+ m_info.src = 0x4A0F6000;
+ m_info.size = 0x1000;
+ m_info.is_cached = false;
+ status = platform_mem_map(&m_info);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : platform_mem_map "
+ "failed [0x%x]\n", status);
+ } else {
+ config->gatehwspinlock_config.num_locks = 64;
+ config->gatehwspinlock_config.base_addr = \
+ m_info.dst + 0x800;
+ status = gatehwspinlock_setup(&config->
+ gatehwspinlock_config);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : "
+ "gatehwspinlock_setup failed [0x%x]\n",
+ status);
+ } else
+ platform_module->gatehwspinlock_init_flag =
+ true;
+ }
+ }
+
+ /* Initialize MessageQ */
+ if (status >= 0) {
+ status = messageq_setup(&config->messageq_config);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : messageq_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "messageq_setup : status [0x%x]\n",
+ status);
+ platform_module->messageq_init_flag = true;
+ }
+ }
+#if 0
+ /* Initialize RingIO */
+ if (status >= 0) {
+ status = ringio_setup(&config->ringio_config);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : ringio_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "ringio_setup : status [0x%x]\n",
+ status);
+ platform_module->ringio_init_flag = true;
+ }
+ }
+
+ /* Initialize RingIOTransportShm */
+ if (status >= 0) {
+ status = ringiotransportshm_setup(&config->
+ ringiotransportshm_config);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : "
+ "ringiotransportshm_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "ringiotransportshm_setup : status "
+ "[0x%x]\n", status);
+ platform_module->ringiotransportshm_init_flag = true;
+ }
+ }
+#endif
+ /* Initialize HeapBufMP */
+ if (status >= 0) {
+ status = heapbufmp_setup(&config->heapbufmp_config);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : heapbufmp_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "heapbufmp_setup : status [0x%x]\n",
+ status);
+ platform_module->heapbufmp_init_flag = true;
+ }
+ }
+
+ /* Initialize HeapMemMP */
+ if (status >= 0) {
+ status = heapmemmp_setup(&config->heapmemmp_config);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : heapmemmp_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "heapmemmp_setup : status [0x%x]\n",
+ status);
+ platform_module->heapmemmp_init_flag = true;
+ }
+ }
+#if 0
+ /* Initialize HeapMultiBuf */
+ if (status >= 0) {
+ status = heapmultibuf_setup(&config->heapmultibuf_config);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : heapmultibuf_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "heapmultibuf_setup : status [0x%x]\n",
+ status);
+ platform_module->heapmultibuf_init_flag = true;
+ }
+ }
+#endif
+ /* Initialize ListMP */
+ if (status >= 0) {
+ status = listmp_setup(
+ &config->listmp_config);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : "
+ "listmp_setup failed [0x%x]\n",
+ status);
+ } else {
+ printk(KERN_ERR "listmp_setup : "
+ "status [0x%x]\n", status);
+ platform_module->listmp_init_flag = true;
+ }
+ }
+
+ /* Initialize TransportShm */
+ if (status >= 0) {
+ status = transportshm_setup(
+ &config->transportshm_config);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : "
+ "transportshm_setup failed [0x%x]\n",
+ status);
+ } else {
+ printk(KERN_ERR "transportshm_setup : "
+ "status [0x%x]\n", status);
+ platform_module->transportshm_init_flag = true;
+ }
+ }
+
+ /* Initialize NameServerRemoteNotify */
+ if (status >= 0) {
+ status = nameserver_remotenotify_setup(
+ &config->nameserver_remotenotify_config);
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : "
+ "nameserver_remotenotify_setup failed "
+ "[0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "nameserver_remotenotify_setup : "
+ "status [0x%x]\n", status);
+ platform_module->nameserver_remotenotify_init_flag =
+ true;
+ }
+ }
+#if 0
+ /* Get the ClientNotifyMgr default config */
+ if (status >= 0) {
+ status = ClientNotifyMgr_setup(&config->cliNotifyMgrCfgParams);
+ if (status < 0)
+ GT_setFailureReason(curTrace,
+ GT_4CLASS,
+ "Platform_setup",
+ status,
+ "ClientNotifyMgr_setup failed!");
+ else
+ Platform_module->clientNotifyMgrInitFlag = true;
+ }
+
+ /* Get the FrameQBufMgr default config */
+ if (status >= 0) {
+ status = FrameQBufMgr_setup(&config->frameQBufMgrCfgParams);
+ if (status < 0)
+ GT_setFailureReason(curTrace,
+ GT_4CLASS,
+ "Platform_setup",
+ status,
+ "FrameQBufMgr_setup failed!");
+ else
+ Platform_module->frameQBufMgrInitFlag = true;
+ }
+ /* Get the FrameQ default config */
+ if (status >= 0) {
+ status = FrameQ_setup(&config->frameQCfgParams);
+ if (status < 0)
+ GT_setFailureReason(curTrace,
+ GT_4CLASS,
+ "Platform_setup",
+ status,
+ "FrameQ_setup failed!");
+ else
+ Platform_module->frameQInitFlag = true;
+ }
+#endif
+
+ if (status >= 0) {
+ memset(platform_objects, 0,
+ (sizeof(struct platform_object) * \
+ multiproc_get_num_processors()));
+ }
+
+
+ /* Initialize Platform */
+ if (status >= 0) {
+ status = _platform_setup();
+ if (status < 0) {
+ printk(KERN_ERR "platform_setup : _platform_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "_platform_setup : status [0x%x]\n",
+ status);
+ platform_module->platform_init_flag = true;
+ }
+
+ }
+
+ return status;
+}
+
+
+/*
+ * ======== platform_destroy ========
+ * Function to destroy the System.
+ */
+int
+platform_destroy(void)
+{
+ int status = PLATFORM_S_SUCCESS;
+ struct platform_mem_unmap_info u_info;
+
+ /* Finalize Platform module*/
+ if (platform_module->platform_init_flag == true) {
+ status = _platform_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : _platform_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ platform_module->platform_init_flag = false;
+ }
+ }
+#if 0
+ /* Finalize Frame module */
+ if (Platform_module->frameQInitFlag == true) {
+ status = FrameQ_destroy();
+ if (status < 0)
+ GT_setFailureReason(curTrace,
+ GT_4CLASS,
+ "Platform_destroy",
+ status,
+ "FrameQ_destroy failed!");
+ else
+ Platform_module->frameQInitFlag = false;
+ }
+
+ /* Finalize FrameQBufMgr module */
+ if (Platform_module->frameQBufMgrInitFlag == true) {
+ status = FrameQBufMgr_destroy();
+ if (status < 0)
+ GT_setFailureReason(curTrace,
+ GT_4CLASS,
+ "Platform_destroy",
+ status,
+ "FrameQBufMgr_destroy failed!");
+ else
+ Platform_module->frameQBufMgrInitFlag = false;
+ }
+
+ /* Finalize ClientNotifyMgr module */
+ if (Platform_module->clientNotifyMgrInitFlag == true) {
+ status = ClientNotifyMgr_destroy();
+ if (status < 0)
+ GT_setFailureReason(curTrace,
+ GT_4CLASS,
+ "Platform_destroy",
+ status,
+ "ClientNotifyMgr_destroy failed!");
+ else
+ Platform_module->clientNotifyMgrInitFlag = false;
+ }
+#endif
+ /* Finalize NameServerRemoteNotify module */
+ if (platform_module->nameserver_remotenotify_init_flag == true) {
+ status = nameserver_remotenotify_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : "
+ "nameserver_remotenotify_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ platform_module->nameserver_remotenotify_init_flag \
+ = false;
+ }
+ }
+
+ /* Finalize TransportShm module */
+ if (platform_module->transportshm_init_flag == true) {
+ status = transportshm_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : "
+ "transportshm_destroy failed "
+ "[0x%x]\n", status);
+ } else {
+ platform_module->transportshm_init_flag = \
+ false;
+ }
+ }
+
+ /* Finalize ListMP module */
+ if (platform_module->listmp_init_flag == true) {
+ status = listmp_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : "
+ "listmp_destroy failed [0x%x]\n",
+ status);
+ } else {
+ platform_module->listmp_init_flag = \
+ false;
+ }
+ }
+#if 0
+ /* Finalize HeapMultiBuf module */
+ if (platform_module->heapmultibuf_init_flag == true) {
+ status = heapmultibuf_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : "
+ "heapmultibuf_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ platform_module->heapmultibuf_init_flag = false;
+ }
+ }
+#endif
+ /* Finalize HeapBufMP module */
+ if (platform_module->heapbufmp_init_flag == true) {
+ status = heapbufmp_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : heapbufmp_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ platform_module->heapbufmp_init_flag = false;
+ }
+ }
+
+ /* Finalize HeapMemMP module */
+ if (platform_module->heapmemmp_init_flag == true) {
+ status = heapmemmp_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : heapmemmp_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ platform_module->heapmemmp_init_flag = false;
+ }
+ }
+
+ /* Finalize MessageQ module */
+ if (platform_module->messageq_init_flag == true) {
+ status = messageq_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : messageq_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ platform_module->messageq_init_flag = false;
+ }
+ }
+#if 0
+ /* Finalize RingIO module */
+ if (platform_module->ringio_init_flag == true) {
+ status = ringio_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : ringio_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ platform_module->ringio_init_flag = false;
+ }
+ }
+
+
+ /* Finalize RingIOTransportShm module */
+ if (platform_module->ringiotransportshm_init_flag == true) {
+ status = ringiotransportshm_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : "
+ "ringiotransportshm_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ platform_module->ringiotransportshm_init_flag = false;
+ }
+ }
+#endif
+ /* Finalize GatePeterson module */
+ if (platform_module->gatepeterson_init_flag == true) {
+ status = gatepeterson_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : "
+ "gatepeterson_destroy failed [0x%x]\n", status);
+ } else {
+ platform_module->gatepeterson_init_flag = false;
+ }
+ }
+
+ /* Finalize GateHWSpinlock module */
+ if (platform_module->gatehwspinlock_init_flag == true) {
+ status = gatehwspinlock_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : "
+ "gatehwspinlock_destroy failed "
+ "[0x%x]\n", status);
+ } else {
+ platform_module->gatehwspinlock_init_flag = false;
+ }
+
+ u_info.addr = 0x4A0F6000;
+ u_info.size = 0x1000;
+ u_info.is_cached = false;
+ status = platform_mem_unmap(&u_info);
+ if (status < 0)
+ printk(KERN_ERR "platform_destroy : platform_mem_unmap"
+ " failed [0x%x]\n", status);
+ }
+
+ /* Finalize GateMP module */
+ if (platform_module->gatemp_init_flag == true) {
+ status = gatemp_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : "
+ "gatemp_destroy failed [0x%x]\n", status);
+ } else {
+ platform_module->gatemp_init_flag = false;
+ }
+ }
+
+ /* Finalize NameServer module */
+ if (platform_module->nameserver_init_flag == true) {
+ status = nameserver_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : nameserver_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ platform_module->nameserver_init_flag = false;
+ }
+ }
+ /* Finalize ipu_pm module */
+ if (platform_module->ipu_pm_init_flag == true) {
+ status = ipu_pm_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : ipu_pm_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ platform_module->ipu_pm_init_flag = false;
+ }
+ }
+
+ /* Finalize Notify Ducati Driver module */
+ if (platform_module->notify_ducatidrv_init_flag == true) {
+ status = notify_ducatidrv_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : "
+ "notify_ducatidrv_destroy failed [0x%x]\n",
+ status);
+ } else {
+ platform_module->notify_ducatidrv_init_flag = false;
+ }
+ }
+
+ /* Finalize Notify module */
+ if (platform_module->notify_init_flag == true) {
+ status = notify_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : notify_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ platform_module->notify_init_flag = false;
+ }
+ }
+
+ /* Finalize SharedRegion module */
+ if (platform_module->sharedregion_init_flag == true) {
+ status = sharedregion_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : "
+ "sharedregion_destroy failed [0x%x]\n", status);
+ } else {
+ platform_module->sharedregion_init_flag = false;
+ }
+ }
+
+ /* Finalize ProcMgr module */
+ if (platform_module->proc_mgr_init_flag == true) {
+ status = proc_mgr_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : proc_mgr_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ platform_module->proc_mgr_init_flag = false;
+ }
+ }
+
+ /* Finalize MultiProc module */
+ if (platform_module->multiproc_init_flag == true) {
+ status = multiproc_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : multiproc_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ platform_module->multiproc_init_flag = false;
+ }
+ }
+
+ /* Finalize PlatformMem module */
+ if (platform_module->platform_mem_init_flag == true) {
+ status = platform_mem_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "platform_destroy : "
+ "platform_mem_destroy failed [0x%x]\n", status);
+ } else {
+ platform_module->platform_mem_init_flag = false;
+ }
+ }
+
+ if (status >= 0)
+ memset(platform_objects,
+ 0,
+ (sizeof(struct platform_object) *
+ multiproc_get_num_processors()));
+
+ return status;
+}
+
+/*
+ * ======== _platform_setup ========
+ * Purpose:
+ * TBD: logic would change completely in the final system.
+ */
+s32 _platform_setup(void)
+{
+
+ s32 status = 0;
+ struct proc4430_config proc_config;
+ struct proc_mgr_params params;
+ struct proc4430_params proc_params;
+ u16 proc_id;
+ struct platform_object *handle;
+ void *proc_mgr_handle;
+ void *proc_mgr_proc_handle;
+
+ /* Create the SysM3 ProcMgr object */
+ proc4430_get_config(&proc_config);
+ status = proc4430_setup(&proc_config);
+ if (status < 0)
+ goto exit;
+
+ /* Get MultiProc ID by name. */
+ proc_id = multiproc_get_id("SysM3");
+ handle = &platform_objects[proc_id];
+
+ /* Create an instance of the Processor object for OMAP4430 */
+ proc4430_params_init(NULL, &proc_params);
+ /* TODO: SysLink-38 has these in individual Proc Objects */
+ proc_params.num_mem_entries = NUM_MEM_ENTRIES;
+ proc_params.mem_entries = mem_entries;
+ proc_params.reset_vector_mem_entry = RESET_VECTOR_ENTRY_ID;
+ proc_mgr_proc_handle = proc4430_create(proc_id, &proc_params);
+ if (proc_mgr_proc_handle == NULL) {
+ status = PLATFORM_E_FAIL;
+ goto proc_create_fail;
+ }
+
+ /* Initialize parameters */
+ proc_mgr_params_init(NULL, &params);
+ params.proc_handle = proc_mgr_proc_handle;
+ proc_mgr_handle = proc_mgr_create(proc_id, &params);
+ if (proc_mgr_handle == NULL) {
+ status = PLATFORM_E_FAIL;
+ goto proc_mgr_create_fail;
+ }
+
+ /* Store the Processor and ProcMgr handles for SysM3 */
+ handle->phandle = proc_mgr_proc_handle;
+ handle->pm_handle = proc_mgr_handle;
+
+ proc_mgr_handle = NULL;
+ proc_mgr_proc_handle = NULL;
+
+
+ /* Create the AppM3 ProcMgr object */
+ /* Get MultiProc ID by name. */
+ proc_id = multiproc_get_id("AppM3");
+ handle = &platform_objects[proc_id];
+
+ /* Create an instance of the Processor object for OMAP4430 */
+ proc4430_params_init(NULL, &proc_params);
+ proc_params.num_mem_entries = NUM_MEM_ENTRIES;
+ proc_params.mem_entries = mem_entries;
+ proc_params.reset_vector_mem_entry = RESET_VECTOR_ENTRY_ID;
+ proc_mgr_proc_handle = proc4430_create(proc_id, &proc_params);
+ if (proc_mgr_proc_handle == NULL) {
+ status = PLATFORM_E_FAIL;
+ goto proc_create_fail;
+ }
+
+ /* Initialize parameters */
+ proc_mgr_params_init(NULL, &params);
+ params.proc_handle = proc_mgr_proc_handle;
+ proc_mgr_handle = proc_mgr_create(proc_id, &params);
+ if (proc_mgr_handle == NULL) {
+ status = PLATFORM_E_FAIL;
+ goto proc_mgr_create_fail;
+ }
+
+ handle->phandle = proc_mgr_proc_handle;
+ handle->pm_handle = proc_mgr_handle;
+
+ /* TODO: See if we need to do proc_mgr_attach on both SysM3 & AppM3
+ * to set the memory maps before hand. Or fix ProcMgr_open &
+ * ProcMgr_attach from the userspace */
+ return status;
+
+proc_create_fail:
+proc_mgr_create_fail:
+ /* Clean up created objects */
+ _platform_destroy();
+exit:
+ return status;
+}
+
+
+/*
+ * ======== _platform_destroy ========
+ * Purpose:
+ * Function to finalize the platform.
+ */
+s32 _platform_destroy(void)
+{
+ s32 status = 0;
+ struct platform_object *handle;
+ int i;
+
+ for (i = 0; i < MULTIPROC_MAXPROCESSORS; i++) {
+ handle = &platform_objects[i];
+
+ /* Delete the Processor instances */
+ if (handle->phandle != NULL) {
+ status = proc4430_delete(&handle->phandle);
+ WARN_ON(status < 0);
+ }
+
+ if (handle->pm_handle != NULL) {
+ status = proc_mgr_delete(&handle->pm_handle);
+ WARN_ON(status < 0);
+ }
+ }
+
+ status = proc4430_destroy();
+ WARN_ON(status < 0);
+
+ return status;
+}
+
+
+/*
+ * ======== platform_load_callback ========
+ * Purpose:
+ * Function called by proc_mgr when slave is in loaded state.
+ */
+int platform_load_callback(u16 proc_id, void *arg)
+{
+ int status = PLATFORM_S_SUCCESS;
+ struct platform_object *handle;
+ u32 start;
+ u32 num_bytes;
+ struct sharedregion_entry entry;
+ u32 m_addr = 0;
+ /*struct proc_mgr_addr_info ai;*/
+ struct ipc_params ipc_params;
+ int i;
+ void *pm_handle;
+
+ handle = &platform_objects[proc_id];
+ pm_handle = handle->pm_handle;
+
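+ /* Boot handshake with the slave:
+ * 1. read the platform_slave_config embedded at 'start';
+ * 2. create the Ipc instance with the settings found there;
+ * 3. publish the slave-owned SharedRegion entries on the host;
+ * 4. set sr0_memory_setup, write the config back and start Ipc. */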
+ /* TODO: hack */
+ start = (u32)arg; /* start address passed in as argument */
+
+ /* Read the slave config */
+ num_bytes = sizeof(struct platform_slave_config);
+ status = _platform_read_slave_memory(proc_id,
+ start,
+ &handle->slave_config,
+ &num_bytes);
+ if (status < 0) {
+ status = PLATFORM_E_FAIL;
+ goto exit;
+ }
+
+ if (platform_host_sr_config == NULL)
+ platform_host_sr_config = kzalloc(sizeof(struct
+ platform_host_sr_config) * handle->
+ slave_config.num_srs, GFP_KERNEL);
+
+ if (platform_host_sr_config == NULL) {
+ status = -ENOMEM;
+ goto alloced_host_sr_config_exit;
+ }
+
+ if (handle->slave_config.num_srs > 0) {
+ num_bytes = handle->slave_config.num_srs * sizeof(struct
+ platform_slave_sr_config);
+ handle->slave_sr_config = kmalloc(num_bytes, GFP_KERNEL);
+ if (handle->slave_sr_config == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ } else {
+ status = _platform_read_slave_memory(
+ proc_id,
+ start + sizeof(struct
+ platform_slave_config),
+ handle->slave_sr_config,
+ &num_bytes);
+ if (status < 0) {
+ status = PLATFORM_E_FAIL;
+ goto alloced_slave_sr_config_exit;
+ }
+ }
+ }
+
+ if (status >= 0) {
+ ipc_params.setup_messageq = handle->slave_config.setup_messageq;
+ ipc_params.setup_notify = handle->slave_config.setup_notify;
+ ipc_params.setup_ipu_pm = handle->slave_config.setup_ipu_pm;
+ ipc_params.proc_sync = handle->slave_config.proc_sync;
+ status = ipc_create(proc_id, &ipc_params);
+ if (status < 0) {
+ status = PLATFORM_E_FAIL;
+ goto alloced_slave_sr_config_exit;
+ }
+ }
+
+ /* Setup the shared memory for region with owner == host */
+ /* TODO: May need to replace proc_mgr_map with platform_mem_map */
+ for (i = 0; i < handle->slave_config.num_srs; i++) {
+ status = sharedregion_get_entry(i, &entry);
+ if (status < 0) {
+ status = PLATFORM_E_FAIL;
+ goto alloced_slave_sr_config_exit;
+ }
+ BUG_ON(!((entry.is_valid == false)
+ || ((entry.is_valid == true)
+ && (entry.len == (handle->
+ slave_sr_config[i].entry_len)))));
+
+ platform_host_sr_config[i].ref_count++;
+
+ /* Add the entry only if previously not added */
+ if (entry.is_valid == false) {
+ /* Translate the slave address to master */
+
+ /* This SharedRegion is already pre-mapped. So, no need
+ * to do a new mapping. Just need to translate to get
+ * the master virtual address */
+ status = proc_mgr_translate_addr(pm_handle,
+ (void **)&m_addr,
+ PROC_MGR_ADDRTYPE_MASTERKNLVIRT,
+ (void *)handle->slave_sr_config[i].entry_base,
+ PROC_MGR_ADDRTYPE_SLAVEVIRT);
+ if (status < 0) {
+ status = PLATFORM_E_FAIL;
+ goto alloced_slave_sr_config_exit;
+ }
+
+ /* TODO: compatibility with new procmgr */
+ /* No need to map this to Slave. Slave is pre-mapped */
+ /*status = proc_mgr_map(pm_handle,
+ handle->slave_sr_config[i].entry_base,
+ handle->slave_sr_config[i].entry_len,
+ &ai.addr[PROC_MGR_ADDRTYPE_MASTERKNLVIRT],
+ &handle->slave_sr_config[i].entry_len,
+ PROC_MGR_MAPTYPE_VIRT);
+ if (status < 0) {
+ status = PLATFORM_E_FAIL;
+ goto alloced_slave_sr_config_exit;
+ }
+
+ memset((u32 *)ai.addr[PROC_MGR_ADDRTYPE_MASTERKNLVIRT],
+ 0, handle->slave_sr_config[i].entry_len); */
+ memset((u32 *)m_addr, 0,
+ handle->slave_sr_config[i].entry_len);
+ memset(&entry, 0, sizeof(struct sharedregion_entry));
+ /*entry.base = (void *)ai.
+ addr[PROC_MGR_ADDRTYPE_MASTERKNLVIRT];*/
+ entry.base = (void *) m_addr;
+ entry.len = handle->slave_sr_config[i].entry_len;
+ entry.owner_proc_id = handle->slave_sr_config[i].
+ owner_proc_id;
+ entry.is_valid = true;
+ entry.cache_line_size = handle->slave_sr_config[i].
+ cache_line_size;
+ entry.create_heap = handle->slave_sr_config[i].
+ create_heap;
+ _sharedregion_set_entry(handle->slave_sr_config[i].id,
+ &entry);
+ }
+ }
+
+ /* Set sr0_memory_setup and write the slave config back */
+ num_bytes = sizeof(struct platform_slave_config);
+ handle->slave_config.sr0_memory_setup = 1;
+ status = _platform_write_slave_memory(proc_id,
+ start,
+ &handle->slave_config,
+ &num_bytes);
+ if (status < 0) {
+ status = PLATFORM_E_FAIL;
+ goto alloced_slave_sr_config_exit;
+ }
+
+ status = ipc_start();
+ if (status < 0) {
+ status = PLATFORM_E_FAIL;
+ goto alloced_slave_sr_config_exit;
+ }
+
+ return 0;
+
+alloced_slave_sr_config_exit:
+ kfree(handle->slave_sr_config);
+
+alloced_host_sr_config_exit:
+ kfree(platform_host_sr_config);
+exit:
+ if (status < 0)
+ printk(KERN_ERR "platform_load_callback failed, status [0x%x]\n",
+ status);
+
+ return status;
+}
+EXPORT_SYMBOL(platform_load_callback);
+
+
+/*
+ * ======== platform_start_callback ========
+ * Purpose:
+ * Function called by proc_mgr when slave is in started state.
+ * FIXME: logic would change completely in the final system.
+ */
+int platform_start_callback(u16 proc_id, void *arg)
+{
+ int status = PLATFORM_S_SUCCESS;
+
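+ /* Retry until the slave side is ready; note this loop has no timeout */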
+ do {
+ status = ipc_attach(proc_id);
+ msleep(1);
+ } while (status < 0);
+
+ if (status < 0)
+ printk(KERN_ERR "platform_load_callback failed, status [0x%x]\n",
+ status);
+
+ return status;
+}
+EXPORT_SYMBOL(platform_start_callback);
+/* FIXME: since application has to call this API for now */
+
+
+/*
+ * ======== platform_stop_callback ========
+ * Purpose:
+ * Function called by proc_mgr when slave is in stopped state.
+ * FIXME: logic would change completely in the final system.
+ */
+int platform_stop_callback(u16 proc_id, void *arg)
+{
+ int status = PLATFORM_S_SUCCESS;
+ u32 i;
+ u32 m_addr;
+ struct platform_object *handle;
+ void *pm_handle;
+
+ handle = (struct platform_object *)&platform_objects[proc_id];
+ pm_handle = handle->pm_handle;
+ /* Release the shared regions that were set up for this slave */
+ for (i = 0;
+ ((handle->slave_sr_config != NULL) &&
+ (i < handle->slave_config.num_srs));
+ i++) {
+ platform_host_sr_config[i].ref_count--;
+ if (platform_host_sr_config[i].ref_count == 0) {
+ platform_num_srs_unmapped++;
+ /* Translate the slave address to master */
+ /* TODO: backwards compatibility with old procmgr */
+ status = proc_mgr_translate_addr(pm_handle,
+ (void **)&m_addr,
+ PROC_MGR_ADDRTYPE_MASTERKNLVIRT,
+ (void *)handle->slave_sr_config[i].entry_base,
+ PROC_MGR_ADDRTYPE_SLAVEVIRT);
+ if (status < 0) {
+ status = PLATFORM_E_FAIL;
+ continue;
+ }
+
+ status = proc_mgr_unmap(pm_handle, m_addr);
+ }
+ }
+
+ if (platform_num_srs_unmapped == handle->slave_config.num_srs) {
+ if (handle->slave_sr_config != NULL) {
+ kfree(handle->slave_sr_config);
+ handle->slave_sr_config = NULL;
+ }
+ if (platform_host_sr_config != NULL) {
+ kfree(platform_host_sr_config);
+ platform_host_sr_config = NULL;
+ platform_num_srs_unmapped = 0;
+ }
+ }
+
+ ipc_detach(proc_id);
+
+ ipc_stop();
+
+ return status;
+}
+EXPORT_SYMBOL(platform_stop_callback);
+
+/* ============================================================================
+ * Internal functions
+ * ============================================================================
+ */
+/* Function to read slave memory */
+int
+_platform_read_slave_memory(u16 proc_id,
+ u32 addr,
+ void *value,
+ u32 *num_bytes)
+{
+ int status = 0;
+ bool done = false;
+ struct platform_object *handle;
+ u32 m_addr;
+ void *pm_handle;
+
+ handle = (struct platform_object *)&platform_objects[proc_id];
+ BUG_ON(handle == NULL);
+ if (handle == NULL) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ pm_handle = handle->pm_handle;
+ BUG_ON(pm_handle == NULL);
+ if (pm_handle == NULL) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* TODO: backwards compatibility with old procmgr */
+ status = proc_mgr_translate_addr(pm_handle,
+ (void **)&m_addr,
+ PROC_MGR_ADDRTYPE_MASTERKNLVIRT,
+ (void *)addr,
+ PROC_MGR_ADDRTYPE_SLAVEVIRT);
+ if (status >= 0) {
+ memcpy(value, (void *) m_addr, *num_bytes);
+ done = true;
+ printk(KERN_ERR "_platform_read_slave_memory successful! "
+ "status = 0x%x, proc_id = %d, addr = 0x%x, "
+ "m_addr = 0x%x, size = 0x%x", status, proc_id, addr,
+ m_addr, *num_bytes);
+ } else {
+ printk(KERN_ERR "_platform_read_slave_memory failed! "
+ "status = 0x%x, proc_id = %d, addr = 0x%x, "
+ "m_addr = 0x%x, size = 0x%x", status, proc_id, addr,
+ m_addr, *num_bytes);
+ status = PLATFORM_E_FAIL;
+ goto exit;
+ }
+
+ /* This code path is not validated for OMAP4, as it does not comply
+ * with the latest ProcMgr */
+ if (done == false) {
+ /* Map the address */
+ /* TODO: backwards compatibility with old procmgr */
+ status = proc_mgr_map(pm_handle,
+ addr,
+ *num_bytes,
+ &m_addr,
+ num_bytes,
+ PROC_MGR_MAPTYPE_VIRT);
+ if (status < 0) {
+ status = PLATFORM_E_FAIL;
+ goto exit;
+ }
+ }
+
+ if (done == false) {
+ status = proc_mgr_read(pm_handle,
+ addr,
+ num_bytes,
+ value);
+ if (status < 0) {
+ status = PLATFORM_E_FAIL;
+ goto exit;
+ }
+ }
+
+ if (done == false) {
+ /* Unmap the address */
+ /* TODO: backwards compatibility with old procmgr */
+ status = proc_mgr_unmap(pm_handle, m_addr);
+ if (status < 0) {
+ status = PLATFORM_E_FAIL;
+ goto exit;
+ }
+ }
+exit:
+ return status;
+}
+
+
+/* Function to write slave memory */
+int _platform_write_slave_memory(u16 proc_id, u32 addr, void *value,
+ u32 *num_bytes)
+{
+ int status = 0;
+ bool done = false;
+ struct platform_object *handle;
+ u32 m_addr;
+ void *pm_handle = NULL;
+
+ handle = (struct platform_object *)&platform_objects[proc_id];
+ BUG_ON(handle == NULL);
+ if (handle == NULL) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ pm_handle = handle->pm_handle;
+ BUG_ON(pm_handle == NULL);
+ if (pm_handle == NULL) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* Translate the slave address to master address */
+ /* TODO: backwards compatibility with old procmgr */
+ status = proc_mgr_translate_addr(pm_handle,
+ (void **)&m_addr,
+ PROC_MGR_ADDRTYPE_MASTERKNLVIRT,
+ (void *)addr,
+ PROC_MGR_ADDRTYPE_SLAVEVIRT);
+ if (status >= 0) {
+ memcpy((void *) m_addr, value, *num_bytes);
+ done = true;
+ printk(KERN_ERR "_platform_write_slave_memory successful! "
+ "status = 0x%x, proc_id = %d, addr = 0x%x, "
+ "m_addr = 0x%x, size = 0x%x", status, proc_id, addr,
+ m_addr, *num_bytes);
+ } else {
+ printk(KERN_ERR "_platform_write_slave_memory failed! "
+ "status = 0x%x, proc_id = %d, addr = 0x%x, "
+ "m_addr = 0x%x, size = 0x%x", status, proc_id, addr,
+ m_addr, *num_bytes);
+ status = PLATFORM_E_FAIL;
+ goto exit;
+ }
+
+ /* This code path is not validated for OMAP4, as it does not comply
+ * with the latest ProcMgr */
+ if (done == false) {
+ /* Map the address */
+ /* TODO: backwards compatibility with old procmgr */
+ status = proc_mgr_map(pm_handle,
+ addr,
+ *num_bytes,
+ &m_addr,
+ num_bytes,
+ PROC_MGR_MAPTYPE_VIRT);
+ if (status < 0) {
+ status = PLATFORM_E_FAIL;
+ goto exit;
+ }
+ }
+
+ if (done == false) {
+ status = proc_mgr_write(pm_handle,
+ addr,
+ num_bytes,
+ value);
+ if (status < 0) {
+ status = PLATFORM_E_FAIL;
+ goto exit;
+ }
+ }
+
+ if (done == false) {
+ /* Unmap the address */
+ /* TODO: backwards compatibility with old procmgr */
+ status = proc_mgr_unmap(pm_handle, m_addr);
+ if (status < 0) {
+ status = PLATFORM_E_FAIL;
+ goto exit;
+ }
+ }
+
+exit:
+ return status;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/platform_mem.c b/drivers/dsp/syslink/multicore_ipc/platform_mem.c
new file mode 100644
index 000000000000..1eaa3cba5e52
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/platform_mem.c
@@ -0,0 +1,325 @@
+/*
+ * platform_mem.c
+ *
+ * Target memory management interface implementation.
+ *
+ * This abstracts the memory management interface in the kernel
+ * code. Allocation, freeing, copying and address translation are
+ * supported for kernel memory management.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Linux specific header files */
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <platform_mem.h>
+#include <atomic_linux.h>
+
+/* Macro to make a correct module magic number with ref_count */
+#define PLATFORM_MEM_MAKE_MAGICSTAMP(x) ((PLATFORM_MEM_MODULEID << 12u) | (x))
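+/* For illustration, assuming a hypothetical PLATFORM_MEM_MODULEID of 0x01,
+ * the stamp packs the module id above the count so that an uninitialized
+ * zero can never be mistaken for a live reference count:
+ *
+ *	PLATFORM_MEM_MAKE_MAGICSTAMP(0) == 0x1000  (module not set up)
+ *	PLATFORM_MEM_MAKE_MAGICSTAMP(1) == 0x1001  (first setup call)
+ */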
+
+/*
+ * Structure containing information about a mapped memory region
+ */
+struct platform_mem_map_table_info {
+ struct list_head mem_entry; /* List element in the map table */
+ u32 physical_address; /* Physical address of the region */
+ u32 knl_virtual_address; /* Mapped kernel virtual address */
+ u32 size; /* Size of the region mapped */
+ u16 ref_count; /* Reference count of mapped entry */
+ bool is_cached; /* Whether the mapping is cached */
+};
+
+/*
+ * Structure defining state object of system memory manager
+ */
+struct platform_mem_module_object {
+ atomic_t ref_count; /* Reference count */
+ struct list_head map_table; /* Head of map table */
+ struct mutex *gate; /* Pointer to lock */
+};
+
+
+/*
+ * Object containing state of the platform mem module
+ */
+static struct platform_mem_module_object platform_mem_state;
+
+/*
+ * ======== platform_mem_setup ========
+ * Purpose:
+ * This will initialize the platform mem module.
+ */
+int platform_mem_setup(void)
+{
+ s32 retval = 0;
+
+ atomic_cmpmask_and_set(&platform_mem_state.ref_count,
+ PLATFORM_MEM_MAKE_MAGICSTAMP(0),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(0));
+ if (atomic_inc_return(&platform_mem_state.ref_count)
+ != PLATFORM_MEM_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ /* Create the Gate handle */
+ platform_mem_state.gate =
+ kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (platform_mem_state.gate == NULL) {
+ retval = -ENOMEM;
+ goto gate_create_fail;
+ }
+
+ /* Construct the map table */
+ INIT_LIST_HEAD(&platform_mem_state.map_table);
+ mutex_init(platform_mem_state.gate);
+ goto exit;
+
+gate_create_fail:
+ atomic_set(&platform_mem_state.ref_count,
+ PLATFORM_MEM_MAKE_MAGICSTAMP(0));
+exit:
+ return retval;
+}
+EXPORT_SYMBOL(platform_mem_setup);
+
+/*
+ * ======== platform_mem_destroy ========
+ * Purpose:
+ * This will finalize the platform mem module.
+ */
+int platform_mem_destroy(void)
+{
+ s32 retval = 0;
+ struct platform_mem_map_table_info *info = NULL, *temp = NULL;
+
+ if (atomic_cmpmask_and_lt(&(platform_mem_state.ref_count),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(0),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (atomic_dec_return(&platform_mem_state.ref_count)
+ == PLATFORM_MEM_MAKE_MAGICSTAMP(0)) {
+ /* Delete the node in the map table */
+ list_for_each_entry_safe(info, temp,
+ &platform_mem_state.map_table,
+ mem_entry) {
+ iounmap((unsigned int *) info->knl_virtual_address);
+ list_del(&info->mem_entry);
+ kfree(info);
+ }
+ /* The list head itself is static; no list_del() needed */
+ /* Delete the gate handle */
+ kfree(platform_mem_state.gate);
+ }
+
+exit:
+ return retval;
+}
+EXPORT_SYMBOL(platform_mem_destroy);
+
+/*
+ * ======== platform_mem_map ========
+ * Purpose:
+ *      This will map a physical memory area into kernel virtual space.
+ */
+int platform_mem_map(memory_map_info *map_info)
+{
+ int retval = 0;
+ bool exists = false;
+ struct platform_mem_map_table_info *info = NULL;
+ struct list_head *list_info = NULL;
+
+ if (atomic_cmpmask_and_lt(&(platform_mem_state.ref_count),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(0),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(map_info == NULL)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (map_info->src == (u32) NULL) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(platform_mem_state.gate);
+ if (retval)
+ goto exit;
+
+ /* First check if the mapping already exists in the map table */
+ list_for_each(list_info, (struct list_head *)
+ &platform_mem_state.map_table) {
+ if ((((struct platform_mem_map_table_info *)
+ list_info)->physical_address == map_info->src) && \
+ (((struct platform_mem_map_table_info *)
+ list_info)->is_cached == map_info->is_cached)) {
+ exists = true;
+ map_info->dst = ((struct platform_mem_map_table_info *)
+ list_info)->knl_virtual_address;
+ /* Increase the refcount. */
+ ((struct platform_mem_map_table_info *)
+ list_info)->ref_count++;
+ break;
+ }
+ }
+ if (exists) {
+ mutex_unlock(platform_mem_state.gate);
+ goto exit;
+ }
+
+ map_info->dst = 0;
+ if (map_info->is_cached == true)
+ map_info->dst = (u32) ioremap((dma_addr_t)
+ (map_info->src), map_info->size);
+ else
+ map_info->dst = (u32) ioremap_nocache((dma_addr_t)
+ (map_info->src), map_info->size);
+ if (map_info->dst == 0) {
+ retval = -EFAULT;
+ goto ioremap_fail;
+ }
+
+ info = kmalloc(sizeof(struct platform_mem_map_table_info), GFP_KERNEL);
+ if (info == NULL) {
+ retval = -ENOMEM;
+ goto ioremap_fail;
+ }
+ /* Populate the info */
+ info->physical_address = map_info->src;
+ info->knl_virtual_address = map_info->dst;
+ info->size = map_info->size;
+ info->ref_count = 1;
+ info->is_cached = map_info->is_cached;
+ /* Put the info into the list */
+ list_add(&info->mem_entry, &platform_mem_state.map_table);
+ mutex_unlock(platform_mem_state.gate);
+ goto exit;
+
+ioremap_fail:
+ mutex_unlock(platform_mem_state.gate);
+exit:
+ return retval;
+}
+EXPORT_SYMBOL(platform_mem_map);
+
+/*
+ * ======== platform_mem_unmap ========
+ * Purpose:
+ *      This will unmap a memory area from kernel virtual space.
+ */
+int platform_mem_unmap(memory_unmap_info *unmap_info)
+{
+ s32 retval = 0;
+ bool found = false;
+ struct platform_mem_map_table_info *info = NULL;
+
+ if (atomic_cmpmask_and_lt(&(platform_mem_state.ref_count),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(0),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (unmap_info == NULL) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ if (unmap_info->addr == (u32) NULL) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(platform_mem_state.gate);
+ if (retval)
+ goto exit;
+
+ list_for_each_entry(info,
+ (struct list_head *)&platform_mem_state.map_table, mem_entry) {
+ if ((info->knl_virtual_address == unmap_info->addr) && \
+ (info->is_cached == unmap_info->is_cached)) {
+ info->ref_count--;
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ mutex_unlock(platform_mem_state.gate);
+ goto exit;
+ }
+
+ if (info->ref_count == 0) {
+ list_del(&info->mem_entry);
+ kfree(info);
+ iounmap((unsigned int *) unmap_info->addr);
+ }
+ mutex_unlock(platform_mem_state.gate);
+
+exit:
+ return retval;
+}
+EXPORT_SYMBOL(platform_mem_unmap);
+
+/*
+ * ======== platform_mem_translate ========
+ * Purpose:
+ * This will translate an address.
+ */
+void *platform_mem_translate(void *src_addr, enum memory_xlt_flags flags)
+{
+ void *buf = NULL;
+ struct platform_mem_map_table_info *tinfo = NULL;
+ u32 frm_addr;
+ u32 to_addr;
+ s32 retval = 0;
+
+ if (atomic_cmpmask_and_lt(&(platform_mem_state.ref_count),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(0),
+ PLATFORM_MEM_MAKE_MAGICSTAMP(1)) == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ retval = mutex_lock_interruptible(platform_mem_state.gate);
+ if (retval)
+ goto exit;
+
+ /* Traverse to the node in the map table */
+ list_for_each_entry(tinfo, &platform_mem_state.map_table, mem_entry) {
+ frm_addr = (flags == PLATFORM_MEM_XLT_FLAGS_VIRT2PHYS) ?
+ tinfo->knl_virtual_address : tinfo->physical_address;
+ to_addr = (flags == PLATFORM_MEM_XLT_FLAGS_VIRT2PHYS) ?
+ tinfo->physical_address : tinfo->knl_virtual_address;
+ if ((((u32) src_addr) >= frm_addr)
+ && (((u32) src_addr) < (frm_addr + tinfo->size))) {
+ buf = (void *) (to_addr + ((u32)src_addr - frm_addr));
+ break;
+ }
+ }
+ mutex_unlock(platform_mem_state.gate);
+
+exit:
+ return buf;
+}
+EXPORT_SYMBOL(platform_mem_translate);
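+
+/* Typical usage of this module, as a minimal sketch (the physical address
+ * and size below are hypothetical, and error handling is elided):
+ *
+ *	memory_map_info map_info;
+ *	memory_unmap_info unmap_info;
+ *
+ *	map_info.src = 0x9d000000;
+ *	map_info.size = 0x100000;
+ *	map_info.is_cached = false;
+ *	if (platform_mem_map(&map_info) == 0) {
+ *		void *phys = platform_mem_translate((void *)map_info.dst,
+ *					PLATFORM_MEM_XLT_FLAGS_VIRT2PHYS);
+ *		... use map_info.dst and phys ...
+ *		unmap_info.addr = map_info.dst;
+ *		unmap_info.is_cached = false;
+ *		platform_mem_unmap(&unmap_info);
+ *	}
+ */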
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dsp/syslink/multicore_ipc/sharedregion.c b/drivers/dsp/syslink/multicore_ipc/sharedregion.c
new file mode 100644
index 000000000000..843d83ac695c
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/sharedregion.c
@@ -0,0 +1,1606 @@
+/*
+ * sharedregion.c
+ *
+ * The SharedRegion module is designed to be used in a
+ * multi-processor environment where there are memory regions
+ * that are shared and accessed across different processors
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <syslink/atomic_linux.h>
+
+#include <multiproc.h>
+#include <nameserver.h>
+#include <heapmemmp.h>
+#include <sharedregion.h>
+
+/* Macro to make a correct module magic number with refCount */
+#define SHAREDREGION_MAKE_MAGICSTAMP(x) ((SHAREDREGION_MODULEID << 16u) | (x))
+
+#define SHAREDREGION_MAX_REGIONS_DEFAULT 4
+
+#define ROUND_UP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
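+/* e.g. ROUND_UP(13, 8) == 16 and ROUND_UP(16, 8) == 16; b must be a
+ * power of two for the mask arithmetic above to hold */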
+
+/* Module state object */
+struct sharedregion_module_object {
+ atomic_t ref_count; /* Reference count */
+ struct mutex *local_lock; /* Handle to a gate instance */
+ struct sharedregion_region *regions; /* Pointer to the regions */
+ struct sharedregion_config cfg; /* Current config values */
+ struct sharedregion_config def_cfg; /* Default config values */
+ u32 num_offset_bits;
+ /* Number of bits used for the offset of an SRPtr; calculated
+ * from num_entries at setup time */
+ u32 offset_mask; /* Offset bitmask used for generating an SRPtr */
+};
+
+/* Shared region state object variable with default settings */
+static struct sharedregion_module_object sharedregion_state = {
+ .num_offset_bits = 0,
+ .regions = NULL,
+ .local_lock = NULL,
+ .offset_mask = 0,
+ .def_cfg.num_entries = 4u,
+ .def_cfg.translate = true,
+ .def_cfg.cache_line_size = 128u
+};
+
+/* Pointer to the SharedRegion module state */
+static struct sharedregion_module_object *sharedregion_module = \
+ &sharedregion_state;
+
+/* Checks to make sure an overlap does not exist.
+ * Returns an error if an overlap is found. */
+static int _sharedregion_check_overlap(void *base, u32 len);
+
+/* Return the number of offsetBits bits */
+static u32 _sharedregion_get_num_offset_bits(void);
+
+/* This will get the sharedregion module configuration */
+int sharedregion_get_config(struct sharedregion_config *config)
+{
+ BUG_ON((config == NULL));
+ if (atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true) {
+ memcpy(config, &sharedregion_module->def_cfg,
+ sizeof(struct sharedregion_config));
+ } else {
+ memcpy(config, &sharedregion_module->cfg,
+ sizeof(struct sharedregion_config));
+ }
+ return 0;
+}
+EXPORT_SYMBOL(sharedregion_get_config);
+
+/* This will set up the sharedregion module */
+int sharedregion_setup(const struct sharedregion_config *config)
+{
+ struct sharedregion_config tmpcfg;
+ u32 i;
+ s32 retval = 0;
+
+ /* If the ref_count variable is not initialized, the upper 16 bits
+ * are written with the module id to ensure correctness of the
+ * ref_count variable
+ */
+ atomic_cmpmask_and_set(&sharedregion_module->ref_count,
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&sharedregion_module->ref_count)
+ != SHAREDREGION_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ if (config == NULL) {
+ sharedregion_get_config(&tmpcfg);
+ config = &tmpcfg;
+ }
+ /* config is guaranteed non-NULL at this point */
+ if (WARN_ON(config->num_entries == 0)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ memcpy(&sharedregion_module->cfg, config,
+ sizeof(struct sharedregion_config));
+ sharedregion_module->cfg.translate = true;
+
+ sharedregion_module->regions = kmalloc(
+ (sizeof(struct sharedregion_region) * \
+ sharedregion_module->cfg.num_entries),
+ GFP_KERNEL);
+ if (sharedregion_module->regions == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+ for (i = 0; i < sharedregion_module->cfg.num_entries; i++) {
+ sharedregion_module->regions[i].entry.base = NULL;
+ sharedregion_module->regions[i].entry.len = 0;
+ sharedregion_module->regions[i].entry.owner_proc_id = 0;
+ sharedregion_module->regions[i].entry.is_valid = false;
+ sharedregion_module->regions[i].entry.cache_enable = true;
+ sharedregion_module->regions[i].entry.cache_line_size =
+ sharedregion_module->cfg.cache_line_size;
+ sharedregion_module->regions[i].entry.create_heap = false;
+ sharedregion_module->regions[i].reserved_size = 0;
+ sharedregion_module->regions[i].heap = NULL;
+ sharedregion_module->regions[i].entry.name = NULL;
+ }
+
+ /* set the defaults for region 0 */
+ sharedregion_module->regions[0].entry.create_heap = true;
+ sharedregion_module->regions[0].entry.owner_proc_id = multiproc_self();
+
+ sharedregion_module->num_offset_bits = \
+ _sharedregion_get_num_offset_bits();
+ sharedregion_module->offset_mask =
+ (1 << sharedregion_module->num_offset_bits) - 1;
+
+ sharedregion_module->local_lock = kmalloc(sizeof(struct mutex),
+ GFP_KERNEL);
+ if (sharedregion_module->local_lock == NULL) {
+ retval = -ENOMEM;
+ goto gate_create_fail;
+ }
+ mutex_init(sharedregion_module->local_lock);
+
+ return 0;
+
+gate_create_fail:
+ kfree(sharedregion_module->regions);
+ sharedregion_module->regions = NULL;
+
+error:
+ printk(KERN_ERR "sharedregion_setup failed status:%x\n", retval);
+ sharedregion_destroy();
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_setup);
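+
+/* Setup usage sketch (illustrative): callers typically fetch the default
+ * configuration, adjust it, and pass it back in; the values below are
+ * hypothetical. Passing NULL uses the defaults instead.
+ *
+ *	struct sharedregion_config cfg;
+ *
+ *	sharedregion_get_config(&cfg);
+ *	cfg.num_entries = 4;
+ *	cfg.cache_line_size = 128;
+ *	if (sharedregion_setup(&cfg) < 0)
+ *		printk(KERN_ERR "sharedregion setup failed\n");
+ */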
+
+/* This will destroy the sharedregion module */
+int sharedregion_destroy(void)
+{
+ s32 retval = 0;
+ struct mutex *local_lock = NULL;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (!(atomic_dec_return(&sharedregion_module->ref_count)
+ == SHAREDREGION_MAKE_MAGICSTAMP(0))) {
+ retval = 1;
+ goto error;
+ }
+
+ /* local_lock may be NULL if setup failed part-way through */
+ local_lock = sharedregion_module->local_lock;
+ if (local_lock != NULL) {
+ retval = mutex_lock_interruptible(local_lock);
+ if (retval)
+ goto error;
+ }
+ kfree(sharedregion_module->regions);
+ sharedregion_module->regions = NULL;
+ memset(&sharedregion_module->cfg, 0,
+ sizeof(struct sharedregion_config));
+ sharedregion_module->num_offset_bits = 0;
+ sharedregion_module->offset_mask = 0;
+ sharedregion_module->local_lock = NULL;
+ if (local_lock != NULL)
+ mutex_unlock(local_lock);
+
+ kfree(local_lock);
+ return 0;
+
+error:
+ if (retval < 0) {
+ printk(KERN_ERR "sharedregion_destroy failed status:%x\n",
+ retval);
+ }
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_destroy);
+
+/* Creates a heap by owner of region for each SharedRegion.
+ * Function is called by Ipc_start(). Requires that SharedRegion 0
+ * be valid before calling start(). */
+int sharedregion_start(void)
+{
+ int retval = 0;
+ struct sharedregion_region *region = NULL;
+ void *shared_addr = NULL;
+ struct heapmemmp_object *heap_handle = NULL;
+ struct heapmemmp_params params;
+ int i;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if ((sharedregion_module->cfg.num_entries == 0) ||
+ (sharedregion_module->regions[0].entry.is_valid == false)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ /*
+ * Loop through shared regions. If an owner of a region is specified
+ * and create_heap has been specified for the SharedRegion, then
+ * the owner creates a HeapMemMP and the other processors open it.
+ */
+ for (i = 0; i < sharedregion_module->cfg.num_entries; i++) {
+ region = &(sharedregion_module->regions[i]);
+ if ((region->entry.is_valid)
+ && (region->entry.owner_proc_id == multiproc_self())
+ && (region->entry.create_heap)
+ && (region->heap == NULL)) {
+ /* get the next free address in each region */
+ shared_addr = (void *)((u32) region->entry.base
+ + region->reserved_size);
+
+ /* Create the HeapMemMP in the region. */
+ heapmemmp_params_init(&params);
+ params.shared_addr = shared_addr;
+ params.shared_buf_size =
+ region->entry.len - region->reserved_size;
+
+ /* Adjust to account for the size of HeapMemMP_Attrs */
+ params.shared_buf_size -=
+ ((heapmemmp_shared_mem_req(&params) - \
+ params.shared_buf_size));
+ heap_handle = heapmemmp_create(&params);
+ if (heap_handle == NULL) {
+ retval = -1;
+ break;
+ } else {
+ region->heap = heap_handle;
+ }
+ }
+ }
+
+error:
+ if (retval < 0) {
+ printk(KERN_ERR "sharedregion_start failed status:%x\n",
+ retval);
+ }
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_start);
+
+/* Function to stop the SharedRegion module */
+int sharedregion_stop(void)
+{
+ int retval = 0;
+ int tmp_status = 0;
+ struct sharedregion_region *region = NULL;
+ int i;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON((sharedregion_module->cfg.num_entries == 0)
+ || (sharedregion_module->regions[0].entry.is_valid == false))) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ /*
+ * Loop through shared regions. If an owner of a region is specified
+ * and create_heap has been specified for the SharedRegion, then
+ * the other processors close it and the owner deletes the HeapMemMP.
+ */
+ for (i = 0; i < sharedregion_module->cfg.num_entries; i++) {
+ region = &(sharedregion_module->regions[i]);
+ if ((region->entry.is_valid)
+ && (region->entry.owner_proc_id == multiproc_self())
+ && (region->entry.create_heap)
+ && (region->heap != NULL)) {
+ /* Delete heap */
+ tmp_status = heapmemmp_delete((void **)&(region->heap));
+ if ((tmp_status < 0) && (retval >= 0))
+ retval = -1;
+ }
+ memset(region, 0, sizeof(struct sharedregion_region));
+ }
+
+ /* set the defaults for region 0 */
+ sharedregion_module->regions[0].entry.create_heap = true;
+ sharedregion_module->regions[0].entry.owner_proc_id = multiproc_self();
+
+error:
+ if (retval < 0)
+ printk(KERN_ERR "sharedregion_stop failed status:%x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_stop);
+
+/* Opens a heap, for non-owner processors, for each SharedRegion. */
+int sharedregion_attach(u16 remote_proc_id)
+{
+ int retval = 0;
+ struct sharedregion_region *region = NULL;
+ void *shared_addr = NULL;
+ int i;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(remote_proc_id >= MULTIPROC_MAXPROCESSORS)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * Loop through the regions and open the heap if not owner
+ */
+ for (i = 0; i < sharedregion_module->cfg.num_entries; i++) {
+ region = &(sharedregion_module->regions[i]);
+ if ((region->entry.is_valid) && \
+ (region->entry.owner_proc_id != multiproc_self()) && \
+ (region->entry.owner_proc_id != \
+ SHAREDREGION_DEFAULTOWNERID) && \
+ (region->entry.create_heap) && (region->heap == NULL)) {
+ /* SharedAddr should match creator's for each region */
+ shared_addr = (void *)((u32) region->entry.base +
+ region->reserved_size);
+
+ /* Heap should already be created so open by address */
+ retval = heapmemmp_open_by_addr(shared_addr,
+ (void **) &(region->heap));
+ if (retval < 0) {
+ retval = -1;
+ break;
+ }
+ }
+ }
+
+error:
+ if (retval < 0) {
+ printk(KERN_ERR "sharedregion_attach failed status:%x\n",
+ retval);
+ }
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_attach);
+
+/* Closes a heap, for non-owner processors, for each SharedRegion. */
+int sharedregion_detach(u16 remote_proc_id)
+{
+ int retval = 0;
+ int tmp_status = 0;
+ struct sharedregion_region *region = NULL;
+ u16 i;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(remote_proc_id >= MULTIPROC_MAXPROCESSORS)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * Loop through the regions and close the heap if not owner
+ */
+ for (i = 0; i < sharedregion_module->cfg.num_entries; i++) {
+ region = &(sharedregion_module->regions[i]);
+ if ((region->entry.is_valid) && \
+ (region->entry.owner_proc_id != multiproc_self()) && \
+ (region->entry.owner_proc_id != \
+ SHAREDREGION_DEFAULTOWNERID) && \
+ (region->entry.create_heap) && (region->heap != NULL)) {
+ /* Close the heap that was opened during attach */
+ tmp_status = heapmemmp_close((void **) &(region->heap));
+ if ((tmp_status < 0) && (retval >= 0)) {
+ retval = -1;
+ printk(KERN_ERR "sharedregion_detach: "
+ "heapmemmp_close failed!");
+ }
+ }
+ }
+
+error:
+ if (retval < 0) {
+ printk(KERN_ERR "sharedregion_detach failed status:%x\n",
+ retval);
+ }
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_detach);
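+
+/* Lifecycle sketch (illustrative, assuming the usual IPC-driven call order):
+ * the processor that owns a region runs setup -> set_entry -> start, while
+ * a non-owner runs setup -> set_entry -> attach; teardown mirrors it with
+ * detach/stop -> clear_entry -> destroy. A region's heap is created by its
+ * owner in sharedregion_start() and opened by everyone else in
+ * sharedregion_attach():
+ *
+ *	sharedregion_setup(NULL);
+ *	sharedregion_set_entry(1, &entry);
+ *	if (entry.owner_proc_id == multiproc_self())
+ *		sharedregion_start();
+ *	else
+ *		sharedregion_attach(remote_proc_id);
+ */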
+
+/* This will return the address pointer associated with the
+ * shared region pointer */
+void *sharedregion_get_ptr(u32 *srptr)
+{
+ struct sharedregion_region *region = NULL;
+ void *return_ptr = NULL;
+ u16 region_id;
+ s32 retval = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (srptr == SHAREDREGION_INVALIDSRPTR) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (sharedregion_module->cfg.translate == false)
+ return_ptr = (void *)srptr;
+ else {
+ region_id = \
+ ((u32)(srptr) >> sharedregion_module->num_offset_bits);
+ if (region_id >= sharedregion_module->cfg.num_entries) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ region = &(sharedregion_module->regions[region_id]);
+ return_ptr = (void *)(((u32)srptr & \
+ sharedregion_module->offset_mask) + \
+ (u32) region->entry.base);
+ }
+ return return_ptr;
+
+error:
+ printk(KERN_ERR "sharedregion_get_ptr failed 0x%x\n", retval);
+ return (void *)NULL;
+
+}
+EXPORT_SYMBOL(sharedregion_get_ptr);
+
+/* This will return the shared region pointer associated with an
+ * address in a shared region area registered with the
+ * sharedregion module */
+u32 *sharedregion_get_srptr(void *addr, u16 id)
+{
+ struct sharedregion_region *region = NULL;
+ u32 *ret_ptr = SHAREDREGION_INVALIDSRPTR;
+ s32 retval = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(addr == NULL))
+ goto error;
+
+ if (WARN_ON(id >= sharedregion_module->cfg.num_entries))
+ goto error;
+
+ if (sharedregion_module->cfg.translate == false)
+ ret_ptr = (u32 *)addr;
+ else {
+ region = &(sharedregion_module->regions[id]);
+ /*
+ * Note: The very last byte on the very last id cannot be
+ * mapped because SharedRegion_INVALIDSRPTR which is ~0
+ * denotes an error. Since pointers should be word
+ * aligned, we don't expect this to be a problem.
+ *
+ * e.g.: num_entries = 4, id = 3, base = 0x00000000,
+ * len = 0x40000000 ==> address 0x3fffffff would be
+ * invalid because the SRPtr for this address is
+ * 0xffffffff
+ */
+ if (((u32) addr >= (u32) region->entry.base) && ((u32) addr < \
+ ((u32) region->entry.base + region->entry.len))) {
+ ret_ptr = (u32 *)
+ ((id << sharedregion_module->num_offset_bits) |
+ ((u32) addr - (u32) region->entry.base));
+ }
+ }
+ return ret_ptr;
+
+error:
+ printk(KERN_ERR "sharedregion_get_srptr failed 0x%x\n", retval);
+ return (u32 *)NULL;
+}
+EXPORT_SYMBOL(sharedregion_get_srptr);
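+
+/* SRPtr encoding example (illustrative): with num_entries = 4 the module
+ * computes num_offset_bits = 30 and offset_mask = 0x3fffffff, so an SRPtr
+ * is (id << 30) | offset. For an address 0x100 bytes into region 2 whose
+ * base is the hypothetical 0x9d000000:
+ *
+ *	srptr = sharedregion_get_srptr((void *)0x9d000100, 2);
+ *		== (u32 *)((2 << 30) | 0x100) == (u32 *)0x80000100
+ *	sharedregion_get_ptr(srptr) then returns 0x9d000100 again.
+ */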
+
+#if 0
+/*
+ * ======== sharedregion_add ========
+ * Purpose:
+ * This will add a memory segment to the lookup table
+ * during runtime by base and length
+ *      at runtime by base and length
+int sharedregion_add(u32 index, void *base, u32 len)
+{
+ struct sharedregion_info *entry = NULL;
+ struct sharedregion_info *table = NULL;
+ s32 retval = 0;
+ u32 i;
+ u16 myproc_id;
+ bool overlap = false;
+ bool same = false;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (index >= sharedregion_module->cfg.num_entries ||
+ sharedregion_module->region_size < len) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ myproc_id = multiproc_get_id(NULL);
+ retval = mutex_lock_interruptible(sharedregion_module->local_lock);
+ if (retval)
+ goto error;
+
+
+ table = sharedregion_module->table;
+ /* Check for overlap */
+ for (i = 0; i < sharedregion_module->cfg.num_entries; i++) {
+ entry = (table
+ + (myproc_id * sharedregion_module->cfg.num_entries)
+ + i);
+ if (entry->is_valid) {
+ /* Handle duplicate entry */
+ if ((base == entry->base) && (len == entry->len)) {
+ same = true;
+ break;
+ }
+
+ if ((base >= entry->base) &&
+ (base < (void *)
+ ((u32)entry->base + entry->len))) {
+ overlap = true;
+ break;
+ }
+
+ if ((base < entry->base) &&
+ (void *)((u32)base + len) >= entry->base) {
+ overlap = true;
+ break;
+ }
+ }
+ }
+
+ if (same) {
+ retval = 1;
+ goto success;
+ }
+
+ if (overlap) {
+ /* FHACK: FIX ME */
+ retval = 1;
+ goto mem_overlap_error;
+ }
+
+ entry = (table
+ + (myproc_id * sharedregion_module->cfg.num_entries)
+ + index);
+ if (entry->is_valid == false) {
+ entry->base = base;
+ entry->len = len;
+ entry->is_valid = true;
+
+ } else {
+ /* FHACK: FIX ME */
+ sharedregion_module->ref_count_table[(myproc_id *
+ sharedregion_module->cfg.num_entries)
+ + index] += 1;
+ retval = 1;
+ goto dup_entry_error;
+ }
+
+success:
+ mutex_unlock(sharedregion_module->local_lock);
+ return 0;
+
+dup_entry_error: /* Fall through */
+mem_overlap_error:
+ printk(KERN_WARNING "sharedregion_add entry exists status: %x\n",
+ retval);
+ mutex_unlock(sharedregion_module->local_lock);
+
+error:
+ if (retval < 0)
+ printk(KERN_ERR "sharedregion_add failed status:%x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_add);
+
+/*
+ * ======== sharedregion_remove ========
+ * Purpose:
+ *      This will remove a memory segment from the lookup table
+ *      at runtime by index
+ */
+int sharedregion_remove(u32 index)
+{
+ struct sharedregion_info *entry = NULL;
+ struct sharedregion_info *table = NULL;
+ u16 myproc_id;
+ s32 retval = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (index >= sharedregion_module->cfg.num_entries) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ retval = mutex_lock_interruptible(sharedregion_module->local_lock);
+ if (retval)
+ goto error;
+
+ myproc_id = multiproc_get_id(NULL);
+ table = sharedregion_module->table;
+ entry = (table
+ + (myproc_id * sharedregion_module->cfg.num_entries)
+ + index);
+
+ if (sharedregion_module->ref_count_table[(myproc_id *
+ sharedregion_module->cfg.num_entries)
+ + index] > 0)
+ sharedregion_module->ref_count_table[(myproc_id *
+ sharedregion_module->cfg.num_entries)
+ + index] -= 1;
+ else {
+ entry->is_valid = false;
+ entry->base = NULL;
+ entry->len = 0;
+ }
+ mutex_unlock(sharedregion_module->local_lock);
+ return 0;
+
+error:
+ printk(KERN_ERR "sharedregion_remove failed status:%x\n", retval);
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_remove);
+
+/*
+ * ======== sharedregion_get_table_info ========
+ * Purpose:
+ * This will get the table entry information for the
+ * specified index and id
+ */
+int sharedregion_get_table_info(u32 index, u16 proc_id,
+ struct sharedregion_info *info)
+{
+ struct sharedregion_info *entry = NULL;
+ struct sharedregion_info *table = NULL;
+ u16 proc_count;
+ s32 retval = 0;
+
+ BUG_ON(info == NULL);
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ proc_count = multiproc_get_max_processors();
+ if (index >= sharedregion_module->cfg.num_entries ||
+ proc_id >= proc_count) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ retval = mutex_lock_interruptible(sharedregion_module->local_lock);
+ if (retval)
+ goto error;
+
+ table = sharedregion_module->table;
+ entry = (table
+ + (proc_id * sharedregion_module->cfg.num_entries)
+ + index);
+ memcpy((void *) info, (void *) entry, sizeof(struct sharedregion_info));
+ mutex_unlock(sharedregion_module->local_lock);
+ return 0;
+
+error:
+ printk(KERN_ERR "sharedregion_get_table_info failed status:%x\n",
+ retval);
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_get_table_info);
+
+/*
+ * ======== sharedregion_set_table_info ========
+ * Purpose:
+ * This will set the table entry information for the
+ * specified index and id
+ */
+int sharedregion_set_table_info(u32 index, u16 proc_id,
+ struct sharedregion_info *info)
+{
+ struct sharedregion_info *entry = NULL;
+ struct sharedregion_info *table = NULL;
+ u16 proc_count;
+ s32 retval = 0;
+
+ BUG_ON(info == NULL);
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ proc_count = multiproc_get_max_processors();
+ if (index >= sharedregion_module->cfg.num_entries ||
+ proc_id >= proc_count) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ retval = mutex_lock_interruptible(sharedregion_module->local_lock);
+ if (retval)
+ goto error;
+
+ table = sharedregion_module->table;
+ entry = (table
+ + (proc_id * sharedregion_module->cfg.num_entries)
+ + index);
+ memcpy((void *) entry, (void *) info, sizeof(struct sharedregion_info));
+ mutex_unlock(sharedregion_module->local_lock);
+ return 0;
+
+error:
+ printk(KERN_ERR "sharedregion_set_table_info failed status:%x\n",
+ retval);
+ return retval;
+}
+EXPORT_SYMBOL(sharedregion_set_table_info);
+#endif
+
+/* Return the region info */
+void sharedregion_get_region_info(u16 id, struct sharedregion_region *region)
+{
+ struct sharedregion_region *regions = NULL;
+ s32 retval = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(id >= sharedregion_module->cfg.num_entries)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (WARN_ON(region == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ regions = &(sharedregion_module->regions[id]);
+ memcpy((void *) region, (void *) regions,
+ sizeof(struct sharedregion_region));
+
+error:
+ if (retval < 0) {
+ printk(KERN_ERR "sharedregion_get_region_info failed: "
+ "status = 0x%x", retval);
+ }
+ return;
+}
+
+/* Whether address translation is enabled */
+bool sharedregion_translate_enabled(void)
+{
+ return sharedregion_module->cfg.translate;
+}
+
+/* Gets the number of regions */
+u16 sharedregion_get_num_regions(void)
+{
+ return sharedregion_module->cfg.num_entries;
+}
+
+/* Sets the table information entry in the table */
+int sharedregion_set_entry(u16 id, struct sharedregion_entry *entry)
+{
+ int retval = 0;
+ struct sharedregion_region *region = NULL;
+ void *shared_addr = NULL;
+ struct heapmemmp_object *heap_handle = NULL;
+ struct heapmemmp_object **heap_handle_ptr = NULL;
+ struct heapmemmp_params params;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(entry == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (WARN_ON(id >= sharedregion_module->cfg.num_entries)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ region = &(sharedregion_module->regions[id]);
+
+ /* Make sure region does not overlap existing ones */
+ retval = _sharedregion_check_overlap(entry->base, entry->len);
+ if (retval < 0) {
+ printk(KERN_ERR "sharedregion_set_entry: Entry is overlapping "
+ "existing entry!");
+ goto error;
+ }
+ if (region->entry.is_valid) {
+ /* region entry should be invalid at this point */
+ retval = -EEXIST;
+ printk(KERN_ERR "sharedregion_set_entry: Entry already exists");
+ goto error;
+ }
+ if ((entry->cache_enable) && (entry->cache_line_size == 0)) {
+ /* if cache enabled, cache line size must != 0 */
+ retval = -1;
+ printk(KERN_ERR "_sharedregion_setEntry: If cache enabled, "
+ "cache line size must != 0");
+ goto error;
+ }
+
+ /* needs to be thread safe */
+ retval = mutex_lock_interruptible(sharedregion_module->local_lock);
+ if (retval)
+ goto error;
+ /* set specified region id to entry values */
+ memcpy((void *)&(region->entry), (void *)entry,
+ sizeof(struct sharedregion_entry));
+ mutex_unlock(sharedregion_module->local_lock);
+
+ if (entry->owner_proc_id == multiproc_self()) {
+ if ((entry->create_heap) && (region->heap == NULL)) {
+ /* get current Ptr (reserve memory with size of 0) */
+ shared_addr = sharedregion_reserve_memory(id, 0);
+ heapmemmp_params_init(&params);
+ params.shared_addr = shared_addr;
+ params.shared_buf_size = region->entry.len - \
+ region->reserved_size;
+
+ /*
+ * Calculate size of HeapMemMP_Attrs and adjust
+ * shared_buf_size. Size of HeapMemMP_Attrs =
+ * HeapMemMP_sharedMemReq(&params) -
+ * params.shared_buf_size
+ */
+ params.shared_buf_size -= \
+ (heapmemmp_shared_mem_req(&params) - \
+ params.shared_buf_size);
+
+ heap_handle = heapmemmp_create(&params);
+ if (heap_handle == NULL) {
+ region->entry.is_valid = false;
+ retval = -ENOMEM;
+ goto error;
+ } else
+ region->heap = heap_handle;
+ }
+ } else {
+ if ((entry->create_heap) && (region->heap == NULL)) {
+ /* shared_addr should match creator's for each region */
+ shared_addr = (void *)((u32) region->entry.base
+ + region->reserved_size);
+
+ /* set the pointer to a heap handle */
+ heap_handle_ptr = \
+ (struct heapmemmp_object **) &(region->heap);
+
+ /* open the heap by address */
+ retval = heapmemmp_open_by_addr(shared_addr, (void **)
+ heap_handle_ptr);
+ if (retval < 0) {
+ region->entry.is_valid = false;
+ retval = -1;
+ goto error;
+ }
+ }
+ }
+ return 0;
+
+error:
+ printk(KERN_ERR "sharedregion_set_entry failed! status = 0x%x", retval);
+ return retval;
+}
+
+/* Clears the region in the table */
+int sharedregion_clear_entry(u16 id)
+{
+ int retval = 0;
+ struct sharedregion_region *region = NULL;
+ struct heapmemmp_object *heapmem_ptr = NULL;
+ u16 my_id;
+ u16 owner_proc_id;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(id >= sharedregion_module->cfg.num_entries)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /* Need to make sure not trying to clear Region 0 */
+ if (WARN_ON(id == 0)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ my_id = multiproc_self();
+
+ /* Needs to be thread safe */
+ retval = mutex_lock_interruptible(sharedregion_module->local_lock);
+ if (retval)
+ goto error;
+ region = &(sharedregion_module->regions[id]);
+
+ /* Store these fields to local variables */
+ owner_proc_id = region->entry.owner_proc_id;
+ heapmem_ptr = region->heap;
+
+ /* Clear region to their defaults */
+ region->entry.is_valid = false;
+ region->entry.base = NULL;
+ region->entry.len = 0u;
+ region->entry.owner_proc_id = SHAREDREGION_DEFAULTOWNERID;
+ region->entry.cache_enable = true;
+ region->entry.cache_line_size = \
+ sharedregion_module->cfg.cache_line_size;
+ region->entry.create_heap = false;
+ region->entry.name = NULL;
+ region->reserved_size = 0u;
+ region->heap = NULL;
+ mutex_unlock(sharedregion_module->local_lock);
+
+ /* Delete or close previous created heap outside the gate */
+ if (heapmem_ptr != NULL) {
+ if (owner_proc_id == my_id) {
+ retval = heapmemmp_delete((void **) &heapmem_ptr);
+ if (retval < 0) {
+ retval = -1;
+ goto error;
+ }
+ } else if (owner_proc_id != (u16) SHAREDREGION_DEFAULTOWNERID) {
+ retval = heapmemmp_close((void **) &heapmem_ptr);
+ if (retval < 0) {
+ retval = -1;
+ goto error;
+ }
+ }
+ }
+ return 0;
+
+error:
+ printk(KERN_ERR "sharedregion_clear_entry failed! status = 0x%x",
+ retval);
+ return retval;
+}
+
+/* Clears the reserved memory for each region in the table */
+void sharedregion_clear_reserved_memory(void)
+{
+ struct sharedregion_region *region = NULL;
+ int i;
+
+ /*
+ * Loop through shared regions. If an owner of a region is specified,
+ * the owner zeros out the reserved memory in each region.
+ */
+ for (i = 0; i < sharedregion_module->cfg.num_entries; i++) {
+ region = &(sharedregion_module->regions[i]);
+ if ((region->entry.is_valid) && \
+ (region->entry.owner_proc_id == multiproc_self())) {
+ /* Clear reserved memory */
+ memset(region->entry.base, 0, region->reserved_size);
+
+ /* Writeback invalidate cache if enabled in region */
+ if (region->entry.cache_enable) {
+ /* TODO: Enable cache */
+ /* Cache_wbInv(region->entry.base,
+ region->reserved_size,
+ Cache_Type_ALL,
+ true); */
+ }
+ }
+ }
+}
+
+/* Initializes the entry fields */
+void sharedregion_entry_init(struct sharedregion_entry *entry)
+{
+ s32 retval = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(entry == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /* init the entry to default values */
+ entry->base = NULL;
+ entry->len = 0;
+ entry->owner_proc_id = SHAREDREGION_DEFAULTOWNERID;
+ entry->cache_enable = false; /* Set to true once cache API is done */
+ entry->cache_line_size = sharedregion_module->cfg.cache_line_size;
+ entry->create_heap = false;
+ entry->name = NULL;
+ entry->is_valid = false;
+
+error:
+ if (retval < 0) {
+ printk(KERN_ERR "sharedregion_entry_init failed: "
+ "status = 0x%x", retval);
+ }
+ return;
+}
+
+/* Returns Heap Handle of associated id */
+void *sharedregion_get_heap(u16 id)
+{
+ struct heapmemmp_object *heap = NULL;
+ s32 retval = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(id >= sharedregion_module->cfg.num_entries)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * If translate == true or translate == false
+ * and 'id' is not INVALIDREGIONID, then assert id is valid.
+ * Return the heap associated with the region id.
+ *
+ * If those conditions are not met, the id is from
+ * an address in local memory so return NULL.
+ */
+ if ((sharedregion_module->cfg.translate) || \
+ ((sharedregion_module->cfg.translate == false) && \
+ (id != SHAREDREGION_INVALIDREGIONID))) {
+ heap = sharedregion_module->regions[id].heap;
+ }
+ return (void *)heap;
+
+error:
+ printk(KERN_ERR "sharedregion_get_heap failed: status = 0x%x", retval);
+ return (void *)NULL;
+}
+
+/* This will return the id for the specified address pointer. */
+u16 sharedregion_get_id(void *addr)
+{
+ struct sharedregion_region *region = NULL;
+ u16 region_id = SHAREDREGION_INVALIDREGIONID;
+ u16 i;
+ s32 retval = -ENOENT;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(addr == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ retval = mutex_lock_interruptible(sharedregion_module->local_lock);
+ if (retval) {
+ retval = -ENODEV;
+ goto error;
+ }
+ retval = -ENOENT; /* reported if no region contains addr */
+ for (i = 0; i < sharedregion_module->cfg.num_entries; i++) {
+ region = &(sharedregion_module->regions[i]);
+ if (region->entry.is_valid && (addr >= region->entry.base) &&
+ (addr < (void *)((u32)region->entry.base + \
+ (region->entry.len)))) {
+ region_id = i;
+ retval = 0;
+ break;
+ }
+ }
+ mutex_unlock(sharedregion_module->local_lock);
+
+error:
+ if (retval < 0) {
+ printk(KERN_ERR "sharedregion_get_id failed: "
+ "status = 0x%x", retval);
+ }
+ return region_id;
+}
+EXPORT_SYMBOL(sharedregion_get_id);
+
+/* Returns the id of the shared region that matches name.
+ * Returns SHAREDREGION_INVALIDREGIONID if no region is found. */
+u16 sharedregion_get_id_by_name(char *name)
+{
+ struct sharedregion_region *region = NULL;
+ u16 region_id = SHAREDREGION_INVALIDREGIONID;
+ u16 i;
+ s32 retval = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(name == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /* Needs to be thread safe */
+ retval = mutex_lock_interruptible(sharedregion_module->local_lock);
+ if (retval)
+ goto error;
+ /* loop through entries to find matching name */
+ for (i = 0; i < sharedregion_module->cfg.num_entries; i++) {
+ region = &(sharedregion_module->regions[i]);
+ if (region->entry.is_valid) {
+ if (strcmp(region->entry.name, name) == 0) {
+ region_id = i;
+ break;
+ }
+ }
+ }
+ mutex_unlock(sharedregion_module->local_lock);
+
+error:
+ if (retval < 0) {
+ printk(KERN_ERR "sharedregion_get_id_by_name failed: "
+ "status = 0x%x", retval);
+ }
+ return region_id;
+}
+
+/* Gets the entry information for the specified region id */
+int sharedregion_get_entry(u16 id, struct sharedregion_entry *entry)
+{
+ int retval = 0;
+ struct sharedregion_region *region = NULL;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(entry == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (WARN_ON(id >= sharedregion_module->cfg.num_entries)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ region = &(sharedregion_module->regions[id]);
+ memcpy((void *) entry, (void *) &(region->entry),
+ sizeof(struct sharedregion_entry));
+ return 0;
+
+error:
+ printk(KERN_ERR "sharedregion_get_entry failed: status = 0x%x", retval);
+ return retval;
+}
+
+/* Get cache line size */
+uint sharedregion_get_cache_line_size(u16 id)
+{
+ uint cache_line_size = sizeof(int);
+ s32 retval = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(id >= sharedregion_module->cfg.num_entries)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * If translate == true, or translate == false and 'id' is not
+ * INVALIDREGIONID, then assert id is valid and return the cache
+ * line size configured for the region id.
+ *
+ * If those conditions are not met, the id is from an address in
+ * local memory, so return the default size.
+ */
+ if ((sharedregion_module->cfg.translate) || \
+ ((sharedregion_module->cfg.translate == false) && \
+ (id != SHAREDREGION_INVALIDREGIONID))) {
+ cache_line_size =
+ sharedregion_module->regions[id].entry.cache_line_size;
+ }
+ return cache_line_size;
+
+error:
+ printk(KERN_ERR "sharedregion_get_cache_line_size failed: "
+ "status = 0x%x", retval);
+ return cache_line_size;
+}
+
+/* Is cache enabled? */
+bool sharedregion_is_cache_enabled(u16 id)
+{
+ bool cache_enable = false;
+ s32 retval = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(id >= sharedregion_module->cfg.num_entries)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * If translate == true, or translate == false and 'id' is not
+ * INVALIDREGIONID, then assert id is valid and return the cache
+ * enabled flag for the region id.
+ *
+ * If those conditions are not met, the id is from an address in
+ * local memory, so return false.
+ */
+ if ((sharedregion_module->cfg.translate) || \
+ ((sharedregion_module->cfg.translate == false) && \
+ (id != SHAREDREGION_INVALIDREGIONID))) {
+ cache_enable = \
+ sharedregion_module->regions[id].entry.cache_enable;
+ }
+ return cache_enable;
+
+error:
+ printk(KERN_ERR "sharedregion_is_cache_enabled failed: "
+ "status = 0x%x", retval);
+ return false;
+}
+
+/* Reserves the specified amount of memory from the specified region id. */
+void *sharedregion_reserve_memory(u16 id, uint size)
+{
+ void *ret_ptr = NULL;
+ struct sharedregion_region *region = NULL;
+ u32 min_align;
+ uint new_size;
+ uint cur_size;
+ uint cache_line_size = 0;
+ s32 retval = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(id >= sharedregion_module->cfg.num_entries)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (WARN_ON(sharedregion_module->regions[id].entry.is_valid == false)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /* TODO: min_align = Memory_getMaxDefaultTypeAlign(); */
+ min_align = 4;
+ cache_line_size = sharedregion_get_cache_line_size(id);
+ if (cache_line_size > min_align)
+ min_align = cache_line_size;
+
+ region = &(sharedregion_module->regions[id]);
+
+ /* Set the current size to the reserved_size */
+ cur_size = region->reserved_size;
+
+ /* No need to round here since cur_size is already aligned */
+ ret_ptr = (void *)((u32) region->entry.base + cur_size);
+
+ /* Round the new size up to the minimum alignment */
+ new_size = ROUND_UP(size, min_align);
+
+ /* Need to make sure (cur_size + new_size) is smaller than region len */
+ if (region->entry.len < (cur_size + new_size)) {
+ retval = -EINVAL;
+ printk(KERN_ERR "sharedregion_reserve_memory: Too large size "
+ "is requested to be reserved!");
+ goto error;
+ }
+
+ /* Add the new size to current size */
+ region->reserved_size = cur_size + new_size;
+ return ret_ptr;
+
+error:
+ printk(KERN_ERR "sharedregion_reserve_memory failed: "
+ "status = 0x%x", retval);
+ return (void *)NULL;
+}
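+
+/* Reservation arithmetic example (illustrative): with min_align = 128 (the
+ * region cache line size) and reserved_size currently 0x80, the call
+ * sharedregion_reserve_memory(id, 0x30) returns entry.base + 0x80 and
+ * advances reserved_size to 0x80 + ROUND_UP(0x30, 128) = 0x100, so every
+ * reservation stays cache-line aligned. */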
+
+/* Unreserve the specified amount of memory from the specified region id. */
+void sharedregion_unreserve_memory(u16 id, uint size)
+{
+ struct sharedregion_region *region = NULL;
+ u32 min_align;
+ uint new_size;
+ uint cur_size;
+ uint cache_line_size = 0;
+ s32 retval = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(id >= sharedregion_module->cfg.num_entries)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (WARN_ON(sharedregion_module->regions[id].entry.is_valid == false)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ /* TODO: min_align = Memory_getMaxDefaultTypeAlign(); */
+ min_align = 4;
+ cache_line_size = sharedregion_get_cache_line_size(id);
+ if (cache_line_size > min_align)
+ min_align = cache_line_size;
+
+ region = &(sharedregion_module->regions[id]);
+
+ /* Set the current size to the reserved_size */
+ cur_size = region->reserved_size;
+
+ /* Round the new size up to the minimum alignment */
+ new_size = ROUND_UP(size, min_align);
+
+ /* Add the new size to current size */
+ region->reserved_size = cur_size - new_size;
+
+error:
+ if (retval < 0) {
+ printk(KERN_ERR "sharedregion_unreserve_memory failed: "
+ "status = 0x%x", retval);
+ }
+ return;
+}
+
+/* =============================================================================
+ * Internal Functions
+ * =============================================================================
+ */
+/* Checks to make sure an overlap does not exist. */
+int _sharedregion_check_overlap(void *base, u32 len)
+{
+ int retval = 0;
+ struct sharedregion_region *region = NULL;
+ u32 i;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ retval = mutex_lock_interruptible(sharedregion_module->local_lock);
+ if (retval)
+ goto error;
+
+ /* check whether new region overlaps existing ones */
+ for (i = 0; i < sharedregion_module->cfg.num_entries; i++) {
+ region = &(sharedregion_module->regions[i]);
+ if (region->entry.is_valid) {
+ if (base >= region->entry.base) {
+ if (base < (void *)((u32) region->entry.base
+ + region->entry.len)) {
+ retval = -1;
+ printk(KERN_ERR "_sharedregion_check_"
+ "_overlap failed: Specified "
+ "region falls within another "
+ "region!");
+ break;
+ }
+ } else {
+ if ((void *)((u32) base + len) > \
+ region->entry.base) {
+ retval = -1;
+ printk(KERN_ERR "_sharedregion_check_"
+ "_overlap failed: Specified "
+ "region spans across multiple "
+ "regions!");
+ break;
+ }
+ }
+ }
+ }
+
+ mutex_unlock(sharedregion_module->local_lock);
+ return retval;
+
+error:
+ printk(KERN_ERR "_sharedregion_check_overlap failed: "
+ "status = 0x%x", retval);
+ return retval;
+}
+
+/* Return the number of offset bits */
+u32 _sharedregion_get_num_offset_bits(void)
+{
+ u32 num_entries = sharedregion_module->cfg.num_entries;
+ u32 index_bits = 0;
+ u32 num_offset_bits = 0;
+ s32 retval = 0;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (num_entries == 0 || num_entries == 1)
+ index_bits = num_entries;
+ else {
+ num_entries = num_entries - 1;
+
+ /* determine the number of bits for the index */
+ while (num_entries) {
+ index_bits++;
+ num_entries = num_entries >> 1;
+ }
+ }
+ num_offset_bits = 32 - index_bits;
+
+error:
+ if (retval < 0) {
+ printk(KERN_ERR "_sharedregion_get_num_offset_bits failed: "
+ "status = 0x%x", retval);
+ }
+ return num_offset_bits;
+}
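+
+/* Worked example: num_entries = 4 gives num_entries - 1 = 3, which needs
+ * index_bits = 2, so num_offset_bits = 30; num_entries = 1 takes the
+ * special case above, giving index_bits = 1 and num_offset_bits = 31. */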
+
+/* Sets the table information entry in the table (doesn't create heap). */
+int _sharedregion_set_entry(u16 id, struct sharedregion_entry *entry)
+{
+ int retval = 0;
+ struct sharedregion_region *region = NULL;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(sharedregion_module->ref_count),
+ SHAREDREGION_MAKE_MAGICSTAMP(0),
+ SHAREDREGION_MAKE_MAGICSTAMP(1)) == true)) {
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (WARN_ON(entry == NULL)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (WARN_ON(id >= sharedregion_module->cfg.num_entries)) {
+ retval = -EINVAL;
+ goto error;
+ }
+
+ region = &(sharedregion_module->regions[id]);
+
+ /* Make sure region does not overlap existing ones */
+ retval = _sharedregion_check_overlap(entry->base, entry->len);
+ if (retval < 0) {
+ printk(KERN_ERR "_sharedregion_set_entry: Entry is overlapping "
+ "existing entry!");
+ goto error;
+ }
+ if (region->entry.is_valid) {
+ /*region entry should be invalid at this point */
+ retval = -EEXIST;
+ printk(KERN_ERR "_sharedregion_set_entry: ntry already exists");
+ goto error;
+ }
+ /* Fail if cache_enable is set and cache_line_size equals 0 */
+ if ((entry->cache_enable) && (entry->cache_line_size == 0)) {
+ /* if cache enabled, cache line size must != 0 */
+ retval = -1;
+ printk(KERN_ERR "_sharedregion_set_entry: If cache enabled, "
+ "cache line size must != 0");
+ goto error;
+ }
+
+ /* needs to be thread safe */
+ retval = mutex_lock_interruptible(sharedregion_module->local_lock);
+ if (retval)
+ goto error;
+ /* set specified region id to entry values */
+ memcpy((void *)&(region->entry), (void *)entry,
+ sizeof(struct sharedregion_entry));
+ mutex_unlock(sharedregion_module->local_lock);
+ return 0;
+
+error:
+ printk(KERN_ERR "_sharedregion_set_entry failed! status = 0x%x",
+ retval);
+ return retval;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/sharedregion_ioctl.c b/drivers/dsp/syslink/multicore_ipc/sharedregion_ioctl.c
new file mode 100644
index 000000000000..646e34a00790
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/sharedregion_ioctl.c
@@ -0,0 +1,479 @@
+/*
+ * sharedregion_ioctl.c
+ *
+ * The sharedregion module is designed to be used in a
+ * multi-processor environment where there are memory regions
+ * that are shared and accessed across different processors
+ *
+ * Copyright (C) 2008-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+
+#include <multiproc.h>
+#include <sharedregion.h>
+#include <sharedregion_ioctl.h>
+#include <platform_mem.h>
+
+/* ioctl interface to the sharedregion_get_config function */
+static int sharedregion_ioctl_get_config(struct sharedregion_cmd_args *cargs)
+{
+ struct sharedregion_config config;
+ s32 status = 0;
+ s32 size;
+
+ sharedregion_get_config(&config);
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct sharedregion_config));
+ if (size)
+ status = -EFAULT;
+
+ cargs->api_status = 0;
+ return status;
+}
+
+
+/* ioctl interface to the sharedregion_setup function */
+static int sharedregion_ioctl_setup(struct sharedregion_cmd_args *cargs)
+{
+ struct sharedregion_config config;
+ struct sharedregion_region region;
+ u16 i;
+ s32 status = 0;
+ s32 size;
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct sharedregion_config));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = sharedregion_setup(&config);
+ if (cargs->api_status < 0)
+ goto exit;
+
+ for (i = 0; i < config.num_entries; i++) {
+ sharedregion_get_region_info(i, &region);
+ if (region.entry.is_valid == true) {
+ /* Convert kernel virtual address to physical
+ * addresses */
+ /*region.entry.base = MemoryOS_translate(
+ (Ptr) region.entry.base,
+ Memory_XltFlags_Virt2Phys);*/
+ region.entry.base = platform_mem_translate(
+ region.entry.base,
+ PLATFORM_MEM_XLT_FLAGS_VIRT2PHYS);
+ if (region.entry.base == NULL) {
+ printk(KERN_ERR "sharedregion_ioctl_setup: "
+ "failed to translate region virtual "
+ "address.\n");
+ status = -ENOMEM;
+ goto exit;
+ }
+ size = copy_to_user((void *)
+ &(cargs->args.setup.regions[i]),
+ &region,
+ sizeof(struct sharedregion_region));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+ }
+ }
+
+exit:
+ return status;
+}
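+
+/* Note on the address handoff above: region bases are kernel virtual
+ * addresses inside the module, which user space cannot dereference, so each
+ * valid base is translated to its physical address before being copied out.
+ * The user-side library is then presumably expected to mmap() the physical
+ * range and perform the reverse translation; that counterpart is an
+ * assumption, as it is not part of this file. */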
+
+/* ioctl interface to the sharedregion_destroy function */
+static int sharedregion_ioctl_destroy(
+ struct sharedregion_cmd_args *cargs)
+{
+ cargs->api_status = sharedregion_destroy();
+ return 0;
+}
+
+/* ioctl interface to the sharedregion_start function */
+static int sharedregion_ioctl_start(struct sharedregion_cmd_args *cargs)
+{
+ cargs->api_status = sharedregion_start();
+ return 0;
+}
+
+/* ioctl interface to the sharedregion_stop function */
+static int sharedregion_ioctl_stop(struct sharedregion_cmd_args *cargs)
+{
+ cargs->api_status = sharedregion_stop();
+ return 0;
+}
+
+/* ioctl interface to the sharedregion_attach function */
+static int sharedregion_ioctl_attach(struct sharedregion_cmd_args *cargs)
+{
+ cargs->api_status = sharedregion_attach(
+ cargs->args.attach.remote_proc_id);
+ return 0;
+}
+
+/* ioctl interface to the sharedregion_detach function */
+static int sharedregion_ioctl_detach(struct sharedregion_cmd_args *cargs)
+{
+ cargs->api_status = sharedregion_detach(
+ cargs->args.detach.remote_proc_id);
+ return 0;
+}
+
+/* ioctl interface to the sharedregion_get_heap function */
+static int sharedregion_ioctl_get_heap(struct sharedregion_cmd_args *cargs)
+{
+ struct heap_object *heap_handle = NULL;
+ s32 status = 0;
+
+ heap_handle = (struct heap_object *) sharedregion_get_heap(
+ cargs->args.get_heap.id);
+ if (heap_handle != NULL)
+ cargs->api_status = 0;
+ else {
+ printk(KERN_ERR "sharedregion_ioctl_get_heap failed: "
+			"heap_handle is NULL!\n");
+ cargs->api_status = -1;
+ }
+
+ cargs->args.get_heap.heap_handle = heap_handle;
+
+ return status;
+}
+
+/* ioctl interface to the sharedregion_clear_entry function */
+static int sharedregion_ioctl_clear_entry(struct sharedregion_cmd_args *cargs)
+{
+ cargs->api_status = sharedregion_clear_entry(
+ cargs->args.clear_entry.id);
+ return 0;
+}
+
+/* ioctl interface to the sharedregion_set_entry function */
+static int sharedregion_ioctl_set_entry(struct sharedregion_cmd_args *cargs)
+{
+ struct sharedregion_entry entry;
+ s32 status = 0;
+
+ entry = cargs->args.set_entry.entry;
+ /* entry.base = Memory_translate ((Ptr)cargs->args.setEntry.entry.base,
+ Memory_XltFlags_Phys2Virt); */
+ entry.base = platform_mem_translate(
+ (void *)cargs->args.set_entry.entry.base,
+ PLATFORM_MEM_XLT_FLAGS_PHYS2VIRT);
+ if (entry.base == NULL) {
+		printk(KERN_ERR "sharedregion_ioctl_set_entry: failed to "
+			"translate region base address.\n");
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ cargs->api_status = sharedregion_set_entry(cargs->args.set_entry.id,
+ &entry);
+
+exit:
+ return status;
+}
+
+/* ioctl interface to the sharedregion_reserve_memory function */
+static int sharedregion_ioctl_reserve_memory
+ (struct sharedregion_cmd_args *cargs)
+{
+ /* Ignore the return value. */
+ sharedregion_reserve_memory(cargs->args.reserve_memory.id,
+ cargs->args.reserve_memory.size);
+ cargs->api_status = 0;
+ return 0;
+}
+
+/* ioctl interface to the sharedregion_clear_reserved_memory function */
+static int sharedregion_ioctl_clear_reserved_memory(
+ struct sharedregion_cmd_args *cargs)
+{
+ /* Ignore the return value. */
+ sharedregion_clear_reserved_memory();
+ cargs->api_status = 0;
+ return 0;
+}
+
+/* ioctl interface to the sharedregion_get_region_info function */
+static int sharedregion_ioctl_get_region_info(
+ struct sharedregion_cmd_args *cargs)
+{
+ struct sharedregion_config config;
+ struct sharedregion_region region;
+ u16 i;
+ s32 status = 0;
+ s32 size;
+
+ sharedregion_get_config(&config);
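+	/* Note: the region table below is returned through
+	 * args.setup.regions; this relies on the setup and get-region-info
+	 * members of the cmd-args union sharing the same layout. */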
+ for (i = 0; i < config.num_entries; i++) {
+ sharedregion_get_region_info(i, &region);
+ if (region.entry.is_valid == true) {
+ /* Convert the kernel virtual address to physical
+ * addresses */
+ /*region.entry.base = MemoryOS_translate (
+ (Ptr)region.entry.base,
+ Memory_XltFlags_Virt2Phys);*/
+ region.entry.base = platform_mem_translate(
+ region.entry.base,
+ PLATFORM_MEM_XLT_FLAGS_VIRT2PHYS);
+ if (region.entry.base == NULL) {
+ printk(KERN_ERR
+ "sharedregion_ioctl_get_region_info: "
+ "failed to translate region virtual "
+ "address.\n");
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ size = copy_to_user(&(cargs->args.setup.regions[i]),
+ &region,
+ sizeof(struct sharedregion_region));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+ } else {
+ region.entry.base = NULL;
+ size = copy_to_user(&(cargs->args.setup.regions[i]),
+ &region,
+ sizeof(struct sharedregion_region));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+ }
+ }
+ cargs->api_status = 0;
+
+exit:
+ if (status < 0)
+ cargs->api_status = -1;
+ return status;
+}
+
+#if 0
+/*
+ * ======== sharedregion_ioctl_add ========
+ * Purpose:
+ *  ioctl interface to the sharedregion_add function
+ */
+static int sharedregion_ioctl_add(struct sharedregion_cmd_args *cargs)
+{
+ u32 base = (u32)platform_mem_translate(cargs->args.add.base,
+ PLATFORM_MEM_XLT_FLAGS_PHYS2VIRT);
+ cargs->api_status = sharedregion_add(cargs->args.add.index,
+ (void *)base, cargs->args.add.len);
+ return 0;
+}
+
+/*
+ * ======== sharedregion_ioctl_get_index ========
+ * Purpose:
+ *  ioctl interface to the sharedregion_get_index function
+ */
+static int sharedregion_ioctl_get_index(struct sharedregion_cmd_args *cargs)
+{
+ s32 index = 0;
+
+ index = sharedregion_get_index(cargs->args.get_index.addr);
+ cargs->args.get_index.index = index;
+ cargs->api_status = 0;
+ return 0;
+}
+
+/*
+ * ======== sharedregion_ioctl_get_ptr ========
+ * Purpose:
+ *  ioctl interface to the sharedregion_get_ptr function
+ */
+static int sharedregion_ioctl_get_ptr(struct sharedregion_cmd_args *cargs)
+{
+ void *addr = NULL;
+
+ addr = sharedregion_get_ptr(cargs->args.get_ptr.srptr);
+	/* The return value from the module is not checked; it is the user's
+	 * responsibility to pass a proper value to the application.
+	 */
+ cargs->args.get_ptr.addr = addr;
+ cargs->api_status = 0;
+ return 0;
+}
+
+/*
+ * ======== sharedregion_ioctl_get_srptr ========
+ * Purpose:
+ *  ioctl interface to the sharedregion_get_srptr function
+ */
+static int sharedregion_ioctl_get_srptr(struct sharedregion_cmd_args *cargs)
+{
+ u32 *srptr = NULL;
+
+ srptr = sharedregion_get_srptr(cargs->args.get_srptr.addr,
+ cargs->args.get_srptr.index);
+	/* The return value from the module is not checked; it is the user's
+	 * responsibility to pass a proper value to the application.
+	 */
+ cargs->args.get_srptr.srptr = srptr;
+ cargs->api_status = 0;
+ return 0;
+}
+
+/*
+ * ======== sharedregion_ioctl_remove ========
+ * Purpose:
+ *  ioctl interface to the sharedregion_remove function
+ */
+static int sharedregion_ioctl_remove(struct sharedregion_cmd_args *cargs)
+{
+ cargs->api_status = sharedregion_remove(cargs->args.remove.index);
+ return 0;
+}
+
+/*
+ * ======== sharedregion_ioctl_set_table_info ========
+ * Purpose:
+ *  ioctl interface to the sharedregion_set_table_info function
+ */
+static int sharedregion_ioctl_set_table_info(
+ struct sharedregion_cmd_args *cargs)
+{
+ struct sharedregion_info info;
+ s32 status = 0;
+ s32 size;
+
+ size = copy_from_user(&info, cargs->args.set_table_info.info,
+ sizeof(struct sharedregion_info));
+ if (size) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = sharedregion_set_table_info(
+ cargs->args.set_table_info.index,
+ cargs->args.set_table_info.proc_id, &info);
+
+exit:
+ return status;
+}
+#endif
+
+/* ioctl dispatcher for the sharedregion module */
+int sharedregion_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ s32 os_status = 0;
+ s32 size = 0;
+ struct sharedregion_cmd_args __user *uarg =
+ (struct sharedregion_cmd_args __user *)args;
+ struct sharedregion_cmd_args cargs;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ os_status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ os_status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+ if (os_status) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg,
+ sizeof(struct sharedregion_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_SHAREDREGION_GETCONFIG:
+ os_status = sharedregion_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_SETUP:
+ os_status = sharedregion_ioctl_setup(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_DESTROY:
+ os_status = sharedregion_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_START:
+ os_status = sharedregion_ioctl_start(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_STOP:
+ os_status = sharedregion_ioctl_stop(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_ATTACH:
+ os_status = sharedregion_ioctl_attach(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_DETACH:
+ os_status = sharedregion_ioctl_detach(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_GETHEAP:
+ os_status = sharedregion_ioctl_get_heap(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_CLEARENTRY:
+ os_status = sharedregion_ioctl_clear_entry(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_SETENTRY:
+ os_status = sharedregion_ioctl_set_entry(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_RESERVEMEMORY:
+ os_status = sharedregion_ioctl_reserve_memory(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_CLEARRESERVEDMEMORY:
+ os_status = sharedregion_ioctl_clear_reserved_memory(&cargs);
+ break;
+
+ case CMD_SHAREDREGION_GETREGIONINFO:
+ os_status = sharedregion_ioctl_get_region_info(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ os_status = -ENOTTY;
+ break;
+ }
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs, sizeof(struct sharedregion_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ if (os_status < 0) {
+ printk(KERN_ERR "sharedregion_ioctl failed! status = 0x%x",
+ os_status);
+ }
+ return os_status;
+}
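+
+/* A minimal user-space sketch of the command-argument convention used by
+ * this dispatcher: the caller fills a struct sharedregion_cmd_args, passes
+ * its address as the ioctl argument, and inspects api_status on return. The
+ * device node name and open flags are assumptions for illustration only.
+ *
+ *	struct sharedregion_cmd_args cargs;
+ *	struct sharedregion_config config;
+ *	int fd = open("/dev/syslink_ipc", O_RDWR);	// assumed node name
+ *
+ *	cargs.args.get_config.config = &config;
+ *	if (ioctl(fd, CMD_SHAREDREGION_GETCONFIG, &cargs) == 0 &&
+ *			cargs.api_status == 0) {
+ *		// config now holds the current module configuration
+ *	}
+ *	close(fd);
+ */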
diff --git a/drivers/dsp/syslink/multicore_ipc/sysipc_ioctl.c b/drivers/dsp/syslink/multicore_ipc/sysipc_ioctl.c
new file mode 100644
index 000000000000..9fa1af213b7d
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/sysipc_ioctl.c
@@ -0,0 +1,207 @@
+/*
+ * sysipc_ioctl.c
+ *
+ * This file implements all the ioctl operations required on the ipc
+ * module.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Linux headers */
+#include <linux/uaccess.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+/* Module Headers */
+#include <ipc.h>
+#include <sysipc_ioctl.h>
+/*#include <platform.h>*/
+
+
+/*
+ * ioctl interface to ipc_setup function
+ */
+static inline int sysipc_ioctl_setup(struct sysipc_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct ipc_config config;
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct ipc_config));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = ipc_setup(&config);
+
+exit:
+ return retval;
+}
+
+/*
+ * ioctl interface to ipc_control function
+ */
+static inline int sysipc_ioctl_control(struct sysipc_cmd_args *cargs)
+{
+ cargs->api_status = ipc_control(cargs->args.control.proc_id,
+ cargs->args.control.cmd_id,
+ cargs->args.control.arg);
+ return 0;
+}
+
+/*
+ * ioctl interface to ipc_read_config function
+ */
+static inline int sysipc_ioctl_read_config(struct sysipc_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ void *cfg = NULL;
+
+ cfg = kzalloc(cargs->args.read_config.size, GFP_KERNEL);
+ if (cfg == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ cargs->api_status = ipc_read_config(
+ cargs->args.read_config.remote_proc_id,
+ cargs->args.read_config.tag, cfg,
+ cargs->args.read_config.size);
+
+ size = copy_to_user(cargs->args.read_config.cfg, cfg,
+ cargs->args.read_config.size);
+ if (size)
+ retval = -EFAULT;
+
+ kfree(cfg);
+
+exit:
+ return retval;
+}
+
+/*
+ * ioctl interface to ipc_write_config function
+ */
+static inline int sysipc_ioctl_write_config(struct sysipc_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ void *cfg = NULL;
+
+ cfg = kzalloc(cargs->args.write_config.size, GFP_KERNEL);
+ if (cfg == NULL) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ size = copy_from_user(cfg, cargs->args.write_config.cfg,
+ cargs->args.write_config.size);
+	if (size) {
+		retval = -EFAULT;
+		/* Free the staging buffer on this error path as well */
+		kfree(cfg);
+		goto exit;
+	}
+
+ cargs->api_status = ipc_write_config(
+ cargs->args.write_config.remote_proc_id,
+ cargs->args.write_config.tag, cfg,
+ cargs->args.write_config.size);
+
+ kfree(cfg);
+
+exit:
+ return retval;
+}
+
+/*
+ * ioctl interface to the ipc_destroy function
+ */
+static inline int sysipc_ioctl_destroy(struct sysipc_cmd_args *cargs)
+{
+ cargs->api_status = ipc_destroy();
+ return 0;
+}
+
+/*
+ * ioctl interface function for the ipc module
+ */
+int sysipc_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ int os_status = 0;
+ struct sysipc_cmd_args __user *uarg =
+ (struct sysipc_cmd_args __user *)args;
+ struct sysipc_cmd_args cargs;
+ unsigned long size;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ os_status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ os_status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+ if (os_status) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg, sizeof(struct sysipc_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_IPC_SETUP:
+ os_status = sysipc_ioctl_setup(&cargs);
+ break;
+
+ case CMD_IPC_DESTROY:
+ os_status = sysipc_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_IPC_CONTROL:
+ os_status = sysipc_ioctl_control(&cargs);
+ break;
+
+ case CMD_IPC_READCONFIG:
+ os_status = sysipc_ioctl_read_config(&cargs);
+ break;
+
+ case CMD_IPC_WRITECONFIG:
+ os_status = sysipc_ioctl_write_config(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ os_status = -ENOTTY;
+ break;
+ }
+ if (os_status < 0)
+ goto exit;
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs, sizeof(struct sysipc_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ return os_status;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/sysmemmgr.c b/drivers/dsp/syslink/multicore_ipc/sysmemmgr.c
new file mode 100644
index 000000000000..42c64e3d8972
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/sysmemmgr.c
@@ -0,0 +1,459 @@
+/*
+ * sysmemmgr.c
+ *
+ * Manager for the slave system memory. Slave system-level memory is allocated
+ * through this module.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+
+/* Standard headers */
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+/* Utils headers */
+#include <linux/vmalloc.h>
+#include <syslink/atomic_linux.h>
+#include <syslink/platform_mem.h>
+/*#include <GateMutex.h>
+#include <Memory.h>
+#include <Trace.h>*/
+
+
+/* Module level headers */
+#include <sysmemmgr.h>
+/*#include <BuddyPageAllocator.h>*/
+
+
+/* =============================================================================
+ * Macros
+ * =============================================================================
+ */
+/*! @brief Event reserved for System memory manager */
+#define SYSMEMMGR_EVENTNO 12
+
+/* Macro to form the module magic stamp carried in ref_count */
+#define SYSMEMMGR_MAKE_MAGICSTAMP(x) ((SYSMEMMGR_MODULEID << 12) | (x))
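+/* For example, assuming SYSMEMMGR_MODULEID were 0x0053 (the value here is
+ * illustrative only), SYSMEMMGR_MAKE_MAGICSTAMP(0) would be 0x53000 and
+ * SYSMEMMGR_MAKE_MAGICSTAMP(1) would be 0x53001: the module ID sits above
+ * bit 11, so the low bits still behave as a plain reference count while the
+ * upper bits guard against use of an uninitialized ref_count. */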
+
+/* =============================================================================
+ * Structs & Enums
+ * =============================================================================
+ */
+/*! @brief Structure containing list of buffers. The list is kept sorted by
+ * address. */
+struct sysmemmgr_static_mem_struct {
+ struct sysmemmgr_static_mem_struct *next;
+ /*!< Pointer to next entry */
+ u32 address;
+ /*!< Address of this entry */
+ u32 size;
+ /*!< Size of this entry */
+};
+
+
+/*! @brief Static memory manager object. */
+struct sysmemmgr_static_mem_mgr_obj {
+	struct sysmemmgr_static_mem_struct head;
+	/*!< Head (sentinel) entry */
+	struct sysmemmgr_static_mem_struct tail;
+	/*!< Tail (sentinel) entry */
+};
+
+/*!
+ * @brief Structure defining state object of system memory manager.
+ */
+struct sysmemmgr_module_object {
+ atomic_t ref_count;
+ /*!< Reference count */
+ struct sysmemmgr_static_mem_mgr_obj static_mem_obj;
+ /*!< Static memory manager object */
+ struct mutex *gate_handle;
+ /*!< Pointer to lock */
+ struct sysmemmgr_config cfg;
+ /*!< Current configuration values */
+ struct sysmemmgr_config default_cfg;
+ /*!< Default configuration values */
+};
+
+
+/*!
+ * @brief Object containing state of the system memory manager.
+ */
+static struct sysmemmgr_module_object sysmemmgr_state = {
+ .default_cfg.sizeof_valloc = 0x100000,
+ .default_cfg.sizeof_palloc = 0x100000,
+ .default_cfg.page_size = 0x1000,
+ .default_cfg.event_no = SYSMEMMGR_EVENTNO,
+};
+
+
+/* =============================================================================
+ * APIS
+ * =============================================================================
+ */
+/*
+ * ======== sysmemmgr_get_config ========
+ * Purpose:
+ * Function to get the default values for configuration.
+ */
+void sysmemmgr_get_config(struct sysmemmgr_config *config)
+{
+ if (WARN_ON(config == NULL))
+ goto err_exit;
+
+ if (atomic_cmpmask_and_lt(&(sysmemmgr_state.ref_count),
+ SYSMEMMGR_MAKE_MAGICSTAMP(0),
+ SYSMEMMGR_MAKE_MAGICSTAMP(1)) == true)
+ memcpy((void *) config, (void *)(&sysmemmgr_state.default_cfg),
+ sizeof(struct sysmemmgr_config));
+ else
+ memcpy((void *) config, (void *)(&sysmemmgr_state.cfg),
+ sizeof(struct sysmemmgr_config));
+
+ return;
+
+err_exit:
+ printk(KERN_ERR "sysmemmgr_get_config: Argument of type "
+ "(struct sysmemmgr_config *) passed is NULL\n");
+ return;
+}
+
+
+/*
+ * ======== sysmemmgr_setup ========
+ * Purpose:
+ *	Function to set up the sysmemmgr module.
+ */
+int sysmemmgr_setup(struct sysmemmgr_config *config)
+{
+ int status = 0;
+ struct sysmemmgr_static_mem_mgr_obj *smmObj = NULL;
+
+	/* Initialize the ref_count if it has not been initialized yet; the
+	 * upper 16 bits carry the module ID to guard against use of an
+	 * uninitialized ref_count variable.
+	 */
+ atomic_cmpmask_and_set(&sysmemmgr_state.ref_count,
+ SYSMEMMGR_MAKE_MAGICSTAMP(0), SYSMEMMGR_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&sysmemmgr_state.ref_count) != \
+ SYSMEMMGR_MAKE_MAGICSTAMP(1)) {
+ status = SYSMEMMGR_S_ALREADYSETUP;
+ goto exit;
+ }
+
+ if (WARN_ON(config == NULL)) {
+ /* Config parameters are not provided */
+ status = -EINVAL;
+ goto err_config;
+ }
+ if (WARN_ON((config->static_virt_base_addr == (u32) NULL)
+ && (config->static_mem_size != 0))) {
+ /* Virtual Base address of static memory region is NULL */
+ status = -EINVAL;
+ goto err_virt_addr;
+ }
+ if (WARN_ON((config->static_phys_base_addr == (u32) NULL)
+ && (config->static_mem_size != 0))) {
+ /*Physical Base address of static memory region is NULL */
+ status = -EINVAL;
+ goto err_phys_addr;
+ }
+
+ /* Copy the config parameters to the module state */
+ memcpy((void *)(&sysmemmgr_state.cfg), (void *) config,
+ sizeof(struct sysmemmgr_config));
+
+ /* Create the static memory allocator */
+ if (config->static_mem_size != 0) {
+ smmObj = &sysmemmgr_state.static_mem_obj;
+ smmObj->head.address = config->static_virt_base_addr;
+ smmObj->head.size = 0;
+ smmObj->tail.address = (config->static_virt_base_addr + \
+ config->static_mem_size);
+ smmObj->tail.size = 0;
+ smmObj->head.next = &smmObj->tail;
+ smmObj->tail.next = NULL;
+ }
+
+ /* Create the lock */
+ sysmemmgr_state.gate_handle = kzalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (sysmemmgr_state.gate_handle == NULL) {
+ /* Failed to create gate handle */
+ status = -ENOMEM;
+ goto err_mem_gate;
+ }
+ return 0;
+
+err_mem_gate:
+ printk(KERN_ERR "sysmemmgr_setup: Failed to create gate handle\n");
+ goto exit;
+
+err_phys_addr:
+ printk(KERN_ERR "sysmemmgr_setup: Physical Base address of static "
+ "memory region is NULL\n");
+ goto exit;
+
+err_virt_addr:
+ printk(KERN_ERR "sysmemmgr_setup: Virtual Base address of static "
+ "memory region is NULL\n");
+ goto exit;
+
+err_config:
+ printk(KERN_ERR "sysmemmgr_setup: Argument of type "
+ "(struct sysmemmgr_config *) passed is NULL\n");
+ goto exit;
+
+exit:
+ if (status < 0) {
+ atomic_set(&sysmemmgr_state.ref_count,
+ SYSMEMMGR_MAKE_MAGICSTAMP(0));
+ }
+ return status;
+}
+
+
+/*
+ * ======== sysmemmgr_destroy ========
+ * Purpose:
+ * Function to finalize the system memory manager module.
+ */
+int sysmemmgr_destroy(void)
+{
+ int status = 0;
+
+ if (atomic_cmpmask_and_lt(&(sysmemmgr_state.ref_count),
+ SYSMEMMGR_MAKE_MAGICSTAMP(0), SYSMEMMGR_MAKE_MAGICSTAMP(1)) == \
+ true) {
+ /*! @retval SYSMEMMGR_E_INVALIDSTATE Module was not
+ * initialized */
+ status = SYSMEMMGR_E_INVALIDSTATE;
+ goto err_exit;
+ }
+
+ if (atomic_dec_return(&sysmemmgr_state.ref_count) == \
+ SYSMEMMGR_MAKE_MAGICSTAMP(0)) {
+ /* Delete the lock */
+ kfree(sysmemmgr_state.gate_handle);
+ }
+ return 0;
+
+err_exit:
+	printk(KERN_ERR "sysmemmgr_destroy: Module was not initialized\n");
+ return status;
+}
+
+
+/*
+ * ======== sysmemmgr_alloc ========
+ * Purpose:
+ * Function to allocate a memory block.
+ */
+void *sysmemmgr_alloc(u32 size, enum sysmemmgr_allocflag flag)
+{
+ int status = 0;
+ struct sysmemmgr_static_mem_mgr_obj *smObj = NULL;
+ struct sysmemmgr_static_mem_struct *ptr = NULL;
+ struct sysmemmgr_static_mem_struct *newptr = NULL;
+ void *ret_ptr = NULL;
+
+ if (atomic_cmpmask_and_lt(&(sysmemmgr_state.ref_count),
+ SYSMEMMGR_MAKE_MAGICSTAMP(0), SYSMEMMGR_MAKE_MAGICSTAMP(1)) == \
+ true) {
+ /*! @retval SYSMEMMGR_E_INVALIDSTATE Module was not
+ * initialized */
+ status = SYSMEMMGR_E_INVALIDSTATE;
+ goto err_exit;
+ }
+
+ if ((flag & sysmemmgr_allocflag_physical) && \
+ !(flag & sysmemmgr_allocflag_dma)) {
+ /* TBD: works with DMM
+ ret_ptr = platform_mem_alloc (size, 0,
+ MemoryOS_MemTypeFlags_Physical); */
+ if (ret_ptr == NULL) {
+ if (sysmemmgr_state.cfg.static_mem_size == 0) {
+ /* Memory pool is not configured. */
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ smObj = &sysmemmgr_state.static_mem_obj;
+ ptr = &smObj->head;
+ while (ptr && ptr->next) {
+ if (((ptr->next->address - \
+ (ptr->address + ptr->size)) >= size))
+ break;
+ ptr = ptr->next;
+ }
+
+ if (ptr->next == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ newptr = vmalloc(
+ sizeof(struct sysmemmgr_static_mem_struct));
+ if (newptr != NULL) {
+ newptr->address = ptr->address + ptr->size;
+ newptr->size = size;
+ newptr->next = ptr->next;
+ ptr->next = newptr;
+ ret_ptr = (void *) newptr->address;
+ } else {
+ status = -ENOMEM;
+ }
+ }
+ goto exit;
+ }
+
+ if (flag & sysmemmgr_allocflag_physical) {
+ ret_ptr = kmalloc(size, GFP_KERNEL);
+ if (ret_ptr == NULL)
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ if (flag & sysmemmgr_allocflag_dma) {
+ ret_ptr = kmalloc(size, GFP_KERNEL | GFP_DMA);
+ if (ret_ptr == NULL)
+ status = -ENOMEM;
+ goto exit;
+ }
+
+	ret_ptr = vmalloc(size);
+	if (ret_ptr == NULL)
+		status = -ENOMEM;
+	/* Both success and failure must skip the uninitialized-module
+	 * message below. */
+	goto exit;
+
+err_exit:
+	printk(KERN_ERR "sysmemmgr_alloc: Module was not initialized\n");
+exit:
+ if (WARN_ON(ret_ptr == NULL))
+ printk(KERN_ERR "sysmemmgr_alloc: Allocation failed\n");
+ return ret_ptr;
+}
+
+
+/*
+ * ======== sysmemmgr_free ========
+ * Purpose:
+ * Function to de-allocate a previous allocated memory block.
+ */
+int sysmemmgr_free(void *blk, u32 size, enum sysmemmgr_allocflag flag)
+{
+ int status = 0;
+ struct sysmemmgr_static_mem_mgr_obj *smObj = NULL;
+ struct sysmemmgr_static_mem_struct *ptr = NULL;
+ struct sysmemmgr_static_mem_struct *prev = NULL;
+
+ if (atomic_cmpmask_and_lt(&(sysmemmgr_state.ref_count),
+ SYSMEMMGR_MAKE_MAGICSTAMP(0), SYSMEMMGR_MAKE_MAGICSTAMP(1)) == \
+ true) {
+ /*! @retval SYSMEMMGR_E_INVALIDSTATE Module was not
+ * initialized */
+ status = SYSMEMMGR_E_INVALIDSTATE;
+ goto err_exit;
+ }
+
+ if ((flag & sysmemmgr_allocflag_physical) && \
+ !(flag & sysmemmgr_allocflag_dma)) {
+ if (((u32) blk >= sysmemmgr_state.cfg.static_virt_base_addr)
+ && ((u32) blk < \
+ (sysmemmgr_state.cfg.static_virt_base_addr + \
+ sysmemmgr_state.cfg.static_mem_size))) {
+ smObj = &sysmemmgr_state.static_mem_obj;
+ ptr = &smObj->head;
+ while (ptr && ptr->next) {
+ if (ptr->next->address == (u32) blk)
+ break;
+ ptr = ptr->next;
+ }
+ prev = ptr;
+ ptr = ptr->next;
+ prev->next = ptr->next;
+
+ /* Free the node */
+ vfree(ptr);
+ } else {
+ kfree(blk);
+ }
+ } else if (flag & sysmemmgr_allocflag_physical) {
+ kfree(blk);
+ } else if (flag & sysmemmgr_allocflag_dma) {
+ kfree(blk);
+ } else {
+ vfree(blk);
+ }
+ return 0;
+
+err_exit:
+	printk(KERN_ERR "sysmemmgr_free: Module was not initialized\n");
+ return status;
+}
+
+
+/*
+ * ======== sysmemmgr_translate ========
+ * Purpose:
+ * Function to translate an address among different address spaces.
+ */
+void *sysmemmgr_translate(void *src_addr, enum sysmemmgr_xltflag flags)
+{
+ void *ret_ptr = NULL;
+
+ switch (flags) {
+ case sysmemmgr_xltflag_kvirt2phys:
+ {
+ if (((u32) src_addr >= \
+ sysmemmgr_state.cfg.static_virt_base_addr) && \
+ ((u32) src_addr < \
+ (sysmemmgr_state.cfg.static_virt_base_addr + \
+ sysmemmgr_state.cfg.static_mem_size))) {
+ ret_ptr = (void *)(((u32) src_addr - \
+ sysmemmgr_state.cfg.static_virt_base_addr) + \
+ (sysmemmgr_state.cfg.static_phys_base_addr));
+ } else {
+ ret_ptr = platform_mem_translate(src_addr,
+ PLATFORM_MEM_XLT_FLAGS_VIRT2PHYS);
+ }
+ }
+ break;
+
+ case sysmemmgr_xltflag_phys2kvirt:
+ {
+ if (((u32) src_addr >= \
+ sysmemmgr_state.cfg.static_phys_base_addr) && \
+ ((u32) src_addr < \
+ (sysmemmgr_state.cfg.static_phys_base_addr + \
+ sysmemmgr_state.cfg.static_mem_size))) {
+ ret_ptr = (void *)(((u32) src_addr - \
+ sysmemmgr_state.cfg.static_phys_base_addr) + \
+ (sysmemmgr_state.cfg.static_virt_base_addr));
+ } else {
+ ret_ptr = platform_mem_translate(src_addr,
+ PLATFORM_MEM_XLT_FLAGS_PHYS2VIRT);
+ }
+ }
+ break;
+
+ default:
+ {
+ printk(KERN_ALERT "sysmemmgr_translate: Unhandled translation "
+ "flag\n");
+ }
+ break;
+ }
+
+ return ret_ptr;
+}
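+
+/* A minimal kernel-side usage sketch of this module, assuming a caller that
+ * only needs a DMA-capable buffer and uses the default configuration (the
+ * size below is illustrative):
+ *
+ *	struct sysmemmgr_config cfg;
+ *	void *buf;
+ *
+ *	sysmemmgr_get_config(&cfg);
+ *	if (sysmemmgr_setup(&cfg) >= 0) {
+ *		buf = sysmemmgr_alloc(0x1000, sysmemmgr_allocflag_dma);
+ *		if (buf != NULL)
+ *			sysmemmgr_free(buf, 0x1000, sysmemmgr_allocflag_dma);
+ *		sysmemmgr_destroy();
+ *	}
+ */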
diff --git a/drivers/dsp/syslink/multicore_ipc/sysmemmgr_ioctl.c b/drivers/dsp/syslink/multicore_ipc/sysmemmgr_ioctl.c
new file mode 100644
index 000000000000..591e04849873
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/sysmemmgr_ioctl.c
@@ -0,0 +1,227 @@
+/*
+ * sysmemmgr_ioctl.c
+ *
+ * This file implements all the ioctl operations required on the sysmemmgr
+ * module.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Linux headers */
+#include <linux/uaccess.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+
+/* Module Headers */
+#include <sysmemmgr.h>
+#include <sysmemmgr_ioctl.h>
+
+
+/*
+ * ======== sysmemmgr_ioctl_get_config ========
+ * Purpose:
+ *	ioctl interface to the sysmemmgr_get_config function
+ */
+static inline int sysmemmgr_ioctl_get_config(struct sysmemmgr_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct sysmemmgr_config config;
+
+ sysmemmgr_get_config(&config);
+ size = copy_to_user(cargs->args.get_config.config, &config,
+ sizeof(struct sysmemmgr_config));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = 0;
+exit:
+ return retval;
+}
+
+/*
+ * ======== sysmemmgr_ioctl_setup ========
+ * Purpose:
+ *	ioctl interface to the sysmemmgr_setup function
+ */
+static inline int sysmemmgr_ioctl_setup(struct sysmemmgr_cmd_args *cargs)
+{
+ s32 retval = 0;
+ unsigned long size;
+ struct sysmemmgr_config config;
+
+ if (cargs->args.setup.config == NULL) {
+ cargs->api_status = sysmemmgr_setup(NULL);
+ goto exit;
+ }
+
+ size = copy_from_user(&config, cargs->args.setup.config,
+ sizeof(struct sysmemmgr_config));
+ if (size) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ cargs->api_status = sysmemmgr_setup(&config);
+
+exit:
+ return retval;
+}
+
+/*
+ * ======== sysmemmgr_ioctl_destroy ========
+ * Purpose:
+ *	ioctl interface to the sysmemmgr_destroy function
+ */
+static inline int sysmemmgr_ioctl_destroy(struct sysmemmgr_cmd_args *cargs)
+{
+ cargs->api_status = sysmemmgr_destroy();
+ return 0;
+}
+
+/*
+ * ======== sysmemmgr_ioctl_alloc ========
+ * Purpose:
+ *	ioctl interface to the sysmemmgr_alloc function
+ */
+static inline int sysmemmgr_ioctl_alloc(struct sysmemmgr_cmd_args *cargs)
+{
+ void *kbuf = NULL;
+ void *phys = NULL;
+
+ kbuf = sysmemmgr_alloc(cargs->args.alloc.size,
+ cargs->args.alloc.flags);
+ if (unlikely(kbuf == NULL))
+ goto exit;
+
+ /* If the flag is not virtually contiguous */
+ if (cargs->args.alloc.flags != sysmemmgr_allocflag_virtual)
+ phys = sysmemmgr_translate(kbuf, sysmemmgr_xltflag_kvirt2phys);
+ cargs->api_status = 0;
+
+exit:
+	cargs->args.alloc.kbuf = kbuf;
+	/* Return the physical address through its own field so that it does
+	 * not overwrite the virtual address stored above; the field name
+	 * 'phys' is assumed from the alloc argument structure. */
+	cargs->args.alloc.phys = phys;
+ return 0;
+}
+
+/*
+ * ======== sysmemmgr_ioctl_free ========
+ * Purpose:
+ *	ioctl interface to the sysmemmgr_free function
+ */
+static inline int sysmemmgr_ioctl_free(struct sysmemmgr_cmd_args *cargs)
+{
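+	/* Note: the flags below are read from the alloc args rather than the
+	 * free args; this appears to rely on the corresponding members
+	 * overlaying each other in the cmd-args union. */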
+ cargs->api_status = sysmemmgr_free(cargs->args.free.kbuf,
+ cargs->args.free.size,
+ cargs->args.alloc.flags);
+ return 0;
+}
+
+/*
+ * ======== sysmemmgr_ioctl_translate ========
+ * Purpose:
+ *	ioctl interface to the sysmemmgr_translate function
+ */
+static inline int sysmemmgr_ioctl_translate(struct sysmemmgr_cmd_args *cargs)
+{
+ cargs->args.translate.ret_ptr = sysmemmgr_translate(
+ cargs->args.translate.buf,
+ cargs->args.translate.flags);
+ WARN_ON(cargs->args.translate.ret_ptr == NULL);
+ cargs->api_status = 0;
+ return 0;
+}
+
+/*
+ * ======== sysmemmgr_ioctl ========
+ * Purpose:
+ * ioctl interface function for sysmemmgr module
+ */
+int sysmemmgr_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ int os_status = 0;
+ struct sysmemmgr_cmd_args __user *uarg =
+ (struct sysmemmgr_cmd_args __user *)args;
+ struct sysmemmgr_cmd_args cargs;
+ unsigned long size;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ os_status = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ os_status = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+ if (os_status) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ /* Copy the full args from user-side */
+ size = copy_from_user(&cargs, uarg, sizeof(struct sysmemmgr_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+ switch (cmd) {
+ case CMD_SYSMEMMGR_GETCONFIG:
+ os_status = sysmemmgr_ioctl_get_config(&cargs);
+ break;
+
+ case CMD_SYSMEMMGR_SETUP:
+ os_status = sysmemmgr_ioctl_setup(&cargs);
+ break;
+
+ case CMD_SYSMEMMGR_DESTROY:
+ os_status = sysmemmgr_ioctl_destroy(&cargs);
+ break;
+
+ case CMD_SYSMEMMGR_ALLOC:
+ os_status = sysmemmgr_ioctl_alloc(&cargs);
+ break;
+
+ case CMD_SYSMEMMGR_FREE:
+ os_status = sysmemmgr_ioctl_free(&cargs);
+ break;
+
+ case CMD_SYSMEMMGR_TRANSLATE:
+ os_status = sysmemmgr_ioctl_translate(&cargs);
+ break;
+
+ default:
+ WARN_ON(cmd);
+ os_status = -ENOTTY;
+ break;
+ }
+
+ if ((cargs.api_status == -ERESTARTSYS) || (cargs.api_status == -EINTR))
+ os_status = -ERESTARTSYS;
+
+ if (os_status < 0)
+ goto exit;
+
+ /* Copy the full args to the user-side. */
+ size = copy_to_user(uarg, &cargs, sizeof(struct sysmemmgr_cmd_args));
+ if (size) {
+ os_status = -EFAULT;
+ goto exit;
+ }
+
+exit:
+ return os_status;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/sysmgr.c b/drivers/dsp/syslink/multicore_ipc/sysmgr.c
new file mode 100644
index 000000000000..bbf9b4be4b27
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/sysmgr.c
@@ -0,0 +1,846 @@
+/*
+ * sysmgr.c
+ *
+ * Implementation of System manager.
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+
+/* Standard headers */
+#include <linux/types.h>
+#include <linux/module.h>
+
+#include <syslink/atomic_linux.h>
+
+/* Module headers */
+#include <multiproc.h>
+#include <sysmemmgr.h>
+#include <sysmgr.h>
+#include <_sysmgr.h>
+#include <platform.h>
+#include <platform_mem.h>
+
+#include <gatepeterson.h>
+#include <sharedregion.h>
+#include <listmp.h>
+#include <messageq.h>
+#include <messageq_transportshm.h>
+#include <notify.h>
+/*#include <notify_driver.h>*/
+#include <notify_ducatidriver.h>
+
+#include <nameserver.h>
+#include <nameserver_remote.h>
+#include <nameserver_remotenotify.h>
+#include <procmgr.h>
+#include <heap.h>
+#include <heapbuf.h>
+
+/* =============================================================================
+ * Macros
+ * =============================================================================
+ */
+/*!
+ * @def BOOTLOADPAGESIZE
+ *  @brief  Boot load page size (4 KB).
+ */
+#define BOOTLOADPAGESIZE (0x1000) /* 4K page size */
+
+/*!
+ * @def SYSMGR_ENTRYVALIDITYSTAMP
+ * @brief Validity stamp for boot load page entries.
+ */
+#define SYSMGR_ENTRYVALIDITYSTAMP (0xBABAC0C0)
+
+/*!
+ * @def SYSMGR_ENTRYVALIDSTAMP
+ * @brief Validity stamp for boot load page entries.
+ */
+#define SYSMGR_ENTRYVALIDSTAMP (0xBABAC0C0)
+
+/*!
+ * @def SYSMGR_SCALABILITYHANDSHAKESTAMP
+ * @brief scalability configuration handshake value.
+ */
+#define SYSMGR_SCALABILITYHANDSHAKESTAMP (0xBEEF0000)
+
+/*!
+ * @def SYSMGR_SETUPHANDSHAKESTAMP
+ * @brief Platform configured handshake value.
+ */
+#define SYSMGR_SETUPHANDSHAKESTAMP (0xBEEF0001)
+
+/*!
+ * @def SYSMGR_DESTROYHANDSHAKESTAMP
+ * @brief Destroy handshake value.
+ */
+#define SYSMGR_DESTROYHANDSHAKESTAMP (0xBEEF0002)
+
+/*!
+ * @def SYSMGR_BOOTLOADPAGESIZE
+ * @brief Boot load page size.
+ */
+#define SYSMGR_BOOTLOADPAGESIZE (0x00001000)
+
+/* Macro to make a correct module magic number with ref_count */
+#define SYSMGR_MAKE_MAGICSTAMP(x) ((SYSMGR_MODULEID << 12) | (x))
+
+
+/* =============================================================================
+ * Structures & Enums
+ * =============================================================================
+ */
+/*! @brief structure for System manager boot load page entry */
+struct sysmgr_bootload_page_entry {
+ VOLATILE u32 offset;
+ /* Offset of next entry (-1 if not present) */
+ VOLATILE u32 valid;
+ /* Validity of the entry */
+ VOLATILE u32 size;
+ /* Size of the entry data */
+ VOLATILE u32 cmd_id;
+ /* Command ID */
+};
+
+/*! @brief structure defining the layout of the boot load page */
+struct sysmgr_boot_load_page {
+ VOLATILE struct sysmgr_bootload_page_entry host_config;
+ /* First entry, host specific configuration in the boot load page */
+ u8 padding1[(BOOTLOADPAGESIZE/2) - \
+ sizeof(struct sysmgr_bootload_page_entry)];
+ /* Padding1 */
+ VOLATILE u32 handshake;
+	/* Handshake variable, written by the slave when configuration is done. */
+ VOLATILE struct sysmgr_bootload_page_entry slave_config;
+ /* First entry, slave specific configuration in the boot load page */
+ u8 padding2[(BOOTLOADPAGESIZE/2) - \
+ sizeof(struct sysmgr_bootload_page_entry) - \
+ sizeof(u32)];
+ /* Padding2 */
+};
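+/* With BOOTLOADPAGESIZE of 0x1000, the resulting page layout is:
+ *
+ *	0x000	host_config	first host-side entry, chained via 'offset'
+ *	 ...	padding1
+ *	0x800	handshake	written by the slave
+ *	0x804	slave_config	first slave-side entry, chained via 'offset'
+ *	 ...	padding2
+ *
+ * Each sysmgr_bootload_page_entry is immediately followed by 'size' bytes
+ * of payload, and each 'offset' is relative to the first entry of its half
+ * of the page.
+ */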
+
+/*! @brief structure for System manager module state */
+struct sysmgr_module_object {
+ atomic_t ref_count;
+ /* Reference count */
+ struct sysmgr_config config;
+ /* Overall system configuration */
+ struct sysmgr_boot_load_page *boot_load_page[MULTIPROC_MAXPROCESSORS];
+ /* Boot load page of the slaves */
+ bool platform_mem_init_flag;
+ /* Platform memory manager initialize flag */
+ bool multiproc_init_flag;
+ /* Multiproc Initialize flag */
+ bool gatepeterson_init_flag;
+ /* Gatepeterson Initialize flag */
+ bool sharedregion_init_flag;
+ /* Sharedregion Initialize flag */
+ bool listmp_init_flag;
+ /* Listmp Initialize flag */
+ bool messageq_init_flag;
+ /* Messageq Initialize flag */
+ bool notify_init_flag;
+ /* Notify Initialize flag */
+ bool proc_mgr_init_flag;
+ /* Processor manager Initialize flag */
+ bool heapbuf_init_flag;
+ /* Heapbuf Initialize flag */
+ bool nameserver_init_flag;
+	/* Nameserver Initialize flag */
+ bool listmp_sharedmemory_init_flag;
+ /* Listmp_sharedmemory Initialize flag */
+ bool messageq_transportshm_init_flag;
+ /* Messageq_transportshm Initialize flag */
+ bool notify_ducatidrv_init_flag;
+ /* notify_ducatidrv Initialize flag */
+ bool nameserver_remotenotify_init_flag;
+ /* nameserver_remotenotify Initialize flag */
+ bool platform_init_flag;
+ /* Flag to indicate platform initialization status */
+};
+
+
+/* =============================================================================
+ * Globals
+ * =============================================================================
+ */
+/*!
+ * @var sysmgr_state
+ *
+ * @brief Variable holding state of system manager.
+ */
+static struct sysmgr_module_object sysmgr_state;
+
+
+/* =============================================================================
+ * APIS
+ * =============================================================================
+ */
+/*
+ * ======== sysmgr_get_config ========
+ * Purpose:
+ * Function to get the default values for configuration.
+ */
+void sysmgr_get_config(struct sysmgr_config *config)
+{
+ s32 status = 0;
+
+ if (WARN_ON(config == NULL)) {
+ status = -EINVAL;
+		printk(KERN_ALERT "sysmgr_get_config [0x%x] : Argument of type"
+				" (struct sysmgr_config *) passed is null!\n",
+ status);
+ return;
+ }
+
+	/* Get the multiproc default config */
+ multiproc_get_config(&config->multiproc_cfg);
+
+ /* Get the gatepeterson default config */
+ gatepeterson_get_config(&config->gatepeterson_cfg);
+
+ /* Get the sharedregion default config */
+ sharedregion_get_config(&config->sharedregion_cfg);
+
+ /* Get the messageq default config */
+ messageq_get_config(&config->messageq_cfg);
+
+ /* Get the notify default config */
+ notify_get_config(&config->notify_cfg);
+
+ /* Get the proc_mgr default config */
+ proc_mgr_get_config(&config->proc_mgr_cfg);
+
+ /* Get the heapbuf default config */
+ heapbuf_get_config(&config->heapbuf_cfg);
+
+ /* Get the listmp_sharedmemory default config */
+ listmp_sharedmemory_get_config(&config->listmp_sharedmemory_cfg);
+
+ /* Get the messageq_transportshm default config */
+ messageq_transportshm_get_config(&config->messageq_transportshm_cfg);
+
+ /* Get the notify_ducati driver default config */
+ notify_ducatidrv_getconfig(&config->notify_ducatidrv_cfg);
+
+ /* Get the nameserver_remotenotify default config */
+ nameserver_remotenotify_get_config(
+ &config->nameserver_remotenotify_cfg);
+}
+EXPORT_SYMBOL(sysmgr_get_config);
+
+/*
+ * ======== sysmgr_get_object_config ========
+ * Purpose:
+ * Function to get the SysMgr Object configuration from Slave.
+ */
+u32 sysmgr_get_object_config(u16 proc_id, void *config, u32 cmd_id, u32 size)
+{
+ struct sysmgr_bootload_page_entry *entry = NULL;
+ u32 offset = 0;
+ u32 ret = 0;
+ struct sysmgr_boot_load_page *blp = NULL;
+
+	/* proc_id is unsigned, so only the upper bound needs to be checked */
+	if (proc_id >= MULTIPROC_MAXPROCESSORS) {
+ ret = 0;
+ goto exit;
+ }
+
+ blp = (struct sysmgr_boot_load_page *)
+ sysmgr_state.boot_load_page[proc_id];
+
+ entry = (struct sysmgr_bootload_page_entry *) &blp->slave_config;
+ while (entry->valid == SYSMGR_ENTRYVALIDSTAMP) {
+ if (entry->cmd_id == cmd_id) {
+ if (size == entry->size) {
+ memcpy(config, (void *)((u32)entry + \
+ sizeof(struct sysmgr_bootload_page_entry)),
+ size);
+ ret = size;
+ break;
+ }
+ }
+ if (entry->offset != -1) {
+ offset += entry->offset;
+ entry = (struct sysmgr_bootload_page_entry *)
+ ((u32) &blp->slave_config + entry->offset);
+ } else {
+ break;
+ }
+ }
+
+exit:
+	/* Return the number of bytes read from the boot load page */
+ return ret;
+}
+
+
+/*
+ * ======== sysmgr_put_object_config ========
+ * Purpose:
+ * Function to put the SysMgr Object configuration to Slave.
+ */
+u32 sysmgr_put_object_config(u16 proc_id, void *config, u32 cmd_id, u32 size)
+{
+ struct sysmgr_bootload_page_entry *entry = NULL;
+ struct sysmgr_bootload_page_entry *prev = NULL;
+ u32 offset = 0;
+ struct sysmgr_boot_load_page *blp = NULL;
+
+	if (proc_id >= MULTIPROC_MAXPROCESSORS) {
+ size = 0;
+ goto exit;
+ }
+
+ /* Get the boot load page pointer */
+ blp = sysmgr_state.boot_load_page[proc_id];
+
+ /* Put the entry at the end of list */
+ entry = (struct sysmgr_bootload_page_entry *) &blp->host_config;
+ while (entry->valid == SYSMGR_ENTRYVALIDSTAMP) {
+ prev = entry;
+ if (entry->offset != -1) {
+ offset += entry->offset;
+ entry = (struct sysmgr_bootload_page_entry *)
+ ((u32) &blp->host_config + entry->offset);
+ } else {
+ break;
+ }
+ }
+
+ /* First entry has prev set to NULL */
+ if (prev == NULL) {
+ entry->offset = -1;
+ entry->cmd_id = cmd_id;
+ entry->size = size;
+ memcpy((void *)((u32)entry + \
+ sizeof(struct sysmgr_bootload_page_entry)),
+ config, size);
+ entry->valid = SYSMGR_ENTRYVALIDSTAMP;
+ } else {
+ entry = (struct sysmgr_bootload_page_entry *)((u32)entry + \
+ sizeof(struct sysmgr_bootload_page_entry) + \
+ entry->size);
+ entry->offset = -1;
+ entry->cmd_id = cmd_id;
+ entry->size = size;
+ memcpy((void *)((u32)entry + \
+ sizeof(struct sysmgr_bootload_page_entry)),
+ config, size);
+ entry->valid = SYSMGR_ENTRYVALIDSTAMP;
+
+ /* Attach the new created entry */
+ prev->offset = ((u32) entry - (u32) &blp->host_config);
+ }
+
+exit:
+	/* Return the number of bytes written to the boot load page */
+ return size;
+}
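+
+/* A short usage sketch of the host-side pairing (the command ID and payload
+ * below are illustrative; the matching read is done by the slave runtime):
+ *
+ *	u32 my_cfg[4] = { 1, 2, 3, 4 };
+ *
+ *	// publish a blob for processor 2 under command ID 0x10
+ *	sysmgr_put_object_config(2, my_cfg, 0x10, sizeof(my_cfg));
+ *
+ *	// sysmgr_get_object_config() performs the symmetric lookup, walking
+ *	// the slave_config half of the boot load page by the same command ID
+ */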
+
+
+/*
+ * ======== sysmgr_setup ========
+ * Purpose:
+ * Function to setup the System.
+ */
+s32 sysmgr_setup(const struct sysmgr_config *cfg)
+{
+ s32 status = 0;
+ struct sysmgr_config *config = NULL;
+
+	/* Initialize the ref_count if it has not been initialized yet; the
+	 * upper 16 bits carry the module ID to guard against use of an
+	 * uninitialized ref_count variable.
+	 */
+ atomic_cmpmask_and_set(&sysmgr_state.ref_count,
+ SYSMGR_MAKE_MAGICSTAMP(0),
+ SYSMGR_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&sysmgr_state.ref_count)
+ != SYSMGR_MAKE_MAGICSTAMP(1)) {
+ status = 1;
+ goto exit;
+ }
+
+ if (cfg == NULL) {
+ sysmgr_get_config(&sysmgr_state.config);
+ config = &sysmgr_state.config;
+ } else {
+ memcpy((void *) (&sysmgr_state.config), (void *) cfg,
+ sizeof(struct sysmgr_config));
+ config = (struct sysmgr_config *) cfg;
+ }
+
+ /* Initialize PlatformMem */
+ status = platform_mem_setup();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : platform_mem_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "platform_mem_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.platform_mem_init_flag = true;
+ }
+
+ /* Override the platform specific configuration */
+ platform_override_config(config);
+
+ status = multiproc_setup(&(config->multiproc_cfg));
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : multiproc_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "sysmgr_setup : status [0x%x]\n" , status);
+ sysmgr_state.multiproc_init_flag = true;
+ }
+
+ /* Initialize ProcMgr */
+ if (status >= 0) {
+ status = proc_mgr_setup(&(config->proc_mgr_cfg));
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : proc_mgr_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "proc_mgr_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.proc_mgr_init_flag = true;
+ }
+ }
+
+ /* Initialize SharedRegion */
+ if (status >= 0) {
+ status = sharedregion_setup(&config->sharedregion_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : sharedregion_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "sharedregion_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.sharedregion_init_flag = true;
+ }
+ }
+
+ /* Initialize Notify */
+ if (status >= 0) {
+ status = notify_setup(&config->notify_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : notify_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "notify_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.notify_init_flag = true;
+ }
+ }
+
+ /* Initialize NameServer */
+ if (status >= 0) {
+ status = nameserver_setup();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : nameserver_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "nameserver_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.nameserver_init_flag = true;
+ }
+ }
+
+ /* Initialize GatePeterson */
+ if (status >= 0) {
+ status = gatepeterson_setup(&config->gatepeterson_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : gatepeterson_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "gatepeterson_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.gatepeterson_init_flag = true;
+ }
+ }
+
+	/* Initialize MessageQ */
+ if (status >= 0) {
+ status = messageq_setup(&config->messageq_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : messageq_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "messageq_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.messageq_init_flag = true;
+ }
+ }
+
+	/* Initialize HeapBuf */
+ if (status >= 0) {
+ status = heapbuf_setup(&config->heapbuf_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : heapbuf_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "heapbuf_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.heapbuf_init_flag = true;
+ }
+ }
+
+ /* Initialize ListMPSharedMemory */
+ if (status >= 0) {
+ status = listmp_sharedmemory_setup(
+ &config->listmp_sharedmemory_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : "
+ "listmp_sharedmemory_setup failed [0x%x]\n",
+ status);
+ } else {
+ printk(KERN_ERR "listmp_sharedmemory_setup : "
+ "status [0x%x]\n" , status);
+ sysmgr_state.listmp_sharedmemory_init_flag = true;
+ }
+ }
+
+ /* Initialize MessageQTransportShm */
+ if (status >= 0) {
+ status = messageq_transportshm_setup(
+ &config->messageq_transportshm_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : "
+ "messageq_transportshm_setup failed [0x%x]\n",
+ status);
+ } else {
+ printk(KERN_ERR "messageq_transportshm_setup : "
+ "status [0x%x]\n", status);
+ sysmgr_state.messageq_transportshm_init_flag = true;
+ }
+ }
+
+ /* Initialize Notify DucatiDriver */
+ if (status >= 0) {
+ status = notify_ducatidrv_setup(&config->notify_ducatidrv_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : "
+ "notify_ducatidrv_setup failed [0x%x]\n",
+ status);
+ } else {
+ printk(KERN_ERR "notify_ducatidrv_setup : "
+ "status [0x%x]\n" , status);
+ sysmgr_state.notify_ducatidrv_init_flag = true;
+ }
+ }
+
+ /* Initialize NameServerRemoteNotify */
+ if (status >= 0) {
+ status = nameserver_remotenotify_setup(
+ &config->nameserver_remotenotify_cfg);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : "
+ "nameserver_remotenotify_setup failed [0x%x]\n",
+ status);
+ } else {
+ printk(KERN_ERR "nameserver_remotenotify_setup : "
+ "status [0x%x]\n" , status);
+ sysmgr_state.nameserver_remotenotify_init_flag = true;
+ }
+ }
+
+ if (status >= 0) {
+ /* Call platform setup function */
+ status = platform_setup(config);
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_setup : platform_setup "
+ "failed [0x%x]\n", status);
+ } else {
+ printk(KERN_ERR "platform_setup : status [0x%x]\n" ,
+ status);
+ sysmgr_state.platform_init_flag = true;
+ }
+ }
+
+exit:
+ if (status < 0)
+ atomic_set(&sysmgr_state.ref_count, SYSMGR_MAKE_MAGICSTAMP(0));
+
+ return status;
+}
+EXPORT_SYMBOL(sysmgr_setup);
+
+/*
+ * ======== sysmgr_destroy ========
+ * Purpose:
+ * Function to finalize the System.
+ */
+s32 sysmgr_destroy(void)
+{
+ s32 status = 0;
+
+ if (atomic_cmpmask_and_lt(&(sysmgr_state.ref_count),
+ SYSMGR_MAKE_MAGICSTAMP(0),
+ SYSMGR_MAKE_MAGICSTAMP(1)) != false) {
+ /*! @retval SYSMGR_E_INVALIDSTATE Module was not initialized */
+ status = SYSMGR_E_INVALIDSTATE;
+ goto exit;
+ }
+
+ if (atomic_dec_return(&sysmgr_state.ref_count)
+ != SYSMGR_MAKE_MAGICSTAMP(0)) {
+ status = 1;
+ goto exit;
+ }
+
+ /* Finalize Platform module*/
+ if (sysmgr_state.platform_init_flag == true) {
+ status = platform_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : platform_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.platform_init_flag = false;
+ }
+ }
+
+ /* Finalize NameServerRemoteNotify module */
+ if (sysmgr_state.nameserver_remotenotify_init_flag == true) {
+ status = nameserver_remotenotify_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : "
+ "nameserver_remotenotify_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.nameserver_remotenotify_init_flag \
+ = false;
+ }
+ }
+
+ /* Finalize Notify Ducati Driver module */
+ if (sysmgr_state.notify_ducatidrv_init_flag == true) {
+ status = notify_ducatidrv_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : "
+ "notify_ducatidrv_destroy failed [0x%x]\n",
+ status);
+ } else {
+ sysmgr_state.notify_ducatidrv_init_flag = false;
+ }
+ }
+
+ /* Finalize MessageQTransportShm module */
+ if (sysmgr_state.messageq_transportshm_init_flag == true) {
+ status = messageq_transportshm_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : "
+ "messageq_transportshm_destroy failed [0x%x]\n",
+ status);
+ } else {
+ sysmgr_state.messageq_transportshm_init_flag = \
+ false;
+ }
+ }
+
+ /* Finalize ListMPSharedMemory module */
+ if (sysmgr_state.listmp_sharedmemory_init_flag == true) {
+ status = listmp_sharedmemory_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : "
+ "listmp_sharedmemory_destroy failed [0x%x]\n",
+ status);
+ } else {
+ sysmgr_state.listmp_sharedmemory_init_flag = \
+ false;
+ }
+ }
+
+ /* Finalize HeapBuf module */
+ if (sysmgr_state.heapbuf_init_flag == true) {
+ status = heapbuf_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : heapbuf_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.heapbuf_init_flag = false;
+ }
+ }
+
+ /* Finalize MessageQ module */
+ if (sysmgr_state.messageq_init_flag == true) {
+ status = messageq_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : messageq_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.messageq_init_flag = false;
+ }
+ }
+
+ /* Finalize GatePeterson module */
+ if (sysmgr_state.gatepeterson_init_flag == true) {
+ status = gatepeterson_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : "
+ "gatepeterson_destroy failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.gatepeterson_init_flag = false;
+ }
+ }
+
+ /* Finalize NameServer module */
+ if (sysmgr_state.nameserver_init_flag == true) {
+ status = nameserver_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : nameserver_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.nameserver_init_flag = false;
+ }
+ }
+
+ /* Finalize Notify module */
+ if (sysmgr_state.notify_init_flag == true) {
+ status = notify_destroy();
+ if (status < 0) {
+			printk(KERN_ERR "sysmgr_destroy : notify_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.notify_init_flag = false;
+ }
+ }
+
+ /* Finalize SharedRegion module */
+ if (sysmgr_state.sharedregion_init_flag == true) {
+ status = sharedregion_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : "
+ "sharedregion_destroy failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.sharedregion_init_flag = false;
+ }
+ }
+
+ /* Finalize ProcMgr module */
+ if (sysmgr_state.proc_mgr_init_flag == true) {
+ status = proc_mgr_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : proc_mgr_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.proc_mgr_init_flag = false;
+ }
+ }
+
+ /* Finalize MultiProc module */
+ if (sysmgr_state.multiproc_init_flag == true) {
+ status = multiproc_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : multiproc_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+			sysmgr_state.multiproc_init_flag = false;
+ }
+ }
+
+ /* Finalize PlatformMem module */
+ if (sysmgr_state.platform_mem_init_flag == true) {
+ status = platform_mem_destroy();
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : platform_mem_destroy "
+ "failed [0x%x]\n", status);
+ } else {
+ sysmgr_state.platform_mem_init_flag = false;
+ }
+ }
+
+ atomic_set(&sysmgr_state.ref_count, SYSMGR_MAKE_MAGICSTAMP(0));
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "sysmgr_destroy : failed with "
+ "status = [0x%x]\n", status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(sysmgr_destroy);
+
+/*
+ * ======== sysmgr_set_boot_load_page ========
+ * Purpose:
+ * Function to set the boot load page address for a slave.
+ */
+void sysmgr_set_boot_load_page(u16 proc_id, u32 boot_load_page)
+{
+ struct sysmgr_boot_load_page *temp = \
+ (struct sysmgr_boot_load_page *) boot_load_page;
+
+	if (proc_id >= MULTIPROC_MAXPROCESSORS) {
+ printk(KERN_ERR
+ "sysmgr_set_boot_load_page failed: Invalid proc_id passed\n");
+ return;
+ }
+
+ /* Initialize the host config area */
+ sysmgr_state.boot_load_page[proc_id] = temp;
+ temp->host_config.offset = -1;
+ temp->host_config.valid = 0;
+ temp->handshake = 0;
+}
+
+
+/*
+ * ======== sysmgr_wait_for_scalability_info ========
+ * Purpose:
+ * Function to wait for scalability handshake value.
+ */
+void sysmgr_wait_for_scalability_info(u16 proc_id)
+{
+ VOLATILE struct sysmgr_boot_load_page *temp = NULL;
+
+	if (proc_id >= MULTIPROC_MAXPROCESSORS) {
+ printk(KERN_ERR "sysmgr_wait_for_scalability_info failed: "
+ "Invalid proc_id passed\n");
+ return;
+ }
+ temp = sysmgr_state.boot_load_page[proc_id];
+
+	printk(KERN_ERR "sysmgr_wait_for_scalability_info: before wait, "
+		"handshake = 0x%x\n", temp->handshake);
+	while (temp->handshake != SYSMGR_SCALABILITYHANDSHAKESTAMP)
+		;
+	printk(KERN_ERR "sysmgr_wait_for_scalability_info: after wait, "
+		"handshake = 0x%x\n", temp->handshake);
+
+ /* Reset the handshake value for reverse synchronization */
+ temp->handshake = 0;
+}
+
+
+/*
+ * ======== sysmgr_wait_for_slave_setup ========
+ * Purpose:
+ * Function to wait for slave to complete setup.
+ */
+void sysmgr_wait_for_slave_setup(u16 proc_id)
+{
+ VOLATILE struct sysmgr_boot_load_page *temp = NULL;
+
+	if (proc_id >= MULTIPROC_MAXPROCESSORS) {
+ printk(KERN_ERR "sysmgr_wait_for_slave_setup failed: "
+ "Invalid proc_id passed\n");
+ return;
+ }
+ temp = sysmgr_state.boot_load_page[proc_id];
+
+ while (temp->handshake != SYSMGR_SETUPHANDSHAKESTAMP)
+ ;
+
+ /* Reset the handshake value for reverse synchronization */
+ temp->handshake = 0;
+}
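+
+/* Taken together, the handshake with one slave proceeds roughly as follows
+ * (a sketch; the stamp writes are performed by the remote firmware):
+ *
+ *	sysmgr_set_boot_load_page(proc_id, page_va);	// handshake = 0
+ *	// slave writes SYSMGR_SCALABILITYHANDSHAKESTAMP (0xBEEF0000)
+ *	sysmgr_wait_for_scalability_info(proc_id);	// resets handshake
+ *	// slave writes SYSMGR_SETUPHANDSHAKESTAMP (0xBEEF0001)
+ *	sysmgr_wait_for_slave_setup(proc_id);		// resets handshake
+ */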
diff --git a/drivers/dsp/syslink/multicore_ipc/transportshm.c b/drivers/dsp/syslink/multicore_ipc/transportshm.c
new file mode 100755
index 000000000000..9ce61cf03c6e
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/transportshm.c
@@ -0,0 +1,1160 @@
+/*
+ * transportshm.c
+ *
+ * Shared Memory Transport module
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Utilities headers */
+#include <linux/string.h>
+#include <linux/slab.h>
+
+/* Syslink headers */
+#include <syslink/atomic_linux.h>
+/* Module level headers */
+#include <multiproc.h>
+#include <sharedregion.h>
+#include <nameserver.h>
+#include <gatepeterson.h>
+#include <notify.h>
+#include <messageq.h>
+#include <listmp.h>
+#include <gatemp.h>
+#include <transportshm.h>
+
+/* =============================================================================
+ *  Macros
+ * =============================================================================
+ */
+
+/* Indicates that the transport is up. */
+#define TRANSPORTSHM_UP 0xBADC0FFE
+
+/* transportshm Version. */
+#define TRANSPORTSHM_VERSION 1
+
+/*
+ * Macro to compose the module magic stamp from the reference count
+ */
+#define TRANSPORTSHM_MAKE_MAGICSTAMP(x) \
+ ((TRANSPORTSHM_MODULEID << 12u) | (x))
+
+#define ROUND_UP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
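+
+/*
+ * Worked examples (illustrative): TRANSPORTSHM_MAKE_MAGICSTAMP(1) is
+ * (TRANSPORTSHM_MODULEID << 12) | 1, so the upper bits carry the module
+ * id while the low 12 bits hold the reference count. ROUND_UP(13, 8)
+ * yields 16 and ROUND_UP(16, 8) stays 16; b must be a power of two for
+ * the mask arithmetic to hold.
+ */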
+
+typedef void (*transportshm_err_fxn)(enum transportshm_reason reason,
+ void *handle,
+ void *msg,
+ u32 info);
+
+/* =============================================================================
+ * Structures & Enums
+ * =============================================================================
+ */
+/*
+ * Defines the transportshm state object, which contains all the
+ * module specific information.
+ */
+struct transportshm_module_object {
+ atomic_t ref_count;
+	/* Reference count indicating module initialization state */
+ struct transportshm_config cfg;
+ /* transportshm configuration structure */
+ struct transportshm_config def_cfg;
+ /* Default module configuration */
+ struct transportshm_params def_inst_params;
+ /* Default instance parameters */
+ void *gate_handle;
+ /* Handle to the gate for local thread safety */
+ void *transports
+ [MULTIPROC_MAXPROCESSORS][MESSAGEQ_NUM_PRIORITY_QUEUES];
+ /* Transport to be set in messageq_register_transport */
+ transportshm_err_fxn err_fxn;
+ /* Error function */
+
+};
+
+/*
+ * Structure of attributes in shared memory
+ */
+struct transportshm_attrs {
+ VOLATILE u32 flag; /* flag */
+ VOLATILE u32 creator_proc_id; /* Creator processor ID */
+ VOLATILE u32 notify_event_id; /* Notify event number */
+ VOLATILE u16 priority; /* priority */
+ VOLATILE u32 *gatemp_addr; /* gatemp shared memory srptr */
+};
+
+/*
+ * Structure defining config parameters for the MessageQ transport
+ * instances.
+ */
+struct transportshm_object {
+ VOLATILE struct transportshm_attrs *self;
+ /* Attributes for local processor in shared memory */
+ VOLATILE struct transportshm_attrs *other;
+ /* Attributes for remote processor in shared memory */
+ void *local_list;
+ /* List for this processor */
+ void *remote_list;
+ /* List for remote processor */
+ VOLATILE int status;
+ /* Current status */
+ u32 alloc_size;
+ /* Shared memory allocated */
+ bool cache_enabled;
+ /* Whether to cache calls */
+ int notify_event_id;
+ /* Notify event to be used */
+ u16 region_id;
+ /* The shared region id */
+ u16 remote_proc_id;
+ /* dst proc id */
+ u32 priority;
+ /* Priority of messages supported by this transport */
+ void *gate;
+ /* Gate for critical regions */
+ struct transportshm_params params;
+ /* Instance specific parameters */
+};
+
+/* =============================================================================
+ * Globals
+ * =============================================================================
+ */
+/*
+ * @var transportshm_state
+ *
+ * transportshm state object variable
+ */
+static struct transportshm_module_object transportshm_state = {
+ .gate_handle = NULL,
+ .def_cfg.err_fxn = NULL,
+ .def_inst_params.gate = NULL,
+ .def_inst_params.shared_addr = 0x0,
+ .def_inst_params.notify_event_id = (u32)(-1),
+ .def_inst_params.priority = MESSAGEQ_NORMALPRI
+};
+
+/* Pointer to module state */
+static struct transportshm_module_object *transportshm_module =
+ &transportshm_state;
+
+/* =============================================================================
+ * Forward declarations of internal functions
+ * =============================================================================
+ */
+/* Callback function registered with the Notify module. */
+static void _transportshm_notify_fxn(u16 proc_id,
+ u16 line_id,
+ u32 event_id,
+ uint *arg,
+ u32 payload);
+
+/* Function to create/open the handle. */
+static int _transportshm_create(struct transportshm_object **handle_ptr,
+ u16 proc_id,
+ const struct transportshm_params *params,
+ bool create_flag);
+
+/* =============================================================================
+ * APIs called directly by applications
+ * =============================================================================
+ */
+/*
+ * ======== transportshm_get_config ========
+ * Purpose:
+ * Get the default configuration for the transportshm
+ * module.
+ *
+ *      This function can be called by the application to get its
+ *      configuration parameters for transportshm_setup filled in
+ *      by the transportshm module with the default values.
+ *      If the user does not wish to change any of the default
+ *      parameters, this API need not be called.
+ */
+void transportshm_get_config(struct transportshm_config *cfg)
+{
+ if (WARN_ON(cfg == NULL))
+ goto exit;
+
+ if (atomic_cmpmask_and_lt(&(transportshm_module->ref_count),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(0),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(1)) == true) {
+ memcpy(cfg, &(transportshm_module->def_cfg),
+ sizeof(struct transportshm_config));
+ } else {
+ memcpy(cfg, &(transportshm_module->cfg),
+ sizeof(struct transportshm_config));
+ }
+ return;
+
+exit:
+ printk(KERN_ERR "transportshm_get_config: Argument of type"
+ "(struct transportshm_config *) passed is null!\n");
+}
+
+
+/*
+ * ======== transportshm_setup ========
+ * Purpose:
+ * Setup the transportshm module.
+ *
+ *      This function sets up the transportshm module. This
+ *      function must be called before any other instance-level APIs can
+ *      be invoked.
+ *      Module-level configuration needs to be provided to this
+ *      function. If the user wishes to change some specific config
+ *      parameters, transportshm_get_config can be called to get the
+ *      configuration filled with the default values. After this, only
+ *      the required configuration values need to be changed. If the
+ *      user does not wish to change any of the default parameters, the
+ *      application can simply call transportshm_setup with NULL
+ *      parameters; the defaults are then used automatically.
+ */
+int transportshm_setup(const struct transportshm_config *cfg)
+{
+ int status = TRANSPORTSHM_SUCCESS;
+	struct transportshm_config tmp_cfg;
+
+	/* This initializes the ref_count if it is not already initialized;
+	 * the upper 16 bits are written with the module ID to ensure
+	 * correctness of the ref_count variable.
+	 */
+ atomic_cmpmask_and_set(&transportshm_module->ref_count,
+ TRANSPORTSHM_MAKE_MAGICSTAMP(0),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&transportshm_module->ref_count)
+ != TRANSPORTSHM_MAKE_MAGICSTAMP(1u)) {
+ return 1;
+ }
+
+ if (cfg == NULL) {
+		transportshm_get_config(&tmp_cfg);
+		cfg = &tmp_cfg;
+ }
+
+ transportshm_module->gate_handle = \
+ kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (transportshm_module->gate_handle == NULL) {
+ /* @retval TRANSPORTSHM_E_FAIL Failed to create
+ GateMutex! */
+ status = TRANSPORTSHM_E_FAIL;
+ printk(KERN_ERR "transportshm_setup: Failed to create "
+ "mutex!\n");
+ atomic_set(&transportshm_module->ref_count,
+ TRANSPORTSHM_MAKE_MAGICSTAMP(0));
+ goto exit;
+ }
+ mutex_init(transportshm_module->gate_handle);
+
+ /* Copy the user provided values into the state object. */
+ memcpy(&transportshm_module->cfg, cfg,
+ sizeof(struct transportshm_config));
+ memset(&(transportshm_module->transports), 0, (sizeof(void *) * \
+ MULTIPROC_MAXPROCESSORS * MESSAGEQ_NUM_PRIORITY_QUEUES));
+ return status;
+
+exit:
+ printk(KERN_ERR "transportshm_setup failed: status = 0x%x",
+ status);
+ return status;
+}
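+
+/*
+ * Usage sketch (illustrative, not part of this patch); my_err_fxn is a
+ * hypothetical handler supplied by the caller:
+ *
+ *	struct transportshm_config cfg;
+ *
+ *	transportshm_get_config(&cfg);
+ *	cfg.err_fxn = my_err_fxn;
+ *	transportshm_setup(&cfg);
+ */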
+
+
+/*
+ * ======== transportshm_destroy ========
+ * Purpose:
+ * Destroy the transportshm module.
+ *
+ *      Once this function is called, other transportshm module
+ *      APIs, except for the transportshm_get_config API, cannot
+ *      be called anymore.
+ */
+int transportshm_destroy(void)
+{
+ struct transportshm_object *obj = NULL;
+ int status = 0;
+ u16 i;
+ u16 j;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(transportshm_module->ref_count),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(0),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (!(atomic_dec_return(&transportshm_module->ref_count)
+ == TRANSPORTSHM_MAKE_MAGICSTAMP(0))) {
+ status = 1;
+ goto exit;
+ }
+
+ /* Temporarily increment ref_count here. */
+ atomic_set(&transportshm_module->ref_count,
+ TRANSPORTSHM_MAKE_MAGICSTAMP(1));
+
+ /* Delete any Transports that have not been deleted so far. */
+ for (i = 0; i < MULTIPROC_MAXPROCESSORS; i++) {
+ for (j = 0 ; j < MESSAGEQ_NUM_PRIORITY_QUEUES; j++) {
+ if (transportshm_module->transports[i][j] != \
+ NULL) {
+ obj = (struct transportshm_object *)
+ transportshm_module->transports[i][j];
+ if (obj->self != NULL) {
+ if (obj->self->creator_proc_id
+ == multiproc_self())
+ transportshm_delete(
+ &(transportshm_module->
+ transports[i][j]));
+ else
+ transportshm_close(
+ &(transportshm_module->
+ transports[i][j]));
+ }
+ }
+ }
+ }
+
+ /* Decrease the ref_count */
+ atomic_set(&transportshm_module->ref_count,
+ TRANSPORTSHM_MAKE_MAGICSTAMP(0));
+
+ if (transportshm_module->gate_handle != NULL) {
+ kfree(transportshm_module->gate_handle);
+ transportshm_module->gate_handle = NULL;
+ }
+ return 0;
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "transportshm_destroy failed: "
+ "status = 0x%x\n", status);
+ return status;
+}
+
+
+/*
+ * ======== transportshm_params_init ========
+ * Purpose:
+ * Get Instance parameters
+ */
+void transportshm_params_init(struct transportshm_params *params)
+{
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(transportshm_module->ref_count),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(0),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(1)) == true)) {
+ printk(KERN_ERR "transportshm_params_init: Module was "
+ " not initialized\n");
+ goto exit;
+ }
+
+ if (WARN_ON(params == NULL)) {
+ printk(KERN_ERR "transportshm_params_init: Argument of"
+ " type (struct transportshm_params *) "
+ "is NULL!");
+ goto exit;
+ }
+
+ memcpy(params, &(transportshm_module->def_inst_params),
+ sizeof(struct transportshm_params));
+
+exit:
+ return;
+}
+
+/*
+ * ======== transportshm_create ========
+ * Purpose:
+ * Create a transport instance. This function waits for the remote
+ * processor to complete its transport creation. Hence it must be
+ * called only after the remote processor is running.
+ */
+void *transportshm_create(u16 proc_id,
+ const struct transportshm_params *params)
+{
+ struct transportshm_object *handle = NULL;
+ int status = 0;
+
+ BUG_ON(params == NULL);
+ BUG_ON(!(proc_id < multiproc_get_num_processors()));
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(transportshm_module->ref_count),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(0),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(params == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (transportshm_module->transports[proc_id][params->priority] \
+ != NULL) {
+ /* Specified transport is already registered. */
+ status = MESSAGEQ_E_ALREADYEXISTS;
+ goto exit;
+ }
+
+ status = _transportshm_create(&handle, proc_id, params, true);
+
+ if (status < 0) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ return handle;
+
+exit:
+ printk(KERN_ERR "transportshm_create failed: status = 0x%x\n",
+ status);
+ return handle;
+}
+
+
+/*
+ * ======== transportshm_delete ========
+ * Purpose:
+ * Delete instance
+ */
+int transportshm_delete(void **handle_ptr)
+{
+ int status = 0;
+ int tmp_status = 0;
+ struct transportshm_object *handle;
+ u16 proc_id;
+
+	int key;
+
+ key = mutex_lock_interruptible(transportshm_state.gate_handle);
+
+ if (key < 0)
+ goto mutex_fail;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(transportshm_module->ref_count),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(0),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (WARN_ON(handle_ptr == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(*handle_ptr == NULL)) {
+ status = -EINVAL;
+ printk(KERN_WARNING "transportshm_delete: Invalid NULL"
+ " mqtshm_handle specified! status = 0x%x\n", status);
+ goto exit;
+ }
+
+ handle = (struct transportshm_object *) (*handle_ptr);
+
+ if (handle != NULL) {
+		/* Clear handle in the local array; the instance was stored
+		 * against the remote processor id in _transportshm_create. */
+		proc_id = handle->remote_proc_id;
+		transportshm_module->
+			transports[proc_id][handle->params.priority] = NULL;
+ if (handle->self != NULL) {
+ /* clear the self flag */
+ handle->self->flag = 0;
+#if 0
+ if (EXPECT_FALSE(handle->cache_enabled)) {
+ Cache_wbinv((Ptr)&(handle->self->flag),
+ sharedregion_get_cache_line_size(
+ handle->region_id),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+ }
+
+ if (handle->local_list != NULL) {
+ status = listmp_delete(&handle->local_list);
+
+ if (status < 0)
+ printk(KERN_WARNING "transportshm_delete: "
+ "Failed to delete local listmp "
+ "instance!\n");
+ }
+
+ if (handle->remote_list != NULL) {
+ tmp_status = listmp_close(&handle->remote_list);
+ if ((tmp_status < 0) && (status >= 0)) {
+ status = tmp_status;
+ printk(KERN_WARNING "transportshm_delete: "
+ "Failed to close remote listmp "
+ "instance!\n");
+ }
+ }
+
+ messageq_unregister_transport(handle->
+ remote_proc_id, handle->params.priority);
+
+ tmp_status = notify_unregister_event_single(handle->
+ remote_proc_id,
+ 0,
+ handle->notify_event_id);
+ if (tmp_status < 0) {
+ status = tmp_status;
+ printk(KERN_WARNING "transportshm_delete: Failed to "
+ "unregister notify event!\n");
+ }
+
+ kfree(handle);
+ *handle_ptr = NULL;
+ }
+
+	mutex_unlock(transportshm_state.gate_handle);
+	return status;
+
+exit:
+ mutex_unlock(transportshm_state.gate_handle);
+mutex_fail:
+ if (status < 0)
+ printk(KERN_ERR "transportshm_delete failed: "
+ "status = 0x%x\n", status);
+ return status;
+}
+
+/*
+ * ========== transportshm_open_by_addr ===========
+ * Open a transport instance
+ */
+int
+transportshm_open_by_addr(void *shared_addr, void **handle_ptr)
+{
+ int status = 0;
+ struct transportshm_attrs *attrs = NULL;
+ struct transportshm_params params;
+ u16 id;
+
+ BUG_ON(shared_addr == NULL);
+ BUG_ON(handle_ptr == NULL);
+
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(transportshm_module->ref_count),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(0),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+
+ if (shared_addr == NULL) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (handle_ptr == NULL) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ attrs = (struct transportshm_attrs *) shared_addr;
+ id = sharedregion_get_id(shared_addr);
+
+ if (id == SHAREDREGION_INVALIDREGIONID) {
+ status = -EFAULT;
+ goto exit;
+ }
+ if (((u32) shared_addr % sharedregion_get_cache_line_size(id) != 0)) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+#if 0
+ /* invalidate the attrs before using it */
+ if (EXPECT_FALSE(SharedRegion_isCacheEnabled(id))) {
+ Cache_inv((Ptr) attrs,
+ sizeof(struct transportshm_attrs),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+ transportshm_params_init(&params);
+ /* set params field */
+ params.shared_addr = shared_addr;
+ params.notify_event_id = attrs->notify_event_id | \
+ (NOTIFY_SYSTEMKEY << 16);
+ params.priority = attrs->priority;
+
+ if (unlikely(attrs->flag != TRANSPORTSHM_UP)) {
+ status = -EFAULT;
+ *handle_ptr = NULL;
+ goto exit;
+ }
+
+ /* Create the object */
+ status = _transportshm_create((struct transportshm_object **)
+ handle_ptr,
+ attrs->creator_proc_id, &params, false);
+ if (status < 0)
+ goto exit;
+
+ return status;
+
+exit:
+ printk(KERN_ERR "transportshm_open_by_addr failed: status = 0x%x\n",
+ status);
+ return status;
+}
+
+/*
+ * ========== transportshm_close ===========
+ * Close an opened transport instance
+ */
+int
+transportshm_close(void **handle_ptr)
+{
+ int status = 0;
+ int tmp_status = 0;
+ struct transportshm_object *obj;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(transportshm_module->ref_count),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(0),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(handle_ptr == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(*handle_ptr == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct transportshm_object *)(*handle_ptr);
+ transportshm_module->transports[obj->remote_proc_id]
+ [obj->params.priority] = NULL;
+
+ if (obj->other != NULL) {
+ /* other flag was set by remote proc */
+ obj->other->flag = 0;
+#if 0
+ if (EXPECT_FALSE(obj->cache_enabled)) {
+ Cache_wbInv(&(obj->other->flag),
+ sharedregion_get_cache_line_size
+ (obj->region_id),
+ Cache_Type_ALL,
+ TRUE);
+ }
+#endif
+ }
+
+	if (obj->gate != NULL) {
+		status = gatemp_close(&obj->gate);
+		if (status < 0) {
+			printk(KERN_ERR "transportshm_close: "
+				"gatemp_close failed, status [0x%x]\n",
+				status);
+			status = TRANSPORTSHM_E_FAIL;
+		}
+	}
+
+	if (obj->local_list != NULL) {
+		tmp_status = listmp_close(&obj->local_list);
+		if ((tmp_status < 0) && (status >= 0)) {
+			printk(KERN_ERR "transportshm_close: "
+				"listmp_close(local_list) failed, "
+				"status [0x%x]\n", tmp_status);
+			status = TRANSPORTSHM_E_FAIL;
+		}
+	}
+
+	if (obj->remote_list != NULL) {
+		tmp_status = listmp_close(&obj->remote_list);
+		if ((tmp_status < 0) && (status >= 0)) {
+			printk(KERN_ERR "transportshm_close: "
+				"listmp_close(remote_list) failed, "
+				"status [0x%x]\n", tmp_status);
+			status = TRANSPORTSHM_E_FAIL;
+		}
+	}
+
+ messageq_unregister_transport(obj->remote_proc_id,
+ obj->params.priority);
+
+ tmp_status = notify_unregister_event_single(obj->remote_proc_id, 0,
+ (obj->notify_event_id | (NOTIFY_SYSTEMKEY << 16)));
+ if ((tmp_status < 0) && (status >= 0))
+ status = TRANSPORTSHM_E_FAIL;
+
+ kfree(obj);
+ *handle_ptr = NULL;
+exit:
+ if (status < 0)
+ printk(KERN_ERR "transportshm_close failed: status = 0x%x\n",
+ status);
+ return status;
+}
+
+
+/*
+ * ======== transportshm_put ========
+ * Purpose:
+ * Put msg to remote list
+ */
+int transportshm_put(void *handle, void *msg)
+{
+ int status = 0;
+ struct transportshm_object *obj = NULL;
+ /*int *key;*/
+
+ if (WARN_ON(atomic_cmpmask_and_lt(
+ &(transportshm_module->ref_count),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(0),
+ TRANSPORTSHM_MAKE_MAGICSTAMP(1)) == true)) {
+ status = -ENODEV;
+ goto exit;
+ }
+ if (WARN_ON(handle == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+ if (WARN_ON(msg == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ obj = (struct transportshm_object *)handle;
+#if 0
+ /* writeback invalidate the message */
+ if (EXPECT_FALSE(obj->cache_enabled)) {
+ Cache_wbinv((Ptr) msg,
+ ((MessageQ_Msg)(msg))->msgSize,
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+ /* make sure ListMP_put and sendEvent are done before remote executes */
+ /*key = gatemp_enter(obj->gate);*/
+ status = listmp_put_tail(obj->remote_list, (struct listmp_elem *) msg);
+ if (status < 0) {
+ printk(KERN_ERR "transportshm_put: Failed to put "
+ "message in the shared list! status = 0x%x\n", status);
+ goto exit_with_gate;
+ }
+
+ status = notify_send_event(obj->remote_proc_id, 0,
+ obj->notify_event_id, 0, false);
+ if (status < 0)
+ goto notify_send_fail;
+ else
+ goto exit_with_gate;
+
+
+notify_send_fail:
+ printk(KERN_ERR "transportshm_put: Notification to remote "
+ "processor failed, status = 0x%x\n", status);
+ /* If sending the event failed, then remove the element from the */
+ /* list. Ignore the status of remove. */
+ listmp_remove(obj->remote_list, (struct listmp_elem *) msg);
+
+exit_with_gate:
+ /*gatemp_leave(obj->gate, key);*/
+exit:
+ if (status < 0)
+ printk(KERN_ERR "transportshm_put failed: "
+ "status = 0x%x\n", status);
+ return status;
+}
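+
+/*
+ * Send/receive pairing (a sketch of the flow implemented above and in
+ * _transportshm_notify_fxn below): the sender appends the message to the
+ * receiver's shared list and then kicks it via Notify,
+ *
+ *	listmp_put_tail(obj->remote_list, (struct listmp_elem *)msg);
+ *	notify_send_event(obj->remote_proc_id, 0, obj->notify_event_id,
+ *			0, false);
+ *
+ * while the receiver drains its local_list from the notify callback and
+ * forwards each message with messageq_put().
+ */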
+
+/*
+ * ======== transportshm_control ========
+ * Purpose:
+ * Control Function
+ */
+int transportshm_control(void *handle, u32 cmd, u32 *cmd_arg)
+{
+ BUG_ON(handle == NULL);
+
+ printk(KERN_ALERT "transportshm_control not supported!\n");
+ return TRANSPORTSHM_E_NOTSUPPORTED;
+}
+
+/*
+ * ======== transportshm_get_status ========
+ * Purpose:
+ * Get status
+ */
+enum transportshm_status transportshm_get_status(void *handle)
+{
+ struct transportshm_object *obj = \
+ (struct transportshm_object *) handle;
+
+ BUG_ON(obj == NULL);
+
+ return obj->status;
+}
+
+/*
+ * ======== transportshm_shared_mem_req ========
+ * Purpose:
+ * Get shared memory requirements.
+ */
+u32 transportshm_shared_mem_req(const struct transportshm_params *params)
+{
+ u32 mem_req = 0;
+ s32 min_align;
+ u16 region_id;
+ struct listmp_params list_params;
+ s32 status = 0;
+
+ if (params == NULL) {
+ status = -EINVAL;
+ goto exit;
+ }
+ region_id = sharedregion_get_id(params->shared_addr);
+
+ if (region_id == SHAREDREGION_INVALIDREGIONID) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ min_align = 4; /*memory_get_max_default_type_align(); */
+ if (sharedregion_get_cache_line_size(region_id) > min_align)
+ min_align = sharedregion_get_cache_line_size(region_id);
+
+ /* for the Attrs structure */
+ mem_req = ROUND_UP(sizeof(struct transportshm_attrs), min_align);
+
+ /* for the second Attrs structure */
+ mem_req += ROUND_UP(sizeof(struct transportshm_attrs), min_align);
+
+ listmp_params_init(&list_params);
+ list_params.region_id = region_id;
+ /* for local listMP */
+ mem_req += listmp_shared_mem_req(&list_params);
+
+ /* for remote listMP */
+ mem_req += listmp_shared_mem_req(&list_params);
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "transportshm_shared_mem_req failed: "
+ "status = 0x%x\n", status);
+
+ return mem_req;
+}
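+
+/*
+ * Worked example (illustrative): with a 128-byte cache line,
+ * min_align = 128 and the requirement evaluates to
+ *
+ *	2 * ROUND_UP(sizeof(struct transportshm_attrs), 128)
+ *		+ 2 * listmp_shared_mem_req(&list_params)
+ *
+ * i.e. two cache-aligned attrs blocks plus one ListMP per direction.
+ */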
+
+
+/* =============================================================================
+ * internal functions
+ * =============================================================================
+ */
+/*
+ * ======== _transportshm_notify_fxn ========
+ * Purpose:
+ * Callback function registered with the Notify module.
+ */
+void _transportshm_notify_fxn(u16 proc_id, u16 line_id, u32 event_no,
+ uint *arg, u32 payload)
+{
+ struct transportshm_object *obj = NULL;
+ messageq_msg msg = NULL;
+ u32 queue_id;
+	int key;
+
+ key = mutex_lock_interruptible(transportshm_state.gate_handle);
+
+ if (key < 0)
+ goto mutex_fail;
+
+ if (WARN_ON(arg == NULL))
+ goto exit;
+
+ obj = (struct transportshm_object *)arg;
+	/* While there are messages, get them out and send them to
+	 * their final destination. */
+ if (obj->local_list)
+ msg = (messageq_msg) listmp_get_head(obj->local_list);
+ else
+ goto exit;
+ while (msg != NULL) {
+ /* Get the destination message queue Id */
+ queue_id = messageq_get_dst_queue(msg);
+
+ /* put the message to the destination queue */
+ messageq_put(queue_id, msg);
+ if (obj->local_list)
+ msg = (messageq_msg)
+ listmp_get_head(obj->local_list);
+ else
+ msg = NULL;
+ }
+ mutex_unlock(transportshm_state.gate_handle);
+ return;
+
+exit:
+ mutex_unlock(transportshm_state.gate_handle);
+mutex_fail:
+ printk(KERN_ERR "transportshm_notify_fxn: argument passed is "
+ "NULL!\n");
+ return;
+}
+
+
+/*
+ * ======== transportshm_set_err_fxn ========
+ * Purpose:
+ * This will set the asynchronous error function for the transport module
+ */
+void transportshm_set_err_fxn(void (*err_fxn)(
+ enum transportshm_reason reason,
+ void *handle,
+ void *msg,
+ u32 info))
+{
+ int key;
+
+ key = mutex_lock_interruptible(transportshm_module->gate_handle);
+ if (key < 0)
+ goto exit;
+
+ transportshm_module->cfg.err_fxn = err_fxn;
+ mutex_unlock(transportshm_module->gate_handle);
+
+exit:
+ return;
+}
+
+
+/*
+ * ========= _transportshm_create =========
+ * Purpose:
+ * Internal function for create()/open()
+ */
+int _transportshm_create(struct transportshm_object **handle_ptr, u16 proc_id,
+ const struct transportshm_params *params, bool create_flag)
+{
+ int status = 0;
+ struct transportshm_object *handle = NULL;
+ void *local_addr = NULL;
+ int local_index;
+ int remote_index;
+ u32 min_align;
+ struct listmp_params listmp_params[2];
+
+ BUG_ON(handle_ptr == NULL);
+ BUG_ON(params == NULL);
+ BUG_ON(proc_id >= multiproc_get_num_processors());
+
+ /*
+ * Determine who gets the '0' slot and who gets the '1' slot
+ * The '0' slot is given to the lower multiproc id.
+ */
+ if (multiproc_self() < proc_id) {
+ local_index = 0;
+ remote_index = 1;
+ } else {
+ local_index = 1;
+ remote_index = 0;
+ }
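+	/*
+	 * Example (illustrative): between processors 1 and 2, processor 1
+	 * always takes slot '0' and processor 2 slot '1', so both sides
+	 * agree on which of the two ListMP areas below is "local" without
+	 * any extra negotiation.
+	 */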
+
+ handle = kzalloc(sizeof(struct transportshm_object), GFP_KERNEL);
+ if (handle == NULL) {
+ status = -ENOMEM;
+ goto exit;
+ }
+ *handle_ptr = handle;
+
+ if (create_flag == false) {
+ /* Open by shared addr */
+ handle->self = (struct transportshm_attrs *)
+ params->shared_addr;
+ handle->region_id = sharedregion_get_id(params->shared_addr);
+ BUG_ON(handle->region_id == SHAREDREGION_INVALIDREGIONID);
+
+ handle->cache_enabled = sharedregion_is_cache_enabled(handle->
+ region_id);
+
+ local_addr = sharedregion_get_ptr((u32 *)handle->self->
+ gatemp_addr);
+ BUG_ON(local_addr == NULL);
+
+ status = gatemp_open_by_addr(local_addr, &handle->gate);
+ if (status < 0) {
+ status = -EFAULT;
+ goto exit;
+ }
+ } else {
+ /* Init the gate for ListMP create below */
+ if (params->gate != NULL)
+ handle->gate = params->gate;
+ else
+ handle->gate = gatemp_get_default_remote();
+
+ if (handle->gate == NULL) {
+ status = -EFAULT;
+ goto exit;
+ }
+ memcpy(&(handle->params), params,
+ sizeof(struct transportshm_params));
+ handle->region_id = sharedregion_get_id(params->shared_addr);
+
+ /* Assert that the buffer is in a valid shared
+ * region
+ */
+ if (handle->region_id == SHAREDREGION_INVALIDREGIONID) {
+ status = -EFAULT;
+ goto exit;
+ }
+ if (((u32)params->shared_addr
+ % sharedregion_get_cache_line_size(handle->region_id)
+ != 0)) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ /* set handle's cache_enabled, type, attrs */
+ handle->cache_enabled = sharedregion_is_cache_enabled(
+ handle->region_id);
+ handle->self = (struct transportshm_attrs *)
+ params->shared_addr;
+ }
+
+ /* Determine the minimum alignment to align to */
+ min_align = 4; /*memory_get_max_default_type_align(); */
+ if (sharedregion_get_cache_line_size(handle->region_id) > min_align)
+ min_align = sharedregion_get_cache_line_size(handle->
+ region_id);
+ /*
+ * Carve up the shared memory.
+ * If cache is enabled, these need to be on separate cache
+ * lines. This is done with min_align and ROUND_UP function.
+ */
+
+ handle->other = (struct transportshm_attrs *)(((u32)handle->self) +
+ (ROUND_UP(sizeof(struct transportshm_attrs), min_align)));
+
+
+ listmp_params_init(&(listmp_params[0]));
+ listmp_params[0].gatemp_handle = handle->gate;
+ listmp_params[0].shared_addr = (void *)(((u32)handle->other)
+ + (ROUND_UP(sizeof(struct transportshm_attrs), min_align)));
+
+ listmp_params_init(&listmp_params[1]);
+ listmp_params[1].gatemp_handle = handle->gate;
+ listmp_params[1].shared_addr = (void *)
+ (((u32)listmp_params[0].shared_addr)
+ + listmp_shared_mem_req(&listmp_params[0]));
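+	/*
+	 * Resulting shared memory layout (a sketch; each block is rounded
+	 * up to min_align):
+	 *
+	 *	| attrs (self) | attrs (other) | listmp[0] | listmp[1] |
+	 *
+	 * listmp[0] belongs to the lower multiproc id and listmp[1] to the
+	 * higher one, matching local_index/remote_index above.
+	 */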
+
+ handle->notify_event_id = params->notify_event_id;
+ handle->priority = params->priority;
+ handle->remote_proc_id = proc_id;
+
+ if (create_flag == true) {
+ handle->local_list =
+ listmp_create(&(listmp_params[local_index]));
+ if (handle->local_list == NULL) {
+ status = -EFAULT;
+ goto exit;
+ }
+ handle->remote_list = listmp_create(
+ &(listmp_params[remote_index]));
+ if (handle->remote_list == NULL) {
+ status = -EFAULT;
+ goto exit;
+ }
+ } else {
+ /* Open the local ListMP instance */
+ status = listmp_open_by_addr(
+ listmp_params[local_index].shared_addr,
+ &(handle->local_list));
+ if (status < 0) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ status = listmp_open_by_addr(
+ listmp_params[remote_index].shared_addr,
+ &(handle->remote_list));
+ if (status < 0) {
+ status = -EFAULT;
+ goto exit;
+ }
+ }
+
+ status = notify_register_event_single(proc_id,
+ 0, /* lineId */
+ params->notify_event_id,
+ _transportshm_notify_fxn,
+ handle);
+ if (status < 0) {
+ status = -EFAULT;
+ goto exit;
+ }
+
+ if (create_flag == true) {
+ handle->self->creator_proc_id = multiproc_self();
+ handle->self->notify_event_id = handle->notify_event_id;
+ handle->self->priority = handle->priority;
+
+ /* Store the GateMP shared_addr in the Attrs */
+ handle->self->gatemp_addr =
+ gatemp_get_shared_addr(handle->gate);
+ handle->self->flag = TRANSPORTSHM_UP;
+#if 0
+ if (EXPECT_FALSE(handle->cache_enabled)) {
+ Cache_wbinv((Ptr) handle->self,
+ sizeof(struct transportshm_attrs),
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+ } else {
+ handle->other->flag = TRANSPORTSHM_UP;
+#if 0
+ if (EXPECT_FALSE(handle->cache_enabled)) {
+ Cache_wb((Ptr)&(handle->other->flag),
+ min_align,
+ Cache_Type_ALL,
+ true);
+ }
+#endif
+ }
+
+ /* Register the transport with MessageQ */
+ status = messageq_register_transport(handle, proc_id,
+ params->priority);
+ if (status < 0) {
+ status = -EFAULT;
+ goto exit;
+ }
+ handle->status = TRANSPORTSHM_UP;
+ /* Set handle in the local array. */
+ transportshm_module->transports[handle->remote_proc_id]
+ [handle->params.priority] = handle;
+
+ return status;
+
+exit:
+ /* Cleanup in case of error. */
+ if (create_flag == true)
+ transportshm_delete((void **)handle_ptr);
+ else
+ transportshm_close((void **)handle_ptr);
+
+ return status;
+}
diff --git a/drivers/dsp/syslink/multicore_ipc/transportshm_setup.c b/drivers/dsp/syslink/multicore_ipc/transportshm_setup.c
new file mode 100644
index 000000000000..d41e8b5749d6
--- /dev/null
+++ b/drivers/dsp/syslink/multicore_ipc/transportshm_setup.c
@@ -0,0 +1,205 @@
+/*
+ * transportshm_setup.c
+ *
+ * Shared Memory Transport setup module
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/* Standard headers */
+#include <linux/types.h>
+
+/* Utilities headers */
+#include <linux/string.h>
+#include <linux/slab.h>
+
+/* Syslink headers */
+#include <syslink/atomic_linux.h>
+/* Module level headers */
+#include <multiproc.h>
+#include <sharedregion.h>
+#include <nameserver.h>
+#include <gatepeterson.h>
+#include <notify.h>
+#include <messageq.h>
+#include <listmp.h>
+#include <gatemp.h>
+#include <transportshm.h>
+
+/* =============================================================================
+ * Structures & Enums
+ * =============================================================================
+ */
+/* structure for transportshm_setup module state */
+struct transportshm_setup_module_object {
+ void *handles[MULTIPROC_MAXPROCESSORS];
+ /* Store a handle per remote proc */
+};
+
+
+/* =============================================================================
+ * Globals
+ * =============================================================================
+ */
+static struct transportshm_setup_module_object transportshm_setup_state = {
+ .handles[0] = NULL
+};
+
+/* Pointer to the transportshm_setup module state */
+static struct transportshm_setup_module_object *transportshm_setup_module =
+ &transportshm_setup_state;
+
+
+/* =============================================================================
+ * Functions
+ * =============================================================================
+ */
+/*
+ * =========== transportshm_setup_attach ===========
+ * Function that will be called in messageq_attach. Creates a
+ * transportshm object for a given processor
+ */
+int transportshm_setup_attach(u16 remote_proc_id, u32 *shared_addr)
+{
+ s32 status = 0;
+ struct transportshm_params params;
+ void *handle;
+
+ BUG_ON(remote_proc_id >= MULTIPROC_MAXPROCESSORS);
+
+ if (WARN_ON(unlikely(shared_addr == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* Init the transport parameters */
+ transportshm_params_init(&params);
+ params.gate = gatemp_get_default_remote();
+ params.shared_addr = shared_addr;
+
+ /* Make sure notify driver has been created */
+ if (unlikely(notify_is_registered(remote_proc_id, 0) == false)) {
+ status = TRANSPORTSHM_E_FAIL;
+ goto exit;
+ }
+
+ if (multiproc_self() < remote_proc_id) {
+ handle = transportshm_create(remote_proc_id, &params);
+ if (unlikely(handle == NULL)) {
+ status = TRANSPORTSHM_E_FAIL;
+ goto exit;
+ }
+
+ transportshm_setup_module->handles[remote_proc_id] = handle;
+ } else {
+ status = transportshm_open_by_addr(params.shared_addr, &handle);
+ if (status < 0)
+ goto exit;
+
+ transportshm_setup_module->handles[remote_proc_id] = handle;
+ }
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "transportshm_setup_attach failed! status "
+ "= 0x%x", status);
+ return status;
+}
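+
+/*
+ * Pairing sketch (illustrative): when processors A < B attach to each
+ * other over the same shared_addr, A (the lower id) runs
+ * transportshm_create() while B opens the already-created instance with
+ * transportshm_open_by_addr(); transportshm_setup_detach() mirrors this
+ * with transportshm_delete() and transportshm_close() respectively.
+ */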
+
+/*
+ * =========== transportshm_setup_detach ===========
+ * Function that will be called in messageq_detach. Deletes a
+ * transportshm object created by transportshm_setup_attach.
+ */
+int transportshm_setup_detach(u16 remote_proc_id)
+{
+ int status = 0;
+ void *handle = NULL;
+
+ if (WARN_ON(remote_proc_id >= MULTIPROC_MAXPROCESSORS)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ handle = transportshm_setup_module->handles[remote_proc_id];
+ if (WARN_ON(unlikely(handle == NULL))) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (multiproc_self() < remote_proc_id) {
+ /* Delete the transport */
+ status = transportshm_delete(&handle);
+ if (unlikely(status < 0)) {
+ status = TRANSPORTSHM_E_FAIL;
+ goto exit;
+ }
+ transportshm_setup_module->handles[remote_proc_id] = NULL;
+ } else {
+ status = transportshm_close(&handle);
+ if (unlikely(status < 0)) {
+ status = TRANSPORTSHM_E_FAIL;
+ goto exit;
+ }
+ transportshm_setup_module->handles[remote_proc_id] = NULL;
+ }
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "transportshm_setup_detach failed! status "
+ "= 0x%x", status);
+ return status;
+}
+
+
+/*
+ * =========== transportshm_setup_shared_mem_req ===========
+ * Function that returns the amount of shared memory required
+ */
+u32 transportshm_setup_shared_mem_req(u32 *shared_addr)
+{
+ u32 mem_req = 0x0;
+ int status = 0;
+ struct transportshm_params params;
+
+ /* Don't do anything if only 1 processor in system */
+ if (likely(multiproc_get_num_processors() != 1)) {
+ BUG_ON(shared_addr == NULL);
+
+ if (unlikely(shared_addr == NULL)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ transportshm_params_init(&params);
+ params.shared_addr = shared_addr;
+
+ mem_req += transportshm_shared_mem_req(&params);
+ }
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "transportshm_setup_shared_mem_req failed! "
+ "status = 0x%x", status);
+ return mem_req;
+}
+
+/* Determines if a transport has been registered to a remote processor */
+bool transportshm_setup_is_registered(u16 remote_proc_id)
+{
+ bool registered;
+
+ registered = (transportshm_setup_module->handles[remote_proc_id] !=
+ NULL);
+
+ return registered;
+}
diff --git a/drivers/dsp/syslink/notify_ducatidriver/notify_ducati.c b/drivers/dsp/syslink/notify_ducatidriver/notify_ducati.c
new file mode 100644
index 000000000000..048898e83dbc
--- /dev/null
+++ b/drivers/dsp/syslink/notify_ducatidriver/notify_ducati.c
@@ -0,0 +1,1330 @@
+/*
+ * notify_ducati.c
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <plat/mailbox.h>
+
+#include <syslink/multiproc.h>
+#include <syslink/atomic_linux.h>
+#include <syslink/sharedregion.h>
+#include <syslink/notify_driver.h>
+#include <syslink/notifydefs.h>
+#include <syslink/notify_driverdefs.h>
+#include <syslink/notify_ducatidriver.h>
+
+
+
+#define NOTIFYDUCATIDRIVER_MEM_ALIGN 0
+
+#define NOTIFYDUCATIDRIVER_MAX_EVENTS 32
+
+#define NOTIFYNONSHMDRV_MAX_EVENTS 1
+
+#define NOTIFYNONSHMDRV_RESERVED_EVENTS 1
+
+#define NOTIFYDRV_DUCATI_RECV_MBX 2
+
+#define NOTIFYDRV_DUCATI_SEND_MBX 3
+
+/* Get address of event entry. */
+#define EVENTENTRY(event_chart, align, event_id) \
+	((struct notify_ducatidrv_event_entry *) \
+	((u32)event_chart + (align * event_id)))
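+
+/*
+ * Worked example (illustrative): with an event entry spacing (align) of
+ * 128 bytes, EVENTENTRY(chart, 128, 5) resolves to chart + 640, i.e. the
+ * sixth spaced entry in the event chart.
+ */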
+
+/* Stamp indicating that the Notify Shared Memory driver on the
+ * processor has been initialized. */
+#define NOTIFYDUCATIDRIVER_INIT_STAMP 0xA9C8B7D6
+
+/* Flag indicating event is set. */
+#define NOTIFYDUCATIDRIVER_UP 1
+
+/* Flag indicating event is not set. */
+#define NOTIFYDUCATIDRIVER_DOWN 0
+
+/*FIX ME: Make use of Multi Proc module */
+#define SELF_ID 0
+
+#define OTHER_ID 1
+
+#define PROC_TESLA 0
+#define PROC_DUCATI 1
+#define PROC_GPP 2
+#define PROCSYSM3 2
+#define PROCAPPM3 3
+#define MAX_SUBPROC_EVENTS 15
+
+/* Macro to make a correct module magic number with refCount */
+#define NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(x) \
+ ((NOTIFY_DUCATIDRIVER_MODULEID << 12u) | (x))
+
+#define ROUND_UP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
+
+static struct omap_mbox *ducati_mbox;
+static int notify_ducatidrv_isr(void *ntfy_msg);
+static bool notify_ducatidrv_isr_callback(void *ref_data, void *ntfy_msg);
+
+
+/* Defines the notify_ducatidrv state object, which contains all
+ * the module specific information. */
+struct notify_ducatidrv_module {
+ atomic_t ref_count;
+ /* Reference count */
+ struct notify_ducatidrv_config cfg;
+ /* NotifyDriverShm configuration structure */
+ struct notify_ducatidrv_config def_cfg;
+ /* Default module configuration */
+ struct notify_ducatidrv_params def_inst_params;
+ /* Default instance parameters */
+ struct mutex *gate_handle;
+ /* Handle to the gate for local thread safety */
+ struct notify_ducatidrv_object *driver_handles
+ [MULTIPROC_MAXPROCESSORS][NOTIFY_MAX_INTLINES];
+ /* Loader handle array. */
+ atomic_t mbox_ref_count;
+ /* Reference count for enabling/disabling mailbox interrupt */
+};
+
+/* Notify ducati driver instance object. */
+struct notify_ducatidrv_object {
+ struct notify_ducatidrv_params params;
+ /* Instance parameters (configuration values) */
+ VOLATILE struct notify_ducatidrv_proc_ctrl *self_proc_ctrl;
+ /* Pointer to control structure in shared memory for self processor. */
+ VOLATILE struct notify_ducatidrv_proc_ctrl *other_proc_ctrl;
+ /* Pointer to control structure in shared memory for remote processor.*/
+ VOLATILE struct notify_ducatidrv_event_entry *self_event_chart;
+ /* Pointer to event chart for local processor */
+ VOLATILE struct notify_ducatidrv_event_entry *other_event_chart;
+ /* Pointer to event chart for remote processor */
+ u32 reg_chart[NOTIFY_MAXEVENTS];
+ /* Local event registration chart for tracking registered events. */
+ u16 self_id;
+ /* Self ID used for identification of local control region */
+ u16 other_id;
+ /* Other ID used for identification of remote control region */
+ u16 remote_proc_id;
+	/* Processor ID of the remote processor with which this driver
+	 * instance communicates. */
+ struct notify_driver_object *drv_handle;
+ /* Common NotifyDriver handle */
+ u32 nesting;
+ /* For disable/restore nesting */
+ u32 cache_enabled;
+ /* Whether to perform cache calls */
+ u32 event_entry_size;
+ /* Spacing between event entries */
+ u32 num_events;
+ /* Number of events configured */
+};
+
+
+static struct notify_ducatidrv_module notify_ducatidriver_state = {
+ .gate_handle = NULL,
+ .def_inst_params.shared_addr = 0x0,
+ .def_inst_params.cache_enabled = false,
+ .def_inst_params.cache_line_size = 128u,
+ .def_inst_params.remote_proc_id = MULTIPROC_INVALIDID,
+ .def_inst_params.line_id = 0,
+ .def_inst_params.local_int_id = (u32) -1,
+ .def_inst_params.remote_int_id = (u32) -1
+};
+
+/* Get the default configuration for the notify_ducatidrv module. */
+void notify_ducatidrv_get_config(struct notify_ducatidrv_config *cfg)
+{
+ int status = NOTIFY_S_SUCCESS;
+
+ if (WARN_ON(unlikely(cfg == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ if (atomic_cmpmask_and_lt(&(notify_ducatidriver_state.ref_count),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(0),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(1))
+ == true)
+ memcpy(cfg, &(notify_ducatidriver_state.def_cfg),
+ sizeof(struct notify_ducatidrv_config));
+ else
+ memcpy(cfg, &(notify_ducatidriver_state.cfg),
+ sizeof(struct notify_ducatidrv_config));
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_ducatidrv_get_config failed! "
+ "status = 0x%x", status);
+ }
+ return;
+}
+EXPORT_SYMBOL(notify_ducatidrv_get_config);
+
+/* Setup the notify_ducatidrv module. */
+int notify_ducatidrv_setup(struct notify_ducatidrv_config *cfg)
+{
+ int status = 0;
+ struct notify_ducatidrv_config tmp_cfg;
+ u16 i;
+ u16 j;
+
+ /* Init the ref_count to 0 */
+ atomic_cmpmask_and_set(&(notify_ducatidriver_state.ref_count),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(0),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(0));
+ if (atomic_inc_return(&(notify_ducatidriver_state.ref_count)) !=
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(1u)) {
+ return NOTIFY_S_ALREADYSETUP;
+ }
+ atomic_set(&(notify_ducatidriver_state.mbox_ref_count), 0);
+
+ if (cfg == NULL) {
+ notify_ducatidrv_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+
+ /* Create a default gate handle here */
+ notify_ducatidriver_state.gate_handle =
+ kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (notify_ducatidriver_state.gate_handle == NULL) {
+ status = NOTIFY_E_MEMORY;
+ goto error_exit;
+ }
+ mutex_init(notify_ducatidriver_state.gate_handle);
+
+ for (i = 0 ; i < MULTIPROC_MAXPROCESSORS; i++)
+ for (j = 0 ; j < NOTIFY_MAX_INTLINES; j++)
+ notify_ducatidriver_state.driver_handles[i][j] = NULL;
+
+ memcpy(&notify_ducatidriver_state.cfg, cfg,
+ sizeof(struct notify_ducatidrv_config));
+
+	/* Initialize the mailbox module for Ducati */
+ if (ducati_mbox == NULL) {
+ ducati_mbox = omap_mbox_get("mailbox-2");
+ if (ducati_mbox == NULL) {
+ printk(KERN_ERR "Failed in omap_mbox_get()\n");
+ status = NOTIFY_E_INVALIDSTATE;
+ goto error_mailbox_get_failed;
+ }
+ ducati_mbox->rxq->callback =
+ (int (*)(void *))notify_ducatidrv_isr;
+ }
+ return 0;
+
+error_mailbox_get_failed:
+ kfree(notify_ducatidriver_state.gate_handle);
+error_exit:
+ atomic_set(&(notify_ducatidriver_state.ref_count),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(0));
+ printk(KERN_ERR "notify_ducatidrv_setup failed! status = 0x%x", status);
+ return status;
+}
+EXPORT_SYMBOL(notify_ducatidrv_setup);
+
+/* Destroy the notify_ducatidrv module. */
+int notify_ducatidrv_destroy(void)
+{
+ int status = NOTIFY_S_SUCCESS;
+ u16 i;
+ u16 j;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(notify_ducatidriver_state.ref_count),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(0),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (!(atomic_dec_return(&notify_ducatidriver_state.ref_count) == \
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(0)))
+ return NOTIFY_S_ALREADYSETUP;
+
+ /* Temporarily increment the refcount */
+ atomic_set(&(notify_ducatidriver_state.ref_count),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(1));
+
+ for (i = 0 ; i < MULTIPROC_MAXPROCESSORS; i++) {
+ for (j = 0 ; j < NOTIFY_MAX_INTLINES; j++) {
+ if (notify_ducatidriver_state.driver_handles[i][j] != \
+ NULL) {
+ notify_ducatidrv_delete(
+ &notify_ducatidriver_state.\
+ driver_handles[i][j]);
+ }
+ }
+ }
+
+ if (notify_ducatidriver_state.gate_handle != NULL)
+ kfree(notify_ducatidriver_state.gate_handle);
+
+ atomic_set(&(notify_ducatidriver_state.ref_count),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(0));
+
+	/* Finalize the mailbox module for Ducati */
+ omap_mbox_put(ducati_mbox);
+ ducati_mbox = NULL;
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_ducatidrv_destroy failed! "
+ "status = 0x%x", status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(notify_ducatidrv_destroy);
+
+/* Function to initialize the parameters for this notify_ducatidrv instance. */
+void notify_ducatidrv_params_init(struct notify_ducatidrv_params *params)
+{
+ int status = NOTIFY_S_SUCCESS;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(notify_ducatidriver_state.ref_count),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(0),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(params == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ /*Return updated notify_ducatidrv instance specific parameters*/
+ memcpy(params, &(notify_ducatidriver_state.def_inst_params),
+ sizeof(struct notify_ducatidrv_params));
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_ducatidrv_params_init failed! "
+ "status = 0x%x", status);
+ }
+ return;
+}
+EXPORT_SYMBOL(notify_ducatidrv_params_init);
+
+/* Function to create an instance of this Notify ducati driver. */
+struct notify_ducatidrv_object *notify_ducatidrv_create(
+ const struct notify_ducatidrv_params *params)
+{
+ int status = NOTIFY_S_SUCCESS;
+ struct notify_ducatidrv_object *obj = NULL;
+ struct notify_driver_object *drv_handle = NULL;
+ struct notify_driver_fxn_table fxn_table;
+ u32 i;
+ u16 region_id;
+ uint region_cache_size;
+ uint min_align;
+ struct notify_ducatidrv_event_entry *event_entry;
+ u32 proc_ctrl_size;
+ u32 shm_va;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(notify_ducatidriver_state.ref_count),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(0),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(params == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely((params->remote_proc_id == MULTIPROC_INVALIDID)
+ || (params->remote_proc_id == multiproc_self())))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(params->line_id >= NOTIFY_MAX_INTLINES))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(((u32)params->shared_addr % \
+ (u32) params->cache_line_size)) != 0)) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ status = mutex_lock_interruptible(
+ notify_ducatidriver_state.gate_handle);
+ if (status)
+ goto exit;
+
+ /* Check if driver already exists. */
+ drv_handle = notify_get_driver_handle(params->remote_proc_id,
+ params->line_id);
+ if (drv_handle != NULL) {
+ status = NOTIFY_E_ALREADYEXISTS;
+ goto error_unlock_and_return;
+ }
+
+ /* Function table information */
+ fxn_table.register_event = (void *)&notify_ducatidrv_register_event;
+ fxn_table.unregister_event = (void *)&notify_ducatidrv_unregister_event;
+ fxn_table.send_event = (void *)&notify_ducatidrv_send_event;
+ fxn_table.disable = (void *)&notify_ducatidrv_disable;
+ fxn_table.enable = (void *)&notify_ducatidrv_enable;
+ fxn_table.disable_event = (void *)&notify_ducatidrv_disable_event;
+ fxn_table.enable_event = (void *)&notify_ducatidrv_enable_event;
+
+ /* Register driver with the Notify module. */
+ status = notify_register_driver(params->remote_proc_id,
+ params->line_id, &fxn_table,
+ &drv_handle);
+ if (status < 0) {
+ status = NOTIFY_E_FAIL;
+ goto error_clean_and_exit;
+ }
+
+	/* Allocate memory for the notify_ducatidrv_object instance. */
+ obj = kzalloc(sizeof(struct notify_ducatidrv_object), GFP_ATOMIC);
+ if (obj == NULL) {
+ status = NOTIFY_E_MEMORY;
+ goto error_clean_and_exit;
+ }
+ memcpy(&(obj->params), (void *) params,
+ sizeof(struct notify_ducatidrv_params));
+ obj->num_events = notify_state.cfg.num_events;
+ /* Set the handle in the driverHandles array. */
+ notify_ducatidriver_state.driver_handles
+ [params->remote_proc_id][params->line_id] = obj;
+ /* Point to the generic drvHandle object from this specific
+ * NotifyDriverShm object. */
+ obj->drv_handle = drv_handle;
+
+ /* Determine obj->cacheEnabled using params->cacheEnabled and
+ * SharedRegion cache flag setting, if applicable. */
+ obj->cache_enabled = params->cache_enabled;
+ min_align = params->cache_line_size;
+ region_id = sharedregion_get_id((void *)params->shared_addr);
+ if (region_id != SHAREDREGION_INVALIDREGIONID) {
+ /* Override the user cacheEnabled setting if the region
+ * cacheEnabled is FALSE. */
+ if (!sharedregion_is_cache_enabled(region_id))
+ obj->cache_enabled = false;
+
+ region_cache_size = sharedregion_get_cache_line_size(region_id);
+
+ /* Override the user cache line size setting if the region
+ * cache line size is smaller. */
+ if (region_cache_size < min_align)
+ min_align = region_cache_size;
+ }
+
+ if ((u32)params->shared_addr % min_align != 0) {
+ status = NOTIFY_E_FAIL;
+ goto error_clean_and_exit;
+ }
+ obj->remote_proc_id = params->remote_proc_id;
+ obj->nesting = 0;
+ if (params->remote_proc_id > multiproc_self()) {
+ obj->self_id = SELF_ID;
+ obj->other_id = OTHER_ID;
+ } else {
+ obj->self_id = OTHER_ID;
+ obj->other_id = SELF_ID;
+ }
+
+ proc_ctrl_size = ROUND_UP(sizeof(struct notify_ducatidrv_proc_ctrl),
+ min_align);
+
+ /* Save the eventEntrySize in obj since we will need it at runtime to
+ * index the event charts */
+ /* TODO: Check if this shm_va needs to be passed instead of params->
+ * shared_addr */
+ shm_va = get_ducati_virt_mem();
+ obj->event_entry_size = ROUND_UP(
+ sizeof(struct notify_ducatidrv_event_entry),
+ min_align);
+ obj->self_proc_ctrl = (struct notify_ducatidrv_proc_ctrl *)
+ ((u32) params->shared_addr + \
+ (obj->self_id * proc_ctrl_size));
+ obj->other_proc_ctrl = (struct notify_ducatidrv_proc_ctrl *)
+ ((u32) params->shared_addr + \
+ (obj->other_id * proc_ctrl_size));
+ obj->self_event_chart = (struct notify_ducatidrv_event_entry *)
+ ((u32) params->shared_addr + \
+ (2 * proc_ctrl_size) + \
+ (obj->event_entry_size * \
+ obj->num_events * obj->self_id));
+ obj->other_event_chart = (struct notify_ducatidrv_event_entry *)
+ ((u32) params->shared_addr + \
+ (2 * proc_ctrl_size) + \
+ (obj->event_entry_size * \
+ obj->num_events * obj->other_id));
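+	/*
+	 * Shared region layout carved out above (a sketch):
+	 *
+	 *	| proc_ctrl 0 | proc_ctrl 1 | event chart 0 | event chart 1 |
+	 *
+	 * Each proc_ctrl block is proc_ctrl_size bytes and each chart holds
+	 * num_events entries of event_entry_size bytes; a processor picks
+	 * its blocks via self_id/other_id.
+	 */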
+
+ for (i = 0; i < obj->num_events; i++)
+ obj->reg_chart[i] = (u32)-1;
+
+ /* All events initially unflagged */
+ for (i = 0; i < obj->num_events; i++) {
+ event_entry = EVENTENTRY(obj->self_event_chart,
+ obj->event_entry_size, i);
+ event_entry->flag = 0;
+ }
+
+ /* All events initially not registered */
+ obj->self_proc_ctrl->event_reg_mask = 0x0;
+
+ /* Enable all events initially.*/
+ obj->self_proc_ctrl->event_enable_mask = 0xFFFFFFFF;
+
+
+ /*Set up the ISR on the MPU-Ducati FIFO */
+ if (atomic_inc_return(&(notify_ducatidriver_state.mbox_ref_count)) == 1)
+ omap_mbox_enable_irq(ducati_mbox, IRQ_RX);
+ obj->self_proc_ctrl->recv_init_status = NOTIFYDUCATIDRIVER_INIT_STAMP;
+ obj->self_proc_ctrl->send_init_status = NOTIFYDUCATIDRIVER_INIT_STAMP;
+
+#if 0
+ /* Write back our own ProcCtrl */
+ if (obj->cache_enabled) {
+ Cache_wbInv((void *) obj->self_proc_ctrl,
+ sizeof(struct notify_ducatidrv_proc_ctrl),
+ Cache_Type_ALL, true);
+ }
+#endif
+
+ drv_handle->is_init = NOTIFY_DRIVERINITSTATUS_DONE;
+ mutex_unlock(notify_ducatidriver_state.gate_handle);
+ return obj;
+
+error_clean_and_exit:
+ if (obj != NULL) {
+ if (obj->self_proc_ctrl != NULL) {
+ /* Clear initialization status in shared memory. */
+			obj->self_proc_ctrl->recv_init_status = 0x0;
+			obj->self_proc_ctrl->send_init_status = 0x0;
+ obj->self_proc_ctrl = NULL;
+#if 0
+ /* Write back our own ProcCtrl */
+ if (obj->cache_enabled) {
+ Cache_wbInv((void *) obj->self_proc_ctrl,
+ sizeof(struct notify_ducatidrv_proc_ctrl),
+ Cache_Type_ALL, true);
+ }
+#endif
+ kfree(obj);
+ obj = NULL;
+ }
+ }
+ if (drv_handle != NULL) {
+ /* Unregister driver from the Notify module*/
+ notify_unregister_driver(drv_handle);
+ notify_ducatidriver_state.driver_handles
+ [params->remote_proc_id][params->line_id] = NULL;
+ drv_handle = NULL;
+ }
+error_unlock_and_return:
+ /* Leave critical section protection. */
+ mutex_unlock(notify_ducatidriver_state.gate_handle);
+exit:
+ printk(KERN_ERR "notify_ducatidrv_create failed! status = 0x%x",
+ status);
+ return NULL;
+}
+EXPORT_SYMBOL(notify_ducatidrv_create);
+
+/* Function to delete the instance of shared memory driver */
+int notify_ducatidrv_delete(struct notify_ducatidrv_object **handle_ptr)
+{
+ int status = NOTIFY_S_SUCCESS;
+ int tmp_status = NOTIFY_S_SUCCESS;
+ struct notify_ducatidrv_object *obj = NULL;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(notify_ducatidriver_state.ref_count),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(0),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+
+ if (WARN_ON(unlikely(handle_ptr == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(*handle_ptr == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ obj = (struct notify_ducatidrv_object *)(*handle_ptr);
+ if (obj != NULL) {
+ /* Uninstall the ISRs & Disable the Mailbox interrupt.*/
+ if (atomic_dec_and_test(
+ &(notify_ducatidriver_state.mbox_ref_count)))
+ omap_mbox_disable_irq(ducati_mbox, IRQ_RX);
+
+ if (obj->self_proc_ctrl != NULL) {
+ /* Clear initialization status in shared memory. */
+			obj->self_proc_ctrl->recv_init_status = 0x0;
+			obj->self_proc_ctrl->send_init_status = 0x0;
+ obj->self_proc_ctrl = NULL;
+#if 0
+ /* Write back our own ProcCtrl */
+ if (obj->cache_enabled) {
+ Cache_wbInv((void *) obj->self_proc_ctrl,
+ sizeof(struct notify_ducatidrv_proc_ctrl),
+ Cache_Type_ALL, true);
+ }
+#endif
+ }
+
+ tmp_status = notify_unregister_driver(obj->drv_handle);
+ if (status >= 0 && tmp_status < 0)
+ status = tmp_status;
+
+ notify_ducatidriver_state.driver_handles
+ [obj->params.remote_proc_id][obj->params.line_id] = \
+ NULL;
+
+ kfree(obj);
+ obj = NULL;
+ }
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_ducatidrv_delete failed! "
+ "status = 0x%x", status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(notify_ducatidrv_delete);
+
+/* Register a callback for an event with the Notify driver. */
+int notify_ducatidrv_register_event(struct notify_driver_object *handle,
+ u32 event_id)
+{
+ int status = NOTIFY_S_SUCCESS;
+ struct notify_ducatidrv_object *obj;
+ VOLATILE struct notify_ducatidrv_event_entry *event_entry;
+ int i;
+ int j;
+
+ if (WARN_ON(unlikely(handle == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->is_init != \
+ NOTIFY_DRIVERINITSTATUS_DONE))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->notify_handle == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->notify_handle->driver_handle == NULL))) {
+ status = NOTIFY_E_DRIVERNOTREGISTERED;
+ goto exit;
+ }
+
+ obj = (struct notify_ducatidrv_object *)
+ handle->notify_handle->driver_handle;
+ if (WARN_ON(unlikely(obj->reg_chart == NULL))) {
+ status = NOTIFY_E_FAIL;
+ goto exit;
+ }
+
+ /* This function is only called for the first register, i.e. when the
+ * first callback is being registered. */
+ /* Add an entry for the registered event into the Event Registration
+ * Chart, in ascending order of event numbers (and decreasing
+ * priorities). There is no need to make this atomic since
+ * Notify_exec cannot preempt: shared memory hasn't been modified yet.
+ */
+ for (i = 0 ; i < obj->num_events; i++) {
+ /* Find the correct slot in the registration array. */
+ if (obj->reg_chart[i] == (u32) -1) {
+ for (j = (i - 1); j >= 0; j--) {
+ if (event_id < obj->reg_chart[j]) {
+ obj->reg_chart[j + 1] = \
+ obj->reg_chart[j];
+ i = j;
+ } else {
+ /* End the loop, slot found. */
+ j = -1;
+ }
+ }
+ obj->reg_chart[i] = event_id;
+ break;
+ }
+ }
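+	/*
+	 * Worked example (illustrative): with reg_chart = {3, 7, -1, ...},
+	 * registering event 5 shifts 7 one slot right and stores 5 in its
+	 * place, yielding {3, 5, 7, -1, ...}: ascending event ids,
+	 * decreasing priority.
+	 */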
+
+ /* Clear any pending unserviced event as there are no listeners
+ * for the pending event */
+ event_entry = EVENTENTRY(obj->self_event_chart, obj->event_entry_size,
+ event_id);
+ event_entry->flag = NOTIFYDUCATIDRIVER_DOWN;
+
+ /* Set the registered bit in shared memory and write back */
+ set_bit(event_id, (unsigned long *)
+ &(obj->self_proc_ctrl->event_reg_mask));
+
+#if 0
+ /* Write back both the flag and the reg mask */
+ if (obj->cache_enabled) {
+ /* Writeback eventRegMask */
+ Cache_wbInv((void *) obj->self_proc_ctrl,
+ sizeof(struct notify_ducatidrv_proc_ctrl),
+ Cache_Type_ALL, true);
+ /* Writeback event entry */
+ Cache_wbInv((void *) event_entry,
+ sizeof(struct notify_ducatidrv_event_entry),
+ Cache_Type_ALL, true);
+ }
+#endif
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_ducatidrv_register_event failed! "
+ "status = 0x%x", status);
+ }
+ return status;
+}
+
+/* Unregister a callback for an event with the Notify driver. */
+int notify_ducatidrv_unregister_event(struct notify_driver_object *handle,
+ u32 event_id)
+{
+ int status = NOTIFY_S_SUCCESS;
+ struct notify_ducatidrv_object *obj;
+ VOLATILE struct notify_ducatidrv_event_entry *event_entry;
+ int i;
+ int j;
+
+ if (WARN_ON(unlikely(handle == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->is_init != \
+ NOTIFY_DRIVERINITSTATUS_DONE))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->notify_handle == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->notify_handle->driver_handle == NULL))) {
+ status = NOTIFY_E_DRIVERNOTREGISTERED;
+ goto exit;
+ }
+
+ obj = (struct notify_ducatidrv_object *)
+ handle->notify_handle->driver_handle;
+ if (WARN_ON(unlikely(obj->reg_chart == NULL))) {
+ status = NOTIFY_E_FAIL;
+ goto exit;
+ }
+
+ /* This function is only called for the last unregister, i.e. when the
+ * final remaining callback is being unregistered.
+ * Unset the registered bit in shared memory */
+ clear_bit(event_id, (unsigned long *)
+ &(obj->self_proc_ctrl->event_reg_mask));
+
+ /* Clear any pending unserviced event as there are no listeners
+ * for the pending event. This should be done only after the event
+ * is unregistered from shared memory so the other processor doesn't
+ * successfully send an event our way immediately after unflagging this
+ * event. */
+ event_entry = EVENTENTRY(obj->self_event_chart, obj->event_entry_size,
+ event_id);
+ event_entry->flag = NOTIFYDUCATIDRIVER_DOWN;
+
+#if 0
+ /* Write back both the flag and the reg mask */
+ if (obj->cache_enabled) {
+ /* Writeback event entry */
+ Cache_wbInv((void *) event_entry,
+ sizeof(struct notify_ducatidrv_event_entry),
+ Cache_Type_ALL, true);
+ /* Writeback eventRegMask */
+ Cache_wbInv((void *) obj->self_proc_ctrl,
+ sizeof(struct notify_ducatidrv_proc_ctrl),
+ Cache_Type_ALL, true);
+ }
+#endif
+
+ /* Re-arrange eventIds in the Event Registration Chart so there is
+ * no gap caused by the removal of this eventId
+ *
+ * There is no need to make this atomic since Notify_exec cannot
+ * preempt: the event has already been disabled in shared memory
+ * (see above) */
+	for (i = 0; i < obj->num_events; i++) {
+ /* Find the correct slot in the registration array. */
+ if (event_id == obj->reg_chart[i]) {
+ obj->reg_chart[i] = (u32) -1;
+			for (j = (i + 1); (j != obj->num_events) && \
+				(obj->reg_chart[j] != (u32)-1); j++)
+ obj->reg_chart[j - 1] = obj->reg_chart[j];
+
+			/* Invalidate the slot vacated by the shift so a
+			 * stale copy of the last entry is not left behind. */
+			obj->reg_chart[j - 1] = (u32)-1;
+
+ break;
+ }
+ }
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_ducatidrv_unregister_event failed! "
+ "status = 0x%x", status);
+ }
+ return status;
+}
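+
+/* For illustration: continuing the example above, unregistering event 7
+ * from { 3, 7, 9, -1 } invalidates its slot, slides 9 one slot to the
+ * left and clears the vacated slot, giving { 3, 9, -1, -1 } with no gap
+ * left in the chart. */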
+
+/* Send a notification event to the registered users for this
+ * notification on the specified processor. */
+int notify_ducatidrv_send_event(struct notify_driver_object *handle,
+ u32 event_id, u32 payload, bool wait_clear)
+{
+ int status = NOTIFY_S_SUCCESS;
+ struct notify_ducatidrv_object *obj;
+ VOLATILE struct notify_ducatidrv_event_entry *event_entry;
+ int max_poll_count;
+ int i = 0;
+ mbox_msg_t msg;
+
+ if (WARN_ON(unlikely(handle == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->is_init != \
+ NOTIFY_DRIVERINITSTATUS_DONE))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->notify_handle == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->notify_handle->driver_handle == NULL))) {
+ status = NOTIFY_E_DRIVERNOTREGISTERED;
+ goto exit;
+ }
+
+ obj = (struct notify_ducatidrv_object *)
+ handle->notify_handle->driver_handle;
+ if (WARN_ON(unlikely(obj->reg_chart == NULL))) {
+ status = NOTIFY_E_FAIL;
+ goto exit;
+ }
+
+ dsb();
+ event_entry = EVENTENTRY(obj->other_event_chart, obj->event_entry_size,
+ event_id);
+#if 0
+ /* Invalidate cache for the other processor's procCtrl. */
+ if (obj->cache_enabled) {
+ Cache_wbInv((void *) obj->other_proc_ctrl,
+ sizeof(struct notify_ducatidrv_proc_ctrl),
+ Cache_Type_ALL, true);
+ }
+#endif
+ max_poll_count = notify_state.cfg.send_event_poll_count;
+
+ /* Check whether driver on other processor is initialized */
+ if (obj->other_proc_ctrl->recv_init_status != \
+ NOTIFYDUCATIDRIVER_INIT_STAMP) {
+ /* This may be used for polling till other-side driver is ready,
+ * so do not set failure reason. */
+ status = NOTIFY_E_NOTINITIALIZED;
+ goto exit;
+ }
+ /* Check if other side has registered to receive this event. */
+ if (!test_bit(event_id, (unsigned long *)
+ &obj->other_proc_ctrl->event_reg_mask)) {
+ status = NOTIFY_E_EVTNOTREGISTERED;
+ /* This may be used for polling till other-side is ready, so
+ * do not set failure reason. */
+ goto exit;
+ }
+ if (!test_bit(event_id, (unsigned long *)
+ &obj->other_proc_ctrl->event_enable_mask)) {
+ status = NOTIFY_E_EVTDISABLED;
+ /* This may be used for polling till other-side is ready, so
+ * do not set failure reason. */
+ goto exit;
+ }
+#if 0
+ if (obj->cache_enabled) {
+ Cache_inv((void *)event_entry,
+ sizeof(struct notify_ducatidrv_event_entry),
+ Cache_Type_ALL, TRUE);
+ }
+#endif
+ dsb();
+ status = mutex_lock_interruptible(
+ notify_ducatidriver_state.gate_handle);
+ if (status)
+ goto exit;
+
+ if (wait_clear == true) {
+		/* Wait for completion of the previous event from the other
+		 * side. */
+ while ((event_entry->flag != NOTIFYDUCATIDRIVER_DOWN) && \
+ (status >= 0)) {
+ /* Leave critical section protection. Create a window
+ * of opportunity for other interrupts to be handled.*/
+ mutex_unlock(notify_ducatidriver_state.gate_handle);
+ i++;
+ if ((max_poll_count != (u32)-1) && \
+ (i == max_poll_count)) {
+ status = NOTIFY_E_TIMEOUT;
+ break;
+ }
+
+#if 0
+ if (obj->cache_enabled) {
+ Cache_inv((void *)event_entry,
+ sizeof(struct notify_ducatidrv_event_entry),
+ Cache_Type_ALL, TRUE);
+ }
+#endif
+ dsb();
+
+ /* Enter critical section protection. */
+ status = mutex_lock_interruptible(
+ notify_ducatidriver_state.gate_handle);
+ }
+ }
+
+ if (status >= 0) {
+ /* Set the event bit field and payload. */
+ event_entry->payload = payload;
+ event_entry->flag = NOTIFYDUCATIDRIVER_UP;
+
+#if 0
+ if (obj->cache_enabled) {
+ Cache_inv((void *)event_entry,
+ sizeof(struct notify_ducatidrv_event_entry),
+ Cache_Type_ALL, TRUE);
+ }
+#endif
+ dsb();
+
+ /* Send an interrupt with the event information to the
+ * remote processor */
+ msg = ((obj->remote_proc_id << 16) | event_id);
+ status = omap_mbox_msg_send(ducati_mbox, msg);
+
+ /* Leave critical section protection. */
+ mutex_unlock(notify_ducatidriver_state.gate_handle);
+ }
+
+exit:
+ if (status < 0) {
+		printk(KERN_ERR "notify_ducatidrv_send_event failed! "
+ "status = 0x%x", status);
+ }
+ return status;
+}
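+
+/* For illustration: the mailbox message packs the destination as
+ * (remote_proc_id << 16) | event_id, so event 5 sent to processor 2 goes
+ * out as msg = 0x00020005. */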
+
+/* Disable all events for this Notify driver.*/
+int notify_ducatidrv_disable(struct notify_driver_object *handle)
+{
+ int status = NOTIFY_S_SUCCESS;
+ struct notify_ducatidrv_object *obj;
+
+ /* All the below parameter checking is unnecessary, but added to
+ * make sure the driver object is initialized properly */
+ if (WARN_ON(unlikely(handle == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->is_init != \
+ NOTIFY_DRIVERINITSTATUS_DONE))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->notify_handle == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->notify_handle->driver_handle == NULL))) {
+ status = NOTIFY_E_DRIVERNOTREGISTERED;
+ goto exit;
+ }
+
+ obj = (struct notify_ducatidrv_object *)
+ handle->notify_handle->driver_handle;
+ if (WARN_ON(unlikely(obj->reg_chart == NULL))) {
+ status = NOTIFY_E_FAIL;
+ goto exit;
+ }
+
+ /* Disable the mailbox interrupt associated with ducati mailbox */
+ omap_mbox_disable_irq(ducati_mbox, IRQ_RX);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_ducatidrv_disable failed! "
+ "status = 0x%x", status);
+ }
+ /*No flags to be returned. */
+ return 0;
+}
+
+/* Restore the notify_ducatidrv to the state before the last disable was
+ * called. */
+void notify_ducatidrv_enable(struct notify_driver_object *handle)
+{
+ int status = NOTIFY_S_SUCCESS;
+ struct notify_ducatidrv_object *obj;
+
+ /* All the below parameter checking is unnecessary, but added to
+ * make sure the driver object is initialized properly */
+ if (WARN_ON(unlikely(handle == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->is_init != \
+ NOTIFY_DRIVERINITSTATUS_DONE))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->notify_handle == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->notify_handle->driver_handle == NULL))) {
+ status = NOTIFY_E_DRIVERNOTREGISTERED;
+ goto exit;
+ }
+
+ obj = (struct notify_ducatidrv_object *)
+ handle->notify_handle->driver_handle;
+ if (WARN_ON(unlikely(obj->reg_chart == NULL))) {
+ status = NOTIFY_E_FAIL;
+ goto exit;
+ }
+
+	/* Enable the receive interrupt for Ducati */
+ omap_mbox_enable_irq(ducati_mbox, IRQ_RX);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_ducatidrv_enable failed! "
+ "status = 0x%x", status);
+ }
+ return;
+}
+
+/* Disable a specific event for this Notify ducati driver */
+void notify_ducatidrv_disable_event(struct notify_driver_object *handle,
+ u32 event_id)
+{
+ int status = NOTIFY_S_SUCCESS;
+ struct notify_ducatidrv_object *obj;
+ VOLATILE struct notify_ducatidrv_event_entry *event_entry;
+
+ if (WARN_ON(unlikely(handle == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->is_init != \
+ NOTIFY_DRIVERINITSTATUS_DONE))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->notify_handle == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->notify_handle->driver_handle == NULL))) {
+ status = NOTIFY_E_DRIVERNOTREGISTERED;
+ goto exit;
+ }
+
+ obj = (struct notify_ducatidrv_object *)
+ handle->notify_handle->driver_handle;
+	if (event_id >= obj->num_events) {
+ status = NOTIFY_E_FAIL;
+ goto exit;
+ }
+
+ /* Enter critical section protection. */
+ status = mutex_lock_interruptible(
+ notify_ducatidriver_state.gate_handle);
+ if (status)
+ goto exit;
+ clear_bit(event_id, (unsigned long *)
+ &(obj->self_proc_ctrl->event_enable_mask));
+ /* Leave critical section protection. */
+ mutex_unlock(notify_ducatidriver_state.gate_handle);
+#if 0
+ if (obj->cache_enabled) {
+ /* Writeback event_enable_mask */
+ Cache_wbInv((void *) obj->self_proc_ctrl,
+ sizeof(struct notify_ducatidrv_proc_ctrl),
+ Cache_Type_ALL, true);
+ }
+#endif
+
+	event_entry = EVENTENTRY(obj->self_event_chart, obj->event_entry_size,
+			event_id);
+#if 0
+ if (obj->cache_enabled) {
+ /* Writeback event entry */
+ Cache_wbInv((void *) event_entry,
+ sizeof(struct notify_ducatidrv_event_entry),
+ Cache_Type_ALL, true);
+ }
+#endif
+
+ /* Disable incoming Notify interrupts. This is done to ensure that the
+ * eventEntry->flag is read atomically with any write back to shared
+ * memory */
+ notify_ducatidrv_disable(handle);
+
+	/* Is the local notify_ducatidrv_disable_event happening between the
+	 * following two notify_ducatidrv_send_event operations on the remote
+	 * processor?
+	 * 1. Writing NOTIFYDUCATIDRIVER_UP to shared memory
+	 * 2. Sending the interrupt across
+	 * If so, we should handle this event so the other core isn't left
+	 * spinning until the event is re-enabled and the next
+	 * notify_ducatidrv_isr executes. This race condition is very rare,
+	 * but we need to account for it: */
+ if (event_entry->flag == NOTIFYDUCATIDRIVER_UP) {
+ /* Acknowledge the event. No need to store the payload. The
+ * other side will not send this event again even though flag is
+ * down, because the event is now disabled. So the payload
+ * within the eventChart will not get overwritten. */
+ event_entry->flag = NOTIFYDUCATIDRIVER_DOWN;
+#if 0
+ /* Write back acknowledgement */
+ if (obj->cache_enabled) {
+ Cache_wbInv(event_entry,
+ sizeof(struct notify_ducatidrv_event_entry),
+ Cache_Type_ALL, TRUE);
+ }
+#endif
+ /* Execute the callback function. This will execute in a Task
+ * or Swi context (not Hwi!) */
+ notify_exec(obj->drv_handle->notify_handle, event_id,
+ event_entry->payload);
+ }
+
+ /* Re-enable incoming Notify interrupts */
+ notify_ducatidrv_enable(handle);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_ducatidrv_disable_event failed! "
+ "status = 0x%x", status);
+ }
+ return;
+}
+
+/* Enable a specific event for this Notify ducati driver */
+void notify_ducatidrv_enable_event(struct notify_driver_object *handle,
+ u32 event_id)
+{
+ int status = 0;
+ struct notify_ducatidrv_object *obj;
+
+ if (WARN_ON(unlikely(handle == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->is_init != \
+ NOTIFY_DRIVERINITSTATUS_DONE))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->notify_handle == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(handle->notify_handle->driver_handle == NULL))) {
+ status = NOTIFY_E_DRIVERNOTREGISTERED;
+ goto exit;
+ }
+
+ obj = (struct notify_ducatidrv_object *)
+ handle->notify_handle->driver_handle;
+	if (event_id >= obj->num_events) {
+ status = NOTIFY_E_FAIL;
+ goto exit;
+ }
+
+ /* Enter critical section protection. */
+ status = mutex_lock_interruptible(
+ notify_ducatidriver_state.gate_handle);
+ if (status)
+ goto exit;
+ set_bit(event_id, (unsigned long *)
+ &(obj->self_proc_ctrl->event_enable_mask));
+ /* Leave critical section protection. */
+ mutex_unlock(notify_ducatidriver_state.gate_handle);
+#if 0
+ if (obj->cache_enabled) {
+ /* Writeback event_enable_mask */
+ Cache_wbInv((void *) obj->self_proc_ctrl,
+ sizeof(struct notify_ducatidrv_proc_ctrl),
+ Cache_Type_ALL, true);
+ }
+#endif
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_ducatidrv_enable_event failed! "
+ "status = 0x%x", status);
+ }
+ return;
+}
+
+/* Get the shared memory requirements for the notify_ducatidrv. */
+uint notify_ducatidrv_shared_mem_req(
+ const struct notify_ducatidrv_params *params)
+{
+ uint mem_req = 0;
+ u16 region_id;
+ uint region_cache_size;
+ uint min_align;
+ s32 status = NOTIFY_S_SUCCESS;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(
+ &(notify_ducatidriver_state.ref_count),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(0),
+ NOTIFYDUCATIDRIVER_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(params == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ /* Determine obj->cache_enabled using params->cache_enabled and
+ * sharedregion cache flag setting, if applicable. */
+ min_align = params->cache_line_size;
+ region_id = sharedregion_get_id((void *) params->shared_addr);
+ if (region_id != SHAREDREGION_INVALIDREGIONID) {
+ region_cache_size = sharedregion_get_cache_line_size(region_id);
+
+ /* Override the user cache line size setting if the region
+ * cache line size is smaller. */
+ if (region_cache_size < min_align)
+ min_align = region_cache_size;
+ }
+
+ /* Determine obj->eventEntrySize which will be used to ROUND_UP
+ * addresses */
+ mem_req = ((ROUND_UP(sizeof(struct notify_ducatidrv_proc_ctrl),
+ min_align)) * 2) + \
+ ((ROUND_UP(sizeof(struct notify_ducatidrv_event_entry),
+ min_align) * 2 * notify_state.cfg.num_events));
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_ducatidrv_shared_mem_req failed!"
+ " status = 0x%x", status);
+ }
+ return mem_req;
+}
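+
+/* For illustration, assuming min_align works out to 128 bytes, both
+ * structures round up to 128 bytes, and num_events is 32:
+ * mem_req = (128 * 2) + (128 * 2 * 32) = 8448 bytes. */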
+
+/* This function implements the interrupt service routine for the interrupt
+ * received from the Ducati processor. */
+static int notify_ducatidrv_isr(void *ntfy_msg)
+{
+ /* Decode the msg to identify the processor that has sent the message */
+ u32 proc_id = (u32)ntfy_msg;
+
+	/* Call the callback registered for the corresponding proc_id */
+ notify_ducatidrv_isr_callback(notify_ducatidriver_state.driver_handles
+ [proc_id][0], ntfy_msg);
+
+ return 0;
+}
+
+static bool notify_ducatidrv_isr_callback(void *ref_data, void *notify_msg)
+{
+ u32 payload = 0;
+ u32 i = 0;
+ VOLATILE struct notify_ducatidrv_event_entry *event_entry;
+ struct notify_ducatidrv_object *obj;
+ u32 event_id;
+
+ obj = (struct notify_ducatidrv_object *) ref_data;
+
+ dsb();
+	/* Loop until no asserted event is found in one complete pass
+	 * through all registered events */
+ do {
+ /* Check if the entry is a valid registered event.*/
+ event_id = obj->reg_chart[i];
+ if (event_id == (u32) -1)
+ break;
+
+ event_entry = EVENTENTRY(obj->self_event_chart,
+ obj->event_entry_size, event_id);
+#if 0
+ if (obj->cache_enabled) {
+ Cache_inv((void *)event_entry,
+ sizeof(struct notify_ducatidrv_event_entry),
+ Cache_Type_ALL, TRUE);
+ }
+#endif
+ dsb();
+
+ /* Determine the current high priority event.*/
+ /* Check if the event is set and enabled.*/
+ if (event_entry->flag == NOTIFYDUCATIDRIVER_UP &&
+ test_bit(event_id, (unsigned long *)
+ &obj->self_proc_ctrl->event_enable_mask)) {
+ payload = event_entry->payload;
+
+ /* Acknowledge the event. */
+ event_entry->flag = NOTIFYDUCATIDRIVER_DOWN;
+
+ /* Write back acknowledgement */
+#if 0
+ if (obj->cache_enabled) {
+ Cache_inv((void *)event_entry,
+ sizeof(struct notify_ducatidrv_event_entry),
+ Cache_Type_ALL, TRUE);
+ }
+#endif
+ dsb();
+
+ /* Execute the callback function */
+ notify_exec(obj->drv_handle->notify_handle, event_id,
+ payload);
+ /* reinitialize the event check counter. */
+ i = 0;
+ } else {
+ /* check for next event. */
+ i++;
+ }
+ } while ((event_id != (u32) -1) && (i < obj->num_events));
+
+ return true;
+}
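+
+/* For illustration: with reg_chart = { 2, 5, 9, -1 }, the scan above
+ * restarts from index 0 after servicing any event, so a newly asserted
+ * event 2 is always serviced before a pending event 5 or 9. The loop
+ * terminates once one complete pass finds no asserted event. */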
diff --git a/drivers/dsp/syslink/omap_notify/drv_notify.c b/drivers/dsp/syslink/omap_notify/drv_notify.c
new file mode 100644
index 000000000000..fcdc4425e837
--- /dev/null
+++ b/drivers/dsp/syslink/omap_notify/drv_notify.c
@@ -0,0 +1,928 @@
+/*
+ * drv_notify.c
+ *
+ * Syslink support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <generated/autoconf.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <asm/pgtable.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+
+
+#include <syslink/platform_mem.h>
+#include <syslink/drv_notify.h>
+#include <syslink/notify_driver.h>
+#include <syslink/notify.h>
+#include <syslink/notify_ioctl.h>
+
+
+/** ============================================================================
+ * Macros and types
+ * ============================================================================
+ */
+/* Maximum number of user processes supported. */
+#define MAX_PROCESSES 32
+
+/* Structure of event callback argument passed to the register function */
+struct notify_drv_event_cbck {
+ struct list_head element; /* List element header */
+ u16 proc_id; /* Processor identifier */
+ u16 line_id; /* line identifier */
+ u32 event_id; /* Event identifier */
+ notify_fn_notify_cbck func; /* Callback function for the event. */
+ void *param; /* User callback argument. */
+ u32 pid; /* Process Identifier for user process. */
+};
+
+/* Keeps the event-related information for one user process. */
+struct notify_drv_event_state {
+ struct list_head buf_list;
+ /* Head of received event list. */
+ u32 pid;
+ /* User process ID. */
+ u32 ref_count;
+	/* Reference count, used when multiple Notify_registerEvent calls are
+	 * made from the same process space (multiple threads/processes). */
+ struct semaphore *semhandle;
+ /* Semaphore for waiting on event. */
+ struct semaphore *tersemhandle;
+ /* Termination synchronization semaphore. */
+};
+
+/* NotifyDrv module state object */
+struct notify_drv_module_object {
+ bool is_setup;
+ /* Indicates whether the module has been already setup */
+	u32 open_ref_count;
+ /* Open reference count. */
+ struct mutex *gate_handle;
+ /* Handle of gate to be used for local thread safety */
+ struct list_head event_cbck_list;
+	/* List containing callback arguments for all handlers registered from
+	 * user mode. */
+	struct list_head single_event_cbck_list;
+	/* List containing callback arguments for all handlers registered from
+	 * user mode via 'single' registrations. */
+ struct notify_drv_event_state event_state[MAX_PROCESSES];
+ /* List for all user processes registered. */
+};
+
+struct notify_drv_module_object notifydrv_state = {
+ .is_setup = false,
+ .open_ref_count = 0,
+ .gate_handle = NULL
+ /*.event_cbck_list = NULL,
+ .single_event_cbck_list = NULL*/
+};
+
+
+/* Attach a process to notify user support framework. */
+static int notify_drv_attach(u32 pid);
+
+/* Detach a process from notify user support framework. */
+static int notify_drv_detach(u32 pid);
+
+/* This function implements the callback registered with IPS. Here to pass
+ * event no. back to user function (so that it can do another level of
+ * demultiplexing of callbacks) */
+static void _notify_drv_callback(u16 proc_id, u16 line_id, u32 event_id,
+ uint *arg, u32 payload);
+
+/* This function adds data to a registered process. */
+static int _notify_drv_add_buf_by_pid(u16 proc_id, u16 line_id, u32 pid,
+ u32 event_id, u32 data, notify_fn_notify_cbck cb_fxn,
+ void *param);
+
+
+
+/*
+ * read data from the driver
+ */
+int notify_drv_read(struct file *filp, char *dst, size_t size,
+ loff_t *offset)
+{
+
+ bool flag = false;
+ struct notify_drv_event_packet *u_buf = NULL;
+ int ret_val = 0;
+ u32 i;
+ struct list_head *elem;
+ struct notify_drv_event_packet t_buf;
+
+ if (WARN_ON(notifydrv_state.is_setup == false)) {
+ ret_val = -EFAULT;
+ goto func_end;
+ }
+
+	ret_val = copy_from_user((void *)&t_buf,
+				(void *)dst,
+				sizeof(struct notify_drv_event_packet));
+	if (WARN_ON(ret_val != 0)) {
+		ret_val = -EFAULT;
+		goto func_end;
+	}
+
+ for (i = 0; i < MAX_PROCESSES; i++) {
+ if (notifydrv_state.event_state[i].pid == t_buf.pid) {
+ flag = true;
+ break;
+ }
+ }
+ if (flag == false) {
+ ret_val = -EFAULT;
+ goto func_end;
+ }
+
+ /* Wait for the event */
+ ret_val = down_interruptible(notifydrv_state.event_state[i].semhandle);
+ if (ret_val < 0) {
+ ret_val = -ERESTARTSYS;
+ goto func_end;
+ }
+ WARN_ON(mutex_lock_interruptible(notifydrv_state.gate_handle));
+ elem = ((struct list_head *)
+ &(notifydrv_state.event_state[i].buf_list))->next;
+ u_buf = container_of(elem, struct notify_drv_event_packet, element);
+ list_del(elem);
+ mutex_unlock(notifydrv_state.gate_handle);
+ if (u_buf == NULL) {
+ ret_val = -EFAULT;
+ goto func_end;
+ }
+	ret_val = copy_to_user((void *)dst, u_buf,
+			sizeof(struct notify_drv_event_packet));
+	if (WARN_ON(ret_val != 0))
+		ret_val = -EFAULT;
+	else
+		ret_val = sizeof(struct notify_drv_event_packet);
+
+ if (u_buf->is_exit == true)
+ up(notifydrv_state.event_state[i].tersemhandle);
+
+ kfree(u_buf);
+ u_buf = NULL;
+
+func_end:
+ return ret_val;
+}
+
+int notify_drv_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
+
+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ return -EAGAIN;
+ return 0;
+}
+
+/* ioctl function of the Linux Notify driver. */
+int notify_drv_ioctl(struct inode *inode, struct file *filp, u32 cmd,
+ unsigned long args)
+{
+ int status = NOTIFY_S_SUCCESS;
+ int os_status = 0;
+ unsigned long size;
+ struct notify_cmd_args *cmd_args = (struct notify_cmd_args *)args;
+ struct notify_cmd_args common_args;
+
+ switch (cmd) {
+ case CMD_NOTIFY_GETCONFIG:
+ {
+ struct notify_cmd_args_get_config *src_args =
+ (struct notify_cmd_args_get_config *)args;
+ struct notify_config cfg;
+
+ notify_get_config(&cfg);
+ size = copy_to_user((void *) (src_args->cfg),
+ (const void *) &cfg, sizeof(struct notify_config));
+ if (WARN_ON(size != 0))
+ os_status = -EFAULT;
+ }
+ break;
+
+ case CMD_NOTIFY_SETUP:
+ {
+ struct notify_cmd_args_setup *src_args =
+ (struct notify_cmd_args_setup *) args;
+ struct notify_config cfg;
+
+ size = copy_from_user((void *) &cfg,
+ (const void *) (src_args->cfg),
+ sizeof(struct notify_config));
+ if (WARN_ON(size != 0)) {
+ os_status = -EFAULT;
+ goto func_end;
+ }
+ status = notify_setup(&cfg);
+ }
+ break;
+
+ case CMD_NOTIFY_DESTROY:
+ {
+		/* copy_from_user is not needed for notify_destroy, since no
+		 * user-side arguments are used.
+		 */
+ status = notify_destroy();
+ }
+ break;
+
+ case CMD_NOTIFY_REGISTEREVENTSINGLE:
+ {
+ struct notify_cmd_args_register_event src_args;
+ struct notify_drv_event_cbck *cbck = NULL;
+
+ /* Copy the full args from user-side. */
+ size = copy_from_user((void *) &src_args,
+ (const void *) (args),
+ sizeof(struct notify_cmd_args_register_event));
+ if (WARN_ON(size != 0)) {
+ os_status = -EFAULT;
+ goto func_end;
+ }
+
+		cbck = kmalloc(sizeof(struct notify_drv_event_cbck),
+						GFP_ATOMIC);
+		if (WARN_ON(cbck == NULL)) {
+			status = NOTIFY_E_MEMORY;
+			goto func_end;
+		}
+ cbck->proc_id = src_args.proc_id;
+ cbck->line_id = src_args.line_id;
+ cbck->event_id = src_args.event_id;
+ cbck->pid = src_args.pid;
+ cbck->func = src_args.fn_notify_cbck;
+ cbck->param = src_args.cbck_arg;
+ status = notify_register_event_single(src_args.proc_id,
+ src_args.line_id, src_args.event_id,
+ _notify_drv_callback, (void *)cbck);
+ if (status < 0) {
+ /* This does not impact return status of this function,
+ * so retval comment is not used. */
+ kfree(cbck);
+ } else {
+ WARN_ON(mutex_lock_interruptible
+ (notifydrv_state.gate_handle));
+ INIT_LIST_HEAD((struct list_head *)&(cbck->element));
+ list_add_tail(&(cbck->element),
+ &(notifydrv_state.single_event_cbck_list));
+ mutex_unlock(notifydrv_state.gate_handle);
+ }
+ }
+ break;
+
+ case CMD_NOTIFY_REGISTEREVENT:
+ {
+ struct notify_cmd_args_register_event src_args;
+ struct notify_drv_event_cbck *cbck = NULL;
+
+ /* Copy the full args from user-side. */
+ size = copy_from_user((void *) &src_args,
+ (const void *) (args),
+ sizeof(struct notify_cmd_args_register_event));
+ if (WARN_ON(size != 0)) {
+ os_status = -EFAULT;
+ goto func_end;
+ }
+
+		cbck = kmalloc(sizeof(struct notify_drv_event_cbck),
+						GFP_ATOMIC);
+		if (WARN_ON(cbck == NULL)) {
+			status = NOTIFY_E_MEMORY;
+			goto func_end;
+		}
+ cbck->proc_id = src_args.proc_id;
+ cbck->line_id = src_args.line_id;
+ cbck->event_id = src_args.event_id;
+ cbck->func = src_args.fn_notify_cbck;
+ cbck->param = src_args.cbck_arg;
+ cbck->pid = src_args.pid;
+ status = notify_register_event(src_args.proc_id,
+ src_args.line_id, src_args.event_id,
+ _notify_drv_callback, (void *)cbck);
+ if (status < 0) {
+ /* This does not impact return status of this function,
+ * so retval comment is not used. */
+ kfree(cbck);
+ } else {
+ WARN_ON(mutex_lock_interruptible
+ (notifydrv_state.gate_handle));
+ INIT_LIST_HEAD((struct list_head *)&(cbck->element));
+ list_add_tail(&(cbck->element),
+ &(notifydrv_state.event_cbck_list));
+ mutex_unlock(notifydrv_state.gate_handle);
+ }
+ }
+ break;
+
+ case CMD_NOTIFY_UNREGISTEREVENTSINGLE:
+ {
+ bool found = false;
+ u32 pid;
+ struct notify_drv_event_cbck *cbck = NULL;
+ struct list_head *entry = NULL;
+ struct notify_cmd_args_unregister_event src_args;
+
+ /* Copy the full args from user-side. */
+ size = copy_from_user((void *)&src_args, (const void *)(args),
+ sizeof(struct notify_cmd_args_unregister_event));
+ if (WARN_ON(size != 0)) {
+ os_status = -EFAULT;
+ goto func_end;
+ }
+
+ WARN_ON(mutex_lock_interruptible(notifydrv_state.gate_handle));
+ pid = src_args.pid;
+ list_for_each(entry, (struct list_head *)
+ &(notifydrv_state.single_event_cbck_list)) {
+ cbck = (struct notify_drv_event_cbck *)(entry);
+ if ((cbck->proc_id == src_args.proc_id) &&
+ (cbck->line_id == src_args.line_id) &&
+ (cbck->event_id == src_args.event_id) &&
+ (cbck->pid == pid)) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(notifydrv_state.gate_handle);
+ if (found == false) {
+ status = NOTIFY_E_NOTFOUND;
+ goto func_end;
+ }
+ status = notify_unregister_event_single(src_args.proc_id,
+ src_args.line_id, src_args.event_id);
+ /* This check is needed at run-time also to propagate the
+ * status to user-side. This must not be optimized out. */
+ if (status >= 0) {
+ WARN_ON(mutex_lock_interruptible
+ (notifydrv_state.gate_handle));
+ list_del((struct list_head *)cbck);
+ mutex_unlock(notifydrv_state.gate_handle);
+ kfree(cbck);
+ }
+ }
+ break;
+
+ case CMD_NOTIFY_UNREGISTEREVENT:
+ {
+ bool found = false;
+ u32 pid;
+ struct notify_drv_event_cbck *cbck = NULL;
+ struct list_head *entry = NULL;
+ struct notify_cmd_args_unregister_event src_args;
+
+ /* Copy the full args from user-side. */
+ size = copy_from_user((void *)&src_args, (const void *)(args),
+ sizeof(struct notify_cmd_args_unregister_event));
+ if (WARN_ON(size != 0)) {
+ os_status = -EFAULT;
+ goto func_end;
+ }
+
+ WARN_ON(mutex_lock_interruptible(notifydrv_state.gate_handle));
+ pid = src_args.pid;
+ list_for_each(entry, (struct list_head *)
+ &(notifydrv_state.event_cbck_list)) {
+ cbck = (struct notify_drv_event_cbck *)(entry);
+ if ((cbck->func == src_args.fn_notify_cbck) &&
+ (cbck->param == src_args.cbck_arg) &&
+ (cbck->proc_id == src_args.proc_id) &&
+ (cbck->line_id == src_args.line_id) &&
+ (cbck->event_id == src_args.event_id) &&
+ (cbck->pid == pid)) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(notifydrv_state.gate_handle);
+ if (found == false) {
+ status = NOTIFY_E_NOTFOUND;
+ goto func_end;
+ }
+ status = notify_unregister_event(src_args.proc_id,
+ src_args.line_id, src_args.event_id,
+ _notify_drv_callback, (void *) cbck);
+ /* This check is needed at run-time also to propagate the
+ * status to user-side. This must not be optimized out. */
+ if (status >= 0) {
+ WARN_ON(mutex_lock_interruptible
+ (notifydrv_state.gate_handle));
+ list_del((struct list_head *)cbck);
+ mutex_unlock(notifydrv_state.gate_handle);
+ kfree(cbck);
+ }
+ }
+ break;
+
+ case CMD_NOTIFY_SENDEVENT:
+ {
+ struct notify_cmd_args_send_event src_args;
+
+ /* Copy the full args from user-side. */
+ size = copy_from_user((void *) &src_args,
+ (const void *) (args),
+ sizeof(struct notify_cmd_args_send_event));
+ if (WARN_ON(size != 0)) {
+ os_status = -EFAULT;
+ goto func_end;
+ }
+ status = notify_send_event(src_args.proc_id, src_args.line_id,
+ src_args.event_id, src_args.payload,
+ src_args.wait_clear);
+ }
+ break;
+
+ case CMD_NOTIFY_DISABLE:
+ {
+ struct notify_cmd_args_disable src_args;
+
+ /* Copy the full args from user-side. */
+ size = copy_from_user((void *) &src_args,
+ (const void *) (args),
+ sizeof(struct notify_cmd_args_disable));
+ if (WARN_ON(size != 0)) {
+ os_status = -EFAULT;
+ goto func_end;
+ }
+ src_args.flags = notify_disable(src_args.proc_id,
+ src_args.line_id);
+
+ /* Copy the full args to user-side */
+ size = copy_to_user((void *) (args), (const void *) &src_args,
+ sizeof(struct notify_cmd_args_disable));
+ /* This check is needed at run-time also since it depends on
+ * run environment. It must not be optimized out. */
+ if (WARN_ON(size != 0))
+ os_status = -EFAULT;
+ }
+ break;
+
+ case CMD_NOTIFY_RESTORE:
+ {
+ struct notify_cmd_args_restore src_args;
+
+ /* Copy the full args from user-side. */
+ size = copy_from_user((void *) &src_args,
+ (const void *)(args),
+ sizeof(struct notify_cmd_args_restore));
+ if (WARN_ON(size != 0)) {
+ os_status = -EFAULT;
+ goto func_end;
+ }
+ notify_restore(src_args.proc_id, src_args.line_id,
+ src_args.key);
+ }
+ break;
+
+ case CMD_NOTIFY_DISABLEEVENT:
+ {
+ struct notify_cmd_args_disable_event src_args;
+
+ /* Copy the full args from user-side. */
+ size = copy_from_user((void *) &src_args,
+ (const void *)(args),
+ sizeof(struct notify_cmd_args_disable_event));
+ if (WARN_ON(size != 0)) {
+ os_status = -EFAULT;
+ goto func_end;
+ }
+ notify_disable_event(src_args.proc_id, src_args.line_id,
+ src_args.event_id);
+ }
+ break;
+
+ case CMD_NOTIFY_ENABLEEVENT:
+ {
+ struct notify_cmd_args_enable_event src_args;
+
+ /* Copy the full args from user-side. */
+ size = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct notify_cmd_args_enable_event));
+ if (WARN_ON(size != 0)) {
+ os_status = -EFAULT;
+ goto func_end;
+ }
+ notify_enable_event(src_args.proc_id, src_args.line_id,
+ src_args.event_id);
+ }
+ break;
+
+ case CMD_NOTIFY_THREADATTACH:
+	{
+		u32 pid;
+
+		/* args is a user-space pointer; copy, don't dereference. */
+		if (copy_from_user((void *)&pid, (const void *)args,
+					sizeof(u32))) {
+			os_status = -EFAULT;
+			goto func_end;
+		}
+		status = notify_drv_attach(pid);
+	}
+ break;
+
+ case CMD_NOTIFY_THREADDETACH:
+	{
+		u32 pid;
+
+		/* args is a user-space pointer; copy, don't dereference. */
+		if (copy_from_user((void *)&pid, (const void *)args,
+					sizeof(u32))) {
+			os_status = -EFAULT;
+			goto func_end;
+		}
+		status = notify_drv_detach(pid);
+	}
+ break;
+
+ case CMD_NOTIFY_ATTACH:
+ {
+ struct notify_cmd_args_attach src_args;
+ void *knl_shared_addr;
+
+ size = copy_from_user((void *) &src_args, (const void *)(args),
+ sizeof(struct notify_cmd_args_attach));
+ if (size != 0) {
+ os_status = -EFAULT;
+ goto func_end;
+ }
+
+ /* knl_shared_addr = Memory_translate(src_args.shared_addr,
+ Memory_XltFlags_Phys2Virt); */
+ knl_shared_addr = platform_mem_translate(
+ (void *)src_args.shared_addr,
+ PLATFORM_MEM_XLT_FLAGS_PHYS2VIRT);
+ status = notify_attach(src_args.proc_id, knl_shared_addr);
+ }
+ break;
+
+ case CMD_NOTIFY_DETACH:
+ {
+ struct notify_cmd_args_detach src_args;
+
+ size = copy_from_user((void *) &src_args,
+ (const void *)(args),
+ sizeof(struct notify_cmd_args_detach));
+ if (size != 0) {
+ os_status = -EFAULT;
+ goto func_end;
+ }
+
+ status = notify_detach(src_args.proc_id);
+ }
+ break;
+
+ case CMD_NOTIFY_SHAREDMEMREQ:
+ {
+ struct notify_cmd_args_shared_mem_req src_args;
+ void *knl_shared_addr;
+
+ size = copy_from_user((void *) &src_args,
+ (const void *)(args),
+ sizeof(struct notify_cmd_args_shared_mem_req));
+ if (size != 0) {
+ os_status = -EFAULT;
+ goto func_end;
+ }
+
+ /* knl_shared_addr = Memory_translate(src_args.shared_addr,
+ Memory_XltFlags_Phys2Virt); */
+ knl_shared_addr = platform_mem_translate(
+ (void *)src_args.shared_addr,
+ PLATFORM_MEM_XLT_FLAGS_PHYS2VIRT);
+ status = notify_shared_mem_req(src_args.proc_id,
+ knl_shared_addr);
+ }
+ break;
+
+ case CMD_NOTIFY_ISREGISTERED:
+ {
+ struct notify_cmd_args_is_registered src_args;
+
+ size = copy_from_user((void *) &src_args,
+ (const void *)(args),
+ sizeof(struct notify_cmd_args_is_registered));
+ if (size != 0) {
+ os_status = -EFAULT;
+ goto func_end;
+ }
+
+ src_args.is_registered = notify_is_registered(src_args.proc_id,
+ src_args.line_id);
+ size = copy_to_user((void *) (args),
+ (const void *)&src_args,
+ sizeof(struct notify_cmd_args_is_registered));
+ if (size != 0) {
+ os_status = -EFAULT;
+ goto func_end;
+ }
+ }
+ break;
+
+ default:
+ {
+		/* This does not impact the return status of this function, so
+		 * the retval comment is not used. */
+		status = NOTIFY_E_INVALIDARG;
+		printk(KERN_ERR "notify_drv_ioctl: invalid command\n");
+ }
+ break;
+ }
+
+func_end:
+ /* Set the status and copy the common args to user-side. */
+ common_args.api_status = status;
+ size = copy_to_user((void *) cmd_args, (const void *) &common_args,
+ sizeof(struct notify_cmd_args));
+	if (size != 0)
+ os_status = -EFAULT;
+ return os_status;
+}
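+
+/* For illustration, a user-space client would drive this handler roughly
+ * as follows (a minimal sketch; the device node name is an assumption, it
+ * is not defined in this file, and the exact argument layout is defined
+ * in notify_ioctl.h):
+ *
+ *	struct notify_cmd_args_send_event args = {
+ *		.proc_id = 2, .line_id = 0, .event_id = 5,
+ *		.payload = 0x1234, .wait_clear = false,
+ *	};
+ *	int fd = open("/dev/syslink-notify", O_RDWR);
+ *
+ *	ioctl(fd, CMD_NOTIFY_SENDEVENT, &args);
+ */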
+
+/* This function implements the callback registered with IPS. Here to pass
+ * the event number back to the user function (so that it can do another
+ * level of demultiplexing of callbacks). */
+static void _notify_drv_callback(u16 proc_id, u16 line_id, u32 event_id,
+ uint *arg, u32 payload)
+{
+ struct notify_drv_event_cbck *cbck;
+ int status = 0;
+
+ if (WARN_ON(notifydrv_state.is_setup == false)) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ if (WARN_ON(arg == NULL)) {
+ status = -EINVAL;
+ goto func_end;
+ }
+
+ cbck = (struct notify_drv_event_cbck *)arg;
+ status = _notify_drv_add_buf_by_pid(proc_id, line_id, cbck->pid,
+ event_id, payload, cbck->func, cbck->param);
+
+func_end:
+ if (status < 0) {
+ printk(KERN_ERR "_notify_drv_callback failed! status = 0x%x",
+ status);
+ }
+ return;
+}
+
+/* This function adds data to a registered process. */
+static int _notify_drv_add_buf_by_pid(u16 proc_id, u16 line_id, u32 pid,
+ u32 event_id, u32 data,
+ notify_fn_notify_cbck cb_fxn,
+ void *param)
+{
+ s32 status = 0;
+ bool flag = false;
+ bool is_exit = false;
+ struct notify_drv_event_packet *u_buf = NULL;
+ u32 i;
+
+ WARN_ON(mutex_lock_interruptible(notifydrv_state.gate_handle));
+ for (i = 0; (i < MAX_PROCESSES) && (flag != true); i++) {
+ if (notifydrv_state.event_state[i].pid == pid) {
+ flag = true;
+ break;
+ }
+ }
+ mutex_unlock(notifydrv_state.gate_handle);
+
+ if (WARN_ON(flag == false)) {
+ status = -EFAULT;
+ goto func_end;
+ }
+
+ u_buf = kmalloc(sizeof(struct notify_drv_event_packet), GFP_ATOMIC);
+ if (u_buf == NULL) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+ INIT_LIST_HEAD((struct list_head *)&u_buf->element);
+ u_buf->proc_id = proc_id;
+ u_buf->line_id = line_id;
+ u_buf->data = data;
+ u_buf->event_id = event_id;
+ u_buf->func = cb_fxn;
+ u_buf->param = param;
+ u_buf->is_exit = false;
+ if (u_buf->event_id == (u32) -1) {
+ u_buf->is_exit = true;
+ is_exit = true;
+ }
+ WARN_ON(mutex_lock_interruptible(notifydrv_state.gate_handle));
+ list_add_tail((struct list_head *)&(u_buf->element),
+ (struct list_head *)&(notifydrv_state.event_state[i].buf_list));
+ mutex_unlock(notifydrv_state.gate_handle);
+ up(notifydrv_state.event_state[i].semhandle);
+
+ /* Termination packet */
+ if (is_exit == true) {
+ if (down_interruptible(
+ notifydrv_state.event_state[i].tersemhandle))
+ status = NOTIFY_E_OSFAILURE;
+ }
+
+func_end:
+ if (status < 0) {
+ printk(KERN_ERR "_notify_drv_add_buf_by_pid failed! "
+ "status = 0x%x", status);
+ }
+ return status;
+}
+
+/* Module setup function.*/
+void _notify_drv_setup(void)
+{
+ int i;
+
+ INIT_LIST_HEAD((struct list_head *)&(notifydrv_state.event_cbck_list));
+ INIT_LIST_HEAD(
+ (struct list_head *)&(notifydrv_state.single_event_cbck_list));
+ notifydrv_state.gate_handle = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ mutex_init(notifydrv_state.gate_handle);
+ for (i = 0; i < MAX_PROCESSES; i++) {
+ notifydrv_state.event_state[i].pid = -1;
+ notifydrv_state.event_state[i].ref_count = 0;
+ INIT_LIST_HEAD((struct list_head *)
+ &(notifydrv_state.event_state[i].buf_list));
+ }
+ notifydrv_state.is_setup = true;
+}
+
+/* Module destroy function.*/
+void _notify_drv_destroy(void)
+{
+ int i;
+ struct notify_drv_event_packet *packet;
+	struct list_head *entry;
+	struct list_head *next;
+ struct notify_drv_event_cbck *cbck;
+
+ for (i = 0; i < MAX_PROCESSES; i++) {
+ notifydrv_state.event_state[i].pid = -1;
+ notifydrv_state.event_state[i].ref_count = 0;
+		/* Free event packets for any received but unprocessed
+		 * events. Use the safe iterator since entries are freed
+		 * while walking the list. */
+		list_for_each_safe(entry, next, (struct list_head *)
+			&(notifydrv_state.event_state[i].buf_list)) {
+			packet = (struct notify_drv_event_packet *)entry;
+			kfree(packet);
+		}
+ INIT_LIST_HEAD(&notifydrv_state.event_state[i].buf_list);
+ }
+
+ /* Clear any event registrations that were not unregistered. */
+	list_for_each_safe(entry, next, (struct list_head *)
+		&(notifydrv_state.event_cbck_list)) {
+		cbck = (struct notify_drv_event_cbck *)(entry);
+		kfree(cbck);
+	}
+ INIT_LIST_HEAD(&notifydrv_state.event_cbck_list);
+
+ /* Clear any event registrations that were not unregistered from single
+ * list. */
+	list_for_each_safe(entry, next,
+		(struct list_head *)&(notifydrv_state.single_event_cbck_list)) {
+		cbck = (struct notify_drv_event_cbck *)(entry);
+		kfree(cbck);
+	}
+ INIT_LIST_HEAD(&notifydrv_state.single_event_cbck_list);
+
+ mutex_destroy(notifydrv_state.gate_handle);
+ kfree(notifydrv_state.gate_handle);
+ notifydrv_state.is_setup = false;
+ return;
+}
+
+/* Attach a process to notify user support framework. */
+static int notify_drv_attach(u32 pid)
+{
+ bool flag = false;
+ bool is_init = false;
+ u32 i;
+ struct semaphore *sem_handle = NULL;
+ struct semaphore *ter_sem_handle = NULL;
+ int ret_val = 0;
+
+ if (WARN_ON(notifydrv_state.is_setup == false)) {
+ ret_val = NOTIFY_E_FAIL;
+ goto exit;
+ }
+
+ WARN_ON(mutex_lock_interruptible(notifydrv_state.gate_handle));
+ for (i = 0; (i < MAX_PROCESSES); i++) {
+ if (notifydrv_state.event_state[i].pid == pid) {
+ notifydrv_state.event_state[i].ref_count++;
+ is_init = true;
+ break;
+ }
+ }
+ if (is_init == true) {
+ mutex_unlock(notifydrv_state.gate_handle);
+ return 0;
+ }
+
+ sem_handle = kmalloc(sizeof(struct semaphore), GFP_ATOMIC);
+ ter_sem_handle = kmalloc(sizeof(struct semaphore), GFP_ATOMIC);
+ if (sem_handle == NULL || ter_sem_handle == NULL) {
+ ret_val = -ENOMEM;
+ goto sem_fail;
+ }
+ sema_init(sem_handle, 0);
+ /* Create the termination semaphore */
+ sema_init(ter_sem_handle, 0);
+
+ /* Search for an available slot for user process. */
+ for (i = 0; i < MAX_PROCESSES; i++) {
+ if (notifydrv_state.event_state[i].pid == -1) {
+ notifydrv_state.event_state[i].semhandle = \
+ sem_handle;
+ notifydrv_state.event_state[i].tersemhandle = \
+ ter_sem_handle;
+ notifydrv_state.event_state[i].pid = pid;
+ notifydrv_state.event_state[i].ref_count = 1;
+ INIT_LIST_HEAD(&(notifydrv_state.event_state[i].
+ buf_list));
+ flag = true;
+ break;
+ }
+ }
+ mutex_unlock(notifydrv_state.gate_handle);
+
+ if (WARN_ON(flag != true)) {
+ /* Max users have registered. No more clients
+ * can be supported */
+ ret_val = NOTIFY_E_RESOURCE;
+ goto sem_fail;
+ }
+
+ return 0;
+
+sem_fail:
+ kfree(ter_sem_handle);
+ kfree(sem_handle);
+exit:
+ return ret_val;
+}
+
+
+/* Detach a process from notify user support framework. */
+static int notify_drv_detach(u32 pid)
+{
+ s32 status = NOTIFY_S_SUCCESS;
+ bool flag = false;
+ u32 i;
+	struct semaphore *sem_handle = NULL;
+	struct semaphore *ter_sem_handle = NULL;
+
+ if (WARN_ON(notifydrv_state.is_setup == false)) {
+ status = NOTIFY_E_FAIL;
+ goto func_end;
+ }
+
+ /* Send the termination packet to notify thread */
+ status = _notify_drv_add_buf_by_pid(0, 0, pid, (u32)-1, (u32)0, NULL,
+ NULL);
+
+ WARN_ON(mutex_lock_interruptible(notifydrv_state.gate_handle));
+ for (i = 0; i < MAX_PROCESSES; i++) {
+ if (notifydrv_state.event_state[i].pid == pid) {
+ if (notifydrv_state.event_state[i].ref_count == 1) {
+ /* Last client being unregistered for this
+ * process*/
+ notifydrv_state.event_state[i].pid = -1;
+ notifydrv_state.event_state[i].ref_count = 0;
+ sem_handle =
+ notifydrv_state.event_state[i].semhandle;
+ ter_sem_handle =
+ notifydrv_state.event_state[i].tersemhandle;
+ INIT_LIST_HEAD((struct list_head *)
+ &(notifydrv_state.event_state[i].buf_list));
+ notifydrv_state.event_state[i].semhandle =
+ NULL;
+ notifydrv_state.event_state[i].tersemhandle =
+ NULL;
+ flag = true;
+ break;
+			} else {
+				/* Other clients of this process remain
+				 * registered; just drop the reference. */
+				notifydrv_state.event_state[i].ref_count--;
+				flag = true;
+				break;
+			}
+ }
+ }
+ mutex_unlock(notifydrv_state.gate_handle);
+
+	if (flag == false) {
+		/* The specified user process was not found registered with
+		 * the Notify Driver module. */
+		status = NOTIFY_E_NOTFOUND;
+	} else {
+		/* Both handles are NULL (and kfree is a no-op) when only the
+		 * reference count was dropped. */
+		kfree(sem_handle);
+		kfree(ter_sem_handle);
+	}
+
+func_end:
+ return status;
+}
diff --git a/drivers/dsp/syslink/omap_notify/notify.c b/drivers/dsp/syslink/omap_notify/notify.c
new file mode 100644
index 000000000000..c826121b486e
--- /dev/null
+++ b/drivers/dsp/syslink/omap_notify/notify.c
@@ -0,0 +1,1140 @@
+/*
+ * notify.c
+ *
+ * Syslink driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <asm/pgtable.h>
+
+#include <syslink/atomic_linux.h>
+#include <syslink/multiproc.h>
+#include <syslink/notify.h>
+#include <syslink/_notify.h>
+#include <syslink/notifydefs.h>
+#include <syslink/notify_driver.h>
+#include <syslink/notify_setup_proxy.h>
+
+struct notify_event_listener {
+ struct list_head element;
+ struct notify_event_callback callback;
+};
+
+/* Function registered with notify_exec when multiple registrations are present
+ * for the events. */
+static void _notify_exec_many(u16 proc_id, u16 line_id, u32 event_id, uint *arg,
+ u32 payload);
+
+struct notify_module_object notify_state = {
+ .def_cfg.num_events = 32u,
+ .def_cfg.send_event_poll_count = -1u,
+ .def_cfg.num_lines = 1u,
+ .def_cfg.reserved_events = 3u,
+ .gate_handle = NULL,
+ .local_notify_handle = NULL
+};
+
+/* Get the default configuration for the Notify module. */
+void notify_get_config(struct notify_config *cfg)
+{
+ s32 retval = 0;
+
+ if (WARN_ON(unlikely(cfg == NULL))) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true)
+ memcpy(cfg, &notify_state.def_cfg,
+ sizeof(struct notify_config));
+ else
+ memcpy(cfg, &notify_state.cfg, sizeof(struct notify_config));
+
+exit:
+ if (retval < 0) {
+ printk(KERN_ERR "notify_get_config failed! status = 0x%x",
+ retval);
+ }
+ return;
+}
+EXPORT_SYMBOL(notify_get_config);
+
+/* This function sets up the Notify module. This function must be called
+ * before any other instance-level APIs can be invoked. */
+int notify_setup(struct notify_config *cfg)
+{
+ int status = NOTIFY_S_SUCCESS;
+ struct notify_config tmp_cfg;
+
+ atomic_cmpmask_and_set(&notify_state.ref_count,
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(0));
+ if (atomic_inc_return(&notify_state.ref_count)
+ != NOTIFY_MAKE_MAGICSTAMP(1u)) {
+ return NOTIFY_S_ALREADYSETUP;
+ }
+
+ if (cfg == NULL) {
+ notify_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+
+ if (cfg->num_events > NOTIFY_MAXEVENTS) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (cfg->num_lines > NOTIFY_MAX_INTLINES) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (cfg->reserved_events > cfg->num_events) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ notify_state.gate_handle = kmalloc(sizeof(struct mutex), GFP_ATOMIC);
+ if (notify_state.gate_handle == NULL) {
+ status = NOTIFY_E_FAIL;
+ goto exit;
+ }
+	/* The user has not provided any gate handle, so create a default
+	 * handle. */
+ mutex_init(notify_state.gate_handle);
+
+ memcpy(&notify_state.cfg, cfg, sizeof(struct notify_config));
+ notify_state.local_enable_mask = -1u;
+ notify_state.start_complete = false;
+ memset(&notify_state.drivers, 0, (sizeof(struct notify_driver_object) *
+ NOTIFY_MAX_DRIVERS * NOTIFY_MAX_INTLINES));
+
+ /* tbd: Should return Notify_Handle */
+ notify_state.local_notify_handle = notify_create(NULL, multiproc_self(),
+ 0, NULL);
+ if (notify_state.local_notify_handle == NULL) {
+ status = NOTIFY_E_FAIL;
+ goto local_notify_fail;
+ }
+ return 0;
+
+local_notify_fail:
+ kfree(notify_state.gate_handle);
+exit:
+ atomic_set(&notify_state.ref_count, NOTIFY_MAKE_MAGICSTAMP(0));
+ printk(KERN_ERR "notify_setup failed! status = 0x%x", status);
+ return status;
+}
+EXPORT_SYMBOL(notify_setup);
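+
+/* For illustration, a typical setup sequence (a minimal sketch, assuming
+ * 16 does not exceed NOTIFY_MAXEVENTS):
+ *
+ *	struct notify_config cfg;
+ *
+ *	notify_get_config(&cfg);
+ *	cfg.num_events = 16;
+ *	status = notify_setup(&cfg);
+ *	...
+ *	status = notify_destroy();
+ */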
+
+/* Once this function is called, other Notify module APIs,
+ * except for the Notify_getConfig API cannot be called anymore. */
+int notify_destroy(void)
+{
+ int i;
+ int j;
+ int status = NOTIFY_S_SUCCESS;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (!(atomic_dec_return(&notify_state.ref_count)
+ == NOTIFY_MAKE_MAGICSTAMP(0)))
+ return NOTIFY_S_ALREADYSETUP;
+
+ /* Temporarily increment refCount here. */
+ atomic_set(&notify_state.ref_count, NOTIFY_MAKE_MAGICSTAMP(1));
+ if (notify_state.local_notify_handle != NULL)
+ status = notify_delete(&notify_state.local_notify_handle);
+ atomic_set(&notify_state.ref_count, NOTIFY_MAKE_MAGICSTAMP(0));
+
+	/* Check if any Notify driver instances have not been deleted so far.
+	 * If any remain, warn. */
+ for (i = 0; i < NOTIFY_MAX_DRIVERS; i++)
+ for (j = 0; j < NOTIFY_MAX_INTLINES; j++)
+ WARN_ON(notify_state.drivers[i][j].is_init != false);
+
+ kfree(notify_state.gate_handle);
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "notify_destroy failed! status = 0x%x", status);
+ return status;
+}
+EXPORT_SYMBOL(notify_destroy);
+
+/* Function to create an instance of Notify driver */
+struct notify_object *notify_create(void *driver_handle, u16 remote_proc_id,
+ u16 line_id, const struct notify_params *params)
+{
+ int status = NOTIFY_S_SUCCESS;
+ struct notify_object *obj = NULL;
+ uint i;
+
+ /* driver_handle can be NULL for local create */
+ /* params can be NULL */
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(remote_proc_id >= \
+ multiproc_get_num_processors()))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(line_id >= NOTIFY_MAX_INTLINES))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ /* Allocate memory for the Notify object. */
+ obj = kzalloc(sizeof(struct notify_object), GFP_KERNEL);
+ if (obj == NULL) {
+ status = NOTIFY_E_MEMORY;
+ goto exit;
+ }
+
+ obj->remote_proc_id = remote_proc_id;
+ obj->line_id = line_id;
+ obj->nesting = 0;
+
+ for (i = 0; i < notify_state.cfg.num_events; i++)
+ INIT_LIST_HEAD(&obj->event_list[i]);
+
+ /* Used solely for remote driver
+ * (NULL if remote_proc_id == self) */
+ obj->driver_handle = driver_handle;
+ /* Send this handle to the NotifyDriver */
+ status = notify_set_driver_handle(remote_proc_id, line_id, obj);
+ if (status < 0)
+ goto notify_handle_fail;
+
+ /* For local notify */
+ if (driver_handle == NULL)
+ /* Set driver status to indicate that it is done. */
+ notify_state.drivers[multiproc_self()][line_id].is_init =
+ NOTIFY_DRIVERINITSTATUS_DONE;
+ return obj;
+
+notify_handle_fail:
+ notify_set_driver_handle(remote_proc_id, line_id, NULL);
+ kfree(obj);
+ obj = NULL;
+exit:
+ if (status < 0)
+ printk(KERN_ERR "notify_create failed! status = 0x%x", status);
+ return obj;
+}
+
+
+/* Function to delete an instance of Notify driver */
+int notify_delete(struct notify_object **handle_ptr)
+{
+ int status = NOTIFY_S_SUCCESS;
+ struct notify_object *obj;
+ u16 i;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(unlikely((handle_ptr == NULL) || (*handle_ptr == NULL)))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ obj = (struct notify_object *)(*handle_ptr);
+
+ if (obj->remote_proc_id == multiproc_self()) {
+ notify_state.drivers[multiproc_self()][obj->line_id].is_init =
+ NOTIFY_DRIVERINITSTATUS_NOTDONE;
+ }
+ notify_set_driver_handle(obj->remote_proc_id, obj->line_id, NULL);
+ for (i = 0; i < notify_state.cfg.num_events; i++)
+ INIT_LIST_HEAD(&obj->event_list[i]);
+
+ kfree(obj);
+ obj = NULL;
+ *handle_ptr = NULL;
+
+exit:
+ if (status < 0)
+ printk(KERN_ERR "notify_delete failed! status = 0x%x", status);
+ return status;
+}
+
+/* This function registers a callback for a specific event with the
+ * Notify module. */
+int notify_register_event(u16 proc_id, u16 line_id, u32 event_id,
+ notify_fn_notify_cbck notify_callback_fxn, void *cbck_arg)
+{
+ int status = NOTIFY_S_SUCCESS;
+ u32 stripped_event_id = (event_id & NOTIFY_EVENT_MASK);
+ struct notify_driver_object *driver_handle;
+ struct list_head *event_list;
+ struct notify_event_listener *listener;
+ bool list_was_empty;
+ struct notify_object *obj;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(proc_id >= multiproc_get_num_processors()))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(line_id >= NOTIFY_MAX_INTLINES))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(notify_callback_fxn == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely((stripped_event_id >= \
+ notify_state.cfg.num_events)))) {
+ status = NOTIFY_E_EVTNOTREGISTERED;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(!ISRESERVED(event_id,
+ notify_state.cfg.reserved_events)))) {
+ status = NOTIFY_E_EVTRESERVED;
+ goto exit;
+ }
+
+	WARN_ON(mutex_lock_interruptible(notify_state.gate_handle) != 0);
+ driver_handle = notify_get_driver_handle(proc_id, line_id);
+ if (WARN_ON(driver_handle == NULL)) {
+ status = NOTIFY_E_DRIVERNOTREGISTERED;
+ goto exit_unlock_mutex;
+ }
+ if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ obj = (struct notify_object *)driver_handle->notify_handle;
+ if (WARN_ON(obj == NULL)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ listener = kmalloc(sizeof(struct notify_event_listener), GFP_KERNEL);
+ if (listener == NULL) {
+ status = NOTIFY_E_MEMORY;
+ goto exit_unlock_mutex;
+ }
+ listener->callback.fn_notify_cbck = notify_callback_fxn;
+ listener->callback.cbck_arg = cbck_arg;
+
+ event_list = &(obj->event_list[stripped_event_id]);
+ list_was_empty = list_empty(event_list);
+ list_add_tail((struct list_head *) listener, event_list);
+ mutex_unlock(notify_state.gate_handle);
+ if (list_was_empty) {
+ /* Registering this event for the first time. Need to
+ * register the callback function.
+ */
+ status = notify_register_event_single(proc_id, line_id,
+ event_id, _notify_exec_many,
+ (uint *) obj);
+ }
+ goto exit;
+
+exit_unlock_mutex:
+ mutex_unlock(notify_state.gate_handle);
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_register_event failed! "
+ "status = 0x%x", status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(notify_register_event);
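+
+/* For illustration, a kernel client pairs this call with
+ * notify_unregister_event (a minimal sketch, assuming event 5 is outside
+ * the reserved range):
+ *
+ *	status = notify_register_event(remote_proc_id, 0, 5, my_cbck, arg);
+ *	...
+ *	status = notify_unregister_event(remote_proc_id, 0, 5, my_cbck,
+ *			arg);
+ */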
+
+/* This function registers a single callback for a specific event with the
+ * Notify module. */
+int notify_register_event_single(u16 proc_id, u16 line_id, u32 event_id,
+ notify_fn_notify_cbck notify_callback_fxn, void *cbck_arg)
+{
+ int status = NOTIFY_S_SUCCESS;
+ u32 stripped_event_id = (event_id & NOTIFY_EVENT_MASK);
+ struct notify_driver_object *driver_handle;
+ struct notify_object *obj;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(proc_id >= multiproc_get_num_processors()))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(line_id >= NOTIFY_MAX_INTLINES))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(notify_callback_fxn == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely((stripped_event_id >= \
+ notify_state.cfg.num_events)))) {
+ status = NOTIFY_E_EVTNOTREGISTERED;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(!ISRESERVED(event_id,
+ notify_state.cfg.reserved_events)))) {
+ status = NOTIFY_E_EVTRESERVED;
+ goto exit;
+ }
+
+	WARN_ON(mutex_lock_interruptible(notify_state.gate_handle) != 0);
+ driver_handle = notify_get_driver_handle(proc_id, line_id);
+ if (WARN_ON(driver_handle == NULL)) {
+ status = NOTIFY_E_DRIVERNOTREGISTERED;
+ goto exit_unlock_mutex;
+ }
+ if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ obj = (struct notify_object *)driver_handle->notify_handle;
+ if (WARN_ON(obj == NULL)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ if (obj->callbacks[stripped_event_id].fn_notify_cbck != NULL) {
+ status = NOTIFY_E_ALREADYEXISTS;
+ goto exit_unlock_mutex;
+ }
+
+ obj->callbacks[stripped_event_id].fn_notify_cbck = notify_callback_fxn;
+ obj->callbacks[stripped_event_id].cbck_arg = cbck_arg;
+
+ if (proc_id != multiproc_self()) {
+ status = driver_handle->fxn_table.register_event(driver_handle,
+ stripped_event_id);
+ }
+
+exit_unlock_mutex:
+ mutex_unlock(notify_state.gate_handle);
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_register_event_single failed! "
+ "status = 0x%x", status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(notify_register_event_single);
+
+/* This function un-registers the callback for the specific event with
+ * the Notify module. */
+int notify_unregister_event(u16 proc_id, u16 line_id, u32 event_id,
+ notify_fn_notify_cbck notify_callback_fxn, void *cbck_arg)
+{
+ int status = NOTIFY_S_SUCCESS;
+ u32 stripped_event_id = (event_id & NOTIFY_EVENT_MASK);
+ struct notify_event_listener *listener;
+ bool found = false;
+ struct notify_driver_object *driver_handle;
+ struct list_head *event_list;
+ struct notify_object *obj;
+ /*int *sys_key;*/
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(proc_id >= multiproc_get_num_processors()))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(line_id >= NOTIFY_MAX_INTLINES))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(notify_callback_fxn == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+	if (WARN_ON(unlikely(stripped_event_id >=
+			notify_state.cfg.num_events))) {
+ status = NOTIFY_E_EVTNOTREGISTERED;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(!ISRESERVED(event_id,
+ notify_state.cfg.reserved_events)))) {
+ status = NOTIFY_E_EVTRESERVED;
+ goto exit;
+ }
+
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+ driver_handle = notify_get_driver_handle(proc_id, line_id);
+ if (WARN_ON(driver_handle == NULL)) {
+ status = NOTIFY_E_DRIVERNOTREGISTERED;
+ goto exit_unlock_mutex;
+ }
+ if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ obj = (struct notify_object *)driver_handle->notify_handle;
+ if (WARN_ON(obj == NULL)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ event_list = &(obj->event_list[stripped_event_id]);
+ if (list_empty(event_list)) {
+ status = NOTIFY_E_NOTFOUND;
+ goto exit_unlock_mutex;
+ }
+
+ list_for_each_entry(listener, event_list, element) {
+		/* Match on both the callback function and its argument */
+ if ((listener->callback.fn_notify_cbck == notify_callback_fxn)
+ && (listener->callback.cbck_arg == cbck_arg)) {
+ found = true;
+ break;
+ }
+ }
+ if (found == false) {
+ status = NOTIFY_E_NOTFOUND;
+ goto exit_unlock_mutex;
+ }
+ /*sys_key = Gate_enterSystem();*/
+	list_del(&listener->element);
+ /*Gate_leaveSystem(sys_key);*/
+ mutex_unlock(notify_state.gate_handle);
+
+ if (list_empty(event_list)) {
+ status = notify_unregister_event_single(proc_id, line_id,
+ event_id);
+ }
+ kfree(listener);
+ goto exit;
+
+exit_unlock_mutex:
+ mutex_unlock(notify_state.gate_handle);
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_unregister_event failed! "
+ "status = 0x%x", status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(notify_unregister_event);
+
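+/* Note: the (callback function, argument) pair passed to
+ * notify_unregister_event() must be the same pair that was passed to
+ * notify_register_event(), since the listener is looked up by comparing
+ * both values. */
+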
+/* This function un-registers a single callback for the specific event with
+ * the Notify module. */
+int notify_unregister_event_single(u16 proc_id, u16 line_id, u32 event_id)
+{
+ int status = NOTIFY_S_SUCCESS;
+ u32 stripped_event_id = (event_id & NOTIFY_EVENT_MASK);
+ struct notify_driver_object *driver_handle;
+ struct notify_object *obj;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(proc_id >= multiproc_get_num_processors()))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(line_id >= NOTIFY_MAX_INTLINES))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+	if (WARN_ON(unlikely(stripped_event_id >=
+			notify_state.cfg.num_events))) {
+ status = NOTIFY_E_EVTNOTREGISTERED;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(!ISRESERVED(event_id,
+ notify_state.cfg.reserved_events)))) {
+ status = NOTIFY_E_EVTRESERVED;
+ goto exit;
+ }
+
+ status = mutex_lock_interruptible(notify_state.gate_handle);
+ if (status)
+ goto exit;
+ driver_handle = notify_get_driver_handle(proc_id, line_id);
+ if (WARN_ON(driver_handle == NULL)) {
+ status = NOTIFY_E_DRIVERNOTREGISTERED;
+ goto exit_unlock_mutex;
+ }
+ if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ obj = (struct notify_object *)driver_handle->notify_handle;
+ if (WARN_ON(obj == NULL)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ if (obj->callbacks[stripped_event_id].fn_notify_cbck == NULL) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ obj->callbacks[stripped_event_id].fn_notify_cbck = NULL;
+ obj->callbacks[stripped_event_id].cbck_arg = NULL;
+ if (proc_id != multiproc_self()) {
+ status = driver_handle->fxn_table.unregister_event(
+ driver_handle, stripped_event_id);
+ }
+
+exit_unlock_mutex:
+ mutex_unlock(notify_state.gate_handle);
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_unregister_event_single failed! "
+ "status = 0x%x", status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(notify_unregister_event_single);
+
+/* This function sends a notification to the specified event. */
+int notify_send_event(u16 proc_id, u16 line_id, u32 event_id, u32 payload,
+ bool wait_clear)
+{
+ int status = NOTIFY_S_SUCCESS;
+ u32 stripped_event_id = (event_id & NOTIFY_EVENT_MASK);
+ struct notify_driver_object *driver_handle;
+ struct notify_object *obj;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(proc_id >= multiproc_get_num_processors()))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(line_id >= NOTIFY_MAX_INTLINES))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+	if (WARN_ON(unlikely(stripped_event_id >=
+			notify_state.cfg.num_events))) {
+ status = NOTIFY_E_EVTNOTREGISTERED;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(!ISRESERVED(event_id,
+ notify_state.cfg.reserved_events)))) {
+ status = NOTIFY_E_EVTRESERVED;
+ goto exit;
+ }
+
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+ driver_handle = notify_get_driver_handle(proc_id, line_id);
+ if (WARN_ON(driver_handle == NULL)) {
+ status = NOTIFY_E_DRIVERNOTREGISTERED;
+ goto exit_unlock_mutex;
+ }
+ if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ obj = (struct notify_object *)driver_handle->notify_handle;
+ if (WARN_ON(obj == NULL)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ if (proc_id != multiproc_self()) {
+ status = driver_handle->fxn_table.send_event(driver_handle,
+ stripped_event_id, payload, wait_clear);
+ } else {
+ /* If nesting == 0 (the driver is enabled) and the event is
+ * enabled, send the event */
+ if (obj->callbacks[stripped_event_id].fn_notify_cbck == NULL) {
+ /* No callbacks are registered locally for the event. */
+ status = NOTIFY_E_EVTNOTREGISTERED;
+ } else if (obj->nesting != 0) {
+ /* Driver is disabled */
+ status = NOTIFY_E_FAIL;
+ } else if (!test_bit(stripped_event_id, (unsigned long *)
+ &notify_state.local_enable_mask)) {
+ /* Event is disabled */
+ status = NOTIFY_E_EVTDISABLED;
+ } else {
+ /* Leave critical section protection. */
+ mutex_unlock(notify_state.gate_handle);
+ /* Execute the callback function registered to the
+ * event */
+ notify_exec(obj, event_id, payload);
+ /* Enter critical section protection. TBD: nesting */
+ if (mutex_lock_interruptible(notify_state.gate_handle))
+ WARN_ON(1);
+ }
+ }
+
+exit_unlock_mutex:
+ mutex_unlock(notify_state.gate_handle);
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_send_event failed! status = 0x%x",
+ status);
+ }
+ return status;
+}
+EXPORT_SYMBOL(notify_send_event);
+
+/* This function disables all events. It is equivalent to a global
+ * interrupt disable, but restricted to the interrupts handled by the
+ * Notify module. All callbacks registered for all events are disabled
+ * by this API; it is not possible to disable a specific callback. */
+u32 notify_disable(u16 proc_id, u16 line_id)
+{
+ uint key = 0;
+ int status = NOTIFY_S_SUCCESS;
+ struct notify_driver_object *driver_handle;
+ struct notify_object *obj;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(proc_id >= multiproc_get_num_processors()))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(line_id >= NOTIFY_MAX_INTLINES))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+ driver_handle = notify_get_driver_handle(proc_id, line_id);
+ if (WARN_ON(driver_handle == NULL)) {
+ status = NOTIFY_E_DRIVERNOTREGISTERED;
+ goto exit_unlock_mutex;
+ }
+ if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ obj = (struct notify_object *)driver_handle->notify_handle;
+ if (WARN_ON(obj == NULL)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ obj->nesting++;
+ if (obj->nesting == 1) {
+ /* Disable receiving all events */
+ if (proc_id != multiproc_self())
+ driver_handle->fxn_table.disable(driver_handle);
+ }
+ key = obj->nesting;
+
+exit_unlock_mutex:
+ mutex_unlock(notify_state.gate_handle);
+exit:
+ if (status < 0)
+ printk(KERN_ERR "notify_disable failed! status = 0x%x", status);
+ return key;
+}
+EXPORT_SYMBOL(notify_disable);
+
+/* This function restores the Notify module to the state before the
+ * last notify_disable() was called. It is equivalent to a global
+ * interrupt restore, but restricted to the interrupts handled by the
+ * Notify module. All callbacks registered for all events are re-enabled
+ * by this API; it is not possible to enable a specific callback. */
+void notify_restore(u16 proc_id, u16 line_id, u32 key)
+{
+ int status = NOTIFY_S_SUCCESS;
+ struct notify_driver_object *driver_handle;
+ struct notify_object *obj;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(proc_id >= multiproc_get_num_processors()))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(line_id >= NOTIFY_MAX_INTLINES))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+ driver_handle = notify_get_driver_handle(proc_id, line_id);
+ if (WARN_ON(driver_handle == NULL)) {
+ status = NOTIFY_E_DRIVERNOTREGISTERED;
+ goto exit_unlock_mutex;
+ }
+ if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ obj = (struct notify_object *)driver_handle->notify_handle;
+ if (WARN_ON(obj == NULL)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ if (key != obj->nesting) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit_unlock_mutex;
+ }
+
+ obj->nesting--;
+ if (obj->nesting == 0) {
+ /* Enable receiving events */
+ if (proc_id != multiproc_self())
+ driver_handle->fxn_table.enable(driver_handle);
+ }
+
+exit_unlock_mutex:
+ mutex_unlock(notify_state.gate_handle);
+exit:
+ if (status < 0)
+ printk(KERN_ERR "notify_restore failed! status = 0x%x", status);
+ return;
+}
+EXPORT_SYMBOL(notify_restore);
+
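+/*
+ * Usage sketch (illustrative): notify_disable()/notify_restore() nest, and
+ * must be balanced using the key returned by notify_disable():
+ *
+ *	u32 key = notify_disable(remote_proc_id, 0);
+ *	...critical region with Notify events masked...
+ *	notify_restore(remote_proc_id, 0, key);
+ */
+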
+/* This function disables a specific event. All callbacks registered
+ * for the specific event are disabled with this API. It is not
+ * possible to disable a specific callback. */
+void notify_disable_event(u16 proc_id, u16 line_id, u32 event_id)
+{
+ int status = 0;
+ u32 stripped_event_id = (event_id & NOTIFY_EVENT_MASK);
+ struct notify_driver_object *driver_handle;
+ struct notify_object *obj;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(proc_id >= multiproc_get_num_processors()))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(line_id >= NOTIFY_MAX_INTLINES))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+	if (WARN_ON(unlikely(stripped_event_id >=
+			notify_state.cfg.num_events))) {
+ status = NOTIFY_E_EVTNOTREGISTERED;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(!ISRESERVED(event_id,
+ notify_state.cfg.reserved_events)))) {
+ status = NOTIFY_E_EVTRESERVED;
+ goto exit;
+ }
+
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+ driver_handle = notify_get_driver_handle(proc_id, line_id);
+ if (WARN_ON(driver_handle == NULL)) {
+ status = NOTIFY_E_DRIVERNOTREGISTERED;
+ goto exit_unlock_mutex;
+ }
+ if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ obj = (struct notify_object *)driver_handle->notify_handle;
+ if (WARN_ON(obj == NULL)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ if (proc_id != multiproc_self()) {
+ driver_handle->fxn_table.disable_event(driver_handle,
+ stripped_event_id);
+ } else {
+ clear_bit(stripped_event_id,
+ (unsigned long *) &notify_state.local_enable_mask);
+ }
+
+exit_unlock_mutex:
+ mutex_unlock(notify_state.gate_handle);
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_disable_event failed! status = 0x%x",
+ status);
+ }
+ return;
+}
+EXPORT_SYMBOL(notify_disable_event);
+
+/* This function enables a specific event. All callbacks registered for
+ * this specific event are enabled with this API. It is not possible to
+ * enable a specific callback. */
+void notify_enable_event(u16 proc_id, u16 line_id, u32 event_id)
+{
+ int status = 0;
+ u32 stripped_event_id = (event_id & NOTIFY_EVENT_MASK);
+ struct notify_driver_object *driver_handle;
+ struct notify_object *obj;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(proc_id >= multiproc_get_num_processors()))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(line_id >= NOTIFY_MAX_INTLINES))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+	if (WARN_ON(unlikely(stripped_event_id >=
+			notify_state.cfg.num_events))) {
+ status = NOTIFY_E_EVTNOTREGISTERED;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(!ISRESERVED(event_id,
+ notify_state.cfg.reserved_events)))) {
+ status = NOTIFY_E_EVTRESERVED;
+ goto exit;
+ }
+
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+ driver_handle = notify_get_driver_handle(proc_id, line_id);
+ if (WARN_ON(driver_handle == NULL)) {
+ status = NOTIFY_E_DRIVERNOTREGISTERED;
+ goto exit_unlock_mutex;
+ }
+ if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ obj = (struct notify_object *)driver_handle->notify_handle;
+ if (WARN_ON(obj == NULL)) {
+ status = NOTIFY_E_FAIL;
+ goto exit_unlock_mutex;
+ }
+
+ if (proc_id != multiproc_self()) {
+ driver_handle->fxn_table.enable_event(driver_handle,
+ stripped_event_id);
+ } else {
+ set_bit(stripped_event_id,
+ (unsigned long *)&notify_state.local_enable_mask);
+ }
+
+exit_unlock_mutex:
+ mutex_unlock(notify_state.gate_handle);
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_enable_event failed! status = 0x%x",
+ status);
+ }
+ return;
+}
+EXPORT_SYMBOL(notify_enable_event);
+
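+/* For the local processor, notify_disable_event()/notify_enable_event()
+ * above simply clear/set the event's bit in local_enable_mask; for a
+ * remote processor the operation is delegated to the underlying driver. */
+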
+/* Checks whether a notification driver has been registered for the given
+ * processor and interrupt line. */
+bool notify_is_registered(u16 proc_id, u16 line_id)
+{
+ int status = NOTIFY_S_SUCCESS;
+ bool is_registered = false;
+ struct notify_driver_object *driver_handle;
+
+ if (WARN_ON(unlikely(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true))) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(proc_id >= multiproc_get_num_processors()))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(line_id >= NOTIFY_MAX_INTLINES))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ driver_handle = notify_get_driver_handle(proc_id, line_id);
+ if ((driver_handle != NULL) && (driver_handle->notify_handle != NULL))
+ is_registered = true;
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_is_registered failed! status = 0x%x",
+ status);
+ }
+ return is_registered;
+}
+EXPORT_SYMBOL(notify_is_registered);
+
+/* Creates notify drivers and registers them with Notify */
+int notify_attach(u16 proc_id, void *shared_addr)
+{
+ int status = NOTIFY_S_SUCCESS;
+
+ /* Use the NotifySetup proxy to setup drivers */
+ status = notify_setup_proxy_attach(proc_id, shared_addr);
+
+ notify_state.start_complete = true;
+
+ return status;
+}
+EXPORT_SYMBOL(notify_attach);
+
+/* Deletes notify drivers and unregisters them with Notify */
+int notify_detach(u16 proc_id)
+{
+ int status = 0;
+
+ /* Use the NotifySetup proxy to destroy drivers */
+ status = notify_setup_proxy_detach(proc_id);
+
+ notify_state.start_complete = false;
+
+ return status;
+}
+EXPORT_SYMBOL(notify_detach);
+
+/* Returns the total amount of shared memory used by the Notify module
+ * and all instances after notify_start has been called. */
+uint notify_shared_mem_req(u16 proc_id, void *shared_addr)
+{
+ uint mem_req = 0x0;
+
+ if (multiproc_get_num_processors() > 1)
+ /* Determine device-specific shared memory requirements */
+		mem_req = notify_setup_proxy_shared_mem_req(proc_id,
+							shared_addr);
+ else
+ /* Only 1 processor: no shared memory needed */
+ mem_req = 0;
+
+ return mem_req;
+}
+
+/* Indicates whether notify_start is completed. */
+inline bool _notify_start_complete(void)
+{
+ return notify_state.start_complete;
+}
+
+/* Function registered as callback with the Notify driver */
+void notify_exec(struct notify_object *obj, u32 event_id, u32 payload)
+{
+ struct notify_event_callback *callback;
+
+ WARN_ON(obj == NULL);
+ WARN_ON(event_id >= notify_state.cfg.num_events);
+
+ callback = &(obj->callbacks[event_id]);
+ WARN_ON(callback->fn_notify_cbck == NULL);
+
+ /* Execute the callback function with its argument and the payload */
+ callback->fn_notify_cbck(obj->remote_proc_id, obj->line_id, event_id,
+ callback->cbck_arg, payload);
+}
+
+
+/* Callback registered (through notify_register_event_single()) when more
+ * than one listener may be present for an event; it dispatches the event
+ * to every listener on the event list. */
+void _notify_exec_many(u16 proc_id, u16 line_id, u32 event_id, uint *arg,
+ u32 payload)
+{
+ struct notify_object *obj = (struct notify_object *)arg;
+ struct list_head *event_list;
+ struct notify_event_listener *listener;
+
+ WARN_ON(proc_id >= multiproc_get_num_processors());
+ WARN_ON(obj == NULL);
+ WARN_ON(line_id >= NOTIFY_MAX_INTLINES);
+ WARN_ON(event_id >= notify_state.cfg.num_events);
+
+	/* Both loopback and the event itself are enabled */
+ event_list = &(obj->event_list[event_id]);
+
+ /* Enter critical section protection. */
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+	/* Walk every listener registered for this event. The gate is
+	 * dropped around each callback invocation, so this assumes the
+	 * list is not modified concurrently. */
+ list_for_each_entry(listener, event_list, element) {
+ /* Leave critical section protection. */
+ mutex_unlock(notify_state.gate_handle);
+ listener->callback.fn_notify_cbck(proc_id, line_id, event_id,
+ listener->callback.cbck_arg, payload);
+ /* Enter critical section protection. */
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+ }
+
+ /* Leave critical section protection. */
+ mutex_unlock(notify_state.gate_handle);
+}
diff --git a/drivers/dsp/syslink/omap_notify/notify_driver.c b/drivers/dsp/syslink/omap_notify/notify_driver.c
new file mode 100644
index 000000000000..2de9ae90d49c
--- /dev/null
+++ b/drivers/dsp/syslink/omap_notify/notify_driver.c
@@ -0,0 +1,186 @@
+/*
+ * notify_driver.c
+ *
+ * Syslink driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <asm/pgtable.h>
+
+#include <syslink/gt.h>
+#include <syslink/notify.h>
+#include <syslink/notify_driver.h>
+#include <syslink/notifydefs.h>
+#include <syslink/atomic_linux.h>
+
+
+/* Function to register driver with the Notify module. */
+int notify_register_driver(u16 remote_proc_id,
+ u16 line_id,
+ struct notify_driver_fxn_table *fxn_table,
+ struct notify_driver_object **driver_handle)
+{
+ int status = NOTIFY_S_SUCCESS;
+ struct notify_driver_object *drv_handle = NULL;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true)) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(fxn_table == NULL)) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(remote_proc_id >= multiproc_get_num_processors())) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(line_id >= NOTIFY_MAX_INTLINES)) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(driver_handle == NULL)) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ *driver_handle = NULL;
+ if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
+ WARN_ON(1);
+ drv_handle = &(notify_state.drivers[remote_proc_id][line_id]);
+ if (drv_handle->is_init == NOTIFY_DRIVERINITSTATUS_DONE) {
+ status = NOTIFY_E_ALREADYEXISTS;
+ mutex_unlock(notify_state.gate_handle);
+ goto exit;
+ }
+ mutex_unlock(notify_state.gate_handle);
+ WARN_ON(status < 0);
+
+	/* Complete registration of the driver. */
+ memcpy(&(drv_handle->fxn_table), fxn_table,
+ sizeof(struct notify_driver_fxn_table));
+ drv_handle->notify_handle = NULL; /* Initialize to NULL. */
+ drv_handle->is_init = NOTIFY_DRIVERINITSTATUS_DONE;
+ *driver_handle = drv_handle;
+
+exit:
+ return status;
+}
+EXPORT_SYMBOL(notify_register_driver);
+
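+/*
+ * Typical bring-up (illustrative): a transport driver fills in a
+ * struct notify_driver_fxn_table with its register_event, unregister_event,
+ * send_event, disable, enable, disable_event and enable_event functions,
+ * then calls:
+ *
+ *	status = notify_register_driver(remote_proc_id, line_id,
+ *			&fxn_table, &drv_handle);
+ */
+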
+/* Function to unregister driver with the Notify module. */
+int notify_unregister_driver(struct notify_driver_object *drv_handle)
+{
+ int status = NOTIFY_E_FAIL;
+ s32 key;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true)) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(drv_handle == NULL)) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ key = mutex_lock_interruptible(notify_state.gate_handle);
+ if (key)
+ goto exit;
+
+ /* Unregister the driver. */
+ drv_handle->is_init = NOTIFY_DRIVERINITSTATUS_NOTDONE;
+ mutex_unlock(notify_state.gate_handle);
+ status = NOTIFY_S_SUCCESS;
+
+exit:
+ return status;
+}
+EXPORT_SYMBOL(notify_unregister_driver);
+
+/* Function to set the Notify object handle maintained within the
+ * Notify module. */
+int notify_set_driver_handle(u16 remote_proc_id, u16 line_id,
+ struct notify_object *handle)
+{
+ s32 status = NOTIFY_S_SUCCESS;
+
+ /* Handle can be set to NULL */
+ if (WARN_ON(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true)) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(remote_proc_id >= multiproc_get_num_processors())) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(line_id >= NOTIFY_MAX_INTLINES)) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ notify_state.drivers[remote_proc_id][line_id].notify_handle = handle;
+
+exit:
+ return status;
+}
+EXPORT_SYMBOL(notify_set_driver_handle);
+
+
+/* Function to find and return the driver handle maintained within
+ * the Notify module. */
+struct notify_driver_object *notify_get_driver_handle(u16 remote_proc_id,
+ u16 line_id)
+{
+ struct notify_driver_object *handle = NULL;
+ s32 status = NOTIFY_S_SUCCESS;
+
+ if (WARN_ON(atomic_cmpmask_and_lt(&(notify_state.ref_count),
+ NOTIFY_MAKE_MAGICSTAMP(0),
+ NOTIFY_MAKE_MAGICSTAMP(1)) == true)) {
+ status = NOTIFY_E_INVALIDSTATE;
+ goto exit;
+ }
+ if (WARN_ON(remote_proc_id >= multiproc_get_num_processors())) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(line_id >= NOTIFY_MAX_INTLINES)) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ handle = &(notify_state.drivers[remote_proc_id][line_id]);
+ /* Check whether the driver handle slot is occupied. */
+ if (handle->is_init == NOTIFY_DRIVERINITSTATUS_NOTDONE)
+ handle = NULL;
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_get_driver_handle failed! "
+ "status = 0x%x\n", status);
+ }
+ return handle;
+}
+EXPORT_SYMBOL(notify_get_driver_handle);
diff --git a/drivers/dsp/syslink/omap_notify/plat/omap4_notify_setup.c b/drivers/dsp/syslink/omap_notify/plat/omap4_notify_setup.c
new file mode 100644
index 000000000000..016ffb2fad9f
--- /dev/null
+++ b/drivers/dsp/syslink/omap_notify/plat/omap4_notify_setup.c
@@ -0,0 +1,165 @@
+/*
+ * omap4_notify_setup.c
+ *
+ * OMAP4 device-specific functions to setup the Notify module.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* Linux headers */
+#include <linux/spinlock.h>
+/*#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <asm/pgtable.h>*/
+
+/* Module headers */
+#include <syslink/multiproc.h>
+
+#include <syslink/notify.h>
+#include <syslink/notify_setup_proxy.h>
+#include <syslink/notify_ducatidriver.h>
+#include <syslink/notify_driver.h>
+#include <syslink/notifydefs.h>
+
+
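+/* The notify_setup_omap4_* functions below are expected to be plugged in
+ * as the notify_setup_proxy_* entry points (attach/detach/shared_mem_req)
+ * that the generic Notify module invokes, presumably via
+ * notify_setup_proxy.h. */
+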
+/* Handle to the NotifyDriver for line 0 */
+static struct notify_ducatidrv_object *notify_setup_driver_handles[
+ MULTIPROC_MAXPROCESSORS];
+
+/* Handle to the Notify objects */
+static
+struct notify_object *notify_setup_notify_handles[MULTIPROC_MAXPROCESSORS];
+
+
+/* Function to perform device specific setup for Notify module.
+ * This function creates the Notify drivers. */
+int notify_setup_omap4_attach(u16 proc_id, void *shared_addr)
+{
+ s32 status = NOTIFY_S_SUCCESS;
+ struct notify_ducatidrv_params notify_shm_params;
+
+ if (WARN_ON(unlikely(shared_addr == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+ if (WARN_ON(unlikely(proc_id == multiproc_self()))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ notify_ducatidrv_params_init(&notify_shm_params);
+
+ /* Currently not supporting caching on host side. */
+ notify_shm_params.cache_enabled = false;
+ notify_shm_params.line_id = 0;
+ notify_shm_params.local_int_id = 77u; /* TBD: Ipc_getConfig */
+ notify_shm_params.remote_int_id = 0u; /* TBD: Ipc_getConfig */
+ notify_shm_params.remote_proc_id = proc_id;
+ notify_shm_params.shared_addr = shared_addr;
+
+ notify_setup_driver_handles[proc_id] = notify_ducatidrv_create(
+ &notify_shm_params);
+ if (notify_setup_driver_handles[proc_id] == NULL) {
+ status = NOTIFY_E_FAIL;
+ printk(KERN_ERR "notify_setup_omap4_attach: "
+ "notify_ducatidrv_create failed! status = 0x%x",
+ status);
+ goto exit;
+ }
+
+	notify_setup_notify_handles[proc_id] =
+			notify_create(notify_setup_driver_handles[proc_id],
+					proc_id, 0u, NULL);
+ if (notify_setup_notify_handles[proc_id] == NULL) {
+ status = NOTIFY_E_FAIL;
+ printk(KERN_ERR "notify_setup_omap4_attach: notify_create "
+ "failed!");
+ goto exit;
+ }
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_setup_omap4_attach failed! "
+ "status = 0x%x", status);
+ }
+ return status;
+}
+
+
+/* Function to perform device specific destroy for Notify module.
+ * This function deletes the Notify drivers. */
+int notify_setup_omap4_detach(u16 proc_id)
+{
+ s32 status = NOTIFY_S_SUCCESS;
+ s32 tmp_status = NOTIFY_S_SUCCESS;
+
+ if (WARN_ON(unlikely(proc_id == multiproc_self()))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ /* Delete the notify driver to the M3 (Line 0) */
+ status = notify_delete(&(notify_setup_notify_handles[proc_id]));
+ if (status < 0) {
+ printk(KERN_ERR "notify_setup_omap4_detach: notify_delete "
+ "failed for line 0!");
+ }
+
+ tmp_status = notify_ducatidrv_delete(
+ &(notify_setup_driver_handles[proc_id]));
+ if ((tmp_status < 0) && (status >= 0)) {
+ status = tmp_status;
+ printk(KERN_ERR "notify_setup_omap4_detach: "
+ "notify_ducatidrv_delete failed for line 0!");
+ }
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_setup_omap4_detach failed! "
+ "status = 0x%x", status);
+ }
+ return status;
+}
+
+
+/* Return the amount of shared memory required */
+uint notify_setup_omap4_shared_mem_req(u16 remote_proc_id, void *shared_addr)
+{
+ int status = NOTIFY_S_SUCCESS;
+ uint mem_req = 0x0;
+ struct notify_ducatidrv_params params;
+
+ if (WARN_ON(unlikely(shared_addr == NULL))) {
+ status = NOTIFY_E_INVALIDARG;
+ goto exit;
+ }
+
+ notify_ducatidrv_params_init(&params);
+ params.shared_addr = shared_addr;
+
+ mem_req = notify_ducatidrv_shared_mem_req(&params);
+
+exit:
+ if (status < 0) {
+ printk(KERN_ERR "notify_setup_omap4_shared_mem_req failed!"
+ " status = 0x%x", status);
+ }
+ return mem_req;
+}
+
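+/* Only interrupt line 0 is used on OMAP4 (see the attach function above),
+ * so the line is always reported as available. */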
+bool notify_setup_omap4_int_line_available(u16 remote_proc_id)
+{
+ return true;
+}
diff --git a/drivers/dsp/syslink/procmgr/Kbuild b/drivers/dsp/syslink/procmgr/Kbuild
new file mode 100644
index 000000000000..58f6d3155250
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/Kbuild
@@ -0,0 +1,10 @@
+libomap_syslink_proc = processor.o procmgr.o procmgr_drv.o
+
+obj-$(CONFIG_SYSLINK_PROC) += syslink_proc.o
+syslink_proc-objs = $(libomap_syslink_proc)
+
+ccflags-y += -Wno-strict-prototypes
+
+#Header files
+ccflags-y += -Iarch/arm/plat-omap/include/syslink
+
diff --git a/drivers/dsp/syslink/procmgr/proc4430/Kbuild b/drivers/dsp/syslink/procmgr/proc4430/Kbuild
new file mode 100644
index 000000000000..f82f2e23f79a
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/Kbuild
@@ -0,0 +1,10 @@
+libomap_proc4430 = proc4430.o proc4430_drv.o dmm4430.o \
+ ducatienabler.o hw_mmu.o
+
+obj-$(CONFIG_SYSLINK_PROC) += syslink_proc4430.o
+syslink_proc4430-objs = $(libomap_proc4430)
+
+ccflags-y += -Wno-strict-prototypes -DUSE_LEVEL_1_MACROS
+
+#Header files
+ccflags-y += -Iarch/arm/plat-omap/include/syslink
diff --git a/drivers/dsp/syslink/procmgr/proc4430/dmm4430.c b/drivers/dsp/syslink/procmgr/proc4430/dmm4430.c
new file mode 100644
index 000000000000..227cba7ce43b
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/dmm4430.c
@@ -0,0 +1,356 @@
+/*
+ * dmm4430.c
+ *
+ * Syslink support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ * ======== dmm.c ========
+ * Purpose:
+ *	The Dynamic Memory Manager (DMM) module manages the DSP virtual
+ *	address space that can be directly mapped to any MPU buffer or
+ *	memory region.
+ *
+ * Public Functions:
+ *	dmm_create_tables
+ *	dmm_create
+ *	dmm_destroy
+ *	dmm_delete_tables
+ *	dmm_init
+ *	dmm_reserve_memory
+ *	dmm_unreserve_memory
+ *
+ * Private Functions:
+ *	get_free_region
+ *	get_mapped_region
+ *
+ * Notes:
+ *	Region: generic memory entity having a start address and a size
+ *	Chunk: reserved region
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+#include "dmm4430.h"
+
+
+#define DMM_ADDR_VIRTUAL(x, a) \
+ do { \
+ x = (((struct map_page *)(a) - p_virt_mapping_tbl) * PAGE_SIZE \
+ + dyn_mem_map_begin);\
+ } while (0)
+
+#define DMM_ADDR_TO_INDEX(i, a) \
+ do { \
+ i = (((a) - dyn_mem_map_begin) / PAGE_SIZE); \
+ } while (0)
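+
+/* The two macros above translate between an entry of the mapping table and
+ * the DSP virtual address it represents: slot i covers the PAGE_SIZE page
+ * starting at dyn_mem_map_begin + i * PAGE_SIZE. */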
+
+struct map_page {
+ u32 region_size:31;
+ u32 b_reserved:1;
+};
+
+/* Create the free list */
+static struct map_page *p_virt_mapping_tbl;
+static u32 i_freeregion; /* The index of free region */
+static u32 i_freesize;
+static u32 table_size;/* The size of virtual and physical pages tables */
+static u32 dyn_mem_map_begin;
+struct mutex *dmm_lock;
+
+static struct map_page *get_free_region(u32 size);
+static struct map_page *get_mapped_region(u32 addr);
+
+/* ======== dmm_create_tables ========
+ * Purpose:
+ *	Create the table that holds information about the virtual memory
+ *	reserved for the DSP.
+ */
+int dmm_create_tables(u32 addr, u32 size)
+{
+ int status = 0;
+
+ dmm_delete_tables();
+	if (WARN_ON(mutex_lock_interruptible(dmm_lock))) {
+		status = -EFAULT;
+		goto func_exit;
+	}
+	dyn_mem_map_begin = addr;
+	table_size = (size / PAGE_SIZE) + 1;
+	/* Create the free list */
+	p_virt_mapping_tbl = vmalloc(table_size * sizeof(struct map_page));
+	if (WARN_ON(p_virt_mapping_tbl == NULL)) {
+		status = -ENOMEM;
+		mutex_unlock(dmm_lock);
+		goto func_exit;
+	}
+	/* vmalloc() does not zero the buffer, so clear it explicitly:
+	 * all entries start out free */
+	memset(p_virt_mapping_tbl, 0, table_size * sizeof(struct map_page));
+	i_freeregion = 0;
+	i_freesize = table_size * PAGE_SIZE;
+	p_virt_mapping_tbl[0].region_size = table_size;
+	mutex_unlock(dmm_lock);
+
+func_exit:
+ return status;
+}
+
+/*
+ * ======== dmm_create ========
+ * Purpose:
+ *	Create a dynamic memory manager object.
+ */
+int dmm_create(void)
+{
+ int status = 0;
+ dmm_lock = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (WARN_ON(dmm_lock == NULL)) {
+ status = -EFAULT;
+ goto func_exit;
+ }
+ mutex_init(dmm_lock);
+func_exit:
+ return status;
+}
+
+/*
+ * ======== dmm_destroy ========
+ * Purpose:
+ *	Release the dynamic memory manager resources.
+ */
+void dmm_destroy(void)
+{
+ dmm_delete_tables();
+ kfree(dmm_lock);
+}
+
+
+/*
+ * ======== dmm_delete_tables ========
+ * Purpose:
+ *	Delete the DMM tables.
+ */
+void dmm_delete_tables(void)
+{
+ /* Delete all DMM tables */
+ WARN_ON(mutex_lock_interruptible(dmm_lock));
+ if (p_virt_mapping_tbl != NULL) {
+ vfree(p_virt_mapping_tbl);
+ p_virt_mapping_tbl = NULL;
+ }
+ mutex_unlock(dmm_lock);
+}
+
+/*
+ * ======== dmm_init ========
+ * Purpose:
+ *	Initialize the private state of the DMM module.
+ */
+void dmm_init(void)
+{
+ p_virt_mapping_tbl = NULL ;
+ table_size = 0;
+ return;
+}
+
+/*
+ * ======== dmm_reserve_memory ========
+ * Purpose:
+ *	Reserve a chunk of virtually contiguous DSP/IVA address space.
+ */
+int dmm_reserve_memory(u32 size, u32 *p_rsv_addr)
+{
+ int status = 0;
+ struct map_page *node;
+ u32 rsv_addr = 0;
+ u32 rsv_size = 0;
+
+	if (WARN_ON(mutex_lock_interruptible(dmm_lock))) {
+ status = -EFAULT;
+ goto func_exit;
+ }
+
+ /* Try to get a DSP chunk from the free list */
+ node = get_free_region(size);
+ if (node != NULL) {
+ /* DSP chunk of given size is available. */
+ DMM_ADDR_VIRTUAL(rsv_addr, node);
+ /* Calculate the number entries to use */
+ rsv_size = size/PAGE_SIZE;
+ if (rsv_size < node->region_size) {
+ /* Mark remainder of free region */
+ node[rsv_size].b_reserved = false;
+ node[rsv_size].region_size =
+ node->region_size - rsv_size;
+ }
+ /* get_region will return first fit chunk. But we only use what
+ is requested. */
+ node->b_reserved = true;
+ node->region_size = rsv_size;
+ /* Return the chunk's starting address */
+ *p_rsv_addr = rsv_addr;
+ } else
+		/* DSP chunk of the given size is not available */
+ status = -ENOMEM;
+
+ mutex_unlock(dmm_lock);
+func_exit:
+ return status;
+}
+
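+/*
+ * Usage sketch (illustrative): reserve a 1 MB chunk of DSP virtual address
+ * space and release it again:
+ *
+ *	u32 rsv_addr, rsv_size;
+ *
+ *	if (dmm_reserve_memory(0x100000, &rsv_addr) == 0) {
+ *		...
+ *		dmm_unreserve_memory(rsv_addr, &rsv_size);
+ *	}
+ */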
+
+/*
+ * ======== dmm_unreserve_memory ========
+ * Purpose:
+ *	Free a chunk of reserved DSP/IVA address space.
+ */
+int dmm_unreserve_memory(u32 rsv_addr, u32 *psize)
+{
+ struct map_page *chunk;
+ int status = 0;
+
+ WARN_ON(mutex_lock_interruptible(dmm_lock));
+
+ /* Find the chunk containing the reserved address */
+ chunk = get_mapped_region(rsv_addr);
+ if (chunk == NULL)
+ status = -ENXIO;
+ WARN_ON(status < 0);
+ if (status == 0) {
+ chunk->b_reserved = false;
+ *psize = chunk->region_size * PAGE_SIZE;
+ /* NOTE: We do NOT coalesce free regions here.
+		 * Free regions are coalesced in get_free_region(), as it
+		 * traverses the whole mapping table
+ */
+ }
+ mutex_unlock(dmm_lock);
+ return status;
+}
+
+/*
+ * ======== get_free_region ========
+ * Purpose:
+ * Returns the requested free region
+ */
+static struct map_page *get_free_region(u32 size)
+{
+ struct map_page *curr_region = NULL;
+ u32 i = 0;
+ u32 region_size = 0;
+ u32 next_i = 0;
+
+ if (p_virt_mapping_tbl == NULL)
+ return curr_region;
+ if (size > i_freesize) {
+ /* Find the largest free region
+ * (coalesce during the traversal) */
+ while (i < table_size) {
+ region_size = p_virt_mapping_tbl[i].region_size;
+ next_i = i+region_size;
+ if (p_virt_mapping_tbl[i].b_reserved == false) {
+ /* Coalesce, if possible */
+ if (next_i < table_size &&
+ p_virt_mapping_tbl[next_i].b_reserved
+ == false) {
+ p_virt_mapping_tbl[i].region_size +=
+ p_virt_mapping_tbl[next_i].region_size;
+ continue;
+ }
+ region_size *= PAGE_SIZE;
+ if (region_size > i_freesize) {
+ i_freeregion = i;
+ i_freesize = region_size;
+ }
+ }
+ i = next_i;
+ }
+ }
+ if (size <= i_freesize) {
+ curr_region = p_virt_mapping_tbl + i_freeregion;
+ i_freeregion += (size / PAGE_SIZE);
+ i_freesize -= size;
+ }
+ return curr_region;
+}
+
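+/* Note: get_free_region() above behaves as a first-fit allocator with lazy
+ * coalescing: adjacent free regions are merged only while scanning for a
+ * larger block, as noted in dmm_unreserve_memory(). */
+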
+/*
+ * ======== get_mapped_region ========
+ * Purpose:
+ * Returns the requested mapped region
+ */
+static struct map_page *get_mapped_region(u32 addr)
+{
+ u32 i = 0;
+ struct map_page *curr_region = NULL;
+
+ if (p_virt_mapping_tbl == NULL)
+ return curr_region;
+
+ DMM_ADDR_TO_INDEX(i, addr);
+ if (i < table_size && (p_virt_mapping_tbl[i].b_reserved))
+ curr_region = p_virt_mapping_tbl + i;
+ return curr_region;
+}
+
+#ifdef DSP_DMM_DEBUG
+int dmm_mem_map_dump(void)
+{
+ struct map_page *curNode = NULL;
+ u32 i;
+ u32 freemem = 0;
+ u32 bigsize = 0;
+
+ WARN_ON(mutex_lock_interruptible(dmm_lock));
+
+ if (p_virt_mapping_tbl != NULL) {
+ for (i = 0; i < table_size; i +=
+ p_virt_mapping_tbl[i].region_size) {
+ curNode = p_virt_mapping_tbl + i;
+ if (curNode->b_reserved == true) {
+ /*printk("RESERVED size = 0x%x, "
+ "Map size = 0x%x\n",
+ (curNode->region_size * PAGE_SIZE),
+ (curNode->b_mapped == false) ? 0 :
+ (curNode->mapped_size * PAGE_SIZE));
+*/
+ } else {
+/* printk("UNRESERVED size = 0x%x\n",
+ (curNode->region_size * PAGE_SIZE));
+*/
+ freemem += (curNode->region_size * PAGE_SIZE);
+ if (curNode->region_size > bigsize)
+ bigsize = curNode->region_size;
+ }
+ }
+ }
+	printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
+			freemem/(1024*1024));
+	printk(KERN_INFO "Total DSP VA USED memory = %d Mbytes\n",
+			(((table_size * PAGE_SIZE)-freemem))/(1024*1024));
+	printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes\n\n",
+			(bigsize*PAGE_SIZE/(1024*1024)));
+ mutex_unlock(dmm_lock);
+
+ return 0;
+}
+#endif
diff --git a/drivers/dsp/syslink/procmgr/proc4430/dmm4430.h b/drivers/dsp/syslink/procmgr/proc4430/dmm4430.h
new file mode 100644
index 000000000000..7d879f4fe123
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/dmm4430.h
@@ -0,0 +1,50 @@
+/*
+ * dmm4430.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+/*
+ * ======== dmm4430.h ========
+ * Purpose:
+ *	The Dynamic Memory Mapping (DMM) module manages the DSP virtual
+ *	address space that can be directly mapped to any MPU buffer or
+ *	memory region.
+ *
+ * Public Functions:
+ *
+ */
+
+#ifndef DMM_4430_
+#define DMM_4430_
+
+#include <linux/types.h>
+
+int dmm_reserve_memory(u32 size, u32 *p_rsv_addr);
+
+int dmm_unreserve_memory(u32 rsv_addr, u32 *psize);
+
+void dmm_destroy(void);
+
+void dmm_delete_tables(void);
+
+int dmm_create(void);
+
+void dmm_init(void);
+
+int dmm_create_tables(u32 addr, u32 size);
+
+#ifdef DSP_DMM_DEBUG
+int dmm_mem_map_dump(void);
+#endif
+#endif /* DMM_4430_ */
diff --git a/drivers/dsp/syslink/procmgr/proc4430/ducatienabler.c b/drivers/dsp/syslink/procmgr/proc4430/ducatienabler.c
new file mode 100644
index 000000000000..bd7055b4e563
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/ducatienabler.c
@@ -0,0 +1,866 @@
+/*
+ * ducatienabler.c
+ *
+ * Syslink driver support for TI OMAP processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <asm/page.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+
+#include <generated/autoconf.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <asm/irq.h>
+#include <linux/syscalls.h>
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+#include <plat/irqs.h>
+
+#include <syslink/ducatienabler.h>
+#include <syslink/MMUAccInt.h>
+
+#include <plat/iommu.h>
+#include "../../../arch/arm/plat-omap/iopgtable.h"
+
+
+#ifdef DEBUG_DUCATI_IPC
+#define DPRINTK(fmt, args...) printk(KERN_INFO "%s: " fmt, __func__, ## args)
+#else
+#define DPRINTK(fmt, args...)
+#endif
+
+
+#define base_ducati_l2_mmuPhys 0x55082000
+
+/* Attributes of L2 page tables for DSP MMU.*/
+struct page_info {
+ /* Number of valid PTEs in the L2 PT*/
+ u32 num_entries;
+};
+
+enum pagetype {
+ SECTION = 0,
+ LARGE_PAGE = 1,
+ SMALL_PAGE = 2,
+ SUPER_SECTION = 3
+};
+
+
+static u32 shm_phys_addr;
+static u32 shm_virt_addr;
+
+struct iommu *ducati_iommu_ptr;
+
+static void bad_page_dump(u32 pa, struct page *pg)
+{
+ pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
+ pr_emerg("Bad page state in process '%s'\n", current->comm);
+ BUG();
+}
+
+/*============================================
+ * This function calculates PTE address (MPU virtual) to be updated
+ * It also manages the L2 page tables
+ */
+static int pte_set(u32 pa, u32 va, u32 size, struct hw_mmu_map_attrs_t *attrs)
+{
+ struct iotlb_entry tlb_entry;
+ switch (size) {
+ case HW_PAGE_SIZE_16MB:
+ tlb_entry.pgsz = MMU_CAM_PGSZ_16M;
+ break;
+ case HW_PAGE_SIZE_1MB:
+ tlb_entry.pgsz = MMU_CAM_PGSZ_1M;
+ break;
+ case HW_PAGE_SIZE_64KB:
+ tlb_entry.pgsz = MMU_CAM_PGSZ_64K;
+ break;
+ case HW_PAGE_SIZE_4KB:
+ tlb_entry.pgsz = MMU_CAM_PGSZ_4K;
+ break;
+ }
+ tlb_entry.prsvd = MMU_CAM_P;
+ tlb_entry.valid = MMU_CAM_V;
+ switch (attrs->element_size) {
+ case HW_ELEM_SIZE_8BIT:
+ tlb_entry.elsz = MMU_RAM_ELSZ_8;
+ break;
+ case HW_ELEM_SIZE_16BIT:
+ tlb_entry.elsz = MMU_RAM_ELSZ_16;
+ break;
+ case HW_ELEM_SIZE_32BIT:
+ tlb_entry.elsz = MMU_RAM_ELSZ_32;
+ break;
+ case HW_ELEM_SIZE_64BIT:
+ tlb_entry.elsz = 0x3; /* No translation */
+ break;
+ }
+ switch (attrs->endianism) {
+ case HW_LITTLE_ENDIAN:
+ tlb_entry.endian = MMU_RAM_ENDIAN_LITTLE;
+ break;
+ case HW_BIG_ENDIAN:
+ tlb_entry.endian = MMU_RAM_ENDIAN_BIG;
+ break;
+ }
+ switch (attrs->mixedSize) {
+ case HW_MMU_TLBES:
+ tlb_entry.mixed = 0;
+ break;
+ case HW_MMU_CPUES:
+ tlb_entry.mixed = MMU_RAM_MIXED;
+ break;
+ }
+ tlb_entry.da = va;
+ tlb_entry.pa = pa;
+	DPRINTK("pte_set: iommu %p, da 0x%x, pa 0x%x\n",
+			ducati_iommu_ptr, tlb_entry.da, tlb_entry.pa);
+ if (iopgtable_store_entry(ducati_iommu_ptr, &tlb_entry))
+ goto error_exit;
+ return 0;
+error_exit:
+ printk(KERN_ERR "pte set failure \n");
+ return -EFAULT;
+}
+
+
+/*=============================================
+ * This function calculates the optimum page-aligned addresses and sizes
+ * Caller must pass page-aligned values
+ */
+static int pte_update(u32 pa, u32 va, u32 size,
+ struct hw_mmu_map_attrs_t *map_attrs)
+{
+ u32 i;
+ u32 all_bits;
+ u32 pa_curr = pa;
+ u32 va_curr = va;
+ u32 num_bytes = size;
+ int status = 0;
+ u32 pg_size[] = {HW_PAGE_SIZE_16MB, HW_PAGE_SIZE_1MB,
+ HW_PAGE_SIZE_64KB, HW_PAGE_SIZE_4KB};
+ DPRINTK("> pte_update pa %x, va %x, "
+ "size %x, map_attrs %x\n", pa, va, size, (u32)map_attrs);
+ while (num_bytes && (status == 0)) {
+ /* To find the max. page size with which both PA & VA are
+ * aligned */
+ all_bits = pa_curr | va_curr;
+ DPRINTK("all_bits %x, pa_curr %x, va_curr %x, "
+ "num_bytes %x\n ",
+ all_bits, pa_curr, va_curr, num_bytes);
+
+ for (i = 0; i < 4; i++) {
+ if ((num_bytes >= pg_size[i]) && ((all_bits &
+ (pg_size[i] - 1)) == 0)) {
+ DPRINTK("pg_size %x\n", pg_size[i]);
+ status = pte_set(pa_curr,
+ va_curr, pg_size[i], map_attrs);
+ pa_curr += pg_size[i];
+ va_curr += pg_size[i];
+ num_bytes -= pg_size[i];
+ /* Don't try smaller sizes. Hopefully we have
+ * reached an address aligned to a bigger page
+ * size */
+ break;
+ }
+ }
+ }
+ DPRINTK("< pte_update status %x num_bytes %x\n", status, num_bytes);
+ return status;
+}
+
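+/*
+ * Example (illustrative): for pa = va = 0x90000000 and size = 0x111000,
+ * pte_update() above emits one 1MB entry, one 64KB entry and one 4KB
+ * entry, always picking the largest page size to which both addresses
+ * and the remaining length are aligned.
+ */
+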
+/*
+ * ======== ducati_mem_unmap ========
+ * Invalidate the PTEs for the DSP VA block to be unmapped.
+ *
+ * PTEs of a mapped memory block are contiguous in any page table
+ * So, instead of looking up the PTE address for every 4K block,
+ * we clear consecutive PTEs until we unmap all the bytes
+ */
+int ducati_mem_unmap(u32 da, u32 num_bytes)
+{
+ u32 bytes;
+ struct page *pg = NULL;
+ int temp = 0;
+ u32 nent;
+ u32 phyaddress;
+ s32 numofBytes = num_bytes;
+
+ while (num_bytes > 0) {
+ u32 *iopgd = iopgd_offset(ducati_iommu_ptr, da);
+ if (*iopgd & IOPGD_TABLE) {
+ u32 *iopte = iopte_offset(iopgd, da);
+ if (*iopte & IOPTE_LARGE) {
+ nent = 16;
+ /* rewind to the 1st entry */
+ iopte = (u32 *)((u32)iopte & IOLARGE_MASK);
+ } else
+ nent = 1;
+ phyaddress = (*iopte) & IOPAGE_MASK;
+ } else {
+ if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
+ nent = 4096;
+ /* rewind to the 1st entry */
+ iopgd = (u32 *)((u32)iopgd & IOSUPER_MASK);
+ } else
+ nent = 256;
+ phyaddress = (*iopgd) & IOPGD_MASK;
+ }
+ for (temp = 0; temp < nent; temp++) {
+ if (pfn_valid(__phys_to_pfn(phyaddress))) {
+ pg = phys_to_page(phyaddress);
+ if (page_count(pg) < 1) {
+ pr_info("DSPBRIDGE:UNMAP function: "
+ "COUNT 0 FOR PA 0x%x,"
+ " size = 0x%x\n",
+ phyaddress, numofBytes);
+ bad_page_dump(phyaddress, pg);
+ }
+ SetPageDirty(pg);
+ page_cache_release(pg);
+ }
+ phyaddress += HW_PAGE_SIZE_4KB;
+ }
+ bytes = iopgtable_clear_entry(ducati_iommu_ptr, da);
+ num_bytes -= bytes;
+ da += bytes;
+ }
+ return 0;
+}
+
+
+/*
+ * ======== ducati_mem_virtToPhys ========
+ * This function provides the translation from a remote virtual
+ * address to a physical address
+ */
+
+inline u32 ducati_mem_virtToPhys(u32 da)
+{
+#if 0
+ /* FIXME: temp work around till L2MMU issue
+ * is resolved
+ */
+ u32 *iopgd = iopgd_offset(ducati_iommu_ptr, da);
+ u32 phyaddress;
+
+ if (*iopgd & IOPGD_TABLE) {
+ u32 *iopte = iopte_offset(iopgd, da);
+ if (*iopte & IOPTE_LARGE) {
+ phyaddress = *iopte & IOLARGE_MASK;
+ phyaddress |= (da & (IOLARGE_SIZE - 1));
+ } else
+ phyaddress = (*iopte) & IOPAGE_MASK;
+ } else {
+ if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
+ phyaddress = *iopgd & IOSUPER_MASK;
+ phyaddress |= (da & (IOSUPER_SIZE - 1));
+ } else {
+ phyaddress = (*iopgd) & IOPGD_MASK;
+ phyaddress |= (da & (IOPGD_SIZE - 1));
+ }
+ }
+#endif
+ return da;
+}
+
+
+/*
+ * ======== user_va2pa ========
+ * Purpose:
+ * This function walks through the Linux page tables to convert a userland
+ * virtual address to physical address
+ */
+u32 user_va2pa(struct mm_struct *mm, u32 address)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+
+ pgd = pgd_offset(mm, address);
+ if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
+ pmd = pmd_offset(pgd, address);
+ if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
+ ptep = pte_offset_map(pmd, address);
+ if (ptep) {
+ pte = *ptep;
+ if (pte_present(pte))
+ return pte & PAGE_MASK;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*============================================
+ * This function maps an MPU buffer to the DSP address space. It performs
+ * linear-to-physical address translation if required, translating each
+ * page since linear addresses can be physically non-contiguous.
+ * All address and size arguments are assumed to be page aligned (in proc.c).
+ */
+int ducati_mem_map(u32 mpu_addr, u32 ul_virt_addr,
+ u32 num_bytes, u32 map_attr)
+{
+ u32 attrs;
+ int status = 0;
+ struct hw_mmu_map_attrs_t hw_attrs;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ struct task_struct *curr_task = current;
+ u32 write = 0;
+ u32 da = ul_virt_addr;
+ u32 pa = 0;
+ int pg_i = 0;
+ int pg_num = 0;
+ struct page *mappedPage, *pg;
+ int num_usr_pages = 0;
+
+ DPRINTK("> WMD_BRD_MemMap pa %x, va %x, "
+ "size %x, map_attr %x\n", mpu_addr, ul_virt_addr,
+ num_bytes, map_attr);
+ if (num_bytes == 0)
+ return -EINVAL;
+ if (map_attr != 0) {
+ attrs = map_attr;
+ } else {
+ /* Assign default attributes */
+ attrs = DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE32;
+ }
+ /* Take mapping properties */
+ if (attrs & DSP_MAPBIGENDIAN)
+ hw_attrs.endianism = HW_BIG_ENDIAN;
+ else
+ hw_attrs.endianism = HW_LITTLE_ENDIAN;
+
+ hw_attrs.mixedSize = (enum hw_mmu_mixed_size_t)
+ ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
+ /* Ignore element_size if mixedSize is enabled */
+ if (hw_attrs.mixedSize == 0) {
+ if (attrs & DSP_MAPELEMSIZE8) {
+ /* Size is 8 bit */
+ hw_attrs.element_size = HW_ELEM_SIZE_8BIT;
+ } else if (attrs & DSP_MAPELEMSIZE16) {
+ /* Size is 16 bit */
+ hw_attrs.element_size = HW_ELEM_SIZE_16BIT;
+ } else if (attrs & DSP_MAPELEMSIZE32) {
+ /* Size is 32 bit */
+ hw_attrs.element_size = HW_ELEM_SIZE_32BIT;
+ } else if (attrs & DSP_MAPELEMSIZE64) {
+ /* Size is 64 bit */
+ hw_attrs.element_size = HW_ELEM_SIZE_64BIT;
+ } else {
+ /* Mixedsize isn't enabled, so size can't be
+ * zero here */
+ DPRINTK("WMD_BRD_MemMap: MMU element size is zero\n");
+ return -EINVAL;
+ }
+ } else {
+		/* If mixedSize is set to 1, no conversion is required
+		 * for the element size */
+ hw_attrs.element_size = HW_ELEM_SIZE_64BIT;
+ }
+ /*
+ * Do OS-specific user-va to pa translation.
+ * Combine physically contiguous regions to reduce TLBs.
+ * Pass the translated pa to PteUpdate.
+ */
+ if ((attrs & DSP_MAPPHYSICALADDR)) {
+ status = pte_update(mpu_addr, ul_virt_addr, num_bytes,
+ &hw_attrs);
+ goto func_cont;
+ }
+ /*
+ * Important Note: mpu_addr is mapped from user application process
+ * to current process - it must lie completely within the current
+ * virtual memory address space in order to be of use to us here!
+ */
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, mpu_addr);
+ /*
+ * It is observed that under some circumstances, the user buffer is
+ * spread across several VMAs. So loop through and check if the entire
+ * user buffer is covered
+ */
+ while ((vma) && (mpu_addr + num_bytes > vma->vm_end)) {
+ /* jump to the next VMA region */
+ vma = find_vma(mm, vma->vm_end + 1);
+ }
+ if (!vma) {
+ status = -EINVAL;
+ up_read(&mm->mmap_sem);
+ goto func_cont;
+ }
+ if (vma->vm_flags & VM_IO) {
+ num_usr_pages = num_bytes / PAGE_SIZE;
+ /* Get the physical addresses for user buffer */
+ for (pg_i = 0; pg_i < num_usr_pages; pg_i++) {
+ pa = user_va2pa(mm, mpu_addr);
+ if (!pa) {
+ status = -EFAULT;
+				pr_err("DSPBRIDGE: VM_IO mapping physical "
+					"address is invalid\n");
+ break;
+ }
+ if (pfn_valid(__phys_to_pfn(pa))) {
+ pg = phys_to_page(pa);
+ get_page(pg);
+ if (page_count(pg) < 1) {
+ pr_err("Bad page in VM_IO buffer\n");
+ bad_page_dump(pa, pg);
+ }
+ }
+ status = pte_set(pa, da, HW_PAGE_SIZE_4KB, &hw_attrs);
+ if (WARN_ON(status < 0))
+ break;
+ mpu_addr += HW_PAGE_SIZE_4KB;
+ da += HW_PAGE_SIZE_4KB;
+ }
+ } else {
+ num_usr_pages = num_bytes / PAGE_SIZE;
+ if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+ write = 1;
+
+ for (pg_i = 0; pg_i < num_usr_pages; pg_i++) {
+ pg_num = get_user_pages(curr_task, mm, mpu_addr, 1,
+ write, 1, &mappedPage, NULL);
+ if (pg_num > 0) {
+ if (page_count(mappedPage) < 1) {
+					pr_err("Bad page count after doing "
+						"get_user_pages on "
+						"user buffer\n");
+ bad_page_dump(page_to_phys(mappedPage),
+ mappedPage);
+ }
+ status = pte_set(page_to_phys(mappedPage), da,
+ HW_PAGE_SIZE_4KB, &hw_attrs);
+ if (WARN_ON(status < 0))
+ break;
+ da += HW_PAGE_SIZE_4KB;
+ mpu_addr += HW_PAGE_SIZE_4KB;
+ } else {
+				pr_err("DSPBRIDGE: get_user_pages FAILED, "
+					"MPU addr = 0x%x, "
+					"vma->vm_flags = 0x%lx, "
+					"get_user_pages err value = %d, "
+					"buffer size = 0x%x\n", mpu_addr,
+					vma->vm_flags, pg_num, num_bytes);
+ status = -EFAULT;
+ break;
+ }
+ }
+ }
+ up_read(&mm->mmap_sem);
+func_cont:
+	/* Don't propagate Linux or HW status to upper layers */
+	if (status < 0) {
+		/*
+		 * Roll back the pages already mapped in case the mapping
+		 * failed midway
+		 */
+ if (pg_i)
+ ducati_mem_unmap(ul_virt_addr, (pg_i * PAGE_SIZE));
+ }
+ WARN_ON(status < 0);
+ DPRINTK("< WMD_BRD_MemMap status %x\n", status);
+ return status;
+
+}
+
+/*=========================================
+ * Decides a TLB entry size
+ */
+static int get_mmu_entry_size(u32 pa, u32 size, enum pagetype *size_tlb,
+ u32 *entry_size)
+{
+ int status = 0;
+ bool page_align_4kb = false;
+ bool page_align_64kb = false;
+ bool page_align_1mb = false;
+ bool page_align_16mb = false;
+ u32 phys_addr = pa;
+
+	/* First check the page alignment */
+ if ((phys_addr % PAGE_SIZE_4KB) == 0)
+ page_align_4kb = true;
+ if ((phys_addr % PAGE_SIZE_64KB) == 0)
+ page_align_64kb = true;
+ if ((phys_addr % PAGE_SIZE_1MB) == 0)
+ page_align_1mb = true;
+ if ((phys_addr % PAGE_SIZE_16MB) == 0)
+ page_align_16mb = true;
+
+ if ((!page_align_64kb) && (!page_align_1mb) && (!page_align_4kb)) {
+ status = -EINVAL;
+ goto error_exit;
+ }
+ /* Now decide the entry size */
+ if (size >= PAGE_SIZE_16MB) {
+ if (page_align_16mb) {
+ *size_tlb = SUPER_SECTION;
+ *entry_size = PAGE_SIZE_16MB;
+ } else if (page_align_1mb) {
+ *size_tlb = SECTION;
+ *entry_size = PAGE_SIZE_1MB;
+ } else if (page_align_64kb) {
+ *size_tlb = LARGE_PAGE;
+ *entry_size = PAGE_SIZE_64KB;
+ } else if (page_align_4kb) {
+ *size_tlb = SMALL_PAGE;
+ *entry_size = PAGE_SIZE_4KB;
+ } else {
+ status = -EINVAL;
+ goto error_exit;
+ }
+ } else if (size >= PAGE_SIZE_1MB && size < PAGE_SIZE_16MB) {
+ if (page_align_1mb) {
+ *size_tlb = SECTION;
+ *entry_size = PAGE_SIZE_1MB;
+ } else if (page_align_64kb) {
+ *size_tlb = LARGE_PAGE;
+ *entry_size = PAGE_SIZE_64KB;
+ } else if (page_align_4kb) {
+ *size_tlb = SMALL_PAGE;
+ *entry_size = PAGE_SIZE_4KB;
+ } else {
+ status = -EINVAL;
+ goto error_exit;
+ }
+ } else if (size > PAGE_SIZE_4KB &&
+ size < PAGE_SIZE_1MB) {
+ if (page_align_64kb) {
+ *size_tlb = LARGE_PAGE;
+ *entry_size = PAGE_SIZE_64KB;
+ } else if (page_align_4kb) {
+ *size_tlb = SMALL_PAGE;
+ *entry_size = PAGE_SIZE_4KB;
+ } else {
+ status = -EINVAL;
+ goto error_exit;
+ }
+ } else if (size == PAGE_SIZE_4KB) {
+ if (page_align_4kb) {
+ *size_tlb = SMALL_PAGE;
+ *entry_size = PAGE_SIZE_4KB;
+ } else {
+ status = -EINVAL;
+ goto error_exit;
+ }
+ } else {
+ status = -EINVAL;
+ goto error_exit;
+ }
+
+ DPRINTK("< GetMMUEntrySize status %x\n", status);
+ return 0;
+error_exit:
+ DPRINTK("< GetMMUEntrySize FAILED !!!!!!\n");
+ return status;
+}
+
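+/*
+ * Example (illustrative): pa = 0x90200000 (1MB-aligned) with
+ * size = 0x500000 selects SECTION with a 1MB entry size; the caller then
+ * advances pa/size and calls again for the remainder.
+ */
+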
+/*=========================================
+ * Add DSP MMU entries corresponding to given MPU-Physical address
+ * and DSP-virtual address
+ */
+static int add_dsp_mmu_entry(u32 *phys_addr, u32 *dsp_addr,
+ u32 size)
+{
+ u32 mapped_size = 0;
+ enum pagetype size_tlb = SECTION;
+ u32 entry_size = 0;
+ int status = 0;
+ struct iotlb_entry tlb_entry;
+ int retval = 0;
+
+
+ DPRINTK("Entered add_dsp_mmu_entry phys_addr = "
+ "0x%x, dsp_addr = 0x%x,size = 0x%x\n",
+ *phys_addr, *dsp_addr, size);
+
+ while ((mapped_size < size) && (status == 0)) {
+ status = get_mmu_entry_size(*phys_addr,
+ (size - mapped_size), &size_tlb, &entry_size);
+ if (status < 0)
+ goto error_exit;
+
+ if (size_tlb == SUPER_SECTION)
+ tlb_entry.pgsz = MMU_CAM_PGSZ_16M;
+
+ else if (size_tlb == SECTION)
+ tlb_entry.pgsz = MMU_CAM_PGSZ_1M;
+
+ else if (size_tlb == LARGE_PAGE)
+ tlb_entry.pgsz = MMU_CAM_PGSZ_64K;
+
+ else if (size_tlb == SMALL_PAGE)
+ tlb_entry.pgsz = MMU_CAM_PGSZ_4K;
+
+ tlb_entry.elsz = MMU_RAM_ELSZ_16;
+ tlb_entry.endian = MMU_RAM_ENDIAN_LITTLE;
+ tlb_entry.mixed = MMU_RAM_MIXED;
+ tlb_entry.prsvd = MMU_CAM_P;
+ tlb_entry.valid = MMU_CAM_V;
+
+ tlb_entry.da = *dsp_addr;
+ tlb_entry.pa = *phys_addr;
+		DPRINTK("pte set ducati_iommu_ptr = %p, da = 0x%x, pa = 0x%x\n",
+				ducati_iommu_ptr, tlb_entry.da, tlb_entry.pa);
+ retval = load_iotlb_entry(ducati_iommu_ptr, &tlb_entry);
+ if (retval < 0)
+ goto error_exit;
+ mapped_size += entry_size;
+ *phys_addr += entry_size;
+ *dsp_addr += entry_size;
+ }
+
+ return 0;
+error_exit:
+	printk(KERN_ERR "pte set failure retval = 0x%x, status = 0x%x\n",
+ retval, status);
+ return retval;
+}
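+
+/*
+ * Note: add_dsp_mmu_entry() advances both *phys_addr and *dsp_addr by
+ * the amount actually mapped, so callers such as ducati_mmu_init()
+ * can lay out consecutive regions without recomputing addresses.
+ */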
+
+
+/*=============================================
+ * Alternative path (currently compiled out) that adds DSP MMU entries
+ * for a given MPU physical address and DSP virtual address via
+ * ducati_mem_map()
+ */
+#if 0
+static int add_entry_ext(u32 *phys_addr, u32 *dsp_addr,
+ u32 size)
+{
+ u32 mapped_size = 0;
+ enum pagetype size_tlb = SECTION;
+ u32 entry_size = 0;
+ int status = 0;
+ u32 page_size = HW_PAGE_SIZE_1MB;
+ u32 flags = 0;
+
+ flags = (DSP_MAPELEMSIZE32 | DSP_MAPLITTLEENDIAN |
+ DSP_MAPPHYSICALADDR);
+ while ((mapped_size < size) && (status == 0)) {
+
+		/* get_mmu_entry_size() fills size_tlb and entry_size based
+		 * on the alignment and the amount of memory left to map */
+ status = get_mmu_entry_size(*phys_addr,
+ (size - mapped_size),
+ &size_tlb,
+ &entry_size);
+
+ if (size_tlb == SUPER_SECTION)
+ page_size = HW_PAGE_SIZE_16MB;
+ else if (size_tlb == SECTION)
+ page_size = HW_PAGE_SIZE_1MB;
+ else if (size_tlb == LARGE_PAGE)
+ page_size = HW_PAGE_SIZE_64KB;
+ else if (size_tlb == SMALL_PAGE)
+ page_size = HW_PAGE_SIZE_4KB;
+
+ if (status == 0) {
+
+ ducati_mem_map(*phys_addr,
+ *dsp_addr, page_size, flags);
+ mapped_size += entry_size;
+ *phys_addr += entry_size;
+ *dsp_addr += entry_size;
+ }
+ }
+ return status;
+}
+#endif
+
+void ducati_tlb_dump(void)
+{
+#ifdef CONFIG_OMAP_IOMMU_DEBUG_MODULE
+	char *p;
+
+	p = kmalloc(1000, GFP_KERNEL);
+	if (!p)
+		return;
+	dump_tlb_entries(ducati_iommu_ptr, p, 1000);
+ printk(KERN_INFO "%8s %8s %2s\n", "cam:", "ram:", "preserved");
+ printk(KERN_INFO "-----------------------------------------\n");
+ printk(KERN_INFO "%s", p);
+ kfree(p);
+#endif
+ return;
+}
+
+/*================================
+ * Initialize the Ducati MMU.
+ */
+int ducati_mmu_init(u32 a_phy_addr)
+{
+ int ret_val = 0;
+ u32 phys_addr = 0;
+ u32 num_l4_entries;
+ u32 i = 0;
+ u32 num_l3_mem_entries = 0;
+ u32 virt_addr = 0;
+
+ num_l4_entries = (sizeof(l4_map) / sizeof(struct mmu_entry));
+ num_l3_mem_entries = sizeof(l3_memory_regions) /
+ sizeof(struct memory_entry);
+
+	DPRINTK("\nProgramming Ducati MMU using linear address\n");
+
+ phys_addr = a_phy_addr;
+
+ printk(KERN_ALERT " Programming Ducati memory regions\n");
+ printk(KERN_ALERT "=========================================\n");
+ for (i = 0; i < num_l3_mem_entries; i++) {
+
+ printk(KERN_ALERT "VA = [0x%x] of size [0x%x] at PA = [0x%x]\n",
+ l3_memory_regions[i].ul_virt_addr,
+ l3_memory_regions[i].ul_size, phys_addr);
+
+ /* OMAP4430 SDC code */
+ /* Adjust below logic if using cacheable shared memory */
+ if (l3_memory_regions[i].ul_virt_addr == \
+ DUCATI_MEM_IPC_HEAP0_ADDR) {
+ shm_phys_addr = phys_addr;
+ }
+ virt_addr = l3_memory_regions[i].ul_virt_addr;
+ ret_val = add_dsp_mmu_entry(&phys_addr, &virt_addr,
+ (l3_memory_regions[i].ul_size));
+
+ if (WARN_ON(ret_val < 0))
+ goto error_exit;
+ }
+
+ printk(KERN_ALERT " Programming Ducati L4 peripherals\n");
+ printk(KERN_ALERT "=========================================\n");
+ for (i = 0; i < num_l4_entries; i++) {
+ printk(KERN_INFO "PA [0x%x] VA [0x%x] size [0x%x]\n",
+ l4_map[i].ul_phy_addr, l4_map[i].ul_virt_addr,
+ l4_map[i].ul_size);
+ virt_addr = l4_map[i].ul_virt_addr;
+ phys_addr = l4_map[i].ul_phy_addr;
+ ret_val = add_dsp_mmu_entry(&phys_addr,
+ &virt_addr, (l4_map[i].ul_size));
+ if (WARN_ON(ret_val < 0)) {
+
+ DPRINTK("**** Failed to map Peripheral ****");
+ DPRINTK("Phys addr [0x%x] Virt addr [0x%x] size [0x%x]",
+ l4_map[i].ul_phy_addr, l4_map[i].ul_virt_addr,
+ l4_map[i].ul_size);
+ DPRINTK(" Status [0x%x]", ret_val);
+ goto error_exit;
+ }
+ }
+ ducati_tlb_dump();
+ return 0;
+error_exit:
+ return ret_val;
+}
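+
+/*
+ * Note: while programming the L3 regions above, the physical address
+ * backing DUCATI_MEM_IPC_HEAP0_ADDR is recorded in shm_phys_addr;
+ * get_ducati_virt_mem() below ioremaps that region for the host side
+ * of the IPC shared memory.
+ */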
+
+
+/*========================================
+ * This sets up the Ducati processor
+ *
+ */
+int ducati_setup(void)
+{
+ int ret_val = 0;
+
+	ducati_iommu_ptr = iommu_get("ducati");
+	if (IS_ERR(ducati_iommu_ptr)) {
+		pr_err("Error iommu_get\n");
+		return -EFAULT;
+	}
+	/* Disable table-walking logic (TWL) in the iommu; TLB entries
+	 * are loaded directly */
+	iommu_set_twl(ducati_iommu_ptr, false);
+ ret_val = ducati_mmu_init(CONFIG_DUCATI_BASEIMAGE_PHYS_ADDR);
+ if (WARN_ON(ret_val < 0))
+ goto error_exit;
+ return 0;
+error_exit:
+ WARN_ON(1);
+ printk(KERN_ERR "DUCATI SETUP FAILED !!!!!\n");
+ return ret_val;
+}
+EXPORT_SYMBOL(ducati_setup);
+
+/*============================================
+ * De-Initialize the Ducati MMU and free the
+ * memory allocation for L1 and L2 pages
+ *
+ */
+void ducati_destroy(void)
+{
+ iommu_put(ducati_iommu_ptr);
+ return;
+}
+EXPORT_SYMBOL(ducati_destroy);
+
+/*============================================
+ * Returns the ducati virtual address for IPC shared memory
+ *
+ */
+u32 get_ducati_virt_mem(void)
+{
+ /*shm_virt_addr = (u32)ioremap(shm_phys_addr, DUCATI_SHARED_IPC_LEN);*/
+ shm_virt_addr = (u32)ioremap(shm_phys_addr, DUCATI_MEM_IPC_SHMEM_LEN);
+ return shm_virt_addr;
+}
+EXPORT_SYMBOL(get_ducati_virt_mem);
+
+/*============================================
+ * Unmaps the ducati virtual address for IPC shared memory
+ *
+ */
+void unmap_ducati_virt_mem(u32 shm_virt_addr)
+{
+ iounmap((unsigned int *) shm_virt_addr);
+ return;
+}
+EXPORT_SYMBOL(unmap_ducati_virt_mem);
+
diff --git a/drivers/dsp/syslink/procmgr/proc4430/hw_mmu.c b/drivers/dsp/syslink/procmgr/proc4430/hw_mmu.c
new file mode 100644
index 000000000000..ba0547456ab3
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/hw_mmu.c
@@ -0,0 +1,661 @@
+/*
+ * hw_mmu.c
+ *
+ * Syslink driver support for OMAP Processors.
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <syslink/GlobalTypes.h>
+#include <syslink/MMURegAcM.h>
+#include <syslink/hw_defs.h>
+#include <syslink/hw_mmu.h>
+
+#define MMU_BASE_VAL_MASK 0xFC00
+#define MMU_PAGE_MAX 3
+#define MMU_ELEMENTSIZE_MAX 3
+#define MMU_ADDR_MASK 0xFFFFF000
+#define MMU_TTB_MASK 0xFFFFC000
+#define MMU_SECTION_ADDR_MASK 0xFFF00000
+#define MMU_SSECTION_ADDR_MASK 0xFF000000
+#define MMU_PAGE_TABLE_MASK 0xFFFFFC00
+#define MMU_LARGE_PAGE_MASK 0xFFFF0000
+#define MMU_SMALL_PAGE_MASK 0xFFFFF000
+
+#define MMU_LOAD_TLB 0x00000001
+#define NUM_TLB_ENTRIES 32
+
+/*
+ * type: hw_mmu_pgsiz_t
+ *
+ * desc: Enumerated type used to specify the MMU page size
+ */
+enum hw_mmu_pgsiz_t {
+ HW_MMU_SECTION,
+ HW_MMU_LARGE_PAGE,
+ HW_MMU_SMALL_PAGE,
+ HW_MMU_SUPERSECTION
+
+};
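+
+/*
+ * Note: the declaration order above is relied upon by
+ * mme_set_cam_entry(), which ORs the enum value straight into the
+ * two-bit page-size field of the MMU_CAM register
+ * (SECTION = 0, LARGE_PAGE = 1, SMALL_PAGE = 2, SUPERSECTION = 3).
+ */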
+
+/*
+ * function: mmu_flsh_entry
+ */
+
+static hw_status mmu_flsh_entry(const u32 base_address);
+
+/*
+ * function: mme_set_cam_entry
+ */
+
+static hw_status mme_set_cam_entry(const u32 base_address,
+ const u32 page_size,
+ const u32 preserve_bit,
+ const u32 valid_bit,
+ const u32 virt_addr_tag);
+
+/*
+ * function: mmu_set_ram_entry
+ */
+static hw_status mmu_set_ram_entry(const u32 base_address,
+ const u32 physical_addr,
+ enum hw_endianism_t endianism,
+ enum hw_elemnt_siz_t element_size,
+ enum hw_mmu_mixed_size_t mixedSize);
+
+/*
+ * hw functions
+ */
+
+hw_status hw_mmu_enable(const u32 base_address)
+{
+ hw_status status = RET_OK;
+
+ MMUMMU_CNTLMMUEnableWrite32(base_address, HW_SET);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_enable);
+
+hw_status hw_mmu_disable(const u32 base_address)
+{
+ hw_status status = RET_OK;
+
+ MMUMMU_CNTLMMUEnableWrite32(base_address, HW_CLEAR);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_disable);
+
+hw_status hw_mmu_autoidle_en(const u32 base_address)
+{
+	hw_status status = RET_OK;
+
+	mmu_sisconf_auto_idle_set32(base_address, HW_SET);
+
+	return status;
+}
+EXPORT_SYMBOL(hw_mmu_autoidle_en);
+
+hw_status hw_mmu_nulck_set(const u32 base_address, u32 *num_lcked_entries)
+{
+ hw_status status = RET_OK;
+
+ *num_lcked_entries = MMUMMU_LOCKBaseValueRead32(base_address);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_nulck_set);
+
+
+hw_status hw_mmu_numlocked_set(const u32 base_address, u32 num_lcked_entries)
+{
+ hw_status status = RET_OK;
+
+ MMUMMU_LOCKBaseValueWrite32(base_address, num_lcked_entries);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_numlocked_set);
+
+
+hw_status hw_mmu_vctm_numget(const u32 base_address, u32 *vctm_entry_num)
+{
+ hw_status status = RET_OK;
+
+ *vctm_entry_num = MMUMMU_LOCKCurrentVictimRead32(base_address);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_vctm_numget);
+
+
+hw_status hw_mmu_victim_numset(const u32 base_address, u32 vctm_entry_num)
+{
+ hw_status status = RET_OK;
+
+ mmu_lck_crnt_vctmwite32(base_address, vctm_entry_num);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_victim_numset);
+
+hw_status hw_mmu_tlb_flushAll(const u32 base_address)
+{
+ hw_status status = RET_OK;
+
+ MMUMMU_GFLUSHGlobalFlushWrite32(base_address, HW_SET);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_tlb_flushAll);
+
+hw_status hw_mmu_eventack(const u32 base_address, u32 irq_mask)
+{
+ hw_status status = RET_OK;
+
+ MMUMMU_IRQSTATUSWriteRegister32(base_address, irq_mask);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_eventack);
+
+hw_status hw_mmu_event_disable(const u32 base_address, u32 irq_mask)
+{
+ hw_status status = RET_OK;
+ u32 irqReg;
+ irqReg = MMUMMU_IRQENABLEReadRegister32(base_address);
+
+ MMUMMU_IRQENABLEWriteRegister32(base_address, irqReg & ~irq_mask);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_event_disable);
+
+hw_status hw_mmu_event_enable(const u32 base_address, u32 irq_mask)
+{
+ hw_status status = RET_OK;
+ u32 irqReg;
+
+ irqReg = MMUMMU_IRQENABLEReadRegister32(base_address);
+
+ MMUMMU_IRQENABLEWriteRegister32(base_address, irqReg | irq_mask);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_event_enable);
+
+hw_status hw_mmu_event_status(const u32 base_address, u32 *irq_mask)
+{
+ hw_status status = RET_OK;
+
+ *irq_mask = MMUMMU_IRQSTATUSReadRegister32(base_address);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_event_status);
+
+hw_status hw_mmu_flt_adr_rd(const u32 base_address, u32 *addr)
+{
+ hw_status status = RET_OK;
+
+	/* Check the input parameters */
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+ /* read values from register */
+ *addr = MMUMMU_FAULT_ADReadRegister32(base_address);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_flt_adr_rd);
+
+
+hw_status hw_mmu_ttbset(const u32 base_address, u32 ttb_phys_addr)
+{
+ hw_status status = RET_OK;
+ u32 loadTTB;
+
+	/* Check the input parameters */
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+
+ loadTTB = ttb_phys_addr & ~0x7FUL;
+ /* write values to register */
+ MMUMMU_TTBWriteRegister32(base_address, loadTTB);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_ttbset);
+
+hw_status hw_mmu_twl_enable(const u32 base_address)
+{
+ hw_status status = RET_OK;
+
+ MMUMMU_CNTLTWLEnableWrite32(base_address, HW_SET);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_twl_enable);
+
+hw_status hw_mmu_twl_disable(const u32 base_address)
+{
+ hw_status status = RET_OK;
+
+ MMUMMU_CNTLTWLEnableWrite32(base_address, HW_CLEAR);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_twl_disable);
+
+
+hw_status hw_mmu_tlb_flush(const u32 base_address,
+ u32 virtual_addr,
+ u32 page_size)
+{
+ hw_status status = RET_OK;
+ u32 virt_addr_tag;
+ enum hw_mmu_pgsiz_t pg_sizeBits;
+
+ switch (page_size) {
+ case HW_PAGE_SIZE_4KB:
+ pg_sizeBits = HW_MMU_SMALL_PAGE;
+ break;
+
+ case HW_PAGE_SIZE_64KB:
+ pg_sizeBits = HW_MMU_LARGE_PAGE;
+ break;
+
+ case HW_PAGE_SIZE_1MB:
+ pg_sizeBits = HW_MMU_SECTION;
+ break;
+
+ case HW_PAGE_SIZE_16MB:
+ pg_sizeBits = HW_MMU_SUPERSECTION;
+ break;
+
+ default:
+ return RET_FAIL;
+ }
+
+ /* Generate the 20-bit tag from virtual address */
+ virt_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
+
+ mme_set_cam_entry(base_address, pg_sizeBits, 0, 0, virt_addr_tag);
+
+ mmu_flsh_entry(base_address);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_tlb_flush);
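+
+/*
+ * Note: hw_mmu_tlb_flush() works by loading the MMU_CAM register with
+ * the target tag (valid and preserve bits cleared) and then writing
+ * FLUSH_ENTRY, which invalidates only the matching TLB entry; use
+ * hw_mmu_tlb_flushAll() for a global flush.
+ */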
+
+
+hw_status hw_mmu_tlb_add(const u32 base_address,
+ u32 physical_addr,
+ u32 virtual_addr,
+ u32 page_size,
+ u32 entryNum,
+ struct hw_mmu_map_attrs_t *map_attrs,
+ enum hw_set_clear_t preserve_bit,
+ enum hw_set_clear_t valid_bit)
+{
+ hw_status status = RET_OK;
+ u32 lockReg;
+ u32 virt_addr_tag;
+ enum hw_mmu_pgsiz_t mmu_pg_size;
+
+	/* Check the input parameters */
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+ CHECK_INPUT_RANGE_MIN0(page_size, MMU_PAGE_MAX, RET_PARAM_OUT_OF_RANGE,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+ CHECK_INPUT_RANGE_MIN0(map_attrs->element_size,
+ MMU_ELEMENTSIZE_MAX, RET_PARAM_OUT_OF_RANGE,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+
+ switch (page_size) {
+ case HW_PAGE_SIZE_4KB:
+ mmu_pg_size = HW_MMU_SMALL_PAGE;
+ break;
+
+ case HW_PAGE_SIZE_64KB:
+ mmu_pg_size = HW_MMU_LARGE_PAGE;
+ break;
+
+ case HW_PAGE_SIZE_1MB:
+ mmu_pg_size = HW_MMU_SECTION;
+ break;
+
+ case HW_PAGE_SIZE_16MB:
+ mmu_pg_size = HW_MMU_SUPERSECTION;
+ break;
+
+ default:
+ return RET_FAIL;
+ }
+
+ lockReg = mmu_lckread_reg_32(base_address);
+
+ /* Generate the 20-bit tag from virtual address */
+ virt_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
+
+ /* Write the fields in the CAM Entry Register */
+ mme_set_cam_entry(base_address, mmu_pg_size, preserve_bit, valid_bit,
+ virt_addr_tag);
+
+ /* Write the different fields of the RAM Entry Register */
+	/* endianism of the page, element size of the page (8, 16, 32, 64 bit) */
+ mmu_set_ram_entry(base_address, physical_addr,
+ map_attrs->endianism, map_attrs->element_size, map_attrs->mixedSize);
+
+ /* Update the MMU Lock Register */
+ /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
+ mmu_lck_crnt_vctmwite32(base_address, entryNum);
+
+ /* Enable loading of an entry in TLB by writing 1 into LD_TLB_REG
+ register */
+ mmu_ld_tlbwrt_reg32(base_address, MMU_LOAD_TLB);
+
+ mmu_lck_write_reg32(base_address, lockReg);
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_tlb_add);
+
+
+
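+/*
+ * Note on the descriptor encodings used below: hw_mmu_pte_set() emits
+ * ARM-v6-style page-table entries. First-level sections and
+ * supersections use type bits [1:0] = 0b10 (bit 18 set marks the 16MB
+ * supersection), a coarse page-table pointer uses 0b01, and in the
+ * second level a small page uses 0b10 while a large page uses 0b01.
+ * 64KB and 16MB entries are written 16 times over, as the page-table
+ * format requires for large pages and supersections.
+ */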
+hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
+ u32 physical_addr,
+ u32 virtual_addr,
+ u32 page_size,
+ struct hw_mmu_map_attrs_t *map_attrs)
+{
+ hw_status status = RET_OK;
+ u32 pte_addr, pte_val;
+ long int num_entries = 1;
+
+ switch (page_size) {
+
+ case HW_PAGE_SIZE_4KB:
+ pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, virtual_addr &
+ MMU_SMALL_PAGE_MASK);
+ pte_val = ((physical_addr & MMU_SMALL_PAGE_MASK) |
+ (map_attrs->endianism << 9) |
+ (map_attrs->element_size << 4) |
+ (map_attrs->mixedSize << 11) | 2
+ );
+ break;
+
+ case HW_PAGE_SIZE_64KB:
+ num_entries = 16;
+ pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, virtual_addr &
+ MMU_LARGE_PAGE_MASK);
+ pte_val = ((physical_addr & MMU_LARGE_PAGE_MASK) |
+ (map_attrs->endianism << 9) |
+ (map_attrs->element_size << 4) |
+ (map_attrs->mixedSize << 11) | 1
+ );
+ break;
+
+ case HW_PAGE_SIZE_1MB:
+ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, virtual_addr &
+ MMU_SECTION_ADDR_MASK);
+ pte_val = ((((physical_addr & MMU_SECTION_ADDR_MASK) |
+ (map_attrs->endianism << 15) |
+ (map_attrs->element_size << 10) |
+ (map_attrs->mixedSize << 17)) &
+ ~0x40000) | 0x2
+ );
+ break;
+
+ case HW_PAGE_SIZE_16MB:
+ num_entries = 16;
+ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, virtual_addr &
+ MMU_SSECTION_ADDR_MASK);
+ pte_val = (((physical_addr & MMU_SSECTION_ADDR_MASK) |
+ (map_attrs->endianism << 15) |
+ (map_attrs->element_size << 10) |
+ (map_attrs->mixedSize << 17)
+ ) | 0x40000 | 0x2
+ );
+ break;
+
+ case HW_MMU_COARSE_PAGE_SIZE:
+ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, virtual_addr &
+ MMU_SECTION_ADDR_MASK);
+ pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
+ break;
+
+ default:
+ return RET_FAIL;
+ }
+
+ while (--num_entries >= 0)
+ ((u32 *)pte_addr)[num_entries] = pte_val;
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_pte_set);
+
+hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
+ u32 virtual_addr,
+ u32 pg_size)
+{
+ hw_status status = RET_OK;
+ u32 pte_addr;
+ long int num_entries = 1;
+
+ switch (pg_size) {
+ case HW_PAGE_SIZE_4KB:
+ pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
+ virtual_addr & MMU_SMALL_PAGE_MASK);
+ break;
+
+ case HW_PAGE_SIZE_64KB:
+ num_entries = 16;
+ pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
+ virtual_addr & MMU_LARGE_PAGE_MASK);
+ break;
+
+ case HW_PAGE_SIZE_1MB:
+ case HW_MMU_COARSE_PAGE_SIZE:
+ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+ virtual_addr & MMU_SECTION_ADDR_MASK);
+ break;
+
+ case HW_PAGE_SIZE_16MB:
+ num_entries = 16;
+ pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+ virtual_addr & MMU_SSECTION_ADDR_MASK);
+ break;
+
+ default:
+ return RET_FAIL;
+ }
+
+ while (--num_entries >= 0)
+ ((u32 *)pte_addr)[num_entries] = 0;
+
+ return status;
+}
+EXPORT_SYMBOL(hw_mmu_pte_clear);
+
+/*
+ * function: mmu_flsh_entry
+ */
+static hw_status mmu_flsh_entry(const u32 base_address)
+{
+ hw_status status = RET_OK;
+ u32 flushEntryData = 0x1;
+
+	/* Check the input parameters */
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+
+ /* write values to register */
+ MMUMMU_FLUSH_ENTRYWriteRegister32(base_address, flushEntryData);
+
+ return status;
+}
+
+/*
+ * function: mme_set_cam_entry
+ */
+static hw_status mme_set_cam_entry(const u32 base_address,
+ const u32 page_size,
+ const u32 preserve_bit,
+ const u32 valid_bit,
+ const u32 virt_addr_tag)
+{
+ hw_status status = RET_OK;
+ u32 mmuCamReg;
+
+	/* Check the input parameters */
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+
+ mmuCamReg = (virt_addr_tag << 12);
+ mmuCamReg = (mmuCamReg) | (page_size) | (valid_bit << 2)
+ | (preserve_bit << 3);
+
+ /* write values to register */
+ MMUMMU_CAMWriteRegister32(base_address, mmuCamReg);
+
+ return status;
+}
+
+/*
+ * function: mmu_set_ram_entry
+ */
+static hw_status mmu_set_ram_entry(const u32 base_address,
+ const u32 physical_addr,
+ enum hw_endianism_t endianism,
+ enum hw_elemnt_siz_t element_size,
+ enum hw_mmu_mixed_size_t mixedSize)
+{
+ hw_status status = RET_OK;
+ u32 mmuRamReg;
+
+	/* Check the input parameters */
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+ CHECK_INPUT_RANGE_MIN0(element_size, MMU_ELEMENTSIZE_MAX,
+ RET_PARAM_OUT_OF_RANGE,
+ RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
+
+ mmuRamReg = (physical_addr & MMU_ADDR_MASK);
+ mmuRamReg = (mmuRamReg) | ((endianism << 9) | (element_size << 7)
+ | (mixedSize << 6));
+
+ /* write values to register */
+ MMUMMU_RAMWriteRegister32(base_address, mmuRamReg);
+
+	return status;
+}
+
+u32 hw_mmu_fault_dump(const u32 base_address)
+{
+ u32 reg;
+
+ reg = MMUMMU_FAULT_ADReadRegister32(base_address);
+	printk(KERN_INFO "Fault Address Register = 0x%x\n", reg);
+	reg = MMUMMU_FAULT_PCReadRegister32(base_address);
+	printk(KERN_INFO "Fault PC Register = 0x%x\n", reg);
+	printk(KERN_INFO "Fault PC address doesn't show the right value on "
+			"DUCATI because of a HW limitation\n");
+	reg = MMUMMU_FAULT_STATUSReadRegister32(base_address);
+	printk(KERN_INFO "Fault Status Register = 0x%x\n", reg);
+ reg = MMUMMU_FAULT_EMUAddressReadRegister32(base_address);
+ printk(KERN_INFO "Fault EMU Address = 0x%x\n", reg);
+ return 0;
+}
+EXPORT_SYMBOL(hw_mmu_fault_dump);
+
+long hw_mmu_tlb_dump(const u32 base_address, bool shw_inv_entries)
+{
+ u32 i;
+ u32 lockSave;
+ u32 cam;
+ u32 ram;
+
+	/* Save off the lock register contents;
+	 * we'll restore it when we are done */
+
+ lockSave = mmu_lckread_reg_32(base_address);
+
+ printk(KERN_INFO "TLB locked entries = %u, current victim = %u\n",
+ ((lockSave & MMU_MMU_LOCK_BaseValue_MASK)
+ >> MMU_MMU_LOCK_BaseValue_OFFSET),
+ ((lockSave & MMU_MMU_LOCK_CurrentVictim_MASK)
+ >> MMU_MMU_LOCK_CurrentVictim_OFFSET));
+ printk(KERN_INFO "=============================================\n");
+ for (i = 0; i < NUM_TLB_ENTRIES; i++) {
+ mmu_lck_crnt_vctmwite32(base_address, i);
+ cam = MMUMMU_CAMReadRegister32(base_address);
+ ram = MMUMMU_RAMReadRegister32(base_address);
+
+ if ((cam & 0x4) != 0) {
+ printk(KERN_INFO "TLB Entry [0x%2x]: VA = 0x%8x "
+ "PA = 0x%8x Protected = 0x%1x\n",
+ i, (cam & MMU_ADDR_MASK), (ram & MMU_ADDR_MASK),
+ (cam & 0x8) ? 1 : 0);
+
+ } else if (shw_inv_entries != false)
+ printk(KERN_ALERT "TLB Entry [0x%x]: <INVALID>\n", i);
+ }
+ mmu_lck_write_reg32(base_address, lockSave);
+ return RET_OK;
+}
+EXPORT_SYMBOL(hw_mmu_tlb_dump);
+
+u32 hw_mmu_pte_phyaddr(u32 pte_val, u32 pte_size)
+{
+ u32 ret_val = 0;
+
+ switch (pte_size) {
+
+ case HW_PAGE_SIZE_4KB:
+ ret_val = pte_val & MMU_SMALL_PAGE_MASK;
+ break;
+ case HW_PAGE_SIZE_64KB:
+ ret_val = pte_val & MMU_LARGE_PAGE_MASK;
+ break;
+
+ case HW_PAGE_SIZE_1MB:
+ ret_val = pte_val & MMU_SECTION_ADDR_MASK;
+ break;
+ case HW_PAGE_SIZE_16MB:
+ ret_val = pte_val & MMU_SSECTION_ADDR_MASK;
+ break;
+ default:
+ /* Invalid */
+ break;
+
+ }
+
+ return ret_val;
+}
+EXPORT_SYMBOL(hw_mmu_pte_phyaddr);
diff --git a/drivers/dsp/syslink/procmgr/proc4430/proc4430.c b/drivers/dsp/syslink/procmgr/proc4430/proc4430.c
new file mode 100644
index 000000000000..cf798a95c6c7
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/proc4430.c
@@ -0,0 +1,1085 @@
+/*
+ * proc4430.c
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+/* Module level headers */
+#include "../procdefs.h"
+#include "../processor.h"
+#include <procmgr.h>
+#include "../procmgr_drvdefs.h"
+#include "proc4430.h"
+#include "../../ipu_pm/ipu_pm.h"
+#include "dmm4430.h"
+#include <syslink/multiproc.h>
+#include <syslink/ducatienabler.h>
+#include <syslink/platform_mem.h>
+#include <syslink/atomic_linux.h>
+
+#define DUCATI_DMM_START_ADDR 0xa0000000
+#define DUCATI_DMM_POOL_SIZE 0x6000000
+
+#define SYS_M3 2
+#define APP_M3 1
+#define CORE_PRM_BASE OMAP2_L4_IO_ADDRESS(0x4a306700)
+#define CORE_CM2_DUCATI_CLKSTCTRL OMAP2_L4_IO_ADDRESS(0x4A008900)
+#define CORE_CM2_DUCATI_CLKCTRL OMAP2_L4_IO_ADDRESS(0x4A008920)
+#define RM_MPU_M3_RSTCTRL_OFFSET 0x210
+#define RM_MPU_M3_RSTST_OFFSET 0x214
+#define RM_MPU_M3_RST1 0x1
+#define RM_MPU_M3_RST2 0x2
+#define RM_MPU_M3_RST3 0x4
+
+#define OMAP4430PROC_MODULEID (u16) 0xbbec
+
+/* Macro to make a correct module magic number with refCount */
+#define OMAP4430PROC_MAKE_MAGICSTAMP(x) ((OMAP4430PROC_MODULEID << 12u) | (x))
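+
+/*
+ * Note: the reference count is kept OR-ed with the module ID in its
+ * upper bits; the atomic_cmpmask_and_set()/atomic_cmpmask_and_lt()
+ * helpers compare under that mask, which appears intended to let a
+ * zero-initialized atomic be told apart from a properly stamped
+ * count of zero.
+ */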
+
+/*OMAP4430 Module state object */
+struct proc4430_module_object {
+ u32 config_size;
+ /* Size of configuration structure */
+ struct proc4430_config cfg;
+ /* OMAP4430 configuration structure */
+ struct proc4430_config def_cfg;
+ /* Default module configuration */
+ struct proc4430_params def_inst_params;
+ /* Default parameters for the OMAP4430 instances */
+ void *proc_handles[MULTIPROC_MAXPROCESSORS];
+ /* Processor handle array. */
+ struct mutex *gate_handle;
+ /* void * of gate to be used for local thread safety */
+ atomic_t ref_count;
+};
+
+/*
+ OMAP4430 instance object.
+ */
+struct proc4430_object {
+ struct proc4430_params params;
+ /* Instance parameters (configuration values) */
+ atomic_t attach_count;
+ /* attach reference count */
+};
+
+
+/* =================================
+ * Globals
+ * =================================
+ */
+/*
+ OMAP4430 state object variable
+ */
+
+static struct proc4430_module_object proc4430_state = {
+ .config_size = sizeof(struct proc4430_config),
+ .gate_handle = NULL,
+ .def_inst_params.num_mem_entries = 0u,
+ .def_inst_params.mem_entries = NULL,
+ .def_inst_params.reset_vector_mem_entry = 0
+};
+
+
+/* =================================
+ * APIs directly called by applications
+ * =================================
+ */
+/*
+ * Function to get the default configuration for the OMAP4430
+ * module.
+ *
+ * This function can be called by the application to have its
+ * configuration parameter for proc4430_setup filled in by the
+ * OMAP4430 module with the default parameters. If the user
+ * does not wish to change the default parameters, this API need
+ * not be called.
+ */
+void proc4430_get_config(struct proc4430_config *cfg)
+{
+ BUG_ON(cfg == NULL);
+ memcpy(cfg, &(proc4430_state.def_cfg),
+ sizeof(struct proc4430_config));
+}
+EXPORT_SYMBOL(proc4430_get_config);
+
+/*
+ * Function to setup the OMAP4430 module.
+ *
+ * This function sets up the OMAP4430 module. This function
+ * must be called before any other instance-level APIs can be
+ * invoked.
+ * Module-level configuration needs to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, then proc4430_get_config can be called to get the
+ * configuration filled with the default values. After this, only
+ * the required configuration values can be changed. If the user
+ * does not wish to make any change in the default parameters, the
+ * application can simply call proc4430_setup with NULL
+ * parameters. The default parameters would get automatically used.
+ */
+int proc4430_setup(struct proc4430_config *cfg)
+{
+ int retval = 0;
+ struct proc4430_config tmp_cfg;
+ atomic_cmpmask_and_set(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&proc4430_state.ref_count) !=
+ OMAP4430PROC_MAKE_MAGICSTAMP(1)) {
+ return 1;
+ }
+
+ if (cfg == NULL) {
+ proc4430_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+
+ dmm_create();
+ dmm_create_tables(DUCATI_DMM_START_ADDR, DUCATI_DMM_POOL_SIZE);
+
+ /* Create a default gate handle for local module protection. */
+ proc4430_state.gate_handle =
+ kmalloc(sizeof(struct mutex), GFP_KERNEL);
+ if (proc4430_state.gate_handle == NULL) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ mutex_init(proc4430_state.gate_handle);
+
+ /* Initialize the name to handles mapping array. */
+ memset(&proc4430_state.proc_handles, 0,
+ (sizeof(void *) * MULTIPROC_MAXPROCESSORS));
+
+ /* Copy the user provided values into the state object. */
+ memcpy(&proc4430_state.cfg, cfg,
+ sizeof(struct proc4430_config));
+
+ return 0;
+
+error:
+ atomic_dec_return(&proc4430_state.ref_count);
+ dmm_delete_tables();
+ dmm_destroy();
+
+ return retval;
+}
+EXPORT_SYMBOL(proc4430_setup);
+
+/*
+ * Function to destroy the OMAP4430 module.
+ *
+ * Once this function is called, other OMAP4430 module APIs,
+ * except for the proc4430_get_config API cannot be called
+ * anymore.
+ */
+int proc4430_destroy(void)
+{
+ int retval = 0;
+ u16 i;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (!(atomic_dec_return(&proc4430_state.ref_count)
+ == OMAP4430PROC_MAKE_MAGICSTAMP(0))) {
+
+ retval = 1;
+ goto exit;
+ }
+
+	/* Delete any OMAP4430 instances that have not been
+	 * deleted so far.
+	 */
+
+ for (i = 0; i < MULTIPROC_MAXPROCESSORS; i++) {
+ if (proc4430_state.proc_handles[i] == NULL)
+ continue;
+ proc4430_delete(&(proc4430_state.proc_handles[i]));
+ }
+
+ /* Check if the gate_handle was created internally. */
+ if (proc4430_state.gate_handle != NULL) {
+ mutex_destroy(proc4430_state.gate_handle);
+ kfree(proc4430_state.gate_handle);
+ }
+
+exit:
+ return retval;
+}
+EXPORT_SYMBOL(proc4430_destroy);
+
+/*=================================================
+ * Function to initialize the parameters for this Processor
+ * instance.
+ */
+void proc4430_params_init(void *handle, struct proc4430_params *params)
+{
+ struct proc4430_object *proc_object = (struct proc4430_object *)handle;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_params_init failed "
+ "Module not initialized");
+ return;
+ }
+
+ if (WARN_ON(params == NULL)) {
+ printk(KERN_ERR "proc4430_params_init failed "
+ "Argument of type proc4430_params * "
+ "is NULL");
+ return;
+ }
+
+ if (handle == NULL)
+ memcpy(params, &(proc4430_state.def_inst_params),
+ sizeof(struct proc4430_params));
+ else
+ memcpy(params, &(proc_object->params),
+ sizeof(struct proc4430_params));
+}
+EXPORT_SYMBOL(proc4430_params_init);
+
+/*===================================================
+ *Function to create an instance of this Processor.
+ *
+ */
+void *proc4430_create(u16 proc_id, const struct proc4430_params *params)
+{
+ struct processor_object *handle = NULL;
+ struct proc4430_object *object = NULL;
+
+ BUG_ON(!IS_VALID_PROCID(proc_id));
+ BUG_ON(params == NULL);
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_create failed "
+ "Module not initialized");
+ goto error;
+ }
+
+ /* Enter critical section protection. */
+ WARN_ON(mutex_lock_interruptible(proc4430_state.gate_handle));
+ if (proc4430_state.proc_handles[proc_id] != NULL) {
+ handle = proc4430_state.proc_handles[proc_id];
+ goto func_end;
+ } else {
+ handle = (struct processor_object *)
+ vmalloc(sizeof(struct processor_object));
+ if (WARN_ON(handle == NULL))
+ goto func_end;
+
+ handle->proc_fxn_table.attach = &proc4430_attach;
+ handle->proc_fxn_table.detach = &proc4430_detach;
+ handle->proc_fxn_table.start = &proc4430_start;
+ handle->proc_fxn_table.stop = &proc4430_stop;
+ handle->proc_fxn_table.read = &proc4430_read;
+ handle->proc_fxn_table.write = &proc4430_write;
+ handle->proc_fxn_table.control = &proc4430_control;
+ handle->proc_fxn_table.translateAddr =
+ &proc4430_translate_addr;
+ handle->proc_fxn_table.map = &proc4430_map;
+ handle->proc_fxn_table.unmap = &proc4430_unmap;
+ handle->proc_fxn_table.procinfo = &proc4430_proc_info;
+ handle->proc_fxn_table.virt_to_phys = &proc4430_virt_to_phys;
+ handle->state = PROC_MGR_STATE_UNKNOWN;
+		handle->object = vmalloc(sizeof(struct proc4430_object));
+		if (WARN_ON(handle->object == NULL)) {
+			vfree(handle);
+			handle = NULL;
+			goto func_end;
+		}
+		handle->proc_id = proc_id;
+		object = (struct proc4430_object *)handle->object;
+ if (params != NULL) {
+ /* Copy params into instance object. */
+ memcpy(&(object->params), (void *)params,
+ sizeof(struct proc4430_params));
+ }
+ if ((params != NULL) && (params->mem_entries != NULL)
+ && (params->num_mem_entries > 0)) {
+ /* Allocate memory for, and copy mem_entries table*/
+ object->params.mem_entries = vmalloc(sizeof(struct
+ proc4430_mem_entry) *
+ params->num_mem_entries);
+ memcpy(object->params.mem_entries,
+ params->mem_entries,
+ (sizeof(struct proc4430_mem_entry) *
+ params->num_mem_entries));
+ }
+ handle->boot_mode = PROC_MGR_BOOTMODE_NOLOAD;
+ /* Set the handle in the state object. */
+ proc4430_state.proc_handles[proc_id] = handle;
+ }
+
+func_end:
+ mutex_unlock(proc4430_state.gate_handle);
+error:
+ return (void *)handle;
+}
+EXPORT_SYMBOL(proc4430_create);
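+
+/*
+ * Note: proc4430_create() behaves as a per-proc_id singleton; a second
+ * call for an already created proc_id returns the existing handle
+ * rather than allocating a new processor_object.
+ */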
+
+/*=================================================
+ * Function to delete an instance of this Processor.
+ *
+ * The user provided pointer to the handle is reset after
+ * successful completion of this function.
+ *
+ */
+int proc4430_delete(void **handle_ptr)
+{
+ int retval = 0;
+ struct proc4430_object *object = NULL;
+ struct processor_object *handle;
+
+ BUG_ON(handle_ptr == NULL);
+ BUG_ON(*handle_ptr == NULL);
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_delete failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+
+ handle = (struct processor_object *)(*handle_ptr);
+ BUG_ON(!IS_VALID_PROCID(handle->proc_id));
+ /* Enter critical section protection. */
+ WARN_ON(mutex_lock_interruptible(proc4430_state.gate_handle));
+ /* Reset handle in PwrMgr handle array. */
+ proc4430_state.proc_handles[handle->proc_id] = NULL;
+ /* Free memory used for the OMAP4430 object. */
+ if (handle->object != NULL) {
+ object = (struct proc4430_object *)handle->object;
+ if (object->params.mem_entries != NULL) {
+ vfree(object->params.mem_entries);
+ object->params.mem_entries = NULL;
+ }
+ vfree(handle->object);
+ handle->object = NULL;
+ }
+ /* Free memory used for the Processor object. */
+ vfree(handle);
+ *handle_ptr = NULL;
+ /* Leave critical section protection. */
+ mutex_unlock(proc4430_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc4430_delete);
+
+/*===================================================
+ * Function to open a handle to an instance of this Processor. This
+ * function is called when access to the Processor is required from
+ * a different process.
+ */
+int proc4430_open(void **handle_ptr, u16 proc_id)
+{
+ int retval = 0;
+
+ BUG_ON(handle_ptr == NULL);
+ BUG_ON(!IS_VALID_PROCID(proc_id));
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_open failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+
+ /* Initialize return parameter handle. */
+ *handle_ptr = NULL;
+
+ /* Check if the PwrMgr exists and return the handle if found. */
+ if (proc4430_state.proc_handles[proc_id] == NULL) {
+ retval = -ENODEV;
+ goto func_exit;
+ } else
+ *handle_ptr = proc4430_state.proc_handles[proc_id];
+func_exit:
+ return retval;
+}
+EXPORT_SYMBOL(proc4430_open);
+
+/*===============================================
+ * Function to close a handle to an instance of this Processor.
+ *
+ */
+int proc4430_close(void *handle)
+{
+ int retval = 0;
+
+ BUG_ON(handle == NULL);
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_close failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+
+ /* nothing to be done for now */
+ return retval;
+}
+EXPORT_SYMBOL(proc4430_close);
+
+/* =================================
+ * APIs called by Processor module (part of function table interface)
+ * =================================
+ */
+/*================================
+ * Function to initialize the slave processor
+ *
+ */
+int proc4430_attach(void *handle, struct processor_attach_params *params)
+{
+ int retval = 0;
+
+ struct processor_object *proc_handle = NULL;
+ struct proc4430_object *object = NULL;
+ u32 map_count = 0;
+ u32 i;
+ memory_map_info map_info;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+		printk(KERN_ERR "proc4430_attach failed "
+			"Module not initialized");
+ return -ENODEV;
+ }
+
+ if (WARN_ON(handle == NULL)) {
+		printk(KERN_ERR "proc4430_attach failed "
+			"Driver handle is NULL");
+ return -EINVAL;
+ }
+
+ if (WARN_ON(params == NULL)) {
+		printk(KERN_ERR "proc4430_attach failed "
+			"Argument processor_attach_params * is NULL");
+ return -EINVAL;
+ }
+
+ proc_handle = (struct processor_object *)handle;
+
+ object = (struct proc4430_object *)proc_handle->object;
+
+ atomic_cmpmask_and_set(&object->attach_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(0));
+ atomic_inc_return(&object->attach_count);
+
+	printk(KERN_INFO "proc4430_attach num_mem_entries = %d\n",
+			object->params.num_mem_entries);
+ /* Return memory information in params. */
+ for (i = 0; (i < object->params.num_mem_entries); i++) {
+ /* If the configured master virtual address is invalid, get the
+ * actual address by mapping the physical address into master
+ * kernel memory space.
+ */
+ if ((object->params.mem_entries[i].master_virt_addr == (u32)-1)
+ && (object->params.mem_entries[i].shared == true)) {
+ map_info.src = object->params.mem_entries[i].phys_addr;
+ map_info.size = object->params.mem_entries[i].size;
+ map_info.is_cached = false;
+ retval = platform_mem_map(&map_info);
+ if (retval != 0) {
+ printk(KERN_ERR "proc4430_attach failed\n");
+ return -EFAULT;
+ }
+ map_count++;
+ object->params.mem_entries[i].master_virt_addr =
+ map_info.dst;
+ params->mem_entries[i].addr
+ [PROC_MGR_ADDRTYPE_MASTERKNLVIRT] =
+ map_info.dst;
+ params->mem_entries[i].addr
+ [PROC_MGR_ADDRTYPE_SLAVEVIRT] =
+ (object->params.mem_entries[i].slave_virt_addr);
+ /* User virtual will be filled by user side. For now,
+ fill in the physical address so that it can be used
+ by mmap to remap this region into user-space */
+ params->mem_entries[i].addr
+ [PROC_MGR_ADDRTYPE_MASTERUSRVIRT] = \
+ object->params.mem_entries[i].phys_addr;
+ params->mem_entries[i].size =
+ object->params.mem_entries[i].size;
+ }
+ }
+ params->num_mem_entries = map_count;
+ return retval;
+}
+
+
+/*==========================================
+ * Function to detach from the Processor.
+ *
+ */
+int proc4430_detach(void *handle)
+{
+ struct processor_object *proc_handle = NULL;
+ struct proc4430_object *object = NULL;
+ u32 i;
+ memory_unmap_info unmap_info;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+
+ printk(KERN_ERR "proc4430_detach failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+ if (WARN_ON(handle == NULL)) {
+ printk(KERN_ERR "proc4430_detach failed "
+ "Argument Driverhandle is NULL");
+ return -EINVAL;
+ }
+
+ proc_handle = (struct processor_object *)handle;
+ object = (struct proc4430_object *)proc_handle->object;
+
+ if (!(atomic_dec_return(&object->attach_count) == \
+ OMAP4430PROC_MAKE_MAGICSTAMP(0)))
+ return 1;
+
+ for (i = 0; (i < object->params.num_mem_entries); i++) {
+ if ((object->params.mem_entries[i].master_virt_addr > 0)
+ && (object->params.mem_entries[i].shared == true)) {
+ unmap_info.addr =
+ object->params.mem_entries[i].master_virt_addr;
+ unmap_info.size = object->params.mem_entries[i].size;
+ platform_mem_unmap(&unmap_info);
+ object->params.mem_entries[i].master_virt_addr =
+ (u32)-1;
+ }
+ }
+ return 0;
+}
+
+/*==========================================
+ * Function to start the slave processor
+ *
+ * Start the slave processor running from its entry point.
+ * Depending on the boot mode, this involves configuring the boot
+ * address and releasing the slave from reset.
+ *
+ */
+int proc4430_start(void *handle, u32 entry_pt,
+ struct processor_start_params *start_params)
+{
+ u32 reg;
+ int counter = 10;
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+
+ printk(KERN_ERR "proc4430_start failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+
+	/* FIXME: remove handle and entry_pt if not used */
+ if (WARN_ON(start_params == NULL)) {
+ printk(KERN_ERR "proc4430_start failed "
+ "Argument processor_start_params * is NULL");
+ return -EINVAL;
+ }
+
+ reg = __raw_readl(CORE_PRM_BASE + RM_MPU_M3_RSTST_OFFSET);
+ printk(KERN_INFO "proc4430_start: Reset Status [0x%x]", reg);
+ reg = __raw_readl(CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ printk(KERN_INFO "proc4430_start: Reset Control [0x%x]", reg);
+
+ switch (start_params->params->proc_id) {
+ case SYS_M3:
+ /* Module is managed automatically by HW */
+ __raw_writel(0x01, CORE_CM2_DUCATI_CLKCTRL);
+ /* Enable the M3 clock */
+ __raw_writel(0x02, CORE_CM2_DUCATI_CLKSTCTRL);
+ do {
+ reg = __raw_readl(CORE_CM2_DUCATI_CLKSTCTRL);
+ if (reg & 0x100) {
+ printk(KERN_INFO "M3 clock enabled:"
+ "CORE_CM2_DUCATI_CLKSTCTRL = 0x%x\n", reg);
+ break;
+ }
+ msleep(1);
+ } while (--counter);
+ if (counter == 0) {
+ printk(KERN_ERR "FAILED TO ENABLE DUCATI M3 CLOCK !\n");
+ return -EFAULT;
+ }
+ /* Check that releasing resets would indeed be effective */
+ reg = __raw_readl(CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ if (reg != 7) {
+			printk(KERN_ERR
+				"proc4430_start: Resets not in proper state!\n");
+ __raw_writel(0x7,
+ CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ }
+
+ /* De-assert RST3, and clear the Reset status */
+ printk(KERN_INFO "De-assert RST3\n");
+ __raw_writel(0x3, CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ while (!(__raw_readl(CORE_PRM_BASE + RM_MPU_M3_RSTST_OFFSET)
+ & 0x4))
+ ;
+ printk(KERN_INFO "RST3 released!");
+ __raw_writel(0x4, CORE_PRM_BASE + RM_MPU_M3_RSTST_OFFSET);
+ ducati_setup();
+
+ /* De-assert RST1, and clear the Reset status */
+ printk(KERN_INFO "De-assert RST1\n");
+ __raw_writel(0x2, CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ while (!(__raw_readl(CORE_PRM_BASE + RM_MPU_M3_RSTST_OFFSET)
+ & 0x1))
+ ;
+ printk(KERN_INFO "RST1 released!");
+ __raw_writel(0x1, CORE_PRM_BASE + RM_MPU_M3_RSTST_OFFSET);
+ break;
+ case APP_M3:
+ /* De-assert RST2, and clear the Reset status */
+ printk(KERN_INFO "De-assert RST2\n");
+ __raw_writel(0x0, CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ while (!(__raw_readl(CORE_PRM_BASE + RM_MPU_M3_RSTST_OFFSET)
+ & 0x2))
+ ;
+ printk(KERN_INFO "RST2 released!");
+ __raw_writel(0x2, CORE_PRM_BASE + RM_MPU_M3_RSTST_OFFSET);
+ break;
+ default:
+ printk(KERN_ERR "proc4430_start: ERROR input\n");
+ break;
+ }
+ return 0;
+}
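+
+/*
+ * Note on the SYS_M3 sequence above: RST3 (which appears to hold the
+ * shared Ducati cache/MMU logic in reset) is released and the MMU is
+ * programmed via ducati_setup() before RST1 lets the SYS_M3 core
+ * start fetching, so the core's first accesses already hit valid
+ * mappings.
+ */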
+
+
+/*
+ * Function to stop the slave processor
+ *
+ * Stop the execution of the slave processor. Depending on the boot
+ * mode, this may result in placing the slave processor in reset.
+ *
+ * @param handle void * to the Processor instance
+ *
+ * @sa proc4430_start, OMAP3530_halResetCtrl
+ */
+int
+proc4430_stop(void *handle, struct processor_stop_params *stop_params)
+{
+ u32 reg;
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_stop failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+ switch (stop_params->params->proc_id) {
+ case SYS_M3:
+ printk(KERN_INFO "Assert RST1 and RST2\n");
+ __raw_writel(0x3, CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ reg = __raw_readl(CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ ducati_destroy();
+ printk(KERN_INFO "Assert RST1 and RST2 and RST3\n");
+ __raw_writel(0x7, CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ /* Disable the M3 clock */
+ __raw_writel(0x01, CORE_CM2_DUCATI_CLKSTCTRL);
+ break;
+ case APP_M3:
+ printk(KERN_INFO "Assert RST2\n");
+ __raw_writel(0x2, CORE_PRM_BASE + RM_MPU_M3_RSTCTRL_OFFSET);
+ break;
+ default:
+ printk(KERN_ERR "proc4430_stop: ERROR input\n");
+ break;
+ }
+ return 0;
+}
+
+
+/*==============================================
+ * Function to read from the slave processor's memory.
+ *
+ * Read from the slave processor's memory and copy into the
+ * provided buffer.
+ */
+int proc4430_read(void *handle, u32 proc_addr, u32 *num_bytes,
+ void *buffer)
+{
+ int retval = 0;
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_read failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+
+ /* TODO */
+ return retval;
+}
+
+
+/*==============================================
+ * Function to write into the slave processor's memory.
+ *
+ * Read from the provided buffer and copy into the slave
+ * processor's memory.
+ *
+ */
+int proc4430_write(void *handle, u32 proc_addr, u32 *num_bytes,
+ void *buffer)
+{
+ int retval = 0;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_write failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+
+ /* TODO */
+ return retval;
+}
+
+
+/*=========================================================
+ * Function to perform device-dependent operations.
+ *
+ * Performs device-dependent control operations as exposed by this
+ * implementation of the Processor module.
+ */
+int proc4430_control(void *handle, int cmd, void *arg)
+{
+ int retval = 0;
+
+	/* FIXME: remove handle, etc. if not used */
+
+#ifdef CONFIG_SYSLINK_DUCATI_PM
+	/* For testing purposes */
+ switch (cmd) {
+ case PM_SUSPEND:
+ case PM_RESUME:
+ retval = ipu_pm_notifications(cmd);
+ break;
+ default:
+ printk(KERN_ERR "Invalid notification\n");
+ }
+ if (retval != PM_SUCCESS)
+ printk(KERN_ERR "Error in notifications\n");
+#endif
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_control failed "
+ "Module not initialized");
+ return -ENODEV;
+ }
+
+ return retval;
+}
+
+
+/*=====================================================
+ * Function to translate between two types of address spaces.
+ *
+ * Translate between the specified address spaces.
+ */
+int proc4430_translate_addr(void *handle,
+ void **dst_addr, enum proc_mgr_addr_type dst_addr_type,
+ void *src_addr, enum proc_mgr_addr_type src_addr_type)
+{
+ int retval = 0;
+ struct processor_object *proc_handle = NULL;
+ struct proc4430_object *object = NULL;
+ struct proc4430_mem_entry *entry = NULL;
+ bool found = false;
+ u32 fm_addr_base = (u32)NULL;
+ u32 to_addr_base = (u32)NULL;
+ u32 i;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_translate_addr failed "
+ "Module not initialized");
+ retval = -ENODEV;
+ goto error_exit;
+ }
+
+ if (WARN_ON(handle == NULL)) {
+ retval = -EINVAL;
+ goto error_exit;
+ }
+ if (WARN_ON(dst_addr == NULL)) {
+ retval = -EINVAL;
+ goto error_exit;
+ }
+ if (WARN_ON(dst_addr_type > PROC_MGR_ADDRTYPE_ENDVALUE)) {
+ retval = -EINVAL;
+ goto error_exit;
+ }
+ if (WARN_ON(src_addr == NULL)) {
+ retval = -EINVAL;
+ goto error_exit;
+ }
+ if (WARN_ON(src_addr_type > PROC_MGR_ADDRTYPE_ENDVALUE)) {
+ retval = -EINVAL;
+ goto error_exit;
+ }
+
+ proc_handle = (struct processor_object *)handle;
+ object = (struct proc4430_object *)proc_handle->object;
+ *dst_addr = NULL;
+ for (i = 0 ; i < object->params.num_mem_entries ; i++) {
+ entry = &(object->params.mem_entries[i]);
+ fm_addr_base =
+ (src_addr_type == PROC_MGR_ADDRTYPE_MASTERKNLVIRT) ?
+ entry->master_virt_addr : entry->slave_virt_addr;
+ to_addr_base =
+ (dst_addr_type == PROC_MGR_ADDRTYPE_MASTERKNLVIRT) ?
+ entry->master_virt_addr : entry->slave_virt_addr;
+		/* Determine which way to convert */
+ if (((u32)src_addr < (fm_addr_base + entry->size)) &&
+ ((u32)src_addr >= fm_addr_base)) {
+ found = true;
+ *dst_addr = (void *)(((u32)src_addr - fm_addr_base)
+ + to_addr_base);
+ break;
+ }
+ }
+
+	/* This check must not be removed even with build optimizations. */
+ if (WARN_ON(found == false)) {
+ /*Failed to translate address. */
+ retval = -ENXIO;
+ goto error_exit;
+ }
+ return 0;
+
+error_exit:
+ return retval;
+}
+
+
+/*=================================================
+ * Function to map slave address to host address space
+ *
+ * Map the provided slave address to master address space. This
+ * function also maps the specified address to slave MMU space.
+ */
+int proc4430_map(void *handle, u32 proc_addr,
+ u32 size, u32 *mapped_addr, u32 *mapped_size, u32 map_attribs)
+{
+ int retval = 0;
+ u32 da_align;
+ u32 da;
+ u32 va_align;
+ u32 size_align;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_map failed "
+ "Module not initialized");
+ retval = -ENODEV;
+ goto error_exit;
+ }
+
+	/* FIXME: remove handle, etc. if not used */
+
+ /* FIX ME: Temporary work around until the dynamic memory mapping
+ * for Tiler address space is available
+ */
+ if ((map_attribs & DSP_MAPTILERADDR)) {
+ da_align = user_va2pa(current->mm, proc_addr);
+ *mapped_addr = (da_align | (proc_addr & (PAGE_SIZE - 1)));
+ return 0;
+ }
+
+ /* Calculate the page-aligned PA, VA and size */
+ va_align = PG_ALIGN_LOW(proc_addr, PAGE_SIZE);
+ size_align = PG_ALIGN_HIGH(size + (u32)proc_addr - va_align, PAGE_SIZE);
+
+	retval = dmm_reserve_memory(size_align, &da);
+	if (WARN_ON(retval < 0))
+		goto error_exit;
+ da_align = PG_ALIGN_LOW((u32)da, PAGE_SIZE);
+ retval = ducati_mem_map(va_align, da_align, size_align, map_attribs);
+
+ /* Mapped address = MSB of DA | LSB of VA */
+ *mapped_addr = (da_align | (proc_addr & (PAGE_SIZE - 1)));
+
+error_exit:
+ return retval;
+}
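+
+/*
+ * Example (hypothetical values): for proc_addr = 0x80001234 the page
+ * offset 0x234 is preserved, so if the DMM reserves da_align =
+ * 0xa0100000 the returned mapped address is 0xa0100234.
+ */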
+
+/*=================================================
+ * Function to unmap slave address to host address space
+ *
+ * UnMap the provided slave address to master address space. This
+ * function also unmaps the specified address to slave MMU space.
+ */
+int proc4430_unmap(void *handle, u32 mapped_addr)
+{
+ int da_align;
+ int ret_val = 0;
+ int size_align;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+		printk(KERN_ERR "proc4430_unmap failed "
+			"Module not initialized");
+		ret_val = -ENODEV;
+ goto error_exit;
+ }
+
+	/* FIXME: remove handle, etc. if not used */
+
+ da_align = PG_ALIGN_LOW((u32)mapped_addr, PAGE_SIZE);
+ ret_val = dmm_unreserve_memory(da_align, &size_align);
+ if (WARN_ON(ret_val < 0))
+ goto error_exit;
+ ret_val = ducati_mem_unmap(da_align, size_align);
+ if (WARN_ON(ret_val < 0))
+ goto error_exit;
+ return 0;
+
+error_exit:
+ printk(KERN_WARNING "proc4430_unmap failed !!!!\n");
+ return ret_val;
+}
+
+/*=================================================
+ * Function to return list of translated mem entries
+ *
+ * This function takes the remote processor address as
+ * an input and returns the mapped Page entries in the
+ * buffer passed
+ */
+int proc4430_virt_to_phys(void *handle, u32 da, u32 *mapped_entries,
+ u32 num_of_entries)
+{
+ int da_align;
+ int i;
+ int ret_val = 0;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_virt_to_phys failed "
+ "Module not initialized");
+ ret_val = -EFAULT;
+ goto error_exit;
+ }
+
+ if (handle == NULL || mapped_entries == NULL || num_of_entries == 0) {
+ ret_val = -EFAULT;
+ goto error_exit;
+ }
+ da_align = PG_ALIGN_LOW((u32)da, PAGE_SIZE);
+ for (i = 0; i < num_of_entries; i++) {
+ mapped_entries[i] = ducati_mem_virtToPhys(da_align);
+ da_align += PAGE_SIZE;
+ }
+ return 0;
+
+error_exit:
+	printk(KERN_WARNING "proc4430_virt_to_phys failed !!!!\n");
+ return ret_val;
+}
+
+
+/*=================================================
+ * Function to return PROC4430 mem_entries info
+ *
+ */
+int proc4430_proc_info(void *handle, struct proc_mgr_proc_info *procinfo)
+{
+ struct processor_object *proc_handle = NULL;
+ struct proc4430_object *object = NULL;
+ struct proc4430_mem_entry *entry = NULL;
+ int i;
+
+ if (atomic_cmpmask_and_lt(&proc4430_state.ref_count,
+ OMAP4430PROC_MAKE_MAGICSTAMP(0),
+ OMAP4430PROC_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc4430_proc_info failed "
+ "Module not initialized");
+ goto error_exit;
+ }
+
+ if (WARN_ON(handle == NULL))
+ goto error_exit;
+ if (WARN_ON(procinfo == NULL))
+ goto error_exit;
+
+ proc_handle = (struct processor_object *)handle;
+
+ object = (struct proc4430_object *)proc_handle->object;
+
+ for (i = 0 ; i < object->params.num_mem_entries ; i++) {
+ entry = &(object->params.mem_entries[i]);
+ procinfo->mem_entries[i].addr[PROC_MGR_ADDRTYPE_MASTERKNLVIRT]
+ = entry->master_virt_addr;
+ procinfo->mem_entries[i].addr[PROC_MGR_ADDRTYPE_SLAVEVIRT]
+ = entry->slave_virt_addr;
+ procinfo->mem_entries[i].size = entry->size;
+ }
+ procinfo->num_mem_entries = object->params.num_mem_entries;
+ procinfo->boot_mode = proc_handle->boot_mode;
+ return 0;
+
+error_exit:
+ printk(KERN_WARNING "proc4430_proc_info failed !!!!\n");
+ return -EFAULT;
+}
diff --git a/drivers/dsp/syslink/procmgr/proc4430/proc4430.h b/drivers/dsp/syslink/procmgr/proc4430/proc4430.h
new file mode 100644
index 000000000000..5903daeadaa3
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/proc4430.h
@@ -0,0 +1,147 @@
+/*
+ * proc4430.h
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _SYSLINK_PROC_4430_H_
+#define _SYSLINK_PROC_4430_H_
+
+
+/* Module headers */
+#include <procmgr.h>
+#include "../procdefs.h"
+#include <linux/types.h>
+
+/*
+ Module configuration structure.
+ */
+struct proc4430_config {
+ struct mutex *gate_handle;
+ /* void * of gate to be used for local thread safety */
+};
+
+/*
+ Memory entry for slave memory map configuration
+ */
+struct proc4430_mem_entry {
+ char name[PROCMGR_MAX_STRLEN];
+ /* Name identifying the memory region. */
+ u32 phys_addr;
+ /* Physical address of the memory region. */
+ u32 slave_virt_addr;
+ /* Slave virtual address of the memory region. */
+ u32 master_virt_addr;
+ /* Master virtual address of the memory region. If specified as -1,
+ * the master virtual address is assumed to be invalid, and shall be
+ * set internally within the Processor module. */
+ u32 size;
+ /* Size (in bytes) of the memory region. */
+ bool shared;
+ /* Flag indicating whether the memory region is shared between master
+ * and slave. */
+};
+
+/*
+ Configuration parameters specific to this processor.
+ */
+struct proc4430_params {
+ int num_mem_entries;
+ /* Number of memory regions to be configured. */
+ struct proc4430_mem_entry *mem_entries;
+ /* Array of information structures for memory regions
+ * to be configured. */
+ u32 reset_vector_mem_entry;
+ /* Index of the memory entry within the mem_entries array,
+ * which is the resetVector memory region. */
+};
+
+
+/* Function to initialize the slave processor */
+int proc4430_attach(void *handle, struct processor_attach_params *params);
+
+/* Function to finalize the slave processor */
+int proc4430_detach(void *handle);
+
+/* Function to start the slave processor */
+int proc4430_start(void *handle, u32 entry_pt,
+ struct processor_start_params *params);
+
+/* Function to stop the slave processor */
+int proc4430_stop(void *handle,
+ struct processor_stop_params *params);
+
+/* Function to read from the slave processor's memory. */
+int proc4430_read(void *handle, u32 proc_addr, u32 *num_bytes,
+ void *buffer);
+
+/* Function to write into the slave processor's memory. */
+int proc4430_write(void *handle, u32 proc_addr, u32 *num_bytes,
+ void *buffer);
+
+/* Function to perform device-dependent operations. */
+int proc4430_control(void *handle, int cmd, void *arg);
+
+/* Function to translate between two types of address spaces. */
+int proc4430_translate_addr(void *handle, void **dst_addr,
+ enum proc_mgr_addr_type dst_addr_type,
+ void *src_addr, enum proc_mgr_addr_type src_addr_type);
+
+/* Function to map slave address to host address space */
+int proc4430_map(void *handle, u32 proc_addr, u32 size, u32 *mapped_addr,
+ u32 *mapped_size, u32 map_attribs);
+
+/* Function to unmap the slave address to host address space */
+int proc4430_unmap(void *handle, u32 mapped_addr);
+
+/* Function to retrieve physical address translations */
+int proc4430_virt_to_phys(void *handle, u32 da, u32 *mapped_entries,
+ u32 num_of_entries);
+
+/* =================================================
+ * APIs
+ * =================================================
+ */
+
+/* Function to get the default configuration for the OMAP4430PROC module */
+void proc4430_get_config(struct proc4430_config *cfg);
+
+/* Function to setup the OMAP4430PROC module. */
+int proc4430_setup(struct proc4430_config *cfg);
+
+/* Function to destroy the OMAP4430PROC module. */
+int proc4430_destroy(void);
+
+/* Function to initialize the parameters for this processor instance. */
+void proc4430_params_init(void *handle,
+ struct proc4430_params *params);
+
+/* Function to create an instance of this processor. */
+void *proc4430_create(u16 proc_id, const struct proc4430_params *params);
+
+/* Function to delete an instance of this processor. */
+int proc4430_delete(void **handle_ptr);
+
+/* Function to open an instance of this processor. */
+int proc4430_open(void **handle_ptr, u16 proc_id);
+
+/* Function to close an instance of this processor. */
+int proc4430_close(void *handle);
+
+/* Function to get the proc info */
+int proc4430_proc_info(void *handle, struct proc_mgr_proc_info *procinfo);
+
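+/* Usage sketch (illustrative only, hence not compiled): the intended call
+ * order for this interface, assuming proc_id is a valid MultiProc ID and
+ * with most error handling elided for brevity.
+ */
+#if 0
+static int proc4430_lifecycle_example(u16 proc_id)
+{
+	struct proc4430_config cfg;
+	struct proc4430_params params;
+	void *proc;
+
+	proc4430_get_config(&cfg);	/* fetch the default module config */
+	proc4430_setup(&cfg);		/* one-time module initialization */
+	proc4430_params_init(NULL, &params); /* default instance params */
+	proc = proc4430_create(proc_id, &params);
+	if (proc == NULL)
+		return -EFAULT;
+	/* ... attach/start/stop/detach through the generic layers ... */
+	proc4430_delete(&proc);
+	proc4430_destroy();
+	return 0;
+}
+#endif
+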
+#endif
diff --git a/drivers/dsp/syslink/procmgr/proc4430/proc4430_drv.c b/drivers/dsp/syslink/procmgr/proc4430/proc4430_drv.c
new file mode 100644
index 000000000000..77d47ccee9ac
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/proc4430_drv.c
@@ -0,0 +1,401 @@
+/*
+ * proc4430_drv.c
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <generated/autoconf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+
+
+/* Module headers */
+#include "proc4430.h"
+#include "proc4430_drvdefs.h"
+
+
+
+/** ============================================================================
+ * Macros and types
+ * ============================================================================
+ */
+#define PROC4430_NAME "syslink-proc4430"
+
+static char *driver_name = PROC4430_NAME;
+
+static s32 driver_major;
+
+static s32 driver_minor;
+
+struct proc_4430_dev {
+ struct cdev cdev;
+};
+
+static struct proc_4430_dev *proc_4430_device;
+
+static struct class *proc_4430_class;
+
+
+
+/** ============================================================================
+ * Forward declarations of internal functions
+ * ============================================================================
+ */
+/* Linux driver function to open the driver object. */
+static int proc4430_drv_open(struct inode *inode, struct file *filp);
+
+/* Linux driver function to close the driver object. */
+static int proc4430_drv_release(struct inode *inode, struct file *filp);
+
+/* Linux driver function to invoke the APIs through ioctl. */
+static int proc4430_drv_ioctl(struct inode *inode,
+ struct file *filp, unsigned int cmd,
+ unsigned long args);
+
+/* Linux driver function to map memory regions to user space. */
+static int proc4430_drv_mmap(struct file *filp,
+ struct vm_area_struct *vma);
+
+/* Module initialization function for Linux driver. */
+static int __init proc4430_drv_initializeModule(void);
+
+/* Module finalization function for Linux driver. */
+static void __exit proc4430_drv_finalizeModule(void);
+
+
+
+/** ============================================================================
+ * Globals
+ * ============================================================================
+ */
+
+/*
+ File operations table for PROC4430.
+ */
+static const struct file_operations proc_4430_fops = {
+ .open = proc4430_drv_open,
+ .release = proc4430_drv_release,
+ .ioctl = proc4430_drv_ioctl,
+ .mmap = proc4430_drv_mmap,
+};
+
+static int proc4430_drv_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static int proc4430_drv_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+
+/*
+ Linux driver function to invoke the APIs through ioctl.
+ */
+static int proc4430_drv_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ int retval = 0;
+ struct proc_mgr_cmd_args *cmd_args = (struct proc_mgr_cmd_args *)args;
+ struct proc_mgr_cmd_args command_args;
+
+ switch (cmd) {
+ case CMD_PROC4430_GETCONFIG:
+ {
+ struct proc4430_cmd_args_get_config *src_args =
+ (struct proc4430_cmd_args_get_config *)args;
+ struct proc4430_config cfg;
+
+			/* copy_from_user is not needed for
+			 * proc4430_get_config, since the
+			 * user's config is not used.
+			 */
+			proc4430_get_config(&cfg);
+
+			/* copy_to_user() returns the number of bytes
+			 * that could not be copied, so nonzero means
+			 * failure; it never returns a negative value.
+			 */
+			retval = copy_to_user((void *)(src_args->cfg),
+				(const void *)&cfg,
+				sizeof(struct proc4430_config));
+			if (WARN_ON(retval != 0))
+				goto func_exit;
+ }
+ break;
+
+ case CMD_PROC4430_SETUP:
+ {
+ struct proc4430_cmd_args_setup *src_args =
+ (struct proc4430_cmd_args_setup *)args;
+ struct proc4430_config cfg;
+
+ retval = copy_from_user((void *)&cfg,
+ (const void *)(src_args->cfg),
+ sizeof(struct proc4430_config));
+			if (WARN_ON(retval != 0))
+ goto func_exit;
+		retval = proc4430_setup(&cfg);
+ }
+ break;
+
+ case CMD_PROC4430_DESTROY:
+ {
+		retval = proc4430_destroy();
+ }
+ break;
+
+ case CMD_PROC4430_PARAMS_INIT:
+ {
+ struct proc4430_cmd_args_params_init src_args;
+ struct proc4430_params params;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc4430_cmd_args_params_init));
+		if (WARN_ON(retval != 0))
+ goto func_exit;
+ proc4430_params_init(src_args.handle, &params);
+ retval = copy_to_user((void *)(src_args.params),
+ (const void *) &params,
+ sizeof(struct proc4430_params));
+		if (WARN_ON(retval != 0))
+ goto func_exit;
+ }
+ break;
+
+ case CMD_PROC4430_CREATE:
+ {
+ struct proc4430_cmd_args_create src_args;
+ struct proc4430_params params;
+ struct proc4430_mem_entry *entries = NULL;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc4430_cmd_args_create));
+		if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = copy_from_user((void *) &params,
+ (const void *)(src_args.params),
+ sizeof(struct proc4430_params));
+		if (WARN_ON(retval != 0))
+ goto func_exit;
+ /* Copy the contents of mem_entries from user-side */
+ if (params.num_mem_entries) {
+ entries = vmalloc(params.num_mem_entries * \
+ sizeof(struct proc4430_mem_entry));
+			if (WARN_ON(!entries)) {
+				retval = -ENOMEM;
+				goto func_exit;
+			}
+ retval = copy_from_user((void *) (entries),
+ (const void *)(params.mem_entries),
+ params.num_mem_entries * \
+ sizeof(struct proc4430_mem_entry));
+			if (WARN_ON(retval != 0)) {
+ vfree(entries);
+ goto func_exit;
+ }
+ params.mem_entries = entries;
+ }
+ src_args.handle = proc4430_create(src_args.proc_id,
+ &params);
+		if (WARN_ON(src_args.handle == NULL)) {
+			retval = -EFAULT;
+			goto func_exit;
+		}
+ retval = copy_to_user((void *)(args),
+ (const void *)&src_args,
+ sizeof(struct proc4430_cmd_args_create));
+ /* Free the memory created */
+ if (params.num_mem_entries)
+ vfree(entries);
+ }
+ break;
+
+ case CMD_PROC4430_DELETE:
+ {
+ struct proc4430_cmd_args_delete src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc4430_cmd_args_delete));
+		if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc4430_delete(&(src_args.handle));
+		WARN_ON(retval != 0);
+ }
+ break;
+
+ case CMD_PROC4430_OPEN:
+ {
+ struct proc4430_cmd_args_open src_args;
+
+		/* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc4430_cmd_args_open));
+		if (WARN_ON(retval != 0))
+ goto func_exit;
+		retval = proc4430_open(&(src_args.handle),
+					src_args.proc_id);
+		if (WARN_ON(retval < 0))
+			goto func_exit;
+		retval = copy_to_user((void *)(args),
+			(const void *)&src_args,
+			sizeof(struct proc4430_cmd_args_open));
+		WARN_ON(retval != 0);
+ }
+ break;
+
+ case CMD_PROC4430_CLOSE:
+ {
+ struct proc4430_cmd_args_close src_args;
+
+		/* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc4430_cmd_args_close));
+		if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc4430_close(src_args.handle);
+		WARN_ON(retval != 0);
+ }
+ break;
+
+	default:
+	{
+		printk(KERN_ERR "proc4430_drv_ioctl: unsupported ioctl 0x%x\n",
+			cmd);
+		retval = -ENOTTY;
+	}
+	break;
+ }
+func_exit:
+ /* Set the status and copy the common args to user-side. */
+ command_args.api_status = retval;
+ retval = copy_to_user((void *) cmd_args,
+ (const void *) &command_args,
+ sizeof(struct proc_mgr_cmd_args));
+	WARN_ON(retval != 0);
+ return retval;
+}
+
+
+/*
+ Linux driver function to map memory regions to user space.
+ */
+static int proc4430_drv_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
+
+ if (remap_pfn_range(vma,
+ vma->vm_start,
+ vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+
+/** ==================================================================
+ * Functions required for multiple .ko modules configuration
+ * ==================================================================
+ */
+/*
+ Module initialization function for Linux driver.
+ */
+static int __init proc4430_drv_initializeModule(void)
+{
+	dev_t dev = 0;
+ int retval;
+
+	/* Announce module initialization */
+ printk(KERN_INFO "proc4430_drv_initializeModule\n");
+
+ if (driver_major) {
+ dev = MKDEV(driver_major, driver_minor);
+ retval = register_chrdev_region(dev, 1, driver_name);
+ } else {
+ retval = alloc_chrdev_region(&dev, driver_minor, 1,
+ driver_name);
+ driver_major = MAJOR(dev);
+ }
+
+ proc_4430_device = kmalloc(sizeof(struct proc_4430_dev), GFP_KERNEL);
+ if (!proc_4430_device) {
+ retval = -ENOMEM;
+ unregister_chrdev_region(dev, 1);
+ goto exit;
+ }
+ memset(proc_4430_device, 0, sizeof(struct proc_4430_dev));
+ cdev_init(&proc_4430_device->cdev, &proc_4430_fops);
+ proc_4430_device->cdev.owner = THIS_MODULE;
+ proc_4430_device->cdev.ops = &proc_4430_fops;
+
+ retval = cdev_add(&proc_4430_device->cdev, dev, 1);
+
+ if (retval) {
+ printk(KERN_ERR "Failed to add the syslink proc_4430 device\n");
+ goto exit;
+ }
+
+ /* udev support */
+ proc_4430_class = class_create(THIS_MODULE, "syslink-proc4430");
+
+	if (IS_ERR(proc_4430_class)) {
+		retval = PTR_ERR(proc_4430_class);
+		printk(KERN_ERR "Error creating proc_4430 class\n");
+		goto exit;
+	}
+	device_create(proc_4430_class, NULL, MKDEV(driver_major, driver_minor),
+			NULL, PROC4430_NAME);
+exit:
+	return retval;
+}
+
+/*
+ Module finalization function for the Linux driver.
+ */
+static void __exit proc4430_drv_finalizeModule(void)
+{
+ dev_t devno = 0;
+
+ /* FIX ME: THIS MIGHT NOT BE THE RIGHT PLACE TO CALL THE SETUP */
+ proc4430_destroy();
+
+ devno = MKDEV(driver_major, driver_minor);
+ if (proc_4430_device) {
+ cdev_del(&proc_4430_device->cdev);
+ kfree(proc_4430_device);
+ }
+ unregister_chrdev_region(devno, 1);
+ if (proc_4430_class) {
+ /* remove the device from sysfs */
+ device_destroy(proc_4430_class, MKDEV(driver_major,
+ driver_minor));
+ class_destroy(proc_4430_class);
+ }
+ return;
+}
+
+/*
+ Macro calls that indicate the initialization and finalization
+ functions to the kernel.
+ */
+MODULE_LICENSE("GPL v2");
+module_init(proc4430_drv_initializeModule);
+module_exit(proc4430_drv_finalizeModule);
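+
+/* User-space usage sketch (illustrative only, hence not compiled): since
+ * proc4430_drv_mmap() passes vma->vm_pgoff straight to remap_pfn_range(),
+ * the mmap() offset is interpreted as a physical address. The device path
+ * follows from the class_create()/device_create() calls above; PHYS_ADDR
+ * and MAP_SIZE below are placeholders, not real platform values.
+ */
+#if 0
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+int map_region_example(void)
+{
+	const off_t PHYS_ADDR = 0x9c000000;	/* page-aligned placeholder */
+	const size_t MAP_SIZE = 0x1000;
+	int fd = open("/dev/syslink-proc4430", O_RDWR | O_SYNC);
+	void *va;
+
+	if (fd < 0)
+		return -1;
+	va = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
+			fd, PHYS_ADDR);
+	close(fd);	/* the mapping stays valid after close */
+	if (va == MAP_FAILED)
+		return -1;
+	/* ... access the shared region, then munmap(va, MAP_SIZE) ... */
+	return 0;
+}
+#endif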
diff --git a/drivers/dsp/syslink/procmgr/proc4430/proc4430_drvdefs.h b/drivers/dsp/syslink/procmgr/proc4430/proc4430_drvdefs.h
new file mode 100644
index 000000000000..4176d731f1d4
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/proc4430/proc4430_drvdefs.h
@@ -0,0 +1,169 @@
+/*
+ * proc4430_drvdefs.h
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#ifndef _SYSLINK_PROC4430_DRVDEFS_H
+#define _SYSLINK_PROC4430_DRVDEFS_H
+
+
+/* Module headers */
+#include "../procmgr_drvdefs.h"
+#include "proc4430.h"
+
+
+/* ----------------------------------------------------------------------------
+ * IOCTL command IDs for OMAP4430PROC
+ * ----------------------------------------------------------------------------
+ */
+/*
+ * Base command ID for OMAP4430PROC
+ */
+#define PROC4430_BASE_CMD 0x200
+
+/*
+ * Command for PROC4430_getConfig
+ */
+#define CMD_PROC4430_GETCONFIG (PROC4430_BASE_CMD + 1)
+
+/*
+ * Command for PROC4430_setup
+ */
+#define CMD_PROC4430_SETUP (PROC4430_BASE_CMD + 2)
+
+/*
+ * Command for PROC4430_destroy
+ */
+#define CMD_PROC4430_DESTROY (PROC4430_BASE_CMD + 3)
+
+/*
+ * Command for PROC4430_params_init
+ */
+#define CMD_PROC4430_PARAMS_INIT (PROC4430_BASE_CMD + 4)
+
+/*
+ * Command for PROC4430_create
+ */
+#define CMD_PROC4430_CREATE (PROC4430_BASE_CMD + 5)
+
+/*
+ * Command for PROC4430_delete
+ */
+#define CMD_PROC4430_DELETE (PROC4430_BASE_CMD + 6)
+
+/*
+ * Command for PROC4430_open
+ */
+#define CMD_PROC4430_OPEN (PROC4430_BASE_CMD + 7)
+
+/*
+ * Command for PROC4430_close
+ */
+#define CMD_PROC4430_CLOSE (PROC4430_BASE_CMD + 8)
+
+
+/* ---------------------------------------------------
+ * Command arguments for OMAP4430PROC
+ * ---------------------------------------------------
+ */
+/*
+ * Command arguments for PROC4430_getConfig
+ */
+struct proc4430_cmd_args_get_config {
+ struct proc_mgr_cmd_args command_args;
+ /* Common command args */
+ struct proc4430_config *cfg;
+ /* Pointer to the OMAP4430PROC module configuration structure
+ * in which the default config is to be returned. */
+};
+
+/*
+ * Command arguments for PROC4430_setup
+ */
+struct proc4430_cmd_args_setup {
+ struct proc_mgr_cmd_args command_args;
+ /* Common command args */
+ struct proc4430_config *cfg;
+ /* Optional OMAP4430PROC module configuration. If provided as NULL,
+ * default configuration is used. */
+};
+
+/*
+ * Command arguments for PROC4430_destroy
+ */
+struct proc4430_cmd_args_destroy {
+ struct proc_mgr_cmd_args command_args;
+ /* Common command args */
+};
+
+/*
+ * Command arguments for proc4430_params_init
+ */
+struct proc4430_cmd_args_params_init {
+ struct proc_mgr_cmd_args command_args;
+ /* Common command args */
+ void *handle;
+ /* void * to the processor instance. */
+ struct proc4430_params *params;
+ /* Configuration parameters. */
+};
+
+/*
+ * Command arguments for PROC4430_create
+ */
+struct proc4430_cmd_args_create {
+ struct proc_mgr_cmd_args command_args;
+ /* Common command args */
+ u16 proc_id;
+ /* Processor ID for which this processor instance is required. */
+ struct proc4430_params *params;
+	/* Configuration parameters. */
+ void *handle;
+ /* void * to the created processor instance. */
+};
+
+/*
+ * Command arguments for PROC4430_delete
+ */
+struct proc4430_cmd_args_delete {
+ struct proc_mgr_cmd_args command_args;
+ /* Common command args */
+ void *handle;
+ /* Pointer to handle to the processor instance */
+};
+
+/*
+ * Command arguments for PROC4430_open
+ */
+struct proc4430_cmd_args_open {
+ struct proc_mgr_cmd_args command_args;
+ /* Common command args */
+ u16 proc_id;
+ /* Processor ID addressed by this OMAP4430PROC instance. */
+ void *handle;
+ /* Return parameter: void * to the processor instance */
+};
+
+/*
+ * Command arguments for PROC4430_close
+ */
+struct proc4430_cmd_args_close {
+ struct proc_mgr_cmd_args command_args;
+ /* Common command args */
+ void *handle;
+ /* void * to the processor instance */
+};
+
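+/* User-space usage sketch (illustrative only, hence not compiled): the
+ * command IDs above are plain integers rather than _IO*-encoded values,
+ * and each argument structure embeds struct proc_mgr_cmd_args first so
+ * the driver can report api_status back through the same pointer. The
+ * device path follows from device_create() in proc4430_drv.c.
+ */
+#if 0
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+int get_default_config_example(struct proc4430_config *cfg)
+{
+	struct proc4430_cmd_args_get_config args = { .cfg = cfg };
+	int fd = open("/dev/syslink-proc4430", O_RDWR);
+	int ret;
+
+	if (fd < 0)
+		return -1;
+	ret = ioctl(fd, CMD_PROC4430_GETCONFIG, &args);
+	close(fd);
+	/* the driver copies its status into args.command_args.api_status */
+	return ret ? ret : args.command_args.api_status;
+}
+#endif
+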
+#endif
diff --git a/drivers/dsp/syslink/procmgr/procdefs.h b/drivers/dsp/syslink/procmgr/procdefs.h
new file mode 100644
index 000000000000..eb73626d27e1
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/procdefs.h
@@ -0,0 +1,203 @@
+/*
+ * procdefs.h
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef SYSLINK_PROCDEFS_H
+#define SYSLINK_PROCDEFS_H
+
+#include <linux/types.h>
+
+/* Module level headers */
+#include <procmgr.h>
+
+
+/* =============================
+ * Macros and types
+ * =============================
+ */
+/*
+ * Enumerates the endianness types of the slave processor.
+ */
+enum processor_endian {
+	PROCESSOR_ENDIAN_DEFAULT = 0,
+	/* Default endianness (no conversion required) */
+ PROCESSOR_ENDIAN_BIG = 1,
+ /* Big endian */
+ PROCESSOR_ENDIAN_LITTLE = 2,
+ /* Little endian */
+ PROCESSOR_ENDIAN_ENDVALUE = 3
+ /* End delimiter indicating start of invalid values for this enum */
+};
+
+
+/*
+ * Configuration parameters for attaching to the slave Processor
+ */
+struct processor_attach_params {
+ struct proc_mgr_attach_params *params;
+ /* Common attach parameters for ProcMgr */
+ u16 num_mem_entries;
+ /* Number of valid memory entries */
+ struct proc_mgr_addr_info mem_entries[PROCMGR_MAX_MEMORY_REGIONS];
+ /* Configuration of memory regions */
+};
+
+/*
+ * Configuration parameters for starting the slave Processor
+ */
+struct processor_start_params {
+ struct proc_mgr_start_params *params;
+ /* Common start parameters for ProcMgr */
+};
+
+/*
+ * Configuration parameters for stopping the slave Processor
+ */
+struct processor_stop_params {
+ struct proc_mgr_stop_params *params;
+	/* Common stop parameters for ProcMgr */
+};
+/*
+ * Function pointer type for the function to attach to the processor.
+ */
+typedef int (*processor_attach_fxn) (void *handle,
+ struct processor_attach_params *params);
+
+/*
+ * Function pointer type for the function to detach from the
+ * processor
+ */
+typedef int (*processor_detach_fxn) (void *handle);
+
+/*
+ * Function pointer type for the function to start the processor.
+ */
+typedef int (*processor_start_fxn) (void *handle, u32 entry_pt,
+ struct processor_start_params *params);
+
+/*
+ * Function pointer type for the function to stop the processor.
+ */
+typedef int (*processor_stop_fxn) (void *handle,
+ struct processor_stop_params *params);
+
+/*
+ * Function pointer type for the function to read from the slave
+ * processor's memory.
+ */
+typedef int (*processor_read_fxn) (void *handle, u32 proc_addr,
+ u32 *num_bytes, void *buffer);
+
+/*
+ * Function pointer type for the function to write into the slave
+ * processor's memory.
+ */
+typedef int (*processor_write_fxn) (void *handle, u32 proc_addr,
+ u32 *num_bytes, void *buffer);
+
+/*
+ * Function pointer type for the function to perform device-dependent
+ * operations.
+ */
+typedef int (*processor_control_fxn) (void *handle, int cmd, void *arg);
+
+/*
+ * Function pointer type for the function to translate between
+ * two types of address spaces.
+ */
+typedef int (*processor_translate_addr_fxn) (void *handle, void **dst_addr,
+		enum proc_mgr_addr_type dst_addr_type, void *src_addr,
+		enum proc_mgr_addr_type src_addr_type);
+
+/*
+ * Function pointer type for the function to map a slave address
+ * to the host address space
+ */
+typedef int (*processor_map_fxn) (void *handle, u32 proc_addr, u32 size,
+ u32 *mapped_addr, u32 *mapped_size, u32 map_attribs);
+
+/*
+ * Function pointer type for the function to unmap a previously
+ * mapped slave address
+ */
+typedef int (*processor_unmap_fxn) (void *handle, u32 mapped_addr);
+
+/*
+ * Function pointer type for the function that returns proc info
+ */
+typedef int (*processor_proc_info) (void *handle,
+ struct proc_mgr_proc_info *proc_info);
+
+/*
+ * Function pointer type for the function that returns
+ * virtual-to-physical address translations
+ */
+typedef int (*processor_virt_to_phys_fxn) (void *handle, u32 da,
+ u32 *mapped_entries, u32 num_of_entries);
+
+
+/* =============================
+ * Function table interface
+ * =============================
+ */
+/*
+ * Function table interface for Processor.
+ */
+struct processor_fxn_table {
+ processor_attach_fxn attach;
+ /* Function to attach to the slave processor */
+ processor_detach_fxn detach;
+ /* Function to detach from the slave processor */
+ processor_start_fxn start;
+ /* Function to start the slave processor */
+ processor_stop_fxn stop;
+ /* Function to stop the slave processor */
+ processor_read_fxn read;
+ /* Function to read from the slave processor's memory */
+ processor_write_fxn write;
+ /* Function to write into the slave processor's memory */
+ processor_control_fxn control;
+ /* Function to perform device-dependent control function */
+ processor_translate_addr_fxn translateAddr;
+ /* Function to translate between address ranges */
+ processor_map_fxn map;
+ /* Function to map slave addresses to master address space */
+ processor_unmap_fxn unmap;
+	/* Function to unmap slave addresses from master address space */
+	processor_proc_info procinfo;
+	/* Function to return processor information */
+	processor_virt_to_phys_fxn virt_to_phys;
+	/* Function to convert virtual addresses to physical pages */
+};
+
+/* =============================
+ * Processor structure
+ * =============================
+ */
+/*
+ * Generic Processor object. This object defines the handle type for all
+ * Processor operations.
+ */
+struct processor_object {
+ struct processor_fxn_table proc_fxn_table;
+ /* interface function table to plug into the generic Processor. */
+ enum proc_mgr_state state;
+ /* State of the slave processor */
+ enum proc_mgr_boot_mode boot_mode;
+ /* Boot mode for the slave processor. */
+ void *object;
+ /* Pointer to Processor-specific object. */
+ u16 proc_id;
+ /* Processor ID addressed by this Processor instance. */
+};
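+
+/* Plug-in sketch (illustrative only, hence not compiled): the generic
+ * Processor layer dispatches purely through processor_fxn_table, so a
+ * device-specific implementation fills in the table of its embedded
+ * processor_object. The my4430_* handlers named here are hypothetical;
+ * the real wiring for OMAP4430 is done by the proc4430 module.
+ */
+#if 0
+struct my4430_object {
+	struct processor_object proc;	/* must be first so the generic
+					 * handle cast remains valid */
+	/* ... device-private state ... */
+};
+
+static void my4430_init_fxn_table(struct my4430_object *obj)
+{
+	obj->proc.proc_fxn_table.attach = my4430_attach;
+	obj->proc.proc_fxn_table.detach = my4430_detach;
+	obj->proc.proc_fxn_table.start = my4430_start;
+	obj->proc.proc_fxn_table.stop = my4430_stop;
+	/* read/write/control/translateAddr/map/unmap/procinfo/
+	 * virt_to_phys are assigned the same way */
+}
+#endif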
+#endif
diff --git a/drivers/dsp/syslink/procmgr/processor.c b/drivers/dsp/syslink/procmgr/processor.c
new file mode 100644
index 000000000000..4548d12ad967
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/processor.c
@@ -0,0 +1,398 @@
+/*
+ * processor.c
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+
+/* Module level headers */
+#include "procdefs.h"
+#include "processor.h"
+
+
+
+/* =========================================
+ * Functions called by ProcMgr
+ * =========================================
+ */
+/*
+ * Function to attach to the Processor.
+ *
+ * This function calls into the specific Processor implementation
+ * to attach to it.
+ * This function is called from the ProcMgr attach function, and
+ * hence is used to perform any activities that may be required
+ * once the slave is powered up.
+ * Depending on the type of Processor, this function may or may not
+ * perform any activities.
+ */
+inline int processor_attach(void *handle,
+ struct processor_attach_params *params)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(params == NULL);
+ BUG_ON(proc_handle->proc_fxn_table.attach == NULL);
+
+ proc_handle->boot_mode = params->params->boot_mode;
+ retval = proc_handle->proc_fxn_table.attach(handle, params);
+
+ if (proc_handle->boot_mode == PROC_MGR_BOOTMODE_BOOT)
+ proc_handle->state = PROC_MGR_STATE_POWERED;
+ else if (proc_handle->boot_mode == PROC_MGR_BOOTMODE_NOLOAD)
+ proc_handle->state = PROC_MGR_STATE_LOADED;
+ else if (proc_handle->boot_mode == PROC_MGR_BOOTMODE_NOBOOT)
+ proc_handle->state = PROC_MGR_STATE_RUNNNING;
+ return retval;
+}
+
+
+/*
+ * Function to detach from the Processor.
+ *
+ * This function calls into the specific Processor implementation
+ * to detach from it.
+ * This function is called from the ProcMgr detach function, and
+ * hence is useful to perform any activities that may be required
+ * before the slave is powered down.
+ * Depending on the type of Processor, this function may or may not
+ * perform any activities.
+ */
+inline int processor_detach(void *handle)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_handle->proc_fxn_table.detach == NULL);
+
+ retval = proc_handle->proc_fxn_table.detach(handle);
+ /* For all boot modes, at the end of detach, the Processor is in
+ * unknown state.
+ */
+ proc_handle->state = PROC_MGR_STATE_UNKNOWN;
+ return retval;
+}
+
+
+/*
+ * Function to start the processor.
+ *
+ * This function calls into the specific Processor implementation
+ * to start the slave processor running.
+ * This function starts the slave processor running, in most
+ * devices, by programming its entry point into the boot location
+ * of the slave processor and releasing it from reset.
+ * The handle specifies the specific Processor instance to be used.
+ *
+ * @param handle void * to the Processor object
+ * @param	entry_pt	Entry point of the file loaded on the slave Processor
+ *
+ * @sa		processor_stop
+ */
+inline int processor_start(void *handle, u32 entry_pt,
+ struct processor_start_params *params)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+	/* entry_pt may be 0 for some devices. Cannot check for valid/invalid. */
+ BUG_ON(params == NULL);
+ BUG_ON(proc_handle->proc_fxn_table.start == NULL);
+ retval = proc_handle->proc_fxn_table.start(handle, entry_pt, params);
+
+ if ((proc_handle->boot_mode == PROC_MGR_BOOTMODE_BOOT)
+ || (proc_handle->boot_mode == PROC_MGR_BOOTMODE_NOLOAD))
+ proc_handle->state = PROC_MGR_STATE_RUNNNING;
+
+ return retval;
+}
+
+
+/*
+ * Function to stop the processor.
+ *
+ * This function calls into the specific Processor implementation
+ * to stop the slave processor.
+ * This function stops the slave processor running, in most
+ * devices, by placing it in reset.
+ * The handle specifies the specific Processor instance to be used.
+ */
+inline int processor_stop(void *handle,
+ struct processor_stop_params *params)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_handle->proc_fxn_table.stop == NULL);
+
+ retval = proc_handle->proc_fxn_table.stop(handle, params);
+
+ if ((proc_handle->boot_mode == PROC_MGR_BOOTMODE_BOOT)
+ || (proc_handle->boot_mode == PROC_MGR_BOOTMODE_NOLOAD))
+ proc_handle->state = PROC_MGR_STATE_RESET;
+
+ return retval;
+}
+
+
+/*
+ * Function to read from the slave processor's memory.
+ *
+ * This function calls into the specific Processor implementation
+ * to read from the slave processor's memory. It reads from the
+ * specified address in the processor's address space and copies
+ * the required number of bytes into the specified buffer.
+ * It returns the number of bytes actually read in the num_bytes
+ * parameter.
+ * Depending on the processor implementation, it may result in
+ * reading from shared memory or across a peripheral physical
+ * connectivity.
+ * The handle specifies the specific Processor instance to be used.
+ */
+inline int processor_read(void *handle, u32 proc_addr,
+ u32 *num_bytes, void *buffer)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_addr == 0);
+ BUG_ON(num_bytes == 0);
+ BUG_ON(buffer == NULL);
+ BUG_ON(proc_handle->proc_fxn_table.read == NULL);
+
+ retval = proc_handle->proc_fxn_table.read(handle, proc_addr,
+ num_bytes, buffer);
+ return retval;
+}
+
+
+/*
+ * Function to write into the slave processor's memory.
+ *
+ * This function calls into the specific Processor implementation
+ * to write into the slave processor's memory. It writes into the
+ * specified address in the processor's address space and copies
+ * the required number of bytes from the specified buffer.
+ * It returns the number of bytes actually written in the num_bytes
+ * parameter.
+ * Depending on the processor implementation, it may result in
+ * writing into shared memory or across a peripheral physical
+ * connectivity.
+ * The handle specifies the specific Processor instance to be used.
+ */
+inline int processor_write(void *handle, u32 proc_addr, u32 *num_bytes,
+ void *buffer)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_addr == 0);
+ BUG_ON(num_bytes == 0);
+ BUG_ON(buffer == NULL);
+ BUG_ON(proc_handle->proc_fxn_table.write == NULL);
+
+ retval = proc_handle->proc_fxn_table.write(handle, proc_addr,
+ num_bytes, buffer);
+ return retval;
+}
+
+
+/*
+ * Function to get the current state of the slave Processor.
+ *
+ * This function gets the state of the slave processor as
+ * maintained on the master Processor state machine. It does not
+ * go to the slave processor to get its actual state at the time
+ * when this API is called.
+ */
+enum proc_mgr_state processor_get_state(void *handle)
+{
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+
+ return proc_handle->state;
+}
+
+
+/*
+ * Function to set the current state of the slave Processor
+ * to specified value.
+ *
+ * This function is used to set the state of the processor to the
+ * value as specified. This function may be used by external
+ * entities that affect the state of the slave processor, such as
+ * PwrMgr, error handler, or ProcMgr.
+ */
+void processor_set_state(void *handle, enum proc_mgr_state state)
+{
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ proc_handle->state = state;
+}
+
+
+/*
+ * Function to perform device-dependent operations.
+ *
+ * This function calls into the specific Processor implementation
+ * to perform device dependent control operations. The control
+ * operations supported by the device are exposed directly by the
+ * specific implementation of the Processor interface. These
+ * commands and their specific argument types are used with this
+ * function.
+ */
+inline int processor_control(void *handle, int cmd, void *arg)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_handle->proc_fxn_table.control == NULL);
+
+ retval = proc_handle->proc_fxn_table.control(handle, cmd, arg);
+ return retval;
+}
+
+
+/*
+ * Function to translate between two types of address spaces.
+ *
+ * This function translates addresses between two types of address
+ * spaces. The destination and source address types are indicated
+ * through parameters specified in this function.
+ */
+inline int processor_translate_addr(void *handle, void **dst_addr,
+ enum proc_mgr_addr_type dst_addr_type, void *src_addr,
+ enum proc_mgr_addr_type src_addr_type)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(dst_addr == NULL);
+ BUG_ON(src_addr == NULL);
+ BUG_ON(dst_addr_type >= PROC_MGR_ADDRTYPE_ENDVALUE);
+ BUG_ON(src_addr_type >= PROC_MGR_ADDRTYPE_ENDVALUE);
+ BUG_ON(proc_handle->proc_fxn_table.translateAddr == NULL);
+
+ retval = proc_handle->proc_fxn_table.translateAddr(handle,
+ dst_addr, dst_addr_type, src_addr, src_addr_type);
+ return retval;
+}
+
+
+/*
+ * Function to map a slave address to the host address space.
+ *
+ * This function maps the provided slave address to a host address
+ * and returns the mapped address and size.
+ */
+inline int processor_map(void *handle, u32 proc_addr, u32 size,
+ u32 *mapped_addr, u32 *mapped_size, u32 map_attribs)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_addr == 0);
+ BUG_ON(size == 0);
+ BUG_ON(mapped_addr == NULL);
+ BUG_ON(mapped_size == NULL);
+ BUG_ON(proc_handle->proc_fxn_table.map == NULL);
+
+ retval = proc_handle->proc_fxn_table.map(handle, proc_addr,
+ size, mapped_addr, mapped_size, map_attribs);
+ return retval;
+}
+
+/*
+ * Function to unmap a slave address from the host address space.
+ *
+ * This function unmaps the previously mapped slave address.
+ */
+inline int processor_unmap(void *handle, u32 mapped_addr)
+{
+ int retval = 0;
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+
+ retval = proc_handle->proc_fxn_table.unmap(handle, mapped_addr);
+ return retval;
+}
+
+/*
+ * Function that registers for notification when the slave
+ * processor transitions to any of the states specified.
+ *
+ * This function allows the user application to register for
+ * changes in processor state and take actions accordingly.
+ */
+inline int processor_register_notify(void *handle, proc_mgr_callback_fxn fxn,
+ void *args, enum proc_mgr_state state[])
+{
+ int retval = 0;
+
+ BUG_ON(handle == NULL);
+ BUG_ON(fxn == NULL);
+
+ /* TODO: TBD: To be implemented. */
+ return retval;
+}
+
+/*
+ * Function that returns the proc instance mem info
+ */
+int processor_get_proc_info(void *handle, struct proc_mgr_proc_info *procinfo)
+{
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+ int retval;
+ retval = proc_handle->proc_fxn_table.procinfo(proc_handle, procinfo);
+ return retval;
+}
+
+/*
+ * Function that returns the address translations
+ */
+int processor_virt_to_phys(void *handle, u32 da, u32 *mapped_entries,
+ u32 num_of_entries)
+{
+ struct processor_object *proc_handle =
+ (struct processor_object *)handle;
+ int retval;
+ retval = proc_handle->proc_fxn_table.virt_to_phys(handle, da,
+ mapped_entries, num_of_entries);
+ return retval;
+}
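+
+/* Reference sketch (illustrative only, hence not compiled): the state
+ * transitions coded in processor_attach(), processor_start() and
+ * processor_stop() above amount to a small boot-mode table. This assumes
+ * the boot-mode enumerators are small, dense values, as enums usually are.
+ */
+#if 0
+static const enum proc_mgr_state post_attach_state[] = {
+	[PROC_MGR_BOOTMODE_BOOT]	= PROC_MGR_STATE_POWERED,
+	[PROC_MGR_BOOTMODE_NOLOAD]	= PROC_MGR_STATE_LOADED,
+	[PROC_MGR_BOOTMODE_NOBOOT]	= PROC_MGR_STATE_RUNNNING,
+};
+#endif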
diff --git a/drivers/dsp/syslink/procmgr/processor.h b/drivers/dsp/syslink/procmgr/processor.h
new file mode 100644
index 000000000000..b4f78581839e
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/processor.h
@@ -0,0 +1,84 @@
+/*
+ * processor.h
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef SYSLINK_PROCESSOR_H_
+#define SYSLINK_PROCESSOR_H_
+
+#include <linux/types.h>
+
+/* Module level headers */
+#include "procdefs.h"
+
+/* ===================================
+ * APIs
+ * ===================================
+ */
+/* Function to attach to the Processor. */
+int processor_attach(void *handle, struct processor_attach_params *params);
+
+/* Function to detach from the Processor. */
+int processor_detach(void *handle);
+
+/* Function to start the processor. */
+int processor_start(void *handle, u32 entry_pt,
+ struct processor_start_params *params);
+
+/* Function to stop the processor. */
+int processor_stop(void *handle,
+ struct processor_stop_params *params);
+
+/* Function to read from the slave processor's memory. */
+int processor_read(void *handle, u32 proc_addr, u32 *num_bytes, void *buffer);
+
+/* Function to write into the slave processor's memory. */
+int processor_write(void *handle, u32 proc_addr, u32 *num_bytes, void *buffer);
+
+/* Function to get the current state of the slave Processor as maintained on
+ * the master Processor state machine.
+ */
+enum proc_mgr_state processor_get_state(void *handle);
+
+/* Function to set the current state of the slave Processor to specified value.
+ */
+void processor_set_state(void *handle, enum proc_mgr_state state);
+
+/* Function to perform device-dependent operations. */
+int processor_control(void *handle, int cmd, void *arg);
+
+/* Function to translate between two types of address spaces. */
+int processor_translate_addr(void *handle, void **dst_addr,
+ enum proc_mgr_addr_type dst_addr_type, void *src_addr,
+ enum proc_mgr_addr_type src_addr_type);
+
+/* Function to map address to slave address space */
+int processor_map(void *handle, u32 proc_addr, u32 size, u32 *mapped_addr,
+ u32 *mapped_size, u32 map_attribs);
+/* Function to unmap address to slave address space */
+int processor_unmap(void *handle, u32 mapped_addr);
+
+/* Function that registers for notification when the slave processor
+ * transitions to any of the states specified.
+ */
+int processor_register_notify(void *handle, proc_mgr_callback_fxn fxn,
+ void *args, enum proc_mgr_state state[]);
+
+/* Function that returns the processor instance information. */
+int processor_get_proc_info(void *handle, struct proc_mgr_proc_info *procinfo);
+
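+/* Function that returns virtual-to-physical address translations. */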
+int processor_virt_to_phys(void *handle, u32 da, u32 *mapped_entries,
+ u32 num_of_entries);
+#endif
diff --git a/drivers/dsp/syslink/procmgr/procmgr.c b/drivers/dsp/syslink/procmgr/procmgr.c
new file mode 100644
index 000000000000..f7f963e4d781
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/procmgr.c
@@ -0,0 +1,958 @@
+/*
+ * procmgr.c
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+
+/* Module level headers */
+#include <procmgr.h>
+#include "procdefs.h"
+#include "processor.h"
+#include <syslink/atomic_linux.h>
+
+/* ================================
+ * Macros and types
+ * ================================
+ */
+/* Macro to form the module magic stamp by combining the module ID
+ * with the given refCount value */
+#define PROCMGR_MAKE_MAGICSTAMP(x) ((PROCMGR_MODULEID << 12u) | (x))
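+/* Worked example, assuming (hypothetically) PROCMGR_MODULEID == 0xf01:
+ *   PROCMGR_MAKE_MAGICSTAMP(0) == (0xf01 << 12) | 0 == 0x00f01000
+ *   PROCMGR_MAKE_MAGICSTAMP(1) == (0xf01 << 12) | 1 == 0x00f01001
+ * The low 12 bits carry the reference count and the upper bits tag the
+ * counter as belonging to this module.
+ */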
+
+/*
+ * ProcMgr Module state object
+ */
+struct proc_mgr_module_object {
+ atomic_t ref_count;
+ u32 config_size;
+ /* Size of configuration structure */
+ struct proc_mgr_config cfg;
+ /* ProcMgr configuration structure */
+ struct proc_mgr_config def_cfg;
+ /* Default module configuration */
+ struct proc_mgr_params def_inst_params;
+ /* Default parameters for the ProcMgr instances */
+ struct proc_mgr_attach_params def_attach_params;
+ /* Default parameters for the ProcMgr attach function */
+ struct proc_mgr_start_params def_start_params;
+ /* Default parameters for the ProcMgr start function */
+ struct proc_mgr_stop_params def_stop_params;
+ /* Default parameters for the ProcMgr stop function */
+ struct mutex *gate_handle;
+ /* handle of gate to be used for local thread safety */
+ void *proc_handles[MULTIPROC_MAXPROCESSORS];
+ /* Array of handles of ProcMgr instances */
+};
+
+/*
+ * ProcMgr instance object
+ */
+struct proc_mgr_object {
+ u16 proc_id;
+ /* Processor ID associated with this ProcMgr. */
+	struct processor_object *proc_handle;
+	/* Handle to the Processor object associated with this ProcMgr. */
+	void *loader_handle;
+	/* Handle to the Loader object associated with this ProcMgr. */
+	void *pwr_handle;
+	/* Handle to the PwrMgr object associated with this ProcMgr. */
+ struct proc_mgr_params params;
+ /* ProcMgr instance params structure */
+ struct proc_mgr_attach_params attach_params;
+ /* ProcMgr attach params structure */
+ struct proc_mgr_start_params start_params;
+ /* ProcMgr start params structure */
+ struct proc_mgr_stop_params stop_params;
+	/* ProcMgr stop params structure */
+	u32 file_id;
+	/* File ID of the loaded static executable */
+ u16 num_mem_entries;
+ /* Number of valid memory entries */
+ struct proc_mgr_addr_info mem_entries[PROCMGR_MAX_MEMORY_REGIONS];
+ /* Configuration of memory regions */
+};
+
+static struct proc_mgr_module_object proc_mgr_obj_state = {
+ .config_size = sizeof(struct proc_mgr_config),
+ .def_cfg.gate_handle = NULL,
+ .gate_handle = NULL,
+ .def_inst_params.proc_handle = NULL,
+ .def_attach_params.boot_mode = PROC_MGR_BOOTMODE_BOOT,
+ .def_start_params.proc_id = 0
+};
+
+
+/*======================================
+ * Function to get the default configuration for the ProcMgr
+ * module.
+ *
+ * This function can be called by the application to get its
+ * configuration parameters for proc_mgr_setup filled in by the
+ * ProcMgr module with the default values. If the user does
+ * not wish to change the default parameters, this API
+ * need not be called.
+ */
+void proc_mgr_get_config(struct proc_mgr_config *cfg)
+{
+ BUG_ON(cfg == NULL);
+ memcpy(cfg, &proc_mgr_obj_state.def_cfg,
+ sizeof(struct proc_mgr_config));
+ return;
+}
+EXPORT_SYMBOL(proc_mgr_get_config);
+
+/*
+ * Function to setup the ProcMgr module.
+ *
+ * This function sets up the ProcMgr module. This function must
+ * be called before any other instance-level APIs can be invoked.
+ * Module-level configuration needs to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, then proc_mgr_get_config can be called to get the
+ * configuration filled with the default values. After this, only
+ * the required configuration values can be changed. If the user
+ * does not wish to make any change in the default parameters, the
+ * application can simply call proc_mgr_setup with NULL parameters.
+ * The default parameters would then be used automatically.
+ */
+int proc_mgr_setup(struct proc_mgr_config *cfg)
+{
+ int retval = 0;
+ struct proc_mgr_config tmp_cfg;
+
+	/* This initializes the refCount variable if it is not already
+	 * initialized; the upper bits are written with the module ID to
+	 * ensure the correctness of the refCount variable.
+	 */
+ atomic_cmpmask_and_set(&proc_mgr_obj_state.ref_count,
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(0));
+
+ if (atomic_inc_return(&proc_mgr_obj_state.ref_count)
+ != PROCMGR_MAKE_MAGICSTAMP(1u))
+ return 0;
+ if (cfg == NULL) {
+ proc_mgr_get_config(&tmp_cfg);
+ cfg = &tmp_cfg;
+ }
+ if (cfg->gate_handle != NULL) {
+ proc_mgr_obj_state.gate_handle = cfg->gate_handle;
+ } else {
+ /* User has not provided any gate handle, so create a
+ *default handle.
+ */
+ proc_mgr_obj_state.gate_handle = kmalloc(sizeof(struct mutex),
+ GFP_KERNEL);
+ mutex_init(proc_mgr_obj_state.gate_handle);
+ }
+ memcpy(&proc_mgr_obj_state.cfg, cfg, sizeof(struct proc_mgr_config));
+ /* Initialize the procHandles array. */
+ memset(&proc_mgr_obj_state.proc_handles, 0,
+ (sizeof(void *) * MULTIPROC_MAXPROCESSORS));
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_setup);
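+
+/* Usage sketch (illustrative only, hence not compiled): module bring-up
+ * and tear-down as described above, for a kernel caller that accepts
+ * the defaults.
+ */
+#if 0
+static int procmgr_setup_example(void)
+{
+	struct proc_mgr_config cfg;
+	int ret;
+
+	proc_mgr_get_config(&cfg);	/* start from the defaults */
+	/* ... optionally override cfg.gate_handle here ... */
+	ret = proc_mgr_setup(&cfg);	/* proc_mgr_setup(NULL) also works */
+	if (ret < 0)
+		return ret;
+	/* ... create/open and use ProcMgr instances ... */
+	return proc_mgr_destroy();	/* balances the setup above */
+}
+#endif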
+
+/*==================================
+ * Function to destroy the ProcMgr module.
+ *
+ * Once this function is called, other ProcMgr module APIs, except
+ * for the proc_mgr_get_config API cannot be called anymore.
+ */
+int proc_mgr_destroy(void)
+{
+ int retval = 0;
+ int i;
+
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_destroy: Error - module not initialized\n");
+ return -EFAULT;
+ }
+ if (atomic_dec_return(&proc_mgr_obj_state.ref_count)
+ == PROCMGR_MAKE_MAGICSTAMP(0)) {
+
+		/* Delete any ProcMgr instances that have not been
+		 * deleted so far.
+		 */
+ for (i = 0 ; i < MULTIPROC_MAXPROCESSORS; i++) {
+ if (proc_mgr_obj_state.proc_handles[i] != NULL)
+ proc_mgr_delete
+ (&(proc_mgr_obj_state.proc_handles[i]));
+ }
+
+ mutex_destroy(proc_mgr_obj_state.gate_handle);
+ kfree(proc_mgr_obj_state.gate_handle);
+		/* Reset the refCount to the module magic stamp */
+ atomic_set(&proc_mgr_obj_state.ref_count,
+ PROCMGR_MAKE_MAGICSTAMP(0));
+ }
+	return retval;
+}
+EXPORT_SYMBOL(proc_mgr_destroy);
+
+/*=====================================
+ * Function to initialize the parameters for the ProcMgr instance.
+ *
+ * This function can be called by the application to get its
+ * configuration parameters for proc_mgr_create filled in by the
+ * ProcMgr module with the default parameters.
+ */
+void proc_mgr_params_init(void *handle, struct proc_mgr_params *params)
+{
+ struct proc_mgr_object *proc_handle = (struct proc_mgr_object *)handle;
+
+ if (WARN_ON(params == NULL))
+ goto exit;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_params_init: Error - module not initialized\n");
+ }
+ if (handle == NULL) {
+ memcpy(params, &(proc_mgr_obj_state.def_inst_params),
+ sizeof(struct proc_mgr_params));
+ } else {
+ /* Return updated ProcMgr instance specific parameters. */
+ memcpy(params, &(proc_handle->params),
+ sizeof(struct proc_mgr_params));
+ }
+exit:
+ return;
+}
+EXPORT_SYMBOL(proc_mgr_params_init);
+
+/*=====================================
+ * Function to create a ProcMgr object for a specific slave
+ * processor.
+ *
+ * This function creates an instance of the ProcMgr module and
+ * returns an instance handle, which is used to access the
+ * specified slave processor. The processor ID specified here is
+ * the ID of the slave processor as configured with the MultiProc
+ * module.
+ * Instance-level configuration needs to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, then proc_mgr_params_init can be called to get the
+ * configuration filled with the default values. After this, only
+ * the required configuration values can be changed. For this
+ * API, the params argument is not optional, since the user needs
+ * to provide some essential values such as loader, PwrMgr and
+ * Processor instances to be used with this ProcMgr instance.
+ */
+void *proc_mgr_create(u16 proc_id, const struct proc_mgr_params *params)
+{
+ struct proc_mgr_object *handle = NULL;
+
+ BUG_ON(!IS_VALID_PROCID(proc_id));
+ BUG_ON(params == NULL);
+ BUG_ON(params->proc_handle == NULL);
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_create: Error - module not initialized\n");
+ return NULL;
+ }
+ if (proc_mgr_obj_state.proc_handles[proc_id] != NULL) {
+ handle = proc_mgr_obj_state.proc_handles[proc_id];
+ printk(KERN_WARNING "proc_mgr_create:"
+ "Processor already exists for specified"
+ "%d proc_id, handle = 0x%x\n", proc_id, (u32)handle);
+ return handle;
+ }
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ handle = (struct proc_mgr_object *)
+ vmalloc(sizeof(struct proc_mgr_object));
+ BUG_ON(handle == NULL);
+ memset(handle, 0, sizeof(struct proc_mgr_object));
+ memcpy(&(handle->params), params, sizeof(struct proc_mgr_params));
+ handle->proc_id = proc_id;
+ handle->proc_handle = params->proc_handle;
+ handle->loader_handle = params->loader_handle;
+ handle->pwr_handle = params->pwr_handle;
+ proc_mgr_obj_state.proc_handles[proc_id] = handle;
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return handle;
+}
+EXPORT_SYMBOL(proc_mgr_create);
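+
+/* Usage sketch (illustrative only, hence not compiled): instance creation
+ * following the rules above. params is mandatory; proc_handle is assumed
+ * to come from the device-specific create call (e.g. proc4430_create()).
+ */
+#if 0
+static void *procmgr_create_example(u16 proc_id, void *proc_handle)
+{
+	struct proc_mgr_params params;
+
+	proc_mgr_params_init(NULL, &params);	/* default instance params */
+	params.proc_handle = proc_handle;	/* required, see above */
+	/* loader_handle and pwr_handle are filled in the same way */
+	return proc_mgr_create(proc_id, &params); /* NULL on failure */
+}
+#endif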
+
+/*===================================
+ * Function to delete a ProcMgr object for a specific slave
+ * processor.
+ *
+ * Once this function is called, other ProcMgr instance level APIs
+ * that require the instance handle cannot be called.
+ *
+ */
+int proc_mgr_delete(void **handle_ptr)
+{
+ int retval = 0;
+ struct proc_mgr_object *handle;
+
+ BUG_ON(handle_ptr == NULL);
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_delete: Error - module not initialized\n");
+ return -EFAULT;
+ }
+
+ handle = (struct proc_mgr_object *)(*handle_ptr);
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ proc_mgr_obj_state.proc_handles[handle->proc_id] = NULL;
+ vfree(handle);
+ *handle_ptr = NULL;
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_delete);
+
+/*======================================
+ * Function to open a handle to an existing ProcMgr object handling
+ * the proc_id.
+ *
+ * This function returns a handle to an existing ProcMgr instance
+ * created for this proc_id. It enables other entities to access
+ * and use this ProcMgr instance.
+ */
+int proc_mgr_open(void **handle_ptr, u16 proc_id)
+{
+ int retval = 0;
+
+ BUG_ON(handle_ptr == NULL);
+ BUG_ON(!IS_VALID_PROCID(proc_id));
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_open: Error - module not initialized\n");
+ return -EFAULT;
+ }
+
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ *handle_ptr = proc_mgr_obj_state.proc_handles[proc_id];
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_open);
+
+/*=====================================
+ * Function to close this handle to the ProcMgr instance.
+ *
+ * This function closes the handle to the ProcMgr instance
+ * obtained through proc_mgr_open call made earlier.
+ */
+int proc_mgr_close(void *handle)
+{
+ int retval = 0;
+
+ BUG_ON(handle == NULL);
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_close: Error - module not initialized\n");
+ return -EFAULT;
+ }
+ /* Nothing to be done for closing the handle. */
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_close);
+
+/*========================================
+ * Function to initialize the parameters for the ProcMgr attach
+ * function.
+ *
+ * This function can be called by the application to get their
+ * configuration parameter to proc_mgr_attach filled in by the
+ * ProcMgr module with the default parameters. If the user does
+ * not wish to make any change in the default parameters, this API
+ * is not required to be called.
+ */
+void proc_mgr_get_attach_params(void *handle,
+ struct proc_mgr_attach_params *params)
+{
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ BUG_ON(params == NULL);
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_get_attach_params:"
+ "Error - module not initialized\n");
+ }
+ if (handle == NULL) {
+ memcpy(params, &(proc_mgr_obj_state.def_attach_params),
+ sizeof(struct proc_mgr_attach_params));
+ } else {
+ /* Return updated ProcMgr instance specific parameters. */
+ memcpy(params, &(proc_mgr_handle->attach_params),
+ sizeof(struct proc_mgr_attach_params));
+ }
+ return;
+}
+EXPORT_SYMBOL(proc_mgr_get_attach_params);
+
+/*
+ * Function to attach the client to the specified slave and also
+ * initialize the slave (if required).
+ *
+ * This function attaches to an instance of the ProcMgr module and
+ * performs any hardware initialization required to power up the
+ * slave device. This function also performs the required state
+ * transitions for this ProcMgr instance to ensure that the local
+ * object representing the slave device correctly indicates the
+ * state of the slave device. Depending on the slave boot mode
+ * being used, the slave may be powered up, in reset, or even
+ * running state.
+ * Configuration parameters need to be provided to this
+ * function. If the user wishes to change some specific config
+ * parameters, then proc_mgr_get_attach_params can be called to get
+ * the configuration filled with the default values. After this,
+ * only the required configuration values can be changed. If the
+ * user does not wish to make any change in the default parameters,
+ * the application can simply call proc_mgr_attach with NULL
+ * parameters.
+ * The default parameters would get automatically used.
+ */
+int proc_mgr_attach(void *handle, struct proc_mgr_attach_params *params)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ struct proc_mgr_attach_params tmp_params;
+ struct processor_attach_params proc_attach_params;
+
+ if (params == NULL) {
+ proc_mgr_get_attach_params(handle, &tmp_params);
+ params = &tmp_params;
+ }
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_attach:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ if (WARN_ON(handle == NULL)) {
+ retval = -EFAULT;
+ goto exit;
+ }
+ if (WARN_ON(params->boot_mode == PROC_MGR_BOOTMODE_ENDVALUE)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ /* Copy the user provided values into the instance object. */
+ memcpy(&(proc_mgr_handle->attach_params), params,
+ sizeof(struct proc_mgr_attach_params));
+ proc_attach_params.params = params;
+ proc_attach_params.num_mem_entries = 0;
+ /* Attach to the specified Processor instance. */
+ retval = processor_attach(proc_mgr_handle->proc_handle,
+ &proc_attach_params);
+ proc_mgr_handle->num_mem_entries = proc_attach_params.num_mem_entries;
+ printk(KERN_INFO "proc_mgr_attach:proc_mgr_handle->num_mem_entries = %d\n",
+ proc_mgr_handle->num_mem_entries);
+ /* Store memory information in local object.*/
+ memcpy(&(proc_mgr_handle->mem_entries),
+ &(proc_attach_params.mem_entries),
+ sizeof(proc_mgr_handle->mem_entries));
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+exit:
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_attach);
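+
+/* Usage sketch (illustrative only, hence not compiled): attach with an
+ * explicit boot mode; passing NULL params instead picks up the defaults
+ * (PROC_MGR_BOOTMODE_BOOT per the module state initializer above).
+ */
+#if 0
+static int procmgr_attach_example(void *proc_mgr)
+{
+	struct proc_mgr_attach_params params;
+
+	proc_mgr_get_attach_params(proc_mgr, &params);
+	params.boot_mode = PROC_MGR_BOOTMODE_NOBOOT; /* slave already runs */
+	return proc_mgr_attach(proc_mgr, &params);
+}
+#endif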
+
+/*===================================
+ * Function to detach the client from the specified slave and also
+ * finalize the slave (if required).
+ *
+ * This function detaches from an instance of the ProcMgr module
+ * and performs any hardware finalization required to power down
+ * the slave device. This function also performs the required state
+ * transitions for this ProcMgr instance to ensure that the local
+ * object representing the slave device correctly indicates the
+ * state of the slave device. Depending on the slave boot mode
+ * being used, the slave may be powered down, in reset, or left in
+ * its original state.
+*/
+int proc_mgr_detach(void *handle)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_detach:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(handle == NULL);
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ /* Detach from the Processor. */
+ retval = processor_detach(proc_mgr_handle->proc_handle);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_detach);
+
+/*===============================
+ * Function to initialize the parameters for the ProcMgr start
+ * function.
+ *
+ * This function can be called by the application to get their
+ * configuration parameter to proc_mgr_start filled in by the
+ * ProcMgr module with the default parameters. If the user does
+ * not wish to make any change in the default parameters, this API
+ * is not required to be called.
+ *
+ */
+void proc_mgr_get_start_params(void *handle,
+ struct proc_mgr_start_params *params)
+{
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_get_start_params:"
+ "Error - module not initialized\n");
+ }
+ BUG_ON(params == NULL);
+
+ if (handle == NULL) {
+ memcpy(params, &(proc_mgr_obj_state.def_start_params),
+ sizeof(struct proc_mgr_start_params));
+ } else {
+ /* Return updated ProcMgr instance specific parameters. */
+ memcpy(params, &(proc_mgr_handle->start_params),
+ sizeof(struct proc_mgr_start_params));
+ }
+ return;
+}
+EXPORT_SYMBOL(proc_mgr_get_start_params);
+
+/*==========================================
+ * Function to start the slave processor running.
+ *
+ * Function to start execution of the loaded code on the slave
+ * from the entry point specified in the slave executable loaded
+ * earlier by a call to proc_mgr_load().
+ * After successful completion of this function, the ProcMgr
+ * instance is expected to be in the PROC_MGR_STATE_RUNNING state.
+ */
+int proc_mgr_start(void *handle, u32 entry_point,
+ struct proc_mgr_start_params *params)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ struct proc_mgr_start_params tmp_params;
+ struct processor_start_params proc_params;
+
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_start:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(handle == NULL);
+
+ if (params == NULL) {
+ proc_mgr_get_start_params(handle, &tmp_params);
+ params = &tmp_params;
+ }
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ memcpy(&(proc_mgr_handle->start_params), params,
+ sizeof(struct proc_mgr_start_params));
+ /* Start the slave processor running. */
+ proc_params.params = params;
+ retval = processor_start(proc_mgr_handle->proc_handle,
+ entry_point, &proc_params);
+
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_start);
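+
+/*
+ * Illustrative sketch of the default-parameters pattern (not part of
+ * this module): fetch the defaults, optionally modify them, then
+ * start the slave. entry_point is assumed to come from a prior load
+ * step; error handling is elided.
+ *
+ * struct proc_mgr_start_params start_params;
+ * int status;
+ *
+ * proc_mgr_get_start_params(handle, &start_params);
+ * status = proc_mgr_start(handle, entry_point, &start_params);
+ */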
+
+/*========================================
+ * Function to stop the slave processor.
+ *
+ * Function to stop execution of the slave processor.
+ * Depending on the boot mode, after successful completion of this
+ * function, the ProcMgr instance may be in the PROC_MGR_STATE_RESET
+ * state.
+ *
+ */
+int proc_mgr_stop(void *handle, struct proc_mgr_stop_params *params)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ struct processor_stop_params proc_params;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_stop:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(handle == NULL);
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ proc_params.params = params;
+ retval = processor_stop(proc_mgr_handle->proc_handle,
+ &proc_params);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_stop);
+
+/*===================================
+ * Function to get the current state of the slave Processor.
+ *
+ * This function gets the state of the slave processor as
+ * maintained on the master Processor state machine. It does not
+ * go to the slave processor to get its actual state at the time
+ * when this API is called.
+ *
+ */
+enum proc_mgr_state proc_mgr_get_state(void *handle)
+{
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ enum proc_mgr_state state = PROC_MGR_STATE_UNKNOWN;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_get_state:"
+ "Error - module not initialized\n");
+ return state;
+ }
+ BUG_ON(handle == NULL);
+
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ state = processor_get_state(proc_mgr_handle->proc_handle);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return state;
+}
+EXPORT_SYMBOL(proc_mgr_get_state);
+
+/*==================================================
+ * Function to read from the slave processor's memory.
+ *
+ * This function reads from the specified address in the
+ * processor's address space and copies the required number of
+ * bytes into the specified buffer.
+ * It returns the number of bytes actually read in the num_bytes
+ * parameter.
+ */
+int proc_mgr_read(void *handle, u32 proc_addr, u32 *num_bytes, void *buffer)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_read:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_addr == 0);
+ BUG_ON(num_bytes == NULL);
+ BUG_ON(buffer == NULL);
+
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+
+ retval = processor_read(proc_mgr_handle->proc_handle, proc_addr,
+ num_bytes, buffer);
+ WARN_ON(retval < 0);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_read);
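+
+/*
+ * Illustrative sketch of the IN/OUT num_bytes convention (not part
+ * of this module): pass the requested byte count in and read back
+ * the count actually copied. slave_addr is an assumed slave-side
+ * address; error handling is elided.
+ *
+ * char buf[128];
+ * u32 num_bytes = sizeof(buf);
+ *
+ * if (proc_mgr_read(handle, slave_addr, &num_bytes, buf) >= 0)
+ * printk(KERN_INFO "read %u bytes\n", num_bytes);
+ */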
+
+/*
+ * Function to write into the slave processor's memory.
+ *
+ * This function writes into the specified address in the
+ * processor's address space and copies the required number of
+ * bytes from the specified buffer.
+ * It returns the number of bytes actually written in the num_bytes
+ * parameter.
+ */
+int proc_mgr_write(void *handle, u32 proc_addr, u32 *num_bytes, void *buffer)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_write:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_addr == 0);
+ BUG_ON(num_bytes == NULL);
+ BUG_ON(buffer == NULL);
+
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ retval = processor_write(proc_mgr_handle->proc_handle, proc_addr,
+ num_bytes, buffer);
+ WARN_ON(retval < 0);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_write);
+
+
+/*===================================
+ * Function to perform device-dependent operations.
+ *
+ * This function performs device-specific control operations as
+ * exposed directly by the specific implementation of the Processor
+ * interface. The supported commands and their specific argument
+ * types are defined by that implementation.
+ */
+int proc_mgr_control(void *handle, int cmd, void *arg)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle
+ = (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_control:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(handle == NULL);
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ /* Perform device-dependent control operation. */
+ retval = processor_control(proc_mgr_handle->proc_handle, cmd, arg);
+ WARN_ON(retval < 0);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_control);
+
+/*========================================
+ * Function to translate between two types of address spaces.
+ *
+ * This function translates addresses between two types of address
+ * spaces. The destination and source address types are indicated
+ * through parameters specified in this function.
+ */
+int proc_mgr_translate_addr(void *handle, void **dst_addr,
+ enum proc_mgr_addr_type dst_addr_type, void *src_addr,
+ enum proc_mgr_addr_type src_addr_type)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_translate_addr:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(dst_addr == NULL);
+ BUG_ON(handle == NULL);
+ BUG_ON(dst_addr_type > PROC_MGR_ADDRTYPE_ENDVALUE);
+ BUG_ON(src_addr == NULL);
+ BUG_ON(src_addr_type > PROC_MGR_ADDRTYPE_ENDVALUE);
+
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ /* Translate the address. */
+ retval = processor_translate_addr(proc_mgr_handle->proc_handle,
+ dst_addr, dst_addr_type, src_addr, src_addr_type);
+ WARN_ON(retval < 0);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_translate_addr);
+
+/*============================================
+ * Function to map a slave address to the host address space.
+ *
+ * This function maps the provided slave address to a host address
+ * and returns the mapped address and size.
+ *
+ */
+int proc_mgr_map(void *handle, u32 proc_addr, u32 size, u32 *mapped_addr,
+ u32 *mapped_size, u32 map_attribs)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_map:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(handle == NULL);
+ BUG_ON(proc_addr == 0);
+ BUG_ON(mapped_addr == NULL);
+ BUG_ON(mapped_size == NULL);
+
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+
+ /* Map to host address space. */
+ retval = processor_map(proc_mgr_handle->proc_handle, proc_addr,
+ size, mapped_addr, mapped_size, map_attribs);
+ WARN_ON(retval < 0);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_map);
+
+/*============================================
+ * Function to unmap an address from the host address space.
+ *
+ * This function unmaps a host address that was previously mapped
+ * for the slave processor.
+ *
+ */
+int proc_mgr_unmap(void *handle, u32 mapped_addr)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_unmap:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+
+ /* Unmap from the host address space. */
+ retval = processor_unmap(proc_mgr_handle->proc_handle, mapped_addr);
+ WARN_ON(retval < 0);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_unmap);
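+
+/*
+ * Illustrative map/unmap pairing (a hedged sketch, not part of this
+ * module): map a slave region into the host address space, use it,
+ * then unmap the returned host address. slave_addr, size and
+ * map_attribs are assumed values; error handling is elided.
+ *
+ * u32 mapped_addr, mapped_size;
+ *
+ * if (proc_mgr_map(handle, slave_addr, size, &mapped_addr,
+ * &mapped_size, map_attribs) >= 0) {
+ * ... access the mapped region ...
+ * proc_mgr_unmap(handle, mapped_addr);
+ * }
+ */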
+
+/*=================================
+ * Function that registers for notification when the slave
+ * processor transitions to any of the states specified.
+ *
+ * This function allows the user application to register for
+ * changes in processor state and take actions accordingly.
+ *
+ */
+int proc_mgr_register_notify(void *handle, proc_mgr_callback_fxn fxn,
+ void *args, enum proc_mgr_state state[])
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle
+ = (struct proc_mgr_object *)handle;
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_register_notify:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ BUG_ON(handle == NULL);
+ BUG_ON(fxn == NULL);
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+ retval = processor_register_notify(proc_mgr_handle->proc_handle, fxn,
+ args, state);
+ WARN_ON(retval < 0);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_register_notify);
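+
+/*
+ * Illustrative notification callback (a hypothetical client
+ * function, not part of this module; the terminator convention for
+ * the state array is an assumption here):
+ *
+ * static int my_state_cb(u16 proc_id, void *handle,
+ * enum proc_mgr_state from_state,
+ * enum proc_mgr_state to_state)
+ * {
+ * printk(KERN_INFO "proc %d: state %d -> %d\n",
+ * proc_id, from_state, to_state);
+ * return 0;
+ * }
+ *
+ * enum proc_mgr_state states[] = { PROC_MGR_STATE_RUNNING,
+ * PROC_MGR_STATE_UNKNOWN };
+ *
+ * proc_mgr_register_notify(handle, my_state_cb, NULL, states);
+ */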
+
+/*
+ * Function that returns information about the characteristics of
+ * the slave processor.
+ */
+int proc_mgr_get_proc_info(void *handle, struct proc_mgr_proc_info *proc_info)
+{
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+ struct processor_object *proc_handle;
+
+ struct proc_mgr_proc_info proc_info_test;
+
+ if (atomic_cmpmask_and_lt(&(proc_mgr_obj_state.ref_count),
+ PROCMGR_MAKE_MAGICSTAMP(0), PROCMGR_MAKE_MAGICSTAMP(1))
+ == true) {
+ printk(KERN_ERR "proc_mgr_get_proc_info:"
+ "Error - module not initialized\n");
+ return -EFAULT;
+ }
+ if (WARN_ON(handle == NULL))
+ goto error_exit;
+ if (WARN_ON(proc_info == NULL))
+ goto error_exit;
+ proc_handle = proc_mgr_handle->proc_handle;
+ if (WARN_ON(proc_handle == NULL))
+ goto error_exit;
+
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+
+ processor_get_proc_info(proc_handle, &proc_info_test);
+ /* Return bootMode information. */
+ proc_info->boot_mode = proc_mgr_handle->attach_params.boot_mode;
+ /* Return memory information. */
+ proc_info->num_mem_entries = proc_mgr_handle->num_mem_entries;
+ memcpy(&(proc_info->mem_entries),
+ &(proc_mgr_handle->mem_entries),
+ sizeof(proc_mgr_handle->mem_entries));
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return 0;
+error_exit:
+ return -EFAULT;
+}
+EXPORT_SYMBOL(proc_mgr_get_proc_info);
+
+/*============================================
+ * Function to get virtual to physical address translations.
+ *
+ * This function retrieves the physical entries that back the given
+ * device (virtual) address.
+ *
+ */
+int proc_mgr_virt_to_phys(void *handle, u32 da, u32 *mapped_entries,
+ u32 num_of_entries)
+{
+ int retval = 0;
+ struct proc_mgr_object *proc_mgr_handle =
+ (struct proc_mgr_object *)handle;
+
+ WARN_ON(mutex_lock_interruptible(proc_mgr_obj_state.gate_handle));
+
+ /* Map to host address space. */
+ retval = processor_virt_to_phys(proc_mgr_handle->proc_handle, da,
+ mapped_entries, num_of_entries);
+ WARN_ON(retval < 0);
+ mutex_unlock(proc_mgr_obj_state.gate_handle);
+ return retval;
+}
+EXPORT_SYMBOL(proc_mgr_virt_to_phys);
+
diff --git a/drivers/dsp/syslink/procmgr/procmgr_drv.c b/drivers/dsp/syslink/procmgr/procmgr_drv.c
new file mode 100644
index 000000000000..54c72ab011de
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/procmgr_drv.c
@@ -0,0 +1,759 @@
+/*
+ * procmgr_drv.c
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#include <generated/autoconf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+
+/* Module headers */
+#include <procmgr.h>
+#include "procmgr_drvdefs.h"
+
+#define PROCMGR_NAME "syslink-procmgr"
+
+static char *driver_name = PROCMGR_NAME;
+
+static s32 driver_major;
+
+static s32 driver_minor;
+
+struct procmgr_dev {
+ struct cdev cdev;
+};
+
+struct platform_device *omap_proc_dev;
+static struct platform_device *procmgr_pdev;
+static struct procmgr_dev *procmgr_device;
+
+static struct class *proc_mgr_class;
+
+
+/** ====================================
+ * Forward declarations of internal functions
+ * ====================================
+ */
+/* Linux driver function to open the driver object. */
+static int proc_mgr_drv_open(struct inode *inode, struct file *filp);
+
+/* Linux driver function to close the driver object. */
+static int proc_mgr_drv_release(struct inode *inode, struct file *filp);
+
+/* Linux driver function to invoke the APIs through ioctl. */
+static int proc_mgr_drv_ioctl(struct inode *inode,
+ struct file *filp,
+ unsigned int cmd,
+ unsigned long args);
+
+/* Linux driver function to map memory regions to user space. */
+static int proc_mgr_drv_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* Module initialization function for Linux driver. */
+static int __init proc_mgr_drv_initialize_module(void);
+
+/* Module finalization function for Linux driver. */
+static void __exit proc_mgr_drv_finalize_module(void);
+
+/* Platform driver probe function */
+static int __devinit proc_mgr_probe(struct platform_device *pdev);
+
+/* Platform driver remove function */
+static int __devexit proc_mgr_remove(struct platform_device *pdev);
+
+/*
+ * File operations for the ProcMgr driver; the APIs are invoked
+ * through ioctl.
+ */
+static const struct file_operations procmgr_fops = {
+ .open = proc_mgr_drv_open,
+ .ioctl = proc_mgr_drv_ioctl,
+ .release = proc_mgr_drv_release,
+ .mmap = proc_mgr_drv_mmap,
+} ;
+
+static struct platform_driver procmgr_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = PROCMGR_NAME,
+ },
+ .probe = proc_mgr_probe,
+ .remove = __devexit_p(proc_mgr_remove),
+ .shutdown = NULL,
+ .suspend = NULL,
+ .resume = NULL,
+};
+
+/*
+ * Linux specific function to open the driver.
+ */
+static int proc_mgr_drv_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/*
+ * Linux driver function to close the driver object.
+ */
+static int proc_mgr_drv_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/*
+ * Linux driver function to invoke the APIs through ioctl.
+ */
+static int proc_mgr_drv_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long args)
+{
+ int retval = 0;
+ struct proc_mgr_cmd_args *cmd_args = (struct proc_mgr_cmd_args *)args;
+ struct proc_mgr_cmd_args command_args;
+
+ switch (cmd) {
+ case CMD_PROCMGR_GETCONFIG:
+ {
+ struct proc_mgr_cmd_args_get_config *src_args =
+ (struct proc_mgr_cmd_args_get_config *)args;
+ struct proc_mgr_config cfg;
+
+ /* copy_from_user is not needed for proc_mgr_get_config,
+ * since the user's config is not used.
+ */
+ proc_mgr_get_config(&cfg);
+
+ retval = copy_to_user((void *)(src_args->cfg),
+ (const void *)&cfg,
+ sizeof(struct proc_mgr_config));
+
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_SETUP:
+ {
+ struct proc_mgr_cmd_args_setup *src_args =
+ (struct proc_mgr_cmd_args_setup *)args;
+ struct proc_mgr_config cfg;
+
+ retval = copy_from_user((void *)&cfg,
+ (const void *)(src_args->cfg),
+ sizeof(struct proc_mgr_config));
+
+ /* This check is needed at run-time also since it
+ * depends on run environment.
+ * It must not be optimized out.
+ */
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+
+ retval = proc_mgr_setup(&cfg);
+ }
+ break;
+
+ case CMD_PROCMGR_DESTROY:
+ {
+ retval = proc_mgr_destroy();
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_PARAMS_INIT:
+ {
+ struct proc_mgr_cmd_args_params_init src_args;
+ struct proc_mgr_params params;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_params_init));
+
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+
+ proc_mgr_params_init(src_args.handle, &params);
+
+ /* Copy only the params to user-side */
+ retval = copy_to_user((void *)(src_args.params),
+ (const void *)&params,
+ sizeof(struct proc_mgr_params));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_CREATE:
+ {
+ struct proc_mgr_cmd_args_create src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_create));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+
+ src_args.handle = proc_mgr_create(src_args.proc_id,
+ &(src_args.params));
+ if (src_args.handle == NULL) {
+ retval = -EFAULT;
+ goto func_exit;
+ }
+ retval = copy_to_user((void *)(args),
+ (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_create));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_DELETE:
+ {
+ struct proc_mgr_cmd_args_delete src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_delete));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+
+ retval = proc_mgr_delete(&(src_args.handle));
+ }
+ break;
+
+ case CMD_PROCMGR_OPEN:
+ {
+ struct proc_mgr_cmd_args_open src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_open));
+
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_open(&(src_args.handle),
+ src_args.proc_id);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = proc_mgr_get_proc_info(src_args.handle,
+ &(src_args.proc_info));
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(args), (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_open));
+ WARN_ON(retval);
+ }
+ break;
+
+ case CMD_PROCMGR_CLOSE:
+ {
+ struct proc_mgr_cmd_args_close src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_close));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_close(&(src_args.handle));
+ }
+ break;
+
+ case CMD_PROCMGR_GETATTACHPARAMS:
+ {
+ struct proc_mgr_cmd_args_get_attach_params src_args;
+ struct proc_mgr_attach_params params;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_get_attach_params));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ proc_mgr_get_attach_params(src_args.handle, &params);
+ retval = copy_to_user((void *)(src_args.params),
+ (const void *)&params,
+ sizeof(struct proc_mgr_attach_params));
+ WARN_ON(retval);
+ }
+ break;
+
+ case CMD_PROCMGR_ATTACH:
+ {
+ struct proc_mgr_cmd_args_attach src_args;
+ struct proc_mgr_attach_params params;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_attach));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ /* Copy params from user-side. */
+ retval = copy_from_user((void *)&params,
+ (const void *)(src_args.params),
+ sizeof(struct proc_mgr_attach_params));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_attach(src_args.handle, &params);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ /* Get memory information. */
+ retval = proc_mgr_get_proc_info(src_args.handle,
+ &(src_args.proc_info));
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(args),
+ (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_attach));
+ }
+ break;
+
+ case CMD_PROCMGR_DETACH:
+ {
+ struct proc_mgr_cmd_args_detach src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_detach));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_detach(src_args.handle);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ }
+ break;
+
+ case CMD_PROCMGR_GETSTARTPARAMS:
+ {
+ struct proc_mgr_cmd_args_get_start_params src_args;
+ struct proc_mgr_start_params params;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_get_start_params));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ proc_mgr_get_start_params(src_args.handle, &params);
+ retval = copy_to_user((void *)(src_args.params),
+ (const void *)&params,
+ sizeof(struct proc_mgr_start_params));
+ WARN_ON(retval);
+ }
+ break;
+
+ case CMD_PROCMGR_START:
+ {
+ struct proc_mgr_cmd_args_start src_args;
+ struct proc_mgr_start_params params;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_start));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ /* Copy params from user-side. */
+ retval = copy_from_user((void *)&params,
+ (const void *)(src_args.params),
+ sizeof(struct proc_mgr_start_params));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_start(src_args.handle,
+ src_args.entry_point, &params);
+
+ WARN_ON(retval);
+ }
+ break;
+
+ case CMD_PROCMGR_STOP:
+ {
+ struct proc_mgr_cmd_args_stop src_args;
+ struct proc_mgr_stop_params params;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_stop));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ /* Copy params from user-side. */
+ retval = copy_from_user((void *)&params,
+ (const void *)(src_args.params),
+ sizeof(struct proc_mgr_stop_params));
+
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_stop(src_args.handle, &params);
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_GETSTATE:
+ {
+ struct proc_mgr_cmd_args_get_state src_args;
+ enum proc_mgr_state procmgrstate;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_get_state));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ procmgrstate = proc_mgr_get_state(src_args.handle);
+ src_args.proc_mgr_state = procmgrstate;
+ retval = copy_to_user((void *)(args), (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_get_state));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_READ:
+ {
+ struct proc_mgr_cmd_args_read src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_read));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_read(src_args.handle,
+ src_args.proc_addr, &(src_args.num_bytes),
+ src_args.buffer);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(args),
+ (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_read));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_WRITE:
+ {
+ struct proc_mgr_cmd_args_write src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_write));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_write(src_args.handle,
+ src_args.proc_addr, &(src_args.num_bytes),
+ src_args.buffer);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(args),
+ (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_write));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_CONTROL:
+ {
+ struct proc_mgr_cmd_args_control src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_control));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_control(src_args.handle,
+ src_args.cmd, src_args.arg);
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_TRANSLATEADDR:
+ {
+ struct proc_mgr_cmd_args_translate_addr src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_translate_addr));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_translate_addr(src_args.handle,
+ &(src_args.dst_addr), src_args.dst_addr_type,
+ src_args.src_addr, src_args.src_addr_type);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(args),
+ (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_translate_addr));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_MAP:
+ {
+ struct proc_mgr_cmd_args_map src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_map));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_map(src_args.handle,
+ src_args.proc_addr, src_args.size,
+ &(src_args.mapped_addr),
+ &(src_args.mapped_size),
+ src_args.map_attribs);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(args),
+ (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_map));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_UNMAP:
+ {
+ struct proc_mgr_cmd_args_unmap src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_unmap));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_unmap(src_args.handle,
+ (src_args.mapped_addr));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_REGISTERNOTIFY:
+ {
+ struct proc_mgr_cmd_args_register_notify src_args;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_register_notify));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_register_notify(src_args.handle,
+ src_args.callback_fxn,
+ src_args.args, src_args.state);
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_GETPROCINFO:
+ {
+ struct proc_mgr_cmd_args_get_proc_info src_args;
+ struct proc_mgr_proc_info proc_info;
+
+ /* Copy the full args from user-side. */
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_get_proc_info));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_get_proc_info
+ (src_args.handle, &proc_info);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(src_args.proc_info),
+ (const void *) &proc_info,
+ sizeof(struct proc_mgr_proc_info));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ case CMD_PROCMGR_GETVIRTTOPHYS:
+ {
+ struct proc_mgr_cmd_args_get_virt_to_phys src_args;
+
+ retval = copy_from_user((void *)&src_args,
+ (const void *)(args),
+ sizeof(struct proc_mgr_cmd_args_get_virt_to_phys));
+ if (WARN_ON(retval != 0))
+ goto func_exit;
+ retval = proc_mgr_virt_to_phys(src_args.handle,
+ src_args.da, (src_args.mem_entries),
+ src_args.num_of_entries);
+ if (WARN_ON(retval < 0))
+ goto func_exit;
+ retval = copy_to_user((void *)(args), (const void *)&src_args,
+ sizeof(struct proc_mgr_cmd_args_get_virt_to_phys));
+ WARN_ON(retval < 0);
+ }
+ break;
+
+ default:
+ printk(KERN_ERR "PROC_MGR_DRV: unsupported ioctl command\n");
+ retval = -ENOTTY;
+ break;
+ }
+func_exit:
+ /* Set the retval and copy the common args to user-side. */
+ command_args.api_status = retval;
+ retval = copy_to_user((void *)cmd_args,
+ (const void *)&command_args, sizeof(struct proc_mgr_cmd_args));
+
+ WARN_ON(retval != 0);
+ return retval;
+}
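+
+/*
+ * Illustrative user-space invocation (a hedged sketch, not part of
+ * this driver; the device node name and proc_id value are
+ * assumptions): every command structure embeds the common args as
+ * its first member, so the caller checks api_status after the ioctl
+ * returns.
+ *
+ * struct proc_mgr_cmd_args_open args = { .proc_id = 2 };
+ * int fd = open("/dev/syslink-procmgr", O_RDWR);
+ *
+ * if (fd >= 0 && ioctl(fd, CMD_PROCMGR_OPEN, &args) == 0 &&
+ * args.commond_args.api_status == 0) {
+ * ... args.handle is now valid for further commands ...
+ * }
+ */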
+
+
+/*
+ * Driver function to map memory regions to user space.
+ */
+static int proc_mgr_drv_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
+ vma->vm_flags |= VM_RESERVED;
+
+ if (remap_pfn_range(vma,
+ vma->vm_start,
+ vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ return -EAGAIN;
+ }
+ return 0;
+}
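+
+/*
+ * Illustrative user-space counterpart (a hedged sketch; fd and len
+ * are assumed values): remap_pfn_range() above uses vma->vm_pgoff as
+ * the page frame number, so the caller passes the page-aligned
+ * physical address as the mmap() file offset.
+ *
+ * void *va = mmap(NULL, len, PROT_READ | PROT_WRITE,
+ * MAP_SHARED, fd, phys_addr);
+ */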
+
+static int __devinit proc_mgr_probe(struct platform_device *pdev)
+{
+ dev_t dev = 0;
+ int retval = -ENOMEM;
+
+ /* Trace entry into the probe function. */
+ dev_dbg(&omap_proc_dev->dev, "Entering %s function\n\n", __func__);
+
+ if (driver_major) {
+ dev = MKDEV(driver_major, driver_minor);
+ retval = register_chrdev_region(dev, 1, driver_name);
+ } else {
+ retval = alloc_chrdev_region(&dev, driver_minor, 1,
+ driver_name);
+ driver_major = MAJOR(dev);
+ }
+ if (retval < 0)
+ goto exit;
+
+ procmgr_device = kzalloc(sizeof(struct procmgr_dev), GFP_KERNEL);
+ if (!procmgr_device) {
+ retval = -ENOMEM;
+ unregister_chrdev_region(dev, 1);
+ goto exit;
+ }
+ cdev_init(&procmgr_device->cdev, &procmgr_fops);
+ procmgr_device->cdev.owner = THIS_MODULE;
+ procmgr_device->cdev.ops = &procmgr_fops;
+
+ retval = cdev_add(&procmgr_device->cdev, dev, 1);
+
+ if (retval) {
+ printk(KERN_ERR "Failed to add the syslink procmgr device\n");
+ goto exit;
+ }
+
+ /* udev support */
+ proc_mgr_class = class_create(THIS_MODULE, "syslink-procmgr");
+
+ if (IS_ERR(proc_mgr_class)) {
+ retval = PTR_ERR(proc_mgr_class);
+ printk(KERN_ERR "Error creating procmgr class\n");
+ goto exit;
+ }
+ device_create(proc_mgr_class, NULL, MKDEV(driver_major, driver_minor),
+ NULL, PROCMGR_NAME);
+
+exit:
+ dev_dbg(&omap_proc_dev->dev, "Leaving %s function\n\n", __func__);
+ return retval;
+}
+
+
+static int __devexit proc_mgr_remove(struct platform_device *pdev)
+{
+ dev_t devno = 0;
+
+ dev_dbg(&omap_proc_dev->dev, "Entering %s function\n", __func__);
+ devno = MKDEV(driver_major, driver_minor);
+ if (procmgr_device) {
+ cdev_del(&procmgr_device->cdev);
+ kfree(procmgr_device);
+ }
+ unregister_chrdev_region(devno, 1);
+ if (proc_mgr_class) {
+ /* remove the device from sysfs */
+ device_destroy(proc_mgr_class, MKDEV(driver_major,
+ driver_minor));
+ class_destroy(proc_mgr_class);
+ }
+ dev_dbg(&omap_proc_dev->dev, "Entering %s function\n", __func__);
+ return 0;
+}
+
+/*
+ * Module initialization function for Linux driver.
+ */
+static int __init proc_mgr_drv_initialize_module(void)
+{
+ int retval = -ENOMEM;
+
+ procmgr_pdev = platform_device_alloc(PROCMGR_NAME, -1);
+ if (!procmgr_pdev) {
+ printk(KERN_ERR "%s:device allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+ retval = platform_device_add(procmgr_pdev);
+ if (retval)
+ goto err_out;
+
+ /* Save the device context for future use. */
+ omap_proc_dev = procmgr_pdev;
+
+ retval = platform_driver_register(&procmgr_driver);
+ if (retval) {
+ /* The device was already added, so fully unregister it. */
+ platform_device_unregister(procmgr_pdev);
+ return retval;
+ }
+ return 0;
+
+err_out:
+ platform_device_put(procmgr_pdev);
+ return retval;
+}
+
+/*
+ * Module finalization function for Linux driver.
+ */
+static void __exit proc_mgr_drv_finalize_module(void)
+{
+ dev_dbg(&omap_proc_dev->dev, "Entering %s function\n", __func__);
+ platform_driver_unregister(&procmgr_driver);
+ dev_dbg(&omap_proc_dev->dev, "Leaving %s function\n", __func__);
+ platform_device_unregister(procmgr_pdev);
+}
+
+/*
+ * Macro calls that indicate initialization and finalization functions
+ * to the kernel.
+ */
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mugdha Kamoolkar");
+module_init(proc_mgr_drv_initialize_module);
+module_exit(proc_mgr_drv_finalize_module);
diff --git a/drivers/dsp/syslink/procmgr/procmgr_drvdefs.h b/drivers/dsp/syslink/procmgr/procmgr_drvdefs.h
new file mode 100644
index 000000000000..2be14bf7a20e
--- /dev/null
+++ b/drivers/dsp/syslink/procmgr/procmgr_drvdefs.h
@@ -0,0 +1,541 @@
+/*
+ * procmgr_drvdefs.h
+ *
+ * Syslink driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+
+#ifndef SYSLINK_PROCMGR_DRVDEFS_H
+#define SYSLINK_PROCMGR_DRVDEFS_H
+
+#include <linux/types.h>
+
+/* Module headers */
+#include <procmgr.h>
+
+
+/* =================================
+ * Macros and types
+ * =================================
+ */
+/*
+ * Base structure for ProcMgr command args. This needs to be the first
+ * field in all command args structures.
+ */
+struct proc_mgr_cmd_args {
+ int api_status;
+ /*Status of the API being called. */
+};
+
+/* --------------------------------------
+ * IOCTL command IDs for ProcMgr
+ * ---------------------------------------
+ */
+/*
+ * Base command ID for ProcMgr
+ */
+#define PROCMGR_BASE_CMD 0x100
+
+/*
+ * Command for ProcMgr_getConfig
+ */
+#define CMD_PROCMGR_GETCONFIG (PROCMGR_BASE_CMD + 1)
+
+/*
+ * Command for ProcMgr_setup
+ */
+#define CMD_PROCMGR_SETUP (PROCMGR_BASE_CMD + 2)
+
+/*
+ * Command for ProcMgr_destroy
+ */
+#define CMD_PROCMGR_DESTROY (PROCMGR_BASE_CMD + 3)
+
+/*
+ * Command for ProcMgr_Params_init
+ */
+#define CMD_PROCMGR_PARAMS_INIT (PROCMGR_BASE_CMD + 4)
+
+/*
+ * Command for ProcMgr_create
+ */
+#define CMD_PROCMGR_CREATE (PROCMGR_BASE_CMD + 5)
+
+/*
+ * Command for ProcMgr_delete
+ */
+#define CMD_PROCMGR_DELETE (PROCMGR_BASE_CMD + 6)
+
+/*
+ * Command for ProcMgr_open
+ */
+#define CMD_PROCMGR_OPEN (PROCMGR_BASE_CMD + 7)
+
+/*
+ * Command for ProcMgr_close
+ */
+#define CMD_PROCMGR_CLOSE (PROCMGR_BASE_CMD + 8)
+
+/*
+ * Command for ProcMgr_getAttachParams
+ */
+#define CMD_PROCMGR_GETATTACHPARAMS (PROCMGR_BASE_CMD + 9)
+
+/*
+ * Command for ProcMgr_attach
+ */
+#define CMD_PROCMGR_ATTACH (PROCMGR_BASE_CMD + 10)
+
+/*
+ * Command for ProcMgr_detach
+ */
+#define CMD_PROCMGR_DETACH (PROCMGR_BASE_CMD + 11)
+
+/*
+ * Command for ProcMgr_load
+ */
+#define CMD_PROCMGR_LOAD (PROCMGR_BASE_CMD + 12)
+
+/*
+ * Command for ProcMgr_unload
+ */
+#define CMD_PROCMGR_UNLOAD (PROCMGR_BASE_CMD + 13)
+
+/*
+ * Command for ProcMgr_getStartParams
+ */
+#define CMD_PROCMGR_GETSTARTPARAMS (PROCMGR_BASE_CMD + 14)
+
+/*
+ * Command for ProcMgr_start
+ */
+#define CMD_PROCMGR_START (PROCMGR_BASE_CMD + 15)
+
+/*
+ * Command for ProcMgr_stop
+ */
+#define CMD_PROCMGR_STOP (PROCMGR_BASE_CMD + 16)
+
+/*
+ * Command for ProcMgr_getState
+ */
+#define CMD_PROCMGR_GETSTATE (PROCMGR_BASE_CMD + 17)
+
+/*
+ * Command for ProcMgr_read
+ */
+#define CMD_PROCMGR_READ (PROCMGR_BASE_CMD + 18)
+
+/*
+ * Command for ProcMgr_write
+ */
+#define CMD_PROCMGR_WRITE (PROCMGR_BASE_CMD + 19)
+
+/*
+ * Command for ProcMgr_control
+ */
+#define CMD_PROCMGR_CONTROL (PROCMGR_BASE_CMD + 20)
+
+/*
+ * Command for ProcMgr_translateAddr
+ */
+#define CMD_PROCMGR_TRANSLATEADDR (PROCMGR_BASE_CMD + 22)
+
+/*
+ * Command for ProcMgr_getSymbolAddress
+ */
+#define CMD_PROCMGR_GETSYMBOLADDRESS (PROCMGR_BASE_CMD + 23)
+
+/*
+ * Command for ProcMgr_map
+ */
+#define CMD_PROCMGR_MAP (PROCMGR_BASE_CMD + 24)
+
+/*
+ * Command for ProcMgr_registerNotify
+ */
+#define CMD_PROCMGR_REGISTERNOTIFY (PROCMGR_BASE_CMD + 25)
+
+/*
+ * Command for ProcMgr_getProcInfo
+ */
+#define CMD_PROCMGR_GETPROCINFO (PROCMGR_BASE_CMD + 26)
+
+/*
+ * Command for ProcMgr_unmap
+ */
+#define CMD_PROCMGR_UNMAP (PROCMGR_BASE_CMD + 27)
+
+/*
+ * Command for ProcMgr_getVirtToPhysPages
+ */
+#define CMD_PROCMGR_GETVIRTTOPHYS (PROCMGR_BASE_CMD + 28)
+
+
+
+
+/* ----------------------------------------------------------------------------
+ * Command arguments for ProcMgr
+ * ----------------------------------------------------------------------------
+ */
+/*
+ * Command arguments for ProcMgr_getConfig
+ */
+struct proc_mgr_cmd_args_get_config {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ struct proc_mgr_config *cfg;
+ /*Pointer to the ProcMgr module configuration structure in which the
+ default config is to be returned. */
+};
+
+/*
+ * Command arguments for ProcMgr_setup
+ */
+struct proc_mgr_cmd_args_setup {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ struct proc_mgr_config *cfg;
+ /*Optional ProcMgr module configuration. If provided as NULL, default
+ configuration is used. */
+};
+
+/*
+ * Command arguments for ProcMgr_destroy
+ */
+struct proc_mgr_cmd_args_destroy {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+};
+
+/*
+ * Command arguments for ProcMgr_Params_init
+ */
+struct proc_mgr_cmd_args_params_init {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object. */
+ struct proc_mgr_params *params;
+ /*Pointer to the ProcMgr instance params structure in which the default
+ params are to be returned. */
+};
+
+/*
+ * Command arguments for ProcMgr_create
+ */
+struct proc_mgr_cmd_args_create {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ u16 proc_id;
+ /*Processor ID represented by this ProcMgr instance */
+ struct proc_mgr_params params;
+ /*ProcMgr instance configuration parameters. */
+ void *handle;
+ /*Handle to the created ProcMgr object */
+};
+
+/*
+ * Command arguments for ProcMgr_delete
+ */
+struct proc_mgr_cmd_args_delete{
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Pointer to Handle to the ProcMgr object */
+};
+
+/*
+ * Command arguments for ProcMgr_open
+ */
+struct proc_mgr_cmd_args_open {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ u16 proc_id;
+ /*Processor ID represented by this ProcMgr instance */
+ void *handle;
+ /*Handle to the opened ProcMgr object. */
+ struct proc_mgr_proc_info proc_info;
+ /*Processor information. */
+};
+
+/*
+ * Command arguments for ProcMgr_close
+ */
+struct proc_mgr_cmd_args_close{
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ struct proc_mgr_proc_info proc_info;
+ /*Processor information. */
+};
+
+/*
+ * Command arguments for ProcMgr_getAttachParams
+ */
+struct proc_mgr_cmd_args_get_attach_params{
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object. */
+ struct proc_mgr_attach_params *params;
+ /*Pointer to the ProcMgr attach params structure in which the default
+ params are to be returned. */
+};
+
+/*
+ * Command arguments for ProcMgr_attach
+ */
+struct proc_mgr_cmd_args_attach {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object. */
+ struct proc_mgr_attach_params *params;
+ /*Optional ProcMgr attach parameters. */
+ struct proc_mgr_proc_info proc_info;
+ /*Processor information. */
+};
+
+/*
+ * Command arguments for ProcMgr_detach
+ */
+struct proc_mgr_cmd_args_detach {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ struct proc_mgr_proc_info proc_info;
+ /*Processor information. */
+};
+
+
+/*
+ * Command arguments for ProcMgr_getStartParams
+ */
+struct proc_mgr_cmd_args_get_start_params {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ u32 entry_point;
+ /*Entry point for the image */
+ struct proc_mgr_start_params *params;
+ /*Pointer to the ProcMgr start params structure in which the default
+ params are to be returned. */
+};
+
+/*
+ * Command arguments for ProcMgr_start
+ */
+struct proc_mgr_cmd_args_start {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ u32 entry_point;
+ /*Entry point for the image */
+ struct proc_mgr_start_params *params;
+ /*Optional ProcMgr start parameters. */
+};
+
+/*
+ * Command arguments for ProcMgr_stop
+ */
+struct proc_mgr_cmd_args_stop {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ struct proc_mgr_stop_params *params;
+ /*Optional ProcMgr stop parameters. */
+};
+
+/*
+ * Command arguments for ProcMgr_getState
+ */
+struct proc_mgr_cmd_args_get_state {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /* Handle to the ProcMgr object */
+ enum proc_mgr_state proc_mgr_state;
+ /*Current state of the ProcMgr object. */
+};
+
+/*
+ * Command arguments for ProcMgr_read
+ */
+struct proc_mgr_cmd_args_read {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ u32 proc_addr;
+ /*Address in the slave processor's address space of the memory region
+ read from. */
+ u32 num_bytes;
+ /*IN/OUT parameter. As an IN-parameter, it takes in the number of bytes
+ to be read. When the function returns, this parameter contains the
+ number of bytes actually read. */
+ void *buffer;
+ /*User-provided buffer in which the slave processor's memory contents
+ are to be copied. */
+};
+
+/*
+ * Command arguments for ProcMgr_write
+ */
+struct proc_mgr_cmd_args_write {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ u32 proc_addr;
+ /*Address in the slave processor's address space of the memory region
+ write into. */
+ u32 num_bytes;
+ /*IN/OUT parameter. As an IN-parameter, it takes in the number of bytes
+ to be written. When the function returns, this parameter contains the
+ number of bytes actually written. */
+ void *buffer;
+ /*User-provided buffer from which the data is to be written into the
+ slave processor's memory. */
+};
+
+/*
+ * Command arguments for ProcMgr_control
+ */
+struct proc_mgr_cmd_args_control {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ int cmd;
+ /*Device specific processor command */
+ void *arg;
+ /*Arguments specific to the type of command. */
+};
+
+/*
+ * Command arguments for ProcMgr_translateAddr
+ */
+struct proc_mgr_cmd_args_translate_addr {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ void *dst_addr;
+ /*Return parameter: Pointer to receive the translated address. */
+ enum proc_mgr_addr_type dst_addr_type;
+ /*Destination address type requested */
+ void *src_addr;
+ /*Source address in the source address space */
+ enum proc_mgr_addr_type src_addr_type;
+ /*Source address type */
+};
+
+/*
+ * Command arguments for ProcMgr_getSymbolAddress
+ */
+struct proc_mgr_cmd_args_get_symbol_address {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ u32 file_id;
+ /*ID of the file received from the load function */
+ char *symbol_name;
+ /*Name of the symbol */
+ u32 sym_value;
+ /*Return parameter: Symbol address */
+};
+
+/*
+ * Command arguments for ProcMgr_map
+ */
+struct proc_mgr_cmd_args_map {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ u32 proc_addr;
+ /*Slave address to be mapped */
+ u32 size;
+ /*Size (in bytes) of region to be mapped */
+ u32 mapped_addr;
+ /*Return parameter: Mapped address in host address space */
+ u32 mapped_size;
+ /*Return parameter: Mapped size */
+ u32 map_attribs;
+ /*Type of mapping. */
+};
+
+/*
+ * Command arguments for ProcMgr_unmap
+ */
+struct proc_mgr_cmd_args_unmap {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ u32 mapped_addr;
+ /* Mapped address in host address space */
+};
+
+/*
+ * Command arguments for ProcMgr_registerNotify
+ */
+struct proc_mgr_cmd_args_register_notify {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ int (*callback_fxn)(u16 proc_id, void *handle,
+ enum proc_mgr_state from_state, enum proc_mgr_state to_state);
+ /*Handling function to be registered. */
+ void *args;
+ /*Optional arguments associated with the handler fxn. */
+ enum proc_mgr_state state[];
+ /*Array of target states for which registration is required. */
+};
+
+/*
+ * Command arguments for ProcMgr_getProcInfo
+ */
+struct proc_mgr_cmd_args_get_proc_info {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ struct proc_mgr_proc_info *proc_info;
+ /*Pointer to the ProcInfo object to be populated. */
+};
+
+/*
+ * Command arguments for ProcMgr_virtToPhys
+ */
+struct proc_mgr_cmd_args_get_virt_to_phys {
+ struct proc_mgr_cmd_args commond_args;
+ /*Common command args */
+ void *handle;
+ /*Handle to the ProcMgr object */
+ u32 da;
+ /*Device (virtual) address to be translated */
+ u32 *mem_entries;
+ /*Buffer in which the physical entries are returned */
+ u32 num_of_entries;
+ /*Number of entries to retrieve */
+};
+
+#endif
+