Message-Id: <1277298125-17991-4-git-send-email-ohad@wizery.com>
Date:	Wed, 23 Jun 2010 16:01:57 +0300
From:	Ohad Ben-Cohen <ohad@...ery.com>
To:	Greg KH <greg@...ah.com>
Cc:	<linux-kernel@...r.kernel.org>, <linux-omap@...r.kernel.org>,
	Hebbar Shivananda <x0hebbar@...com>,
	Ramos Falcon Ernesto <ernesto@...com>,
	Anna Suman <s-anna@...com>, Kanigeri Hari <h-kanigeri2@...com>,
	Felipe Contreras <felipe.contreras@...il.com>,
	Felipe Balbi <felipe.balbi@...ia.com>,
	Hiroshi DOYU <Hiroshi.DOYU@...ia.com>,
	Gupta Ramesh <grgupta@...com>,
	Guzman Lugo Fernando <fernando.lugo@...com>,
	Tony Lindgren <tony@...mide.com>,
	Ameya Palande <ameya.palande@...ia.com>,
	Gomez Castellanos Ivan <ivan.gomez@...com>,
	Andy Shevchenko <ext-andriy.shevchenko@...ia.com>,
	Armando Uribe De Leon <x0095078@...com>,
	Deepak Chitriki <deepak.chitriki@...com>,
	Menon Nishanth <nm@...com>,
	Phil Carmody <ext-phil.2.carmody@...ia.com>,
	Pitney Gilbert <gpitney@...com>, Bhavin Shah <bshah@...com>,
	Omar Ramirez Luna <omar.ramirez@...com>,
	Ohad Ben-Cohen <ohad@...ery.com>
Subject: [PATCH 03/11] staging: ti dspbridge: add platform manager code

From: Omar Ramirez Luna <omar.ramirez@...com>

Add TI's DSP Bridge platform manager driver sources

Signed-off-by: Omar Ramirez Luna <omar.ramirez@...com>
Signed-off-by: Kanigeri, Hari <h-kanigeri2@...com>
Signed-off-by: Ameya Palande <ameya.palande@...ia.com>
Signed-off-by: Guzman Lugo, Fernando <fernando.lugo@...com>
Signed-off-by: Hebbar, Shivananda <x0hebbar@...com>
Signed-off-by: Ramos Falcon, Ernesto <ernesto@...com>
Signed-off-by: Felipe Contreras <felipe.contreras@...il.com>
Signed-off-by: Anna, Suman <s-anna@...com>
Signed-off-by: Gupta, Ramesh <grgupta@...com>
Signed-off-by: Gomez Castellanos, Ivan <ivan.gomez@...com>
Signed-off-by: Andy Shevchenko <ext-andriy.shevchenko@...ia.com>
Signed-off-by: Armando Uribe De Leon <x0095078@...com>
Signed-off-by: Deepak Chitriki <deepak.chitriki@...com>
Signed-off-by: Menon, Nishanth <nm@...com>
Signed-off-by: Phil Carmody <ext-phil.2.carmody@...ia.com>
Signed-off-by: Ohad Ben-Cohen <ohad@...ery.com>
---
 drivers/staging/tidspbridge/pmgr/chnl.c    |  163 +++
 drivers/staging/tidspbridge/pmgr/chnlobj.h |   46 +
 drivers/staging/tidspbridge/pmgr/cmm.c     | 1172 +++++++++++++++++++
 drivers/staging/tidspbridge/pmgr/cod.c     |  658 +++++++++++
 drivers/staging/tidspbridge/pmgr/dbll.c    | 1585 ++++++++++++++++++++++++++
 drivers/staging/tidspbridge/pmgr/dev.c     | 1171 +++++++++++++++++++
 drivers/staging/tidspbridge/pmgr/dmm.c     |  533 +++++++++
 drivers/staging/tidspbridge/pmgr/dspapi.c  | 1685 ++++++++++++++++++++++++++++
 drivers/staging/tidspbridge/pmgr/io.c      |  142 +++
 drivers/staging/tidspbridge/pmgr/ioobj.h   |   38 +
 drivers/staging/tidspbridge/pmgr/msg.c     |  129 +++
 drivers/staging/tidspbridge/pmgr/msgobj.h  |   38 +
 12 files changed, 7360 insertions(+), 0 deletions(-)
 create mode 100644 drivers/staging/tidspbridge/pmgr/chnl.c
 create mode 100644 drivers/staging/tidspbridge/pmgr/chnlobj.h
 create mode 100644 drivers/staging/tidspbridge/pmgr/cmm.c
 create mode 100644 drivers/staging/tidspbridge/pmgr/cod.c
 create mode 100644 drivers/staging/tidspbridge/pmgr/dbll.c
 create mode 100644 drivers/staging/tidspbridge/pmgr/dev.c
 create mode 100644 drivers/staging/tidspbridge/pmgr/dmm.c
 create mode 100644 drivers/staging/tidspbridge/pmgr/dspapi.c
 create mode 100644 drivers/staging/tidspbridge/pmgr/io.c
 create mode 100644 drivers/staging/tidspbridge/pmgr/ioobj.h
 create mode 100644 drivers/staging/tidspbridge/pmgr/msg.c
 create mode 100644 drivers/staging/tidspbridge/pmgr/msgobj.h

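Note for reviewers (not part of the patch): below is a minimal usage sketch of the interfaces this series adds, i.e. the channel manager (chnl.c) and the shared-memory allocator (cmm.c). The device handle, the CMM manager handle and the attribute values are assumptions for illustration only; it presumes the dspbridge headers added by this series.

static int example_pmgr_usage(struct dev_object *hdev_obj,
			      struct cmm_object *hcmm_mgr)
{
	struct chnl_mgr *chnl_mgr;
	/* Attribute values below are assumed, for illustration only. */
	struct chnl_mgrattrs mgr_attrs = {
		.max_channels = 16,	/* 0 < max_channels <= CHNL_MAXCHANNELS */
		.word_size = 1,		/* only checked for being nonzero here */
	};
	struct cmm_attrs alloc_attrs = { .ul_seg_id = 1 };	/* SM segment 1 */
	void *buf_pa, *buf_va;
	int status;

	/* Module reference counts; both always return true in this patch. */
	chnl_init();
	cmm_init();

	/* One channel manager per board; returns -EEXIST if one exists. */
	status = chnl_create(&chnl_mgr, hdev_obj, &mgr_attrs);
	if (DSP_FAILED(status))
		goto out;

	/* Zeroed block from SM segment 1: the physical address is returned,
	 * the driver-context virtual address comes back through buf_va. */
	buf_pa = cmm_calloc_buf(hcmm_mgr, 256, &alloc_attrs, &buf_va);
	if (!buf_pa) {
		status = -ENOMEM;
		goto out_chnl;
	}

	/* ... stream data through channels backed by this SM block ... */

	cmm_free_buf(hcmm_mgr, buf_pa, alloc_attrs.ul_seg_id);
out_chnl:
	chnl_destroy(chnl_mgr);
out:
	chnl_exit();
	cmm_exit();
	return status;
}

In the full stack the CMM manager handle would typically be obtained via dev_get_cmm_mgr() or cmm_get_handle() rather than being passed in directly.
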
diff --git a/drivers/staging/tidspbridge/pmgr/chnl.c b/drivers/staging/tidspbridge/pmgr/chnl.c
new file mode 100644
index 0000000..bc969d8
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/chnl.c
@@ -0,0 +1,163 @@
+/*
+ * chnl.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP API channel interface: multiplexes data streams through the single
+ * physical link managed by a Bridge driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*  ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/sync.h>
+
+/*  ----------------------------------- Platform Manager */
+#include <dspbridge/proc.h>
+#include <dspbridge/dev.h>
+
+/*  ----------------------------------- Others */
+#include <dspbridge/chnlpriv.h>
+#include <chnlobj.h>
+
+/*  ----------------------------------- This */
+#include <dspbridge/chnl.h>
+
+/*  ----------------------------------- Globals */
+static u32 refs;
+
+/*
+ *  ======== chnl_create ========
+ *  Purpose:
+ *      Create a channel manager object, responsible for opening new channels
+ *      and closing old ones for a given 'Bridge board.
+ */
+int chnl_create(OUT struct chnl_mgr **phChnlMgr,
+		       struct dev_object *hdev_obj,
+		       IN CONST struct chnl_mgrattrs *pMgrAttrs)
+{
+	int status;
+	struct chnl_mgr *hchnl_mgr;
+	struct chnl_mgr_ *chnl_mgr_obj = NULL;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phChnlMgr != NULL);
+	DBC_REQUIRE(pMgrAttrs != NULL);
+
+	*phChnlMgr = NULL;
+
+	/* Validate args: */
+	if ((0 < pMgrAttrs->max_channels) &&
+	    (pMgrAttrs->max_channels <= CHNL_MAXCHANNELS))
+		status = 0;
+	else if (pMgrAttrs->max_channels == 0)
+		status = -EINVAL;
+	else
+		status = -ECHRNG;
+
+	if (pMgrAttrs->word_size == 0)
+		status = -EINVAL;
+
+	if (DSP_SUCCEEDED(status)) {
+		status = dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
+		if (DSP_SUCCEEDED(status) && hchnl_mgr != NULL)
+			status = -EEXIST;
+
+	}
+
+	if (DSP_SUCCEEDED(status)) {
+		struct bridge_drv_interface *intf_fxns;
+		dev_get_intf_fxns(hdev_obj, &intf_fxns);
+		/* Let Bridge channel module finish the create: */
+		status = (*intf_fxns->pfn_chnl_create) (&hchnl_mgr, hdev_obj,
+							pMgrAttrs);
+		if (DSP_SUCCEEDED(status)) {
+			/* Fill in DSP API channel module's fields of the
+			 * chnl_mgr structure */
+			chnl_mgr_obj = (struct chnl_mgr_ *)hchnl_mgr;
+			chnl_mgr_obj->intf_fxns = intf_fxns;
+			/* Finally, return the new channel manager handle: */
+			*phChnlMgr = hchnl_mgr;
+		}
+	}
+
+	DBC_ENSURE(DSP_FAILED(status) || chnl_mgr_obj);
+
+	return status;
+}
+
+/*
+ *  ======== chnl_destroy ========
+ *  Purpose:
+ *      Close all open channels, and destroy the channel manager.
+ */
+int chnl_destroy(struct chnl_mgr *hchnl_mgr)
+{
+	struct chnl_mgr_ *chnl_mgr_obj = (struct chnl_mgr_ *)hchnl_mgr;
+	struct bridge_drv_interface *intf_fxns;
+	int status;
+
+	DBC_REQUIRE(refs > 0);
+
+	if (chnl_mgr_obj) {
+		intf_fxns = chnl_mgr_obj->intf_fxns;
+		/* Let Bridge channel module destroy the chnl_mgr: */
+		status = (*intf_fxns->pfn_chnl_destroy) (hchnl_mgr);
+	} else {
+		status = -EFAULT;
+	}
+
+	return status;
+}
+
+/*
+ *  ======== chnl_exit ========
+ *  Purpose:
+ *      Discontinue usage of the CHNL module.
+ */
+void chnl_exit(void)
+{
+	DBC_REQUIRE(refs > 0);
+
+	refs--;
+
+	DBC_ENSURE(refs >= 0);
+}
+
+/*
+ *  ======== chnl_init ========
+ *  Purpose:
+ *      Initialize the CHNL module's private state.
+ */
+bool chnl_init(void)
+{
+	bool ret = true;
+
+	DBC_REQUIRE(refs >= 0);
+
+	if (ret)
+		refs++;
+
+	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+
+	return ret;
+}
diff --git a/drivers/staging/tidspbridge/pmgr/chnlobj.h b/drivers/staging/tidspbridge/pmgr/chnlobj.h
new file mode 100644
index 0000000..6795e0a
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/chnlobj.h
@@ -0,0 +1,46 @@
+/*
+ * chnlobj.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Structure subcomponents of channel class library channel objects which
+ * are exposed to the DSP API by the Bridge driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef CHNLOBJ_
+#define CHNLOBJ_
+
+#include <dspbridge/chnldefs.h>
+#include <dspbridge/dspdefs.h>
+
+/*
+ *  This struct is the first field in a chnl_mgr struct. Other, implementation
+ *  specific fields follow this structure in memory.
+ */
+struct chnl_mgr_ {
+	/* These must be the first fields in a chnl_mgr struct: */
+
+	/* Function interface to Bridge driver. */
+	struct bridge_drv_interface *intf_fxns;
+};
+
+/*
+ *  This struct is the first field in a chnl_object struct. Other,
+ *  implementation specific fields follow this structure in memory.
+ */
+struct chnl_object_ {
+	/* These must be the first fields in a chnl_object struct: */
+	struct chnl_mgr_ *chnl_mgr_obj;	/* Pointer back to channel manager. */
+};
+
+#endif /* CHNLOBJ_ */
diff --git a/drivers/staging/tidspbridge/pmgr/cmm.c b/drivers/staging/tidspbridge/pmgr/cmm.c
new file mode 100644
index 0000000..7aa4ca4
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/cmm.c
@@ -0,0 +1,1172 @@
+/*
+ * cmm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * The Communication (Shared) Memory Management (CMM) module provides
+ * shared memory management services for DSP/BIOS Bridge data streaming
+ * and messaging.
+ *
+ * Multiple shared memory segments can be registered with CMM.
+ * Each registered SM segment is represented by a SM "allocator" that
+ * describes a block of physically contiguous shared memory used for
+ * future allocations by CMM.
+ *
+ * Memory is coalesced back to the appropriate heap when a buffer is
+ * freed.
+ *
+ * Notes:
+ *   Va: Virtual address.
+ *   Pa: Physical or kernel system address.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/list.h>
+#include <dspbridge/sync.h>
+#include <dspbridge/utildefs.h>
+
+/*  ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+#include <dspbridge/proc.h>
+
+/*  ----------------------------------- This */
+#include <dspbridge/cmm.h>
+
+/*  ----------------------------------- Defines, Data Structures, Typedefs */
+#define NEXT_PA(pnode)   (pnode->dw_pa + pnode->ul_size)
+
+/* Other bus/platform translations */
+#define DSPPA2GPPPA(base, x, y)  ((x)+(y))
+#define GPPPA2DSPPA(base, x, y)  ((x)-(y))
+
+/*
+ *  Allocators define a block of contiguous memory used for future allocations.
+ *
+ *      sma - shared memory allocator.
+ *      vma - virtual memory allocator (not used).
+ */
+struct cmm_allocator {		/* sma */
+	unsigned int shm_base;	/* Start of physical SM block */
+	u32 ul_sm_size;		/* Size of SM block in bytes */
+	unsigned int dw_vm_base;	/* Start of VM block. (Dev driver
+					 * context for 'sma') */
+	u32 dw_dsp_phys_addr_offset;	/* DSP PA to GPP PA offset for this
+					 * SM space */
+	s8 c_factor;		/* DSPPa to GPPPa Conversion Factor */
+	unsigned int dw_dsp_base;	/* DSP virt base byte address */
+	u32 ul_dsp_size;	/* DSP seg size in bytes */
+	struct cmm_object *hcmm_mgr;	/* back ref to parent mgr */
+	/* node list of available memory */
+	struct lst_list *free_list_head;
+	/* node list of memory in use */
+	struct lst_list *in_use_list_head;
+};
+
+struct cmm_xlator {		/* Pa<->Va translator object */
+	/* CMM object this translator is associated with */
+	struct cmm_object *hcmm_mgr;
+	/*
+	 *  Client process virtual base address that corresponds to phys SM
+	 *  base address for translator's ul_seg_id.
+	 *  Only 1 segment ID currently supported.
+	 */
+	unsigned int dw_virt_base;	/* virtual base address */
+	u32 ul_virt_size;	/* size of virt space in bytes */
+	u32 ul_seg_id;		/* Segment Id */
+};
+
+/* CMM Mgr */
+struct cmm_object {
+	/*
+	 * The CMM lock serializes access to the memory manager across threads.
+	 */
+	struct mutex cmm_lock;	/* Lock to access cmm mgr */
+	struct lst_list *node_free_list_head;	/* Free list of memory nodes */
+	u32 ul_min_block_size;	/* Min SM block; default 16 bytes */
+	u32 dw_page_size;	/* Memory Page size (1k/4k) */
+	/* GPP SM segment ptrs */
+	struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
+};
+
+/* Default CMM Mgr attributes */
+static struct cmm_mgrattrs cmm_dfltmgrattrs = {
+	/* ul_min_block_size, min block size(bytes) allocated by cmm mgr */
+	16
+};
+
+/* Default allocation attributes */
+static struct cmm_attrs cmm_dfltalctattrs = {
+	1		/* ul_seg_id, default segment Id for allocator */
+};
+
+/* Address translator default attrs */
+static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
+	/* ul_seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */
+	1,
+	0,			/* dw_dsp_bufs */
+	0,			/* dw_dsp_buf_size */
+	NULL,			/* vm_base */
+	0,			/* dw_vm_size */
+};
+
+/* SM node representing a block of memory. */
+struct cmm_mnode {
+	struct list_head link;	/* must be 1st element */
+	u32 dw_pa;		/* Phys addr */
+	u32 dw_va;		/* Virtual address in device process context */
+	u32 ul_size;		/* SM block size in bytes */
+	u32 client_proc;	/* Process that allocated this mem block */
+};
+
+/*  ----------------------------------- Globals */
+static u32 refs;		/* module reference count */
+
+/*  ----------------------------------- Function Prototypes */
+static void add_to_free_list(struct cmm_allocator *allocator,
+			     struct cmm_mnode *pnode);
+static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
+					   u32 ul_seg_id);
+static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
+					u32 usize);
+static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
+				  u32 dw_va, u32 ul_size);
+/* get available slot for new allocator */
+static s32 get_slot(struct cmm_object *hcmm_mgr);
+static void un_register_gppsm_seg(struct cmm_allocator *psma);
+
+/*
+ *  ======== cmm_calloc_buf ========
+ *  Purpose:
+ *      Allocate a SM buffer, zero contents, and return the physical address
+ *      and optional driver context virtual address (pp_buf_va).
+ *
+ *      The freelist is sorted in increasing size order. Get the first
+ *      block that satisfies the request and, if the remainder is large
+ *      enough, sort it back onto the freelist. The kept block is placed
+ *      on the inUseList.
+ */
+void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
+		     struct cmm_attrs *pattrs, OUT void **pp_buf_va)
+{
+	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
+	void *buf_pa = NULL;
+	struct cmm_mnode *pnode = NULL;
+	struct cmm_mnode *new_node = NULL;
+	struct cmm_allocator *allocator = NULL;
+	u32 delta_size;
+	u8 *pbyte = NULL;
+	s32 cnt;
+
+	if (pattrs == NULL)
+		pattrs = &cmm_dfltalctattrs;
+
+	if (pp_buf_va != NULL)
+		*pp_buf_va = NULL;
+
+	if (cmm_mgr_obj && (usize != 0)) {
+		if (pattrs->ul_seg_id > 0) {
+			/* SegId > 0 is SM */
+			/* get the allocator object for this segment id */
+			allocator =
+			    get_allocator(cmm_mgr_obj, pattrs->ul_seg_id);
+			/* keep block size a multiple of ul_min_block_size */
+			usize =
+			    ((usize - 1) & ~(cmm_mgr_obj->ul_min_block_size -
+					     1))
+			    + cmm_mgr_obj->ul_min_block_size;
+			mutex_lock(&cmm_mgr_obj->cmm_lock);
+			pnode = get_free_block(allocator, usize);
+		}
+		if (pnode) {
+			delta_size = (pnode->ul_size - usize);
+			if (delta_size >= cmm_mgr_obj->ul_min_block_size) {
+				/* create a new block with the leftovers and
+				 * add to freelist */
+				new_node =
+				    get_node(cmm_mgr_obj, pnode->dw_pa + usize,
+					     pnode->dw_va + usize,
+					     (u32) delta_size);
+				/* leftovers go free */
+				add_to_free_list(allocator, new_node);
+				/* adjust our node's size */
+				pnode->ul_size = usize;
+			}
+			/* Tag node with client process requesting allocation
+			 * We'll need to free up a process's alloc'd SM if the
+			 * client process goes away.
+			 */
+			/* Return TGID instead of process handle */
+			pnode->client_proc = current->tgid;
+
+			/* put our node on InUse list */
+			lst_put_tail(allocator->in_use_list_head,
+				     (struct list_head *)pnode);
+			buf_pa = (void *)pnode->dw_pa;	/* physical address */
+			/* clear mem */
+			pbyte = (u8 *) pnode->dw_va;
+			for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
+				*pbyte = 0;
+
+			if (pp_buf_va != NULL) {
+				/* Virtual address */
+				*pp_buf_va = (void *)pnode->dw_va;
+			}
+		}
+		mutex_unlock(&cmm_mgr_obj->cmm_lock);
+	}
+	return buf_pa;
+}
+
+/*
+ *  ======== cmm_create ========
+ *  Purpose:
+ *      Create a communication memory manager object.
+ */
+int cmm_create(OUT struct cmm_object **ph_cmm_mgr,
+		      struct dev_object *hdev_obj,
+		      IN CONST struct cmm_mgrattrs *pMgrAttrs)
+{
+	struct cmm_object *cmm_obj = NULL;
+	int status = 0;
+	struct util_sysinfo sys_info;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(ph_cmm_mgr != NULL);
+
+	*ph_cmm_mgr = NULL;
+	/* create, zero, and tag a cmm mgr object */
+	cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
+	if (cmm_obj != NULL) {
+		if (pMgrAttrs == NULL)
+			pMgrAttrs = &cmm_dfltmgrattrs;	/* set defaults */
+
+		/* 4 bytes minimum */
+		DBC_ASSERT(pMgrAttrs->ul_min_block_size >= 4);
+		/* save away smallest block allocation for this cmm mgr */
+		cmm_obj->ul_min_block_size = pMgrAttrs->ul_min_block_size;
+		/* save away the systems memory page size */
+		sys_info.dw_page_size = PAGE_SIZE;
+		sys_info.dw_allocation_granularity = PAGE_SIZE;
+		sys_info.dw_number_of_processors = 1;
+		if (DSP_SUCCEEDED(status)) {
+			cmm_obj->dw_page_size = sys_info.dw_page_size;
+		} else {
+			cmm_obj->dw_page_size = 0;
+			status = -EPERM;
+		}
+		/* Note: DSP SM seg table(aDSPSMSegTab[]) zero'd by
+		 * MEM_ALLOC_OBJECT */
+		if (DSP_SUCCEEDED(status)) {
+			/* create node free list */
+			cmm_obj->node_free_list_head =
+					kzalloc(sizeof(struct lst_list),
+							GFP_KERNEL);
+			if (cmm_obj->node_free_list_head == NULL)
+				status = -ENOMEM;
+			else
+				INIT_LIST_HEAD(&cmm_obj->
+					       node_free_list_head->head);
+		}
+		if (DSP_SUCCEEDED(status))
+			mutex_init(&cmm_obj->cmm_lock);
+
+		if (DSP_SUCCEEDED(status))
+			*ph_cmm_mgr = cmm_obj;
+		else
+			cmm_destroy(cmm_obj, true);
+
+	} else {
+		status = -ENOMEM;
+	}
+	return status;
+}
+
+/*
+ *  ======== cmm_destroy ========
+ *  Purpose:
+ *      Release the communication memory manager resources.
+ */
+int cmm_destroy(struct cmm_object *hcmm_mgr, bool bForce)
+{
+	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
+	struct cmm_info temp_info;
+	int status = 0;
+	s32 slot_seg;
+	struct cmm_mnode *pnode;
+
+	DBC_REQUIRE(refs > 0);
+	if (!hcmm_mgr) {
+		status = -EFAULT;
+		return status;
+	}
+	mutex_lock(&cmm_mgr_obj->cmm_lock);
+	/* If not force then fail if outstanding allocations exist */
+	if (!bForce) {
+		/* Check for outstanding memory allocations */
+		status = cmm_get_info(hcmm_mgr, &temp_info);
+		if (DSP_SUCCEEDED(status)) {
+			if (temp_info.ul_total_in_use_cnt > 0) {
+				/* outstanding allocations */
+				status = -EPERM;
+			}
+		}
+	}
+	if (DSP_SUCCEEDED(status)) {
+		/* UnRegister SM allocator */
+		for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
+			if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) {
+				un_register_gppsm_seg
+				    (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]);
+				/* Set slot to NULL for future reuse */
+				cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL;
+			}
+		}
+	}
+	if (cmm_mgr_obj->node_free_list_head != NULL) {
+		/* Free the free nodes */
+		while (!LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
+			pnode = (struct cmm_mnode *)
+			    lst_get_head(cmm_mgr_obj->node_free_list_head);
+			kfree(pnode);
+		}
+		/* delete NodeFreeList list */
+		kfree(cmm_mgr_obj->node_free_list_head);
+	}
+	mutex_unlock(&cmm_mgr_obj->cmm_lock);
+	if (DSP_SUCCEEDED(status)) {
+		/* delete CS & cmm mgr object */
+		mutex_destroy(&cmm_mgr_obj->cmm_lock);
+		kfree(cmm_mgr_obj);
+	}
+	return status;
+}
+
+/*
+ *  ======== cmm_exit ========
+ *  Purpose:
+ *      Discontinue usage of module; free resources when reference count
+ *      reaches 0.
+ */
+void cmm_exit(void)
+{
+	DBC_REQUIRE(refs > 0);
+
+	refs--;
+}
+
+/*
+ *  ======== cmm_free_buf ========
+ *  Purpose:
+ *      Free the given buffer.
+ */
+int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa,
+			u32 ul_seg_id)
+{
+	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
+	int status = -EFAULT;
+	struct cmm_mnode *mnode_obj = NULL;
+	struct cmm_allocator *allocator = NULL;
+	struct cmm_attrs *pattrs;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(buf_pa != NULL);
+
+	if (ul_seg_id == 0) {
+		pattrs = &cmm_dfltalctattrs;
+		ul_seg_id = pattrs->ul_seg_id;
+	}
+	if (!hcmm_mgr || !(ul_seg_id > 0)) {
+		status = -EFAULT;
+		return status;
+	}
+	/* get the allocator for this segment id */
+	allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
+	if (allocator != NULL) {
+		mutex_lock(&cmm_mgr_obj->cmm_lock);
+		mnode_obj =
+		    (struct cmm_mnode *)lst_first(allocator->in_use_list_head);
+		while (mnode_obj) {
+			if ((u32) buf_pa == mnode_obj->dw_pa) {
+				/* Found it */
+				lst_remove_elem(allocator->in_use_list_head,
+						(struct list_head *)mnode_obj);
+				/* back to freelist */
+				add_to_free_list(allocator, mnode_obj);
+				status = 0;	/* all right! */
+				break;
+			}
+			/* next node. */
+			mnode_obj = (struct cmm_mnode *)
+			    lst_next(allocator->in_use_list_head,
+				     (struct list_head *)mnode_obj);
+		}
+		mutex_unlock(&cmm_mgr_obj->cmm_lock);
+	}
+	return status;
+}
+
+/*
+ *  ======== cmm_get_handle ========
+ *  Purpose:
+ *      Return the communication memory manager object for this device.
+ *      This is typically called from the client process.
+ */
+int cmm_get_handle(void *hprocessor, OUT struct cmm_object ** ph_cmm_mgr)
+{
+	int status = 0;
+	struct dev_object *hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(ph_cmm_mgr != NULL);
+	if (hprocessor != NULL)
+		status = proc_get_dev_object(hprocessor, &hdev_obj);
+	else
+		hdev_obj = dev_get_first();	/* default */
+
+	if (DSP_SUCCEEDED(status))
+		status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);
+
+	return status;
+}
+
+/*
+ *  ======== cmm_get_info ========
+ *  Purpose:
+ *      Return the current memory utilization information.
+ */
+int cmm_get_info(struct cmm_object *hcmm_mgr,
+			OUT struct cmm_info *cmm_info_obj)
+{
+	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
+	u32 ul_seg;
+	int status = 0;
+	struct cmm_allocator *altr;
+	struct cmm_mnode *mnode_obj = NULL;
+
+	DBC_REQUIRE(cmm_info_obj != NULL);
+
+	if (!hcmm_mgr) {
+		status = -EFAULT;
+		return status;
+	}
+	mutex_lock(&cmm_mgr_obj->cmm_lock);
+	cmm_info_obj->ul_num_gppsm_segs = 0;	/* # of SM segments */
+	/* Total # of outstanding alloc */
+	cmm_info_obj->ul_total_in_use_cnt = 0;
+	/* min block size */
+	cmm_info_obj->ul_min_block_size = cmm_mgr_obj->ul_min_block_size;
+	/* check SM memory segments */
+	for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
+		/* get the allocator object for this segment id */
+		altr = get_allocator(cmm_mgr_obj, ul_seg);
+		if (altr != NULL) {
+			cmm_info_obj->ul_num_gppsm_segs++;
+			cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_pa =
+			    altr->shm_base - altr->ul_dsp_size;
+			cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size =
+			    altr->ul_dsp_size + altr->ul_sm_size;
+			cmm_info_obj->seg_info[ul_seg - 1].dw_gpp_base_pa =
+			    altr->shm_base;
+			cmm_info_obj->seg_info[ul_seg - 1].ul_gpp_size =
+			    altr->ul_sm_size;
+			cmm_info_obj->seg_info[ul_seg - 1].dw_dsp_base_va =
+			    altr->dw_dsp_base;
+			cmm_info_obj->seg_info[ul_seg - 1].ul_dsp_size =
+			    altr->ul_dsp_size;
+			cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_va =
+			    altr->dw_vm_base - altr->ul_dsp_size;
+			cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt = 0;
+			mnode_obj = (struct cmm_mnode *)
+			    lst_first(altr->in_use_list_head);
+			/* Count inUse blocks */
+			while (mnode_obj) {
+				cmm_info_obj->ul_total_in_use_cnt++;
+				cmm_info_obj->seg_info[ul_seg -
+						       1].ul_in_use_cnt++;
+				/* next node. */
+				mnode_obj = (struct cmm_mnode *)
+				    lst_next(altr->in_use_list_head,
+					     (struct list_head *)mnode_obj);
+			}
+		}
+	}			/* end for */
+	mutex_unlock(&cmm_mgr_obj->cmm_lock);
+	return status;
+}
+
+/*
+ *  ======== cmm_init ========
+ *  Purpose:
+ *      Initializes private state of CMM module.
+ */
+bool cmm_init(void)
+{
+	bool ret = true;
+
+	DBC_REQUIRE(refs >= 0);
+	if (ret)
+		refs++;
+
+	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+
+	return ret;
+}
+
+/*
+ *  ======== cmm_register_gppsm_seg ========
+ *  Purpose:
+ *      Register a block of SM with the CMM to be used for later GPP SM
+ *      allocations.
+ */
+int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
+				  u32 dw_gpp_base_pa, u32 ul_size,
+				  u32 dwDSPAddrOffset, s8 c_factor,
+				  u32 dw_dsp_base, u32 ul_dsp_size,
+				  u32 *pulSegId, u32 dw_gpp_base_va)
+{
+	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
+	struct cmm_allocator *psma = NULL;
+	int status = 0;
+	struct cmm_mnode *new_node;
+	s32 slot_seg;
+
+	DBC_REQUIRE(ul_size > 0);
+	DBC_REQUIRE(pulSegId != NULL);
+	DBC_REQUIRE(dw_gpp_base_pa != 0);
+	DBC_REQUIRE(dw_gpp_base_va != 0);
+	DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
+		    (c_factor >= CMM_SUBFROMDSPPA));
+	dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dwDSPAddrOffset %x "
+		"dw_dsp_base %x ul_dsp_size %x dw_gpp_base_va %x\n", __func__,
+		dw_gpp_base_pa, ul_size, dwDSPAddrOffset, dw_dsp_base,
+		ul_dsp_size, dw_gpp_base_va);
+	if (!hcmm_mgr) {
+		status = -EFAULT;
+		return status;
+	}
+	/* make sure we have room for another allocator */
+	mutex_lock(&cmm_mgr_obj->cmm_lock);
+	slot_seg = get_slot(cmm_mgr_obj);
+	if (slot_seg < 0) {
+		/* get a slot number */
+		status = -EPERM;
+		goto func_end;
+	}
+	/* Check if input ul_size is big enough to alloc at least one block */
+	if (DSP_SUCCEEDED(status)) {
+		if (ul_size < cmm_mgr_obj->ul_min_block_size) {
+			status = -EINVAL;
+			goto func_end;
+		}
+	}
+	if (DSP_SUCCEEDED(status)) {
+		/* create, zero, and tag an SM allocator object */
+		psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
+	}
+	if (psma != NULL) {
+		psma->hcmm_mgr = hcmm_mgr;	/* ref to parent */
+		psma->shm_base = dw_gpp_base_pa;	/* SM Base phys */
+		psma->ul_sm_size = ul_size;	/* SM segment size in bytes */
+		psma->dw_vm_base = dw_gpp_base_va;
+		psma->dw_dsp_phys_addr_offset = dwDSPAddrOffset;
+		psma->c_factor = c_factor;
+		psma->dw_dsp_base = dw_dsp_base;
+		psma->ul_dsp_size = ul_dsp_size;
+		if (psma->dw_vm_base == 0) {
+			status = -EPERM;
+			goto func_end;
+		}
+		if (DSP_SUCCEEDED(status)) {
+			/* return the actual segment identifier */
+			*pulSegId = (u32) slot_seg + 1;
+			/* create memory free list */
+			psma->free_list_head = kzalloc(sizeof(struct lst_list),
+								GFP_KERNEL);
+			if (psma->free_list_head == NULL) {
+				status = -ENOMEM;
+				goto func_end;
+			}
+			INIT_LIST_HEAD(&psma->free_list_head->head);
+		}
+		if (DSP_SUCCEEDED(status)) {
+			/* create memory in-use list */
+			psma->in_use_list_head = kzalloc(sizeof(struct
+							lst_list), GFP_KERNEL);
+			if (psma->in_use_list_head == NULL) {
+				status = -ENOMEM;
+				goto func_end;
+			}
+			INIT_LIST_HEAD(&psma->in_use_list_head->head);
+		}
+		if (DSP_SUCCEEDED(status)) {
+			/* Get a mem node for this hunk-o-memory */
+			new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
+					    psma->dw_vm_base, ul_size);
+			/* Place node on the SM allocator's free list */
+			if (new_node) {
+				lst_put_tail(psma->free_list_head,
+					     (struct list_head *)new_node);
+			} else {
+				status = -ENOMEM;
+				goto func_end;
+			}
+		}
+		if (DSP_FAILED(status)) {
+			/* Cleanup allocator */
+			un_register_gppsm_seg(psma);
+		}
+	} else {
+		status = -ENOMEM;
+		goto func_end;
+	}
+	/* make entry */
+	if (DSP_SUCCEEDED(status))
+		cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;
+
+func_end:
+	mutex_unlock(&cmm_mgr_obj->cmm_lock);
+	return status;
+}
+
+/*
+ *  ======== cmm_un_register_gppsm_seg ========
+ *  Purpose:
+ *      UnRegister GPP SM segments with the CMM.
+ */
+int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
+				     u32 ul_seg_id)
+{
+	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
+	int status = 0;
+	struct cmm_allocator *psma;
+	u32 ul_id = ul_seg_id;
+
+	DBC_REQUIRE(ul_seg_id > 0);
+	if (hcmm_mgr) {
+		if (ul_seg_id == CMM_ALLSEGMENTS)
+			ul_id = 1;
+
+		if ((ul_id > 0) && (ul_id <= CMM_MAXGPPSEGS)) {
+			while (ul_id <= CMM_MAXGPPSEGS) {
+				mutex_lock(&cmm_mgr_obj->cmm_lock);
+				/* slot = seg_id-1 */
+				psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
+				if (psma != NULL) {
+					un_register_gppsm_seg(psma);
+					/* Set alctr ptr to NULL for future
+					 * reuse */
+					cmm_mgr_obj->pa_gppsm_seg_tab[ul_id -
+								      1] = NULL;
+				} else if (ul_seg_id != CMM_ALLSEGMENTS) {
+					status = -EPERM;
+				}
+				mutex_unlock(&cmm_mgr_obj->cmm_lock);
+				if (ul_seg_id != CMM_ALLSEGMENTS)
+					break;
+
+				ul_id++;
+			}	/* end while */
+		} else {
+			status = -EINVAL;
+		}
+	} else {
+		status = -EFAULT;
+	}
+	return status;
+}
+
+/*
+ *  ======== un_register_gppsm_seg ========
+ *  Purpose:
+ *      UnRegister the SM allocator by freeing all its resources and
+ *      nulling cmm mgr table entry.
+ *  Note:
+ *      This routine is always called within cmm lock crit sect.
+ */
+static void un_register_gppsm_seg(struct cmm_allocator *psma)
+{
+	struct cmm_mnode *mnode_obj = NULL;
+	struct cmm_mnode *next_node = NULL;
+
+	DBC_REQUIRE(psma != NULL);
+	if (psma->free_list_head != NULL) {
+		/* free nodes on free list */
+		mnode_obj = (struct cmm_mnode *)lst_first(psma->free_list_head);
+		while (mnode_obj) {
+			next_node =
+			    (struct cmm_mnode *)lst_next(psma->free_list_head,
+							 (struct list_head *)
+							 mnode_obj);
+			lst_remove_elem(psma->free_list_head,
+					(struct list_head *)mnode_obj);
+			kfree((void *)mnode_obj);
+			/* next node. */
+			mnode_obj = next_node;
+		}
+		kfree(psma->free_list_head);	/* delete freelist */
+		/* free nodes on InUse list */
+		mnode_obj =
+		    (struct cmm_mnode *)lst_first(psma->in_use_list_head);
+		while (mnode_obj) {
+			next_node =
+			    (struct cmm_mnode *)lst_next(psma->in_use_list_head,
+							 (struct list_head *)
+							 mnode_obj);
+			lst_remove_elem(psma->in_use_list_head,
+					(struct list_head *)mnode_obj);
+			kfree((void *)mnode_obj);
+			/* next node. */
+			mnode_obj = next_node;
+		}
+		kfree(psma->in_use_list_head);	/* delete InUse list */
+	}
+	if ((void *)psma->dw_vm_base != NULL)
+		MEM_UNMAP_LINEAR_ADDRESS((void *)psma->dw_vm_base);
+
+	/* Free allocator itself */
+	kfree(psma);
+}
+
+/*
+ *  ======== get_slot ========
+ *  Purpose:
+ *      An available slot # is returned. Returns negative on failure.
+ */
+static s32 get_slot(struct cmm_object *cmm_mgr_obj)
+{
+	s32 slot_seg = -1;	/* neg on failure */
+	DBC_REQUIRE(cmm_mgr_obj != NULL);
+	/* get first available slot in cmm mgr SMSegTab[] */
+	for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
+		if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
+			break;
+
+	}
+	if (slot_seg == CMM_MAXGPPSEGS)
+		slot_seg = -1;	/* failed */
+
+	return slot_seg;
+}
+
+/*
+ *  ======== get_node ========
+ *  Purpose:
+ *      Get a memory node from freelist or create a new one.
+ */
+static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
+				  u32 dw_va, u32 ul_size)
+{
+	struct cmm_mnode *pnode = NULL;
+
+	DBC_REQUIRE(cmm_mgr_obj != NULL);
+	DBC_REQUIRE(dw_pa != 0);
+	DBC_REQUIRE(dw_va != 0);
+	DBC_REQUIRE(ul_size != 0);
+	/* Check cmm mgr's node freelist */
+	if (LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
+		pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
+	} else {
+		/* surely a valid element */
+		pnode = (struct cmm_mnode *)
+		    lst_get_head(cmm_mgr_obj->node_free_list_head);
+	}
+	if (pnode) {
+		lst_init_elem((struct list_head *)pnode);	/* set self */
+		pnode->dw_pa = dw_pa;	/* Physical addr of start of block */
+		pnode->dw_va = dw_va;	/* Virtual   "            " */
+		pnode->ul_size = ul_size;	/* Size of block */
+	}
+	return pnode;
+}
+
+/*
+ *  ======== delete_node ========
+ *  Purpose:
+ *      Put a memory node on the cmm nodelist for later use.
+ *      Doesn't actually delete the node. Heap thrashing friendly.
+ */
+static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
+{
+	DBC_REQUIRE(pnode != NULL);
+	lst_init_elem((struct list_head *)pnode);	/* init .self ptr */
+	lst_put_tail(cmm_mgr_obj->node_free_list_head,
+		     (struct list_head *)pnode);
+}
+
+/*
+ * ====== get_free_block ========
+ *  Purpose:
+ *      Scan the free block list and return the first block that satisfies
+ *      the size.
+ */
+static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
+					u32 usize)
+{
+	if (allocator) {
+		struct cmm_mnode *mnode_obj = (struct cmm_mnode *)
+		    lst_first(allocator->free_list_head);
+		while (mnode_obj) {
+			if (usize <= (u32) mnode_obj->ul_size) {
+				lst_remove_elem(allocator->free_list_head,
+						(struct list_head *)mnode_obj);
+				return mnode_obj;
+			}
+			/* next node. */
+			mnode_obj = (struct cmm_mnode *)
+			    lst_next(allocator->free_list_head,
+				     (struct list_head *)mnode_obj);
+		}
+	}
+	return NULL;
+}
+
+/*
+ *  ======== add_to_free_list ========
+ *  Purpose:
+ *      Coalesce node into the freelist in ascending size order.
+ */
+static void add_to_free_list(struct cmm_allocator *allocator,
+			     struct cmm_mnode *pnode)
+{
+	struct cmm_mnode *node_prev = NULL;
+	struct cmm_mnode *node_next = NULL;
+	struct cmm_mnode *mnode_obj;
+	u32 dw_this_pa;
+	u32 dw_next_pa;
+
+	DBC_REQUIRE(pnode != NULL);
+	DBC_REQUIRE(allocator != NULL);
+	dw_this_pa = pnode->dw_pa;
+	dw_next_pa = NEXT_PA(pnode);
+	mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
+	while (mnode_obj) {
+		if (dw_this_pa == NEXT_PA(mnode_obj)) {
+			/* found the block ahead of this one */
+			node_prev = mnode_obj;
+		} else if (dw_next_pa == mnode_obj->dw_pa) {
+			node_next = mnode_obj;
+		}
+		if ((node_prev == NULL) || (node_next == NULL)) {
+			/* next node. */
+			mnode_obj = (struct cmm_mnode *)
+			    lst_next(allocator->free_list_head,
+				     (struct list_head *)mnode_obj);
+		} else {
+			/* got 'em */
+			break;
+		}
+	}			/* while */
+	if (node_prev != NULL) {
+		/* combine with previous block */
+		lst_remove_elem(allocator->free_list_head,
+				(struct list_head *)node_prev);
+		/* grow node to hold both */
+		pnode->ul_size += node_prev->ul_size;
+		pnode->dw_pa = node_prev->dw_pa;
+		pnode->dw_va = node_prev->dw_va;
+		/* place node on mgr nodeFreeList */
+		delete_node((struct cmm_object *)allocator->hcmm_mgr,
+			    node_prev);
+	}
+	if (node_next != NULL) {
+		/* combine with next block */
+		lst_remove_elem(allocator->free_list_head,
+				(struct list_head *)node_next);
+		/* grow the node */
+		pnode->ul_size += node_next->ul_size;
+		/* place node on mgr nodeFreeList */
+		delete_node((struct cmm_object *)allocator->hcmm_mgr,
+			    node_next);
+	}
+	/* Now, let's add to freelist in increasing size order */
+	mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
+	while (mnode_obj) {
+		if (pnode->ul_size <= mnode_obj->ul_size)
+			break;
+
+		/* next node. */
+		mnode_obj =
+		    (struct cmm_mnode *)lst_next(allocator->free_list_head,
+						 (struct list_head *)mnode_obj);
+	}
+	/* if mnode_obj is NULL then add our pnode to the end of the freelist */
+	if (mnode_obj == NULL) {
+		lst_put_tail(allocator->free_list_head,
+			     (struct list_head *)pnode);
+	} else {
+		/* insert our node before the current traversed node */
+		lst_insert_before(allocator->free_list_head,
+				  (struct list_head *)pnode,
+				  (struct list_head *)mnode_obj);
+	}
+}
+
+/*
+ * ======== get_allocator ========
+ *  Purpose:
+ *      Return the allocator for the given SM Segid.
+ *      SegIds:  1,2,3..max.
+ */
+static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
+					   u32 ul_seg_id)
+{
+	struct cmm_allocator *allocator = NULL;
+
+	DBC_REQUIRE(cmm_mgr_obj != NULL);
+	DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));
+	allocator = cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
+	if (allocator != NULL) {
+		/* make sure it's for real */
+		if (!allocator) {
+			allocator = NULL;
+			DBC_ASSERT(false);
+		}
+	}
+	return allocator;
+}
+
+/*
+ *  The CMM_Xlator[xxx] routines below are used by Node and Stream
+ *  to perform SM address translation to the client process address space.
+ *  A "translator" object is created by a node/stream for each SM seg used.
+ */
+
+/*
+ *  ======== cmm_xlator_create ========
+ *  Purpose:
+ *      Create an address translator object.
+ */
+int cmm_xlator_create(OUT struct cmm_xlatorobject **phXlator,
+			     struct cmm_object *hcmm_mgr,
+			     struct cmm_xlatorattrs *pXlatorAttrs)
+{
+	struct cmm_xlator *xlator_object = NULL;
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phXlator != NULL);
+	DBC_REQUIRE(hcmm_mgr != NULL);
+
+	*phXlator = NULL;
+	if (pXlatorAttrs == NULL)
+		pXlatorAttrs = &cmm_dfltxlatorattrs;	/* set defaults */
+
+	xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
+	if (xlator_object != NULL) {
+		xlator_object->hcmm_mgr = hcmm_mgr;	/* ref back to CMM */
+		/* SM seg_id */
+		xlator_object->ul_seg_id = pXlatorAttrs->ul_seg_id;
+	} else {
+		status = -ENOMEM;
+	}
+	if (DSP_SUCCEEDED(status))
+		*phXlator = (struct cmm_xlatorobject *)xlator_object;
+
+	return status;
+}
+
+/*
+ *  ======== cmm_xlator_delete ========
+ *  Purpose:
+ *      Free the Xlator resources.
+ *      VM gets freed later.
+ */
+int cmm_xlator_delete(struct cmm_xlatorobject *xlator, bool bForce)
+{
+	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+
+	if (xlator_obj)
+		kfree(xlator_obj);
+	else
+		status = -EFAULT;
+
+	return status;
+}
+
+/*
+ *  ======== cmm_xlator_alloc_buf ========
+ */
+void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *pVaBuf,
+			   u32 uPaSize)
+{
+	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
+	void *pbuf = NULL;
+	struct cmm_attrs attrs;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(xlator != NULL);
+	DBC_REQUIRE(xlator_obj->hcmm_mgr != NULL);
+	DBC_REQUIRE(pVaBuf != NULL);
+	DBC_REQUIRE(uPaSize > 0);
+	DBC_REQUIRE(xlator_obj->ul_seg_id > 0);
+
+	if (xlator_obj) {
+		attrs.ul_seg_id = xlator_obj->ul_seg_id;
+		*(volatile u32 *)pVaBuf = 0;
+		/* Alloc SM */
+		pbuf =
+		    cmm_calloc_buf(xlator_obj->hcmm_mgr, uPaSize, &attrs, NULL);
+		if (pbuf) {
+			/* convert to translator(node/strm) process Virtual
+			 * address */
+			*(volatile u32 **)pVaBuf =
+			    (u32 *) cmm_xlator_translate(xlator,
+							 pbuf, CMM_PA2VA);
+		}
+	}
+	return pbuf;
+}
+
+/*
+ *  ======== cmm_xlator_free_buf ========
+ *  Purpose:
+ *      Free the given SM buffer and descriptor.
+ *      Does not free virtual memory.
+ */
+int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *pBufVa)
+{
+	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
+	int status = -EPERM;
+	void *buf_pa = NULL;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(pBufVa != NULL);
+	DBC_REQUIRE(xlator_obj->ul_seg_id > 0);
+
+	if (xlator_obj) {
+		/* convert Va to Pa so we can free it. */
+		buf_pa = cmm_xlator_translate(xlator, pBufVa, CMM_VA2PA);
+		if (buf_pa) {
+			status = cmm_free_buf(xlator_obj->hcmm_mgr, buf_pa,
+					      xlator_obj->ul_seg_id);
+			if (DSP_FAILED(status)) {
+				/* Uh oh, this shouldn't happen. Descriptor
+				 * gone! */
+				DBC_ASSERT(false);	/* CMM is leaking mem */
+			}
+		}
+	}
+	return status;
+}
+
+/*
+ *  ======== cmm_xlator_info ========
+ *  Purpose:
+ *      Set/Get translator info.
+ */
+int cmm_xlator_info(struct cmm_xlatorobject *xlator, IN OUT u8 ** paddr,
+			   u32 ul_size, u32 uSegId, bool set_info)
+{
+	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(paddr != NULL);
+	DBC_REQUIRE((uSegId > 0) && (uSegId <= CMM_MAXGPPSEGS));
+
+	if (xlator_obj) {
+		if (set_info) {
+			/* set translator's virtual address range */
+			xlator_obj->dw_virt_base = (u32) *paddr;
+			xlator_obj->ul_virt_size = ul_size;
+		} else {	/* return virt base address */
+			*paddr = (u8 *) xlator_obj->dw_virt_base;
+		}
+	} else {
+		status = -EFAULT;
+	}
+	return status;
+}
+
+/*
+ *  ======== cmm_xlator_translate ========
+ */
+void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
+			   enum cmm_xlatetype xType)
+{
+	u32 dw_addr_xlate = 0;
+	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
+	struct cmm_object *cmm_mgr_obj = NULL;
+	struct cmm_allocator *allocator = NULL;
+	u32 dw_offset = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(paddr != NULL);
+	DBC_REQUIRE((xType >= CMM_VA2PA) && (xType <= CMM_DSPPA2PA));
+
+	if (!xlator_obj)
+		goto loop_cont;
+
+	cmm_mgr_obj = (struct cmm_object *)xlator_obj->hcmm_mgr;
+	/* get this translator's default SM allocator */
+	DBC_ASSERT(xlator_obj->ul_seg_id > 0);
+	allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->ul_seg_id - 1];
+	if (!allocator)
+		goto loop_cont;
+
+	if ((xType == CMM_VA2DSPPA) || (xType == CMM_VA2PA) ||
+	    (xType == CMM_PA2VA)) {
+		if (xType == CMM_PA2VA) {
+			/* Gpp Va = Va Base + offset */
+			dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base -
+							   allocator->
+							   ul_dsp_size);
+			dw_addr_xlate = xlator_obj->dw_virt_base + dw_offset;
+			/* Check if translated Va base is in range */
+			if ((dw_addr_xlate < xlator_obj->dw_virt_base) ||
+			    (dw_addr_xlate >=
+			     (xlator_obj->dw_virt_base +
+			      xlator_obj->ul_virt_size))) {
+				dw_addr_xlate = 0;	/* bad address */
+			}
+		} else {
+			/* Gpp PA =  Gpp Base + offset */
+			dw_offset =
+			    (u8 *) paddr - (u8 *) xlator_obj->dw_virt_base;
+			dw_addr_xlate =
+			    allocator->shm_base - allocator->ul_dsp_size +
+			    dw_offset;
+		}
+	} else {
+		dw_addr_xlate = (u32) paddr;
+	}
+	/* Now convert the address to the target physical address if needed */
+	if ((xType == CMM_VA2DSPPA) || (xType == CMM_PA2DSPPA)) {
+		/* Got Gpp Pa now, convert to DSP Pa */
+		dw_addr_xlate =
+		    GPPPA2DSPPA((allocator->shm_base - allocator->ul_dsp_size),
+				dw_addr_xlate,
+				allocator->dw_dsp_phys_addr_offset *
+				allocator->c_factor);
+	} else if (xType == CMM_DSPPA2PA) {
+		/* Got DSP Pa, convert to GPP Pa */
+		dw_addr_xlate =
+		    DSPPA2GPPPA(allocator->shm_base - allocator->ul_dsp_size,
+				dw_addr_xlate,
+				allocator->dw_dsp_phys_addr_offset *
+				allocator->c_factor);
+	}
+loop_cont:
+	return (void *)dw_addr_xlate;
+}
diff --git a/drivers/staging/tidspbridge/pmgr/cod.c b/drivers/staging/tidspbridge/pmgr/cod.c
new file mode 100644
index 0000000..f9c0f30
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/cod.c
@@ -0,0 +1,658 @@
+/*
+ * cod.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * This module implements DSP code management for the DSP/BIOS Bridge
+ * environment. It is mostly a thin wrapper around the DBLL loader.
+ *
+ * This module provides an interface for loading both static and
+ * dynamic code objects onto DSP systems.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*  ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/ldr.h>
+
+/*  ----------------------------------- Platform Manager */
+/* Include appropriate loader header file */
+#include <dspbridge/dbll.h>
+
+/*  ----------------------------------- This */
+#include <dspbridge/cod.h>
+
+/* magic number for handle validation */
+#define MAGIC	 0xc001beef
+
+/* macro to validate COD manager handles */
+#define IS_VALID(h)    ((h) != NULL && (h)->ul_magic == MAGIC)
+
+/*
+ *  ======== cod_manager ========
+ */
+struct cod_manager {
+	struct dbll_tar_obj *target;
+	struct dbll_library_obj *base_lib;
+	bool loaded;		/* Base library loaded? */
+	u32 ul_entry;
+	struct ldr_module *dll_obj;
+	struct dbll_fxns fxns;
+	struct dbll_attrs attrs;
+	char sz_zl_file[COD_MAXPATHLENGTH];
+	u32 ul_magic;
+};
+
+/*
+ *  ======== cod_libraryobj ========
+ */
+struct cod_libraryobj {
+	struct dbll_library_obj *dbll_lib;
+	struct cod_manager *cod_mgr;
+};
+
+static u32 refs = 0L;
+
+static struct dbll_fxns ldr_fxns = {
+	(dbll_close_fxn) dbll_close,
+	(dbll_create_fxn) dbll_create,
+	(dbll_delete_fxn) dbll_delete,
+	(dbll_exit_fxn) dbll_exit,
+	(dbll_get_attrs_fxn) dbll_get_attrs,
+	(dbll_get_addr_fxn) dbll_get_addr,
+	(dbll_get_c_addr_fxn) dbll_get_c_addr,
+	(dbll_get_sect_fxn) dbll_get_sect,
+	(dbll_init_fxn) dbll_init,
+	(dbll_load_fxn) dbll_load,
+	(dbll_load_sect_fxn) dbll_load_sect,
+	(dbll_open_fxn) dbll_open,
+	(dbll_read_sect_fxn) dbll_read_sect,
+	(dbll_set_attrs_fxn) dbll_set_attrs,
+	(dbll_unload_fxn) dbll_unload,
+	(dbll_unload_sect_fxn) dbll_unload_sect,
+};
+
+static bool no_op(void);
+
+/*
+ * File operations (originally were under kfile.c)
+ */
+static s32 cod_f_close(struct file *filp)
+{
+	/* Check for valid handle */
+	if (!filp)
+		return -EFAULT;
+
+	filp_close(filp, NULL);
+
+	/* success */
+	return 0;
+}
+
+static struct file *cod_f_open(CONST char *psz_file_name, CONST char *pszMode)
+{
+	mm_segment_t fs;
+	struct file *filp;
+
+	fs = get_fs();
+	set_fs(get_ds());
+
+	/* ignore given mode and open file as read-only */
+	filp = filp_open(psz_file_name, O_RDONLY, 0);
+
+	if (IS_ERR(filp))
+		filp = NULL;
+
+	set_fs(fs);
+
+	return filp;
+}
+
+static s32 cod_f_read(void __user *pbuffer, s32 size, s32 cCount,
+		      struct file *filp)
+{
+	/* check for valid file handle */
+	if (!filp)
+		return -EFAULT;
+
+	if ((size > 0) && (cCount > 0) && pbuffer) {
+		u32 dw_bytes_read;
+		mm_segment_t fs;
+
+		/* read from file */
+		fs = get_fs();
+		set_fs(get_ds());
+		dw_bytes_read = filp->f_op->read(filp, pbuffer, size * cCount,
+						 &(filp->f_pos));
+		set_fs(fs);
+
+		if (!dw_bytes_read)
+			return -EBADF;
+
+		return dw_bytes_read / size;
+	}
+
+	return -EINVAL;
+}
+
+static s32 cod_f_seek(struct file *filp, s32 lOffset, s32 cOrigin)
+{
+	loff_t dw_cur_pos;
+
+	/* check for valid file handle */
+	if (!filp)
+		return -EFAULT;
+
+	/* based on the origin flag, move the internal pointer */
+	dw_cur_pos = filp->f_op->llseek(filp, lOffset, cOrigin);
+
+	if ((s32) dw_cur_pos < 0)
+		return -EPERM;
+
+	/* success */
+	return 0;
+}
+
+static s32 cod_f_tell(struct file *filp)
+{
+	loff_t dw_cur_pos;
+
+	if (!filp)
+		return -EFAULT;
+
+	/* Get current position */
+	dw_cur_pos = filp->f_op->llseek(filp, 0, SEEK_CUR);
+
+	if ((s32) dw_cur_pos < 0)
+		return -EPERM;
+
+	return dw_cur_pos;
+}
+
+/*
+ *  ======== cod_close ========
+ */
+void cod_close(struct cod_libraryobj *lib)
+{
+	struct cod_manager *hmgr;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(lib != NULL);
+	DBC_REQUIRE(IS_VALID(((struct cod_libraryobj *)lib)->cod_mgr));
+
+	hmgr = lib->cod_mgr;
+	hmgr->fxns.close_fxn(lib->dbll_lib);
+
+	kfree(lib);
+}
+
+/*
+ *  ======== cod_create ========
+ *  Purpose:
+ *      Create an object to manage code on a DSP system.
+ *      This object can be used to load an initial program image with
+ *      arguments that can later be expanded with
+ *      dynamically loaded object files.
+ *
+ */
+int cod_create(OUT struct cod_manager **phMgr, char *pstrDummyFile,
+		      IN OPTIONAL CONST struct cod_attrs *attrs)
+{
+	struct cod_manager *mgr_new;
+	struct dbll_attrs zl_attrs;
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phMgr != NULL);
+
+	/* assume failure */
+	*phMgr = NULL;
+
+	/* we don't support non-default attrs yet */
+	if (attrs != NULL)
+		return -ENOSYS;
+
+	mgr_new = kzalloc(sizeof(struct cod_manager), GFP_KERNEL);
+	if (mgr_new == NULL)
+		return -ENOMEM;
+
+	mgr_new->ul_magic = MAGIC;
+
+	/* Set up loader functions */
+	mgr_new->fxns = ldr_fxns;
+
+	/* initialize the ZL module */
+	mgr_new->fxns.init_fxn();
+
+	zl_attrs.alloc = (dbll_alloc_fxn) no_op;
+	zl_attrs.free = (dbll_free_fxn) no_op;
+	zl_attrs.fread = (dbll_read_fxn) cod_f_read;
+	zl_attrs.fseek = (dbll_seek_fxn) cod_f_seek;
+	zl_attrs.ftell = (dbll_tell_fxn) cod_f_tell;
+	zl_attrs.fclose = (dbll_f_close_fxn) cod_f_close;
+	zl_attrs.fopen = (dbll_f_open_fxn) cod_f_open;
+	zl_attrs.sym_lookup = NULL;
+	zl_attrs.base_image = true;
+	zl_attrs.log_write = NULL;
+	zl_attrs.log_write_handle = NULL;
+	zl_attrs.write = NULL;
+	zl_attrs.rmm_handle = NULL;
+	zl_attrs.input_params = NULL;
+	zl_attrs.sym_handle = NULL;
+	zl_attrs.sym_arg = NULL;
+
+	mgr_new->attrs = zl_attrs;
+
+	status = mgr_new->fxns.create_fxn(&mgr_new->target, &zl_attrs);
+
+	if (DSP_FAILED(status)) {
+		cod_delete(mgr_new);
+		return -ESPIPE;
+	}
+
+	/* return the new manager */
+	*phMgr = mgr_new;
+
+	return 0;
+}
+
+/*
+ *  ======== cod_delete ========
+ *  Purpose:
+ *      Delete a code manager object.
+ */
+void cod_delete(struct cod_manager *hmgr)
+{
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(IS_VALID(hmgr));
+
+	if (hmgr->base_lib) {
+		if (hmgr->loaded)
+			hmgr->fxns.unload_fxn(hmgr->base_lib, &hmgr->attrs);
+
+		hmgr->fxns.close_fxn(hmgr->base_lib);
+	}
+	if (hmgr->target) {
+		hmgr->fxns.delete_fxn(hmgr->target);
+		hmgr->fxns.exit_fxn();
+	}
+	hmgr->ul_magic = ~MAGIC;
+	kfree(hmgr);
+}
+
+/*
+ *  ======== cod_exit ========
+ *  Purpose:
+ *      Discontinue usage of the COD module.
+ *
+ */
+void cod_exit(void)
+{
+	DBC_REQUIRE(refs > 0);
+
+	refs--;
+
+	DBC_ENSURE(refs >= 0);
+}
+
+/*
+ *  ======== cod_get_base_lib ========
+ *  Purpose:
+ *      Get handle to the base image DBL library.
+ */
+int cod_get_base_lib(struct cod_manager *cod_mgr_obj,
+			    struct dbll_library_obj **plib)
+{
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(IS_VALID(cod_mgr_obj));
+	DBC_REQUIRE(plib != NULL);
+
+	*plib = (struct dbll_library_obj *)cod_mgr_obj->base_lib;
+
+	return status;
+}
+
+/*
+ *  ======== cod_get_base_name ========
+ */
+int cod_get_base_name(struct cod_manager *cod_mgr_obj, char *pszName,
+			     u32 usize)
+{
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(IS_VALID(cod_mgr_obj));
+	DBC_REQUIRE(pszName != NULL);
+
+	if (usize <= COD_MAXPATHLENGTH)
+		strncpy(pszName, cod_mgr_obj->sz_zl_file, usize);
+	else
+		status = -EPERM;
+
+	return status;
+}
+
+/*
+ *  ======== cod_get_entry ========
+ *  Purpose:
+ *      Retrieve the entry point of a loaded DSP program image
+ *
+ */
+int cod_get_entry(struct cod_manager *cod_mgr_obj, u32 *pulEntry)
+{
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(IS_VALID(cod_mgr_obj));
+	DBC_REQUIRE(pulEntry != NULL);
+
+	*pulEntry = cod_mgr_obj->ul_entry;
+
+	return 0;
+}
+
+/*
+ *  ======== cod_get_loader ========
+ *  Purpose:
+ *      Get handle to the DBLL loader.
+ */
+int cod_get_loader(struct cod_manager *cod_mgr_obj,
+			  struct dbll_tar_obj **phLoader)
+{
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(IS_VALID(cod_mgr_obj));
+	DBC_REQUIRE(phLoader != NULL);
+
+	*phLoader = (struct dbll_tar_obj *)cod_mgr_obj->target;
+
+	return status;
+}
+
+/*
+ *  ======== cod_get_section ========
+ *  Purpose:
+ *      Retrieve the starting address and length of a section in the COFF file
+ *      given the section name.
+ */
+int cod_get_section(struct cod_libraryobj *lib, IN char *pstrSect,
+			   OUT u32 *puAddr, OUT u32 *puLen)
+{
+	struct cod_manager *cod_mgr_obj;
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(lib != NULL);
+	DBC_REQUIRE(IS_VALID(lib->cod_mgr));
+	DBC_REQUIRE(pstrSect != NULL);
+	DBC_REQUIRE(puAddr != NULL);
+	DBC_REQUIRE(puLen != NULL);
+
+	*puAddr = 0;
+	*puLen = 0;
+	if (lib != NULL) {
+		cod_mgr_obj = lib->cod_mgr;
+		status = cod_mgr_obj->fxns.get_sect_fxn(lib->dbll_lib, pstrSect,
+							puAddr, puLen);
+	} else {
+		status = -ESPIPE;
+	}
+
+	DBC_ENSURE(DSP_SUCCEEDED(status) || ((*puAddr == 0) && (*puLen == 0)));
+
+	return status;
+}
+
+/*
+ *  ======== cod_get_sym_value ========
+ *  Purpose:
+ *      Retrieve the value for the specified symbol. The symbol is first
+ *      searched for literally and then, if not found, searched for as a
+ *      C symbol.
+ *
+ */
+int cod_get_sym_value(struct cod_manager *hmgr, char *pstrSym,
+			     u32 *pul_value)
+{
+	struct dbll_sym_val *dbll_sym;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(IS_VALID(hmgr));
+	DBC_REQUIRE(pstrSym != NULL);
+	DBC_REQUIRE(pul_value != NULL);
+
+	dev_dbg(bridge, "%s: hmgr: %p pstrSym: %s pul_value: %p\n",
+		__func__, hmgr, pstrSym, pul_value);
+	if (hmgr->base_lib) {
+		if (!hmgr->fxns.
+		    get_addr_fxn(hmgr->base_lib, pstrSym, &dbll_sym)) {
+			if (!hmgr->fxns.
+			    get_c_addr_fxn(hmgr->base_lib, pstrSym, &dbll_sym))
+				return -ESPIPE;
+		}
+	} else {
+		return -ESPIPE;
+	}
+
+	*pul_value = dbll_sym->value;
+
+	return 0;
+}
+
+/*
+ *  ======== cod_init ========
+ *  Purpose:
+ *      Initialize the COD module's private state.
+ *
+ */
+bool cod_init(void)
+{
+	bool ret = true;
+
+	DBC_REQUIRE(refs >= 0);
+
+	if (ret)
+		refs++;
+
+	DBC_ENSURE((ret && refs > 0) || (!ret && refs >= 0));
+	return ret;
+}
+
+/*
+ *  ======== cod_load_base ========
+ *  Purpose:
+ *      Load the initial program image, optionally with command-line arguments,
+ *      on the DSP system managed by the supplied handle. The program to be
+ *      loaded must be the first element of the args array and must be a fully
+ *      qualified pathname.
+ *  Details:
+ *      If nArgc doesn't match the number of arguments in the aArgs array,
+ *      the aArgs array is searched for a NULL terminating entry and nArgc
+ *      is recalculated to reflect it. In this way, NULL-terminated aArgs
+ *      arrays are supported even when nArgc overstates the argument count.
+ */
+int cod_load_base(struct cod_manager *hmgr, u32 nArgc, char *aArgs[],
+			 cod_writefxn pfn_write, void *pArb, char *envp[])
+{
+	dbll_flags flags;
+	struct dbll_attrs save_attrs;
+	struct dbll_attrs new_attrs;
+	int status;
+	u32 i;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(IS_VALID(hmgr));
+	DBC_REQUIRE(nArgc > 0);
+	DBC_REQUIRE(aArgs != NULL);
+	DBC_REQUIRE(aArgs[0] != NULL);
+	DBC_REQUIRE(pfn_write != NULL);
+	DBC_REQUIRE(hmgr->base_lib != NULL);
+
+	/*
+	 *  Make sure every argument counted by nArgc has a value; otherwise
+	 *  shrink nArgc to the actual number of entries in the
+	 *  NULL-terminated aArgs array.
+	 */
+	for (i = 0; i < nArgc; i++) {
+		if (aArgs[i] == NULL) {
+			nArgc = i;
+			break;
+		}
+	}
+
+	/* set the write function for this operation */
+	hmgr->fxns.get_attrs_fxn(hmgr->target, &save_attrs);
+
+	new_attrs = save_attrs;
+	new_attrs.write = (dbll_write_fxn) pfn_write;
+	new_attrs.input_params = pArb;
+	new_attrs.alloc = (dbll_alloc_fxn) no_op;
+	new_attrs.free = (dbll_free_fxn) no_op;
+	new_attrs.log_write = NULL;
+	new_attrs.log_write_handle = NULL;
+
+	/* Load the image */
+	flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB;
+	status = hmgr->fxns.load_fxn(hmgr->base_lib, flags, &new_attrs,
+				     &hmgr->ul_entry);
+	if (DSP_FAILED(status))
+		hmgr->fxns.close_fxn(hmgr->base_lib);
+
+	if (DSP_SUCCEEDED(status))
+		hmgr->loaded = true;
+	else
+		hmgr->base_lib = NULL;
+
+	return status;
+}
+
+/*
+ *  ======== cod_open ========
+ *      Open library for reading sections.
+ */
+int cod_open(struct cod_manager *hmgr, IN char *pszCoffPath,
+		    u32 flags, struct cod_libraryobj **pLib)
+{
+	int status = 0;
+	struct cod_libraryobj *lib = NULL;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(IS_VALID(hmgr));
+	DBC_REQUIRE(pszCoffPath != NULL);
+	DBC_REQUIRE(flags == COD_NOLOAD || flags == COD_SYMB);
+	DBC_REQUIRE(pLib != NULL);
+
+	*pLib = NULL;
+
+	lib = kzalloc(sizeof(struct cod_libraryobj), GFP_KERNEL);
+	if (lib == NULL)
+		status = -ENOMEM;
+
+	if (DSP_SUCCEEDED(status)) {
+		lib->cod_mgr = hmgr;
+		status = hmgr->fxns.open_fxn(hmgr->target, pszCoffPath, flags,
+					     &lib->dbll_lib);
+		if (DSP_SUCCEEDED(status))
+			*pLib = lib;
+	}
+
+	if (DSP_FAILED(status)) {
+		kfree(lib);
+		pr_err("%s: error status 0x%x, pszCoffPath: %s flags: 0x%x\n",
+		       __func__, status, pszCoffPath, flags);
+	}
+	return status;
+}
+
+/*
+ *  ======== cod_open_base ========
+ *  Purpose:
+ *      Open base image for reading sections.
+ */
+int cod_open_base(struct cod_manager *hmgr, IN char *pszCoffPath,
+			 dbll_flags flags)
+{
+	int status = 0;
+	struct dbll_library_obj *lib;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(IS_VALID(hmgr));
+	DBC_REQUIRE(pszCoffPath != NULL);
+
+	/* if we previously opened a base image, close it now */
+	if (hmgr->base_lib) {
+		if (hmgr->loaded) {
+			hmgr->fxns.unload_fxn(hmgr->base_lib, &hmgr->attrs);
+			hmgr->loaded = false;
+		}
+		hmgr->fxns.close_fxn(hmgr->base_lib);
+		hmgr->base_lib = NULL;
+	}
+	status = hmgr->fxns.open_fxn(hmgr->target, pszCoffPath, flags, &lib);
+	if (DSP_SUCCEEDED(status)) {
+		/* hang onto the library for subsequent sym table usage */
+		hmgr->base_lib = lib;
+		strncpy(hmgr->sz_zl_file, pszCoffPath, COD_MAXPATHLENGTH - 1);
+		hmgr->sz_zl_file[COD_MAXPATHLENGTH - 1] = '\0';
+	}
+
+	if (DSP_FAILED(status))
+		pr_err("%s: error status 0x%x pszCoffPath: %s\n", __func__,
+		       status, pszCoffPath);
+	return status;
+}
+
+/*
+ *  ======== cod_read_section ========
+ *  Purpose:
+ *      Retrieve the content of a code section given the section name.
+ */
+int cod_read_section(struct cod_libraryobj *lib, IN char *pstrSect,
+			    OUT char *pstrContent, IN u32 cContentSize)
+{
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(lib != NULL);
+	DBC_REQUIRE(IS_VALID(lib->cod_mgr));
+	DBC_REQUIRE(pstrSect != NULL);
+	DBC_REQUIRE(pstrContent != NULL);
+
+	if (lib != NULL)
+		status =
+		    lib->cod_mgr->fxns.read_sect_fxn(lib->dbll_lib, pstrSect,
+						     pstrContent, cContentSize);
+	else
+		status = -ESPIPE;
+
+	return status;
+}
+
+/*
+ *  ======== no_op ========
+ *  Purpose:
+ *      No Operation.
+ *
+ */
+static bool no_op(void)
+{
+	return true;
+}
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c
new file mode 100644
index 0000000..3619d53
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/dbll.c
@@ -0,0 +1,1585 @@
+/*
+ * dbll.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software;  you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*  ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+#include <dspbridge/gh.h>
+
+/*  ----------------------------------- OS Adaptation Layer */
+
+/* Dynamic loader library interface */
+#include <dspbridge/dynamic_loader.h>
+#include <dspbridge/getsection.h>
+
+/*  ----------------------------------- This */
+#include <dspbridge/dbll.h>
+#include <dspbridge/rmm.h>
+
+/* Number of buckets for symbol hash table */
+#define MAXBUCKETS 211
+
+/* Max buffer length */
+#define MAXEXPR 128
+
+#ifndef UINT32_C
+#define UINT32_C(zzz) ((uint32_t)(zzz))
+#endif
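+/* Round a size up to the next multiple of 4 bytes,
+ * e.g. DOFF_ALIGN(5) == 8 and DOFF_ALIGN(8) == 8. */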
+#define DOFF_ALIGN(x) (((x) + 3) & ~UINT32_C(3))
+
+/*
+ *  ======== dbll_tar_obj ========
+ *  A target may have one or more libraries of symbols/code/data loaded
+ *  onto it, where a library is simply the symbols/code/data contained
+ *  in a DOFF file.
+ */
+struct dbll_tar_obj {
+	struct dbll_attrs attrs;
+	struct dbll_library_obj *head;	/* List of all opened libraries */
+};
+
+/*
+ *  The following four structures wrap the dynamic loader interface types
+ *  used by the dynamic loader functions (dynamic_loader.h); each embeds the
+ *  corresponding interface as its first member and adds a pointer back to
+ *  the owning library.
+ */
+/*
+ *  ======== dbll_stream ========
+ *  Contains dynamic_loader_stream
+ */
+struct dbll_stream {
+	struct dynamic_loader_stream dl_stream;
+	struct dbll_library_obj *lib;
+};
+
+/*
+ *  ======== ldr_symbol ========
+ */
+struct ldr_symbol {
+	struct dynamic_loader_sym dl_symbol;
+	struct dbll_library_obj *lib;
+};
+
+/*
+ *  ======== dbll_alloc ========
+ */
+struct dbll_alloc {
+	struct dynamic_loader_allocate dl_alloc;
+	struct dbll_library_obj *lib;
+};
+
+/*
+ *  ======== dbll_init_obj ========
+ */
+struct dbll_init_obj {
+	struct dynamic_loader_initialize dl_init;
+	struct dbll_library_obj *lib;
+};
+
+/*
+ *  ======== DBLL_Library ========
+ *  A library handle is returned by dbll_open() and is passed to dbll_load()
+ *  to load symbols/code/data, and to dbll_unload() to remove the
+ *  symbols/code/data loaded by dbll_load().
+ */
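+
+/*
+ *  Typical call sequence (illustrative sketch; error handling omitted and
+ *  the file name is hypothetical):
+ *
+ *      dbll_init();
+ *      dbll_create(&target, &attrs);
+ *      dbll_open(target, "base_image.dof", DBLL_SYMB, &lib);
+ *      dbll_load(lib, DBLL_CODE | DBLL_DATA | DBLL_SYMB, &attrs, &entry);
+ *          ... use the loaded image and its symbol table ...
+ *      dbll_unload(lib, &attrs);
+ *      dbll_close(lib);
+ *      dbll_delete(target);
+ *      dbll_exit();
+ */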
+
+/*
+ *  ======== dbll_library_obj ========
+ */
+struct dbll_library_obj {
+	struct dbll_library_obj *next;	/* Next library in target's list */
+	struct dbll_library_obj *prev;	/* Previous in the list */
+	struct dbll_tar_obj *target_obj;	/* target for this library */
+
+	/* Objects needed by dynamic loader */
+	struct dbll_stream stream;
+	struct ldr_symbol symbol;
+	struct dbll_alloc allocate;
+	struct dbll_init_obj init;
+	void *dload_mod_obj;
+
+	char *file_name;	/* COFF file name */
+	void *fp;		/* Opaque file handle */
+	u32 entry;		/* Entry point */
+	void *desc;	/* desc of DOFF file loaded */
+	u32 open_ref;		/* Number of times opened */
+	u32 load_ref;		/* Number of times loaded */
+	struct gh_t_hash_tab *sym_tab;	/* Hash table of symbols */
+	u32 ul_pos;
+};
+
+/*
+ *  ======== dbll_symbol ========
+ */
+struct dbll_symbol {
+	struct dbll_sym_val value;
+	char *name;
+};
+
+static void dof_close(struct dbll_library_obj *zl_lib);
+static int dof_open(struct dbll_library_obj *zl_lib);
+static s32 no_op(struct dynamic_loader_initialize *thisptr, void *bufr,
+		 ldr_addr locn, struct ldr_section_info *info, unsigned bytsiz);
+
+/*
+ *  Functions called by dynamic loader
+ *
+ */
+/* dynamic_loader_stream */
+static int dbll_read_buffer(struct dynamic_loader_stream *this, void *buffer,
+			    unsigned bufsize);
+static int dbll_set_file_posn(struct dynamic_loader_stream *this,
+			      unsigned int pos);
+/* dynamic_loader_sym */
+static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
+					       const char *name);
+static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym
+						       *this, const char *name,
+						       unsigned moduleId);
+static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym
+						   *this, const char *name,
+						   unsigned moduleid);
+static void dbll_purge_symbol_table(struct dynamic_loader_sym *this,
+				    unsigned moduleId);
+static void *allocate(struct dynamic_loader_sym *this, unsigned memsize);
+static void deallocate(struct dynamic_loader_sym *this, void *memPtr);
+static void dbll_err_report(struct dynamic_loader_sym *this, const char *errstr,
+			    va_list args);
+/* dynamic_loader_allocate */
+static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
+			  struct ldr_section_info *info, unsigned align);
+static void rmm_dealloc(struct dynamic_loader_allocate *this,
+			struct ldr_section_info *info);
+
+/* dynamic_loader_initialize */
+static int connect(struct dynamic_loader_initialize *this);
+static int read_mem(struct dynamic_loader_initialize *this, void *buf,
+		    ldr_addr addr, struct ldr_section_info *info,
+		    unsigned nbytes);
+static int write_mem(struct dynamic_loader_initialize *this, void *buf,
+		     ldr_addr addr, struct ldr_section_info *info,
+		     unsigned nbytes);
+static int fill_mem(struct dynamic_loader_initialize *this, ldr_addr addr,
+		    struct ldr_section_info *info, unsigned nbytes,
+		    unsigned val);
+static int execute(struct dynamic_loader_initialize *this, ldr_addr start);
+static void release(struct dynamic_loader_initialize *this);
+
+/* symbol table hash functions */
+static u16 name_hash(void *name, u16 max_bucket);
+static bool name_match(void *name, void *sp);
+static void sym_delete(void *sp);
+
+static u32 refs;		/* module reference count */
+
+/* Symbol redefinition tracking: redefined_symbol is set when a symbol being
+ * added is already present in a symbol table, and gbl_search suppresses the
+ * "symbol not found" debug message while that duplicate check runs. */
+static int redefined_symbol;
+static int gbl_search = 1;
+
+/*
+ *  ======== dbll_close ========
+ */
+void dbll_close(struct dbll_library_obj *zl_lib)
+{
+	struct dbll_tar_obj *zl_target;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(zl_lib);
+	DBC_REQUIRE(zl_lib->open_ref > 0);
+	zl_target = zl_lib->target_obj;
+	zl_lib->open_ref--;
+	if (zl_lib->open_ref == 0) {
+		/* Remove library from list */
+		if (zl_target->head == zl_lib)
+			zl_target->head = zl_lib->next;
+
+		if (zl_lib->prev)
+			(zl_lib->prev)->next = zl_lib->next;
+
+		if (zl_lib->next)
+			(zl_lib->next)->prev = zl_lib->prev;
+
+		/* Free DOF resources */
+		dof_close(zl_lib);
+		kfree(zl_lib->file_name);
+
+		/* remove symbols from symbol table */
+		if (zl_lib->sym_tab)
+			gh_delete(zl_lib->sym_tab);
+
+		/* remove the library object itself */
+		kfree(zl_lib);
+		zl_lib = NULL;
+	}
+}
+
+/*
+ *  ======== dbll_create ========
+ */
+int dbll_create(struct dbll_tar_obj **target_obj,
+		       struct dbll_attrs *pattrs)
+{
+	struct dbll_tar_obj *pzl_target;
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(pattrs != NULL);
+	DBC_REQUIRE(target_obj != NULL);
+
+	/* Allocate DBL target object */
+	pzl_target = kzalloc(sizeof(struct dbll_tar_obj), GFP_KERNEL);
+	if (target_obj != NULL) {
+		if (pzl_target == NULL) {
+			*target_obj = NULL;
+			status = -ENOMEM;
+		} else {
+			pzl_target->attrs = *pattrs;
+			*target_obj = (struct dbll_tar_obj *)pzl_target;
+		}
+		DBC_ENSURE((DSP_SUCCEEDED(status) && *target_obj) ||
+				(DSP_FAILED(status) && *target_obj == NULL));
+	}
+
+	return status;
+}
+
+/*
+ *  ======== dbll_delete ========
+ */
+void dbll_delete(struct dbll_tar_obj *target)
+{
+	struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(zl_target);
+
+	kfree(zl_target);
+}
+
+/*
+ *  ======== dbll_exit ========
+ *  Discontinue usage of DBL module.
+ */
+void dbll_exit(void)
+{
+	DBC_REQUIRE(refs > 0);
+
+	refs--;
+
+	if (refs == 0)
+		gh_exit();
+
+	DBC_ENSURE(refs >= 0);
+}
+
+/*
+ *  ======== dbll_get_addr ========
+ *  Get address of name in the specified library.
+ */
+bool dbll_get_addr(struct dbll_library_obj *zl_lib, char *name,
+		   struct dbll_sym_val **ppSym)
+{
+	struct dbll_symbol *sym;
+	bool status = false;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(zl_lib);
+	DBC_REQUIRE(name != NULL);
+	DBC_REQUIRE(ppSym != NULL);
+	DBC_REQUIRE(zl_lib->sym_tab != NULL);
+
+	sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, name);
+	if (sym != NULL) {
+		*ppSym = &sym->value;
+		status = true;
+	}
+
+	dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p, status 0x%x\n",
+		__func__, zl_lib, name, ppSym, status);
+	return status;
+}
+
+/*
+ *  ======== dbll_get_attrs ========
+ *  Retrieve the attributes of the target.
+ */
+void dbll_get_attrs(struct dbll_tar_obj *target, struct dbll_attrs *pattrs)
+{
+	struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(zl_target);
+	DBC_REQUIRE(pattrs != NULL);
+
+	if ((pattrs != NULL) && (zl_target != NULL))
+		*pattrs = zl_target->attrs;
+
+}
+
+/*
+ *  ======== dbll_get_c_addr ========
+ *  Get address of a "C" name in the specified library.
+ */
+bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name,
+		     struct dbll_sym_val **ppSym)
+{
+	struct dbll_symbol *sym;
+	char cname[MAXEXPR + 1];
+	bool status = false;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(zl_lib);
+	DBC_REQUIRE(ppSym != NULL);
+	DBC_REQUIRE(zl_lib->sym_tab != NULL);
+	DBC_REQUIRE(name != NULL);
+
+	cname[0] = '_';
+
+	strncpy(cname + 1, name, sizeof(cname) - 2);
+	cname[MAXEXPR] = '\0';	/* ensure '\0' string termination */
+
+	/* Look up the '_'-prefixed C name in the symbol table */
+	sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, cname);
+
+	if (sym != NULL) {
+		*ppSym = &sym->value;
+		status = true;
+	}
+
+	return status;
+}
+
+/*
+ *  ======== dbll_get_sect ========
+ *  Get the base address and size (in bytes) of a COFF section.
+ */
+int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr,
+			 u32 *psize)
+{
+	u32 byte_size;
+	bool opened_doff = false;
+	const struct ldr_section_info *sect = NULL;
+	struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(name != NULL);
+	DBC_REQUIRE(paddr != NULL);
+	DBC_REQUIRE(psize != NULL);
+	DBC_REQUIRE(zl_lib);
+
+	/* If DOFF file is not open, we open it. */
+	if (zl_lib != NULL) {
+		if (zl_lib->fp == NULL) {
+			status = dof_open(zl_lib);
+			if (DSP_SUCCEEDED(status))
+				opened_doff = true;
+
+		} else {
+			(*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
+							      zl_lib->ul_pos,
+							      SEEK_SET);
+		}
+	} else {
+		status = -EFAULT;
+	}
+	if (DSP_SUCCEEDED(status)) {
+		byte_size = 1;
+		if (dload_get_section_info(zl_lib->desc, name, &sect)) {
+			*paddr = sect->load_addr;
+			*psize = sect->size * byte_size;
+			/* Make sure size is even for good swap */
+			if (*psize % 2)
+				(*psize)++;
+
+			/* Align size */
+			*psize = DOFF_ALIGN(*psize);
+		} else {
+			status = -ENXIO;
+		}
+	}
+	if (opened_doff) {
+		dof_close(zl_lib);
+		opened_doff = false;
+	}
+
+	dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p psize: %p, "
+		"status 0x%x\n", __func__, lib, name, paddr, psize, status);
+
+	return status;
+}
+
+/*
+ *  ======== dbll_init ========
+ */
+bool dbll_init(void)
+{
+	DBC_REQUIRE(refs >= 0);
+
+	if (refs == 0)
+		gh_init();
+
+	refs++;
+
+	return true;
+}
+
+/*
+ *  ======== dbll_load ========
+ */
+int dbll_load(struct dbll_library_obj *lib, dbll_flags flags,
+		     struct dbll_attrs *attrs, u32 *pEntry)
+{
+	struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
+	struct dbll_tar_obj *dbzl;
+	bool got_symbols = true;
+	s32 err;
+	int status = 0;
+	bool opened_doff = false;
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(zl_lib);
+	DBC_REQUIRE(pEntry != NULL);
+	DBC_REQUIRE(attrs != NULL);
+
+	/*
+	 *  Load only if not already loaded; a non-dynamic load (e.g. the base
+	 *  image) always proceeds regardless of the current load count.
+	 */
+	if (zl_lib->load_ref == 0 || !(flags & DBLL_DYNAMIC)) {
+		dbzl = zl_lib->target_obj;
+		dbzl->attrs = *attrs;
+		/* Create a hash table for symbols if not already created */
+		if (zl_lib->sym_tab == NULL) {
+			got_symbols = false;
+			zl_lib->sym_tab = gh_create(MAXBUCKETS,
+						    sizeof(struct dbll_symbol),
+						    name_hash,
+						    name_match, sym_delete);
+			if (zl_lib->sym_tab == NULL)
+				status = -ENOMEM;
+
+		}
+		/*
+		 *  Set up objects needed by the dynamic loader
+		 */
+		/* Stream */
+		zl_lib->stream.dl_stream.read_buffer = dbll_read_buffer;
+		zl_lib->stream.dl_stream.set_file_posn = dbll_set_file_posn;
+		zl_lib->stream.lib = zl_lib;
+		/* Symbol */
+		zl_lib->symbol.dl_symbol.find_matching_symbol =
+		    dbll_find_symbol;
+		if (got_symbols) {
+			zl_lib->symbol.dl_symbol.add_to_symbol_table =
+			    find_in_symbol_table;
+		} else {
+			zl_lib->symbol.dl_symbol.add_to_symbol_table =
+			    dbll_add_to_symbol_table;
+		}
+		zl_lib->symbol.dl_symbol.purge_symbol_table =
+		    dbll_purge_symbol_table;
+		zl_lib->symbol.dl_symbol.dload_allocate = allocate;
+		zl_lib->symbol.dl_symbol.dload_deallocate = deallocate;
+		zl_lib->symbol.dl_symbol.error_report = dbll_err_report;
+		zl_lib->symbol.lib = zl_lib;
+		/* Allocate */
+		zl_lib->allocate.dl_alloc.dload_allocate = dbll_rmm_alloc;
+		zl_lib->allocate.dl_alloc.dload_deallocate = rmm_dealloc;
+		zl_lib->allocate.lib = zl_lib;
+		/* Init */
+		zl_lib->init.dl_init.connect = connect;
+		zl_lib->init.dl_init.readmem = read_mem;
+		zl_lib->init.dl_init.writemem = write_mem;
+		zl_lib->init.dl_init.fillmem = fill_mem;
+		zl_lib->init.dl_init.execute = execute;
+		zl_lib->init.dl_init.release = release;
+		zl_lib->init.lib = zl_lib;
+		/* If COFF file is not open, we open it. */
+		if (zl_lib->fp == NULL) {
+			status = dof_open(zl_lib);
+			if (DSP_SUCCEEDED(status))
+				opened_doff = true;
+
+		}
+		if (DSP_SUCCEEDED(status)) {
+			zl_lib->ul_pos = (*(zl_lib->target_obj->attrs.ftell))
+			    (zl_lib->fp);
+			/* Reset file cursor */
+			(*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
+							      (long)0,
+							      SEEK_SET);
+			symbols_reloaded = true;
+			/* The 5th argument, DLOAD_INITBSS, tells the DLL
+			 * module to zero-init all BSS sections.  In general,
+			 * this is not necessary and also increases load time.
+			 * We may want to make this configurable by the user */
+			err = dynamic_load_module(&zl_lib->stream.dl_stream,
+						  &zl_lib->symbol.dl_symbol,
+						  &zl_lib->allocate.dl_alloc,
+						  &zl_lib->init.dl_init,
+						  DLOAD_INITBSS,
+						  &zl_lib->dload_mod_obj);
+
+			if (err != 0) {
+				status = -EILSEQ;
+			} else if (redefined_symbol) {
+				zl_lib->load_ref++;
+				dbll_unload(zl_lib, (struct dbll_attrs *)attrs);
+				redefined_symbol = false;
+				status = -EILSEQ;
+			} else {
+				*pEntry = zl_lib->entry;
+			}
+		}
+	}
+	if (DSP_SUCCEEDED(status))
+		zl_lib->load_ref++;
+
+	/* Clean up DOFF resources */
+	if (opened_doff)
+		dof_close(zl_lib);
+
+	DBC_ENSURE(DSP_FAILED(status) || zl_lib->load_ref > 0);
+
+	dev_dbg(bridge, "%s: lib: %p flags: 0x%x pEntry: %p, status 0x%x\n",
+		__func__, lib, flags, pEntry, status);
+
+	return status;
+}
+
+/*
+ *  ======== dbll_load_sect ========
+ *  Not supported for COFF.
+ */
+int dbll_load_sect(struct dbll_library_obj *zl_lib, char *sectName,
+			  struct dbll_attrs *attrs)
+{
+	DBC_REQUIRE(zl_lib);
+
+	return -ENOSYS;
+}
+
+/*
+ *  ======== dbll_open ========
+ */
+int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags,
+		     struct dbll_library_obj **pLib)
+{
+	struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
+	struct dbll_library_obj *zl_lib = NULL;
+	s32 err;
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(zl_target);
+	DBC_REQUIRE(zl_target->attrs.fopen != NULL);
+	DBC_REQUIRE(file != NULL);
+	DBC_REQUIRE(pLib != NULL);
+
+	zl_lib = zl_target->head;
+	while (zl_lib != NULL) {
+		if (strcmp(zl_lib->file_name, file) == 0) {
+			/* Library is already opened */
+			zl_lib->open_ref++;
+			break;
+		}
+		zl_lib = zl_lib->next;
+	}
+	if (zl_lib == NULL) {
+		/* Allocate DBL library object */
+		zl_lib = kzalloc(sizeof(struct dbll_library_obj), GFP_KERNEL);
+		if (zl_lib == NULL) {
+			status = -ENOMEM;
+		} else {
+			zl_lib->ul_pos = 0;
+			/* Increment ref count to allow close on failure
+			 * later on */
+			zl_lib->open_ref++;
+			zl_lib->target_obj = zl_target;
+			/* Keep a copy of the file name */
+			zl_lib->file_name = kzalloc(strlen(file) + 1,
+							GFP_KERNEL);
+			if (zl_lib->file_name == NULL) {
+				status = -ENOMEM;
+			} else {
+				strncpy(zl_lib->file_name, file,
+					strlen(file) + 1);
+			}
+			zl_lib->sym_tab = NULL;
+		}
+	}
+	/*
+	 *  Set up objects needed by the dynamic loader
+	 */
+	if (DSP_FAILED(status))
+		goto func_cont;
+
+	/* Stream */
+	zl_lib->stream.dl_stream.read_buffer = dbll_read_buffer;
+	zl_lib->stream.dl_stream.set_file_posn = dbll_set_file_posn;
+	zl_lib->stream.lib = zl_lib;
+	/* Symbol */
+	zl_lib->symbol.dl_symbol.add_to_symbol_table = dbll_add_to_symbol_table;
+	zl_lib->symbol.dl_symbol.find_matching_symbol = dbll_find_symbol;
+	zl_lib->symbol.dl_symbol.purge_symbol_table = dbll_purge_symbol_table;
+	zl_lib->symbol.dl_symbol.dload_allocate = allocate;
+	zl_lib->symbol.dl_symbol.dload_deallocate = deallocate;
+	zl_lib->symbol.dl_symbol.error_report = dbll_err_report;
+	zl_lib->symbol.lib = zl_lib;
+	/* Allocate */
+	zl_lib->allocate.dl_alloc.dload_allocate = dbll_rmm_alloc;
+	zl_lib->allocate.dl_alloc.dload_deallocate = rmm_dealloc;
+	zl_lib->allocate.lib = zl_lib;
+	/* Init */
+	zl_lib->init.dl_init.connect = connect;
+	zl_lib->init.dl_init.readmem = read_mem;
+	zl_lib->init.dl_init.writemem = write_mem;
+	zl_lib->init.dl_init.fillmem = fill_mem;
+	zl_lib->init.dl_init.execute = execute;
+	zl_lib->init.dl_init.release = release;
+	zl_lib->init.lib = zl_lib;
+	if (DSP_SUCCEEDED(status) && zl_lib->fp == NULL)
+		status = dof_open(zl_lib);
+
+	/* Bail out through the common cleanup path if the file won't open */
+	if (DSP_FAILED(status))
+		goto func_cont;
+
+	zl_lib->ul_pos = (*(zl_lib->target_obj->attrs.ftell)) (zl_lib->fp);
+	(*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0, SEEK_SET);
+	/* Create a hash table for symbols if flag is set */
+	if (zl_lib->sym_tab != NULL || !(flags & DBLL_SYMB))
+		goto func_cont;
+
+	zl_lib->sym_tab =
+	    gh_create(MAXBUCKETS, sizeof(struct dbll_symbol), name_hash,
+		      name_match, sym_delete);
+	if (zl_lib->sym_tab == NULL) {
+		status = -ENOMEM;
+	} else {
+		/* Do a fake load to get symbols - set write func to no_op */
+		zl_lib->init.dl_init.writemem = no_op;
+		err = dynamic_open_module(&zl_lib->stream.dl_stream,
+					  &zl_lib->symbol.dl_symbol,
+					  &zl_lib->allocate.dl_alloc,
+					  &zl_lib->init.dl_init, 0,
+					  &zl_lib->dload_mod_obj);
+		if (err != 0) {
+			status = -EILSEQ;
+		} else {
+			/* Now that we have the symbol table, we can unload */
+			err = dynamic_unload_module(zl_lib->dload_mod_obj,
+						    &zl_lib->symbol.dl_symbol,
+						    &zl_lib->allocate.dl_alloc,
+						    &zl_lib->init.dl_init);
+			if (err != 0)
+				status = -EILSEQ;
+
+			zl_lib->dload_mod_obj = NULL;
+		}
+	}
+func_cont:
+	if (DSP_SUCCEEDED(status)) {
+		if (zl_lib->open_ref == 1) {
+			/* First time opened - insert in list */
+			if (zl_target->head)
+				(zl_target->head)->prev = zl_lib;
+
+			zl_lib->prev = NULL;
+			zl_lib->next = zl_target->head;
+			zl_target->head = zl_lib;
+		}
+		*pLib = (struct dbll_library_obj *)zl_lib;
+	} else {
+		*pLib = NULL;
+		if (zl_lib != NULL)
+			dbll_close((struct dbll_library_obj *)zl_lib);
+
+	}
+	DBC_ENSURE((DSP_SUCCEEDED(status) && (zl_lib->open_ref > 0) && *pLib)
+				|| (DSP_FAILED(status) && *pLib == NULL));
+
+	dev_dbg(bridge, "%s: target: %p file: %s pLib: %p, status 0x%x\n",
+		__func__, target, file, pLib, status);
+
+	return status;
+}
+
+/*
+ *  ======== dbll_read_sect ========
+ *  Get the content of a COFF section.
+ */
+int dbll_read_sect(struct dbll_library_obj *lib, char *name,
+			  char *pContent, u32 size)
+{
+	struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
+	bool opened_doff = false;
+	u32 byte_size;		/* size of bytes */
+	u32 ul_sect_size;	/* size of section */
+	const struct ldr_section_info *sect = NULL;
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(zl_lib);
+	DBC_REQUIRE(name != NULL);
+	DBC_REQUIRE(pContent != NULL);
+	DBC_REQUIRE(size != 0);
+
+	/* If DOFF file is not open, we open it. */
+	if (zl_lib != NULL) {
+		if (zl_lib->fp == NULL) {
+			status = dof_open(zl_lib);
+			if (DSP_SUCCEEDED(status))
+				opened_doff = true;
+
+		} else {
+			(*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
+							      zl_lib->ul_pos,
+							      SEEK_SET);
+		}
+	} else {
+		status = -EFAULT;
+	}
+	if (DSP_FAILED(status))
+		goto func_cont;
+
+	byte_size = 1;
+	if (!dload_get_section_info(zl_lib->desc, name, &sect)) {
+		status = -ENXIO;
+		goto func_cont;
+	}
+	/*
+	 * Ensure the supplied buffer size is sufficient to store
+	 * the section content to be read.
+	 */
+	ul_sect_size = sect->size * byte_size;
+	/* Make sure size is even for good swap */
+	if (ul_sect_size % 2)
+		ul_sect_size++;
+
+	/* Align size */
+	ul_sect_size = DOFF_ALIGN(ul_sect_size);
+	if (ul_sect_size > size) {
+		status = -EPERM;
+	} else {
+		if (!dload_get_section(zl_lib->desc, sect, pContent))
+			status = -EBADF;
+
+	}
+func_cont:
+	if (opened_doff) {
+		dof_close(zl_lib);
+		opened_doff = false;
+	}
+
+	dev_dbg(bridge, "%s: lib: %p name: %s pContent: %p size: 0x%x, "
+		"status 0x%x\n", __func__, lib, name, pContent, size, status);
+	return status;
+}
+
+/*
+ *  ======== dbll_set_attrs ========
+ *  Set the attributes of the target.
+ */
+void dbll_set_attrs(struct dbll_tar_obj *target, struct dbll_attrs *pattrs)
+{
+	struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(zl_target);
+	DBC_REQUIRE(pattrs != NULL);
+
+	if ((pattrs != NULL) && (zl_target != NULL))
+		zl_target->attrs = *pattrs;
+
+}
+
+/*
+ *  ======== dbll_unload ========
+ */
+void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs)
+{
+	struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib;
+	s32 err = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(zl_lib);
+	DBC_REQUIRE(zl_lib->load_ref > 0);
+	dev_dbg(bridge, "%s: lib: %p\n", __func__, lib);
+	zl_lib->load_ref--;
+	/* Unload only if reference count is 0 */
+	if (zl_lib->load_ref != 0)
+		goto func_end;
+
+	zl_lib->target_obj->attrs = *attrs;
+	if (zl_lib->dload_mod_obj) {
+		err = dynamic_unload_module(zl_lib->dload_mod_obj,
+					    &zl_lib->symbol.dl_symbol,
+					    &zl_lib->allocate.dl_alloc,
+					    &zl_lib->init.dl_init);
+		if (err != 0)
+			dev_dbg(bridge, "%s: failed: 0x%x\n", __func__, err);
+	}
+	/* remove symbols from symbol table */
+	if (zl_lib->sym_tab != NULL) {
+		gh_delete(zl_lib->sym_tab);
+		zl_lib->sym_tab = NULL;
+	}
+	/* delete DOFF desc since it holds *lots* of host OS
+	 * resources */
+	dof_close(zl_lib);
+func_end:
+	DBC_ENSURE(zl_lib->load_ref >= 0);
+}
+
+/*
+ *  ======== dbll_unload_sect ========
+ *  Not supported for COFF.
+ */
+int dbll_unload_sect(struct dbll_library_obj *lib, char *sectName,
+			    struct dbll_attrs *attrs)
+{
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(sectName != NULL);
+
+	return -ENOSYS;
+}
+
+/*
+ *  ======== dof_close ========
+ */
+static void dof_close(struct dbll_library_obj *zl_lib)
+{
+	if (zl_lib->desc) {
+		dload_module_close(zl_lib->desc);
+		zl_lib->desc = NULL;
+	}
+	/* close file */
+	if (zl_lib->fp) {
+		(zl_lib->target_obj->attrs.fclose) (zl_lib->fp);
+		zl_lib->fp = NULL;
+	}
+}
+
+/*
+ *  ======== dof_open ========
+ */
+static int dof_open(struct dbll_library_obj *zl_lib)
+{
+	void *open = *(zl_lib->target_obj->attrs.fopen);
+	int status = 0;
+
+	/* First open the file for the dynamic loader, then open COF */
+	zl_lib->fp =
+	    (void *)((dbll_f_open_fxn) (open)) (zl_lib->file_name, "rb");
+
+	/* Open DOFF module */
+	if (zl_lib->fp && zl_lib->desc == NULL) {
+		(*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0,
+						      SEEK_SET);
+		zl_lib->desc =
+		    dload_module_open(&zl_lib->stream.dl_stream,
+				      &zl_lib->symbol.dl_symbol);
+		if (zl_lib->desc == NULL) {
+			(zl_lib->target_obj->attrs.fclose) (zl_lib->fp);
+			zl_lib->fp = NULL;
+			status = -EBADF;
+		}
+	} else {
+		status = -EBADF;
+	}
+
+	return status;
+}
+
+/*
+ *  ======== name_hash ========
+ */
+static u16 name_hash(void *key, u16 max_bucket)
+{
+	u16 ret;
+	u16 hash;
+	char *name = (char *)key;
+
+	DBC_REQUIRE(name != NULL);
+
+	hash = 0;
+
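+	/* Simple shift-and-XOR hash over the characters of the name; the
+	 * result is reduced modulo the bucket count below. */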
+	while (*name) {
+		hash <<= 1;
+		hash ^= *name++;
+	}
+
+	ret = hash % max_bucket;
+
+	return ret;
+}
+
+/*
+ *  ======== name_match ========
+ */
+static bool name_match(void *key, void *value)
+{
+	DBC_REQUIRE(key != NULL);
+	DBC_REQUIRE(value != NULL);
+
+	if ((key != NULL) && (value != NULL)) {
+		if (strcmp((char *)key, ((struct dbll_symbol *)value)->name) ==
+		    0)
+			return true;
+	}
+	return false;
+}
+
+/*
+ *  ======== no_op ========
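+ *  Used as the writemem hook for the symbol-only "fake load" performed in
+ *  dbll_open(); it reports success without touching target memory.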
+ */
+static int no_op(struct dynamic_loader_initialize *thisptr, void *bufr,
+		 ldr_addr locn, struct ldr_section_info *info, unsigned bytsize)
+{
+	return 1;
+}
+
+/*
+ *  ======== sym_delete ========
+ */
+static void sym_delete(void *value)
+{
+	struct dbll_symbol *sp = (struct dbll_symbol *)value;
+
+	kfree(sp->name);
+}
+
+/*
+ *  Dynamic Loader Functions
+ */
+
+/* dynamic_loader_stream */
+/*
+ *  ======== dbll_read_buffer ========
+ */
+static int dbll_read_buffer(struct dynamic_loader_stream *this, void *buffer,
+			    unsigned bufsize)
+{
+	struct dbll_stream *pstream = (struct dbll_stream *)this;
+	struct dbll_library_obj *lib;
+	int bytes_read = 0;
+
+	DBC_REQUIRE(this != NULL);
+	lib = pstream->lib;
+	DBC_REQUIRE(lib);
+
+	if (lib != NULL) {
+		bytes_read =
+		    (*(lib->target_obj->attrs.fread)) (buffer, 1, bufsize,
+						       lib->fp);
+	}
+	return bytes_read;
+}
+
+/*
+ *  ======== dbll_set_file_posn ========
+ */
+static int dbll_set_file_posn(struct dynamic_loader_stream *this,
+			      unsigned int pos)
+{
+	struct dbll_stream *pstream = (struct dbll_stream *)this;
+	struct dbll_library_obj *lib;
+	int status = 0;		/* Success */
+
+	DBC_REQUIRE(this != NULL);
+	lib = pstream->lib;
+	DBC_REQUIRE(lib);
+
+	if (lib != NULL) {
+		status = (*(lib->target_obj->attrs.fseek)) (lib->fp, (long)pos,
+							    SEEK_SET);
+	}
+
+	return status;
+}
+
+/* dynamic_loader_sym */
+
+/*
+ *  ======== dbll_find_symbol ========
+ */
+static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
+					       const char *name)
+{
+	struct dynload_symbol *ret_sym;
+	struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
+	struct dbll_library_obj *lib;
+	struct dbll_sym_val *dbll_sym = NULL;
+	bool status = false;	/* Symbol not found yet */
+
+	DBC_REQUIRE(this != NULL);
+	lib = ldr_sym->lib;
+	DBC_REQUIRE(lib);
+
+	if (lib != NULL) {
+		if (lib->target_obj->attrs.sym_lookup) {
+			/* Check current lib + base lib + dep lib +
+			 * persistent lib */
+			status = (*(lib->target_obj->attrs.sym_lookup))
+			    (lib->target_obj->attrs.sym_handle,
+			     lib->target_obj->attrs.sym_arg,
+			     lib->target_obj->attrs.rmm_handle, name,
+			     &dbll_sym);
+		} else {
+			/* Just check current lib for symbol */
+			status = dbll_get_addr((struct dbll_library_obj *)lib,
+					       (char *)name, &dbll_sym);
+			if (!status) {
+				status =
+				    dbll_get_c_addr((struct dbll_library_obj *)
+						    lib, (char *)name,
+						    &dbll_sym);
+			}
+		}
+	}
+
+	if (!status && gbl_search)
+		dev_dbg(bridge, "%s: Symbol not found: %s\n", __func__, name);
+
+	DBC_ASSERT((status && (dbll_sym != NULL))
+		   || (!status && (dbll_sym == NULL)));
+
+	ret_sym = (struct dynload_symbol *)dbll_sym;
+	return ret_sym;
+}
+
+/*
+ *  ======== find_in_symbol_table ========
+ */
+static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym
+						   *this, const char *name,
+						   unsigned moduleid)
+{
+	struct dynload_symbol *ret_sym;
+	struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
+	struct dbll_library_obj *lib;
+	struct dbll_symbol *sym;
+
+	DBC_REQUIRE(this != NULL);
+	lib = ldr_sym->lib;
+	DBC_REQUIRE(lib);
+	DBC_REQUIRE(lib->sym_tab != NULL);
+
+	sym = (struct dbll_symbol *)gh_find(lib->sym_tab, (char *)name);
+	if (!sym)
+		return NULL;
+
+	ret_sym = (struct dynload_symbol *)&sym->value;
+	return ret_sym;
+}
+
+/*
+ *  ======== dbll_add_to_symbol_table ========
+ */
+static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym
+						       *this, const char *name,
+						       unsigned moduleId)
+{
+	struct dbll_symbol *sym_ptr = NULL;
+	struct dbll_symbol symbol;
+	struct dynload_symbol *dbll_sym = NULL;
+	struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
+	struct dbll_library_obj *lib;
+	struct dynload_symbol *ret;
+
+	DBC_REQUIRE(this != NULL);
+	DBC_REQUIRE(name);
+	lib = ldr_sym->lib;
+	DBC_REQUIRE(lib);
+
+	/* Check to see if symbol is already defined in symbol table */
+	if (!(lib->target_obj->attrs.base_image)) {
+		gbl_search = false;
+		dbll_sym = dbll_find_symbol(this, name);
+		gbl_search = true;
+		if (dbll_sym) {
+			redefined_symbol = true;
+			dev_dbg(bridge, "%s already defined in symbol table\n",
+				name);
+			return NULL;
+		}
+	}
+	/* Allocate string to copy symbol name */
+	symbol.name = kzalloc(strlen((char *const)name) + 1, GFP_KERNEL);
+	if (symbol.name == NULL)
+		return NULL;
+
+	/* Just copy name (value will be filled in by dynamic loader) */
+	strncpy(symbol.name, (char *const)name,
+		strlen((char *const)name) + 1);
+
+	/* Add symbol to symbol table */
+	sym_ptr =
+	    (struct dbll_symbol *)gh_insert(lib->sym_tab, (void *)name,
+					    (void *)&symbol);
+	if (sym_ptr == NULL)
+		kfree(symbol.name);
+
+	if (sym_ptr != NULL)
+		ret = (struct dynload_symbol *)&sym_ptr->value;
+	else
+		ret = NULL;
+
+	return ret;
+}
+
+/*
+ *  ======== dbll_purge_symbol_table ========
+ */
+static void dbll_purge_symbol_table(struct dynamic_loader_sym *this,
+				    unsigned moduleId)
+{
+	struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
+	struct dbll_library_obj *lib;
+
+	DBC_REQUIRE(this != NULL);
+	lib = ldr_sym->lib;
+	DBC_REQUIRE(lib);
+
+	/* May not need to do anything */
+}
+
+/*
+ *  ======== allocate ========
+ */
+static void *allocate(struct dynamic_loader_sym *this, unsigned memsize)
+{
+	struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
+	struct dbll_library_obj *lib;
+	void *buf;
+
+	DBC_REQUIRE(this != NULL);
+	lib = ldr_sym->lib;
+	DBC_REQUIRE(lib);
+
+	buf = kzalloc(memsize, GFP_KERNEL);
+
+	return buf;
+}
+
+/*
+ *  ======== deallocate ========
+ */
+static void deallocate(struct dynamic_loader_sym *this, void *memPtr)
+{
+	struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
+	struct dbll_library_obj *lib;
+
+	DBC_REQUIRE(this != NULL);
+	lib = ldr_sym->lib;
+	DBC_REQUIRE(lib);
+
+	kfree(memPtr);
+}
+
+/*
+ *  ======== dbll_err_report ========
+ */
+static void dbll_err_report(struct dynamic_loader_sym *this, const char *errstr,
+			    va_list args)
+{
+	struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this;
+	struct dbll_library_obj *lib;
+	char temp_buf[MAXEXPR];
+
+	DBC_REQUIRE(this != NULL);
+	lib = ldr_sym->lib;
+	DBC_REQUIRE(lib);
+	vsnprintf((char *)temp_buf, MAXEXPR, (char *)errstr, args);
+	dev_dbg(bridge, "%s\n", temp_buf);
+}
+
+/* dynamic_loader_allocate */
+
+/*
+ *  ======== dbll_rmm_alloc ========
+ */
+static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
+			  struct ldr_section_info *info, unsigned align)
+{
+	struct dbll_alloc *dbll_alloc_obj = (struct dbll_alloc *)this;
+	struct dbll_library_obj *lib;
+	int status = 0;
+	u32 mem_sect_type;
+	struct rmm_addr rmm_addr_obj;
+	s32 ret = TRUE;
+	unsigned stype = DLOAD_SECTION_TYPE(info->type);
+	char *token = NULL;
+	char *sz_sec_last_token = NULL;
+	char *sz_last_token = NULL;
+	char *sz_sect_name = NULL;
+	char *psz_cur;
+	s32 token_len = 0;
+	s32 seg_id = -1;
+	s32 req = -1;
+	s32 count = 0;
+	u32 alloc_size = 0;
+	u32 run_addr_flag = 0;
+
+	DBC_REQUIRE(this != NULL);
+	lib = dbll_alloc_obj->lib;
+	DBC_REQUIRE(lib);
+
+	mem_sect_type =
+	    (stype == DLOAD_TEXT) ? DBLL_CODE : (stype ==
+						 DLOAD_BSS) ? DBLL_BSS :
+	    DBLL_DATA;
+
+	/* Attempt to extract the segment ID and requirement information from
+	   the name of the section */
+	DBC_REQUIRE(info->name);
+	token_len = strlen((char *)(info->name)) + 1;
+
+	sz_sect_name = kzalloc(token_len, GFP_KERNEL);
+	sz_last_token = kzalloc(token_len, GFP_KERNEL);
+	sz_sec_last_token = kzalloc(token_len, GFP_KERNEL);
+
+	if (sz_sect_name == NULL || sz_sec_last_token == NULL ||
+	    sz_last_token == NULL) {
+		status = -ENOMEM;
+		goto func_cont;
+	}
+	strncpy(sz_sect_name, (char *)(info->name), token_len);
+	psz_cur = sz_sect_name;
+	while ((token = strsep(&psz_cur, ":")) && *token != '\0') {
+		strncpy(sz_sec_last_token, sz_last_token,
+			strlen(sz_last_token) + 1);
+		strncpy(sz_last_token, token, strlen(token) + 1);
+		token = strsep(&psz_cur, ":");
+		count++;	/* optimizes processing */
+	}
+	/* If token is 0 or 1, and sz_sec_last_token is DYN_DARAM or DYN_SARAM,
+	   or DYN_EXTERNAL, then mem granularity information is present
+	   within the section name - only process if there are at least three
+	   tokens within the section name (just a minor optimization) */
+	if (count >= 3)
+		strict_strtol(sz_last_token, 10, (long *)&req);
+
+	if ((req == 0) || (req == 1)) {
+		if (strcmp(sz_sec_last_token, "DYN_DARAM") == 0) {
+			seg_id = 0;
+		} else {
+			if (strcmp(sz_sec_last_token, "DYN_SARAM") == 0) {
+				seg_id = 1;
+			} else {
+				if (strcmp(sz_sec_last_token,
+					   "DYN_EXTERNAL") == 0)
+					seg_id = 2;
+			}
+		}
+	}
+func_cont:
+	kfree(sz_sect_name);
+	sz_sect_name = NULL;
+	kfree(sz_last_token);
+	sz_last_token = NULL;
+	kfree(sz_sec_last_token);
+	sz_sec_last_token = NULL;
+
+	if (mem_sect_type == DBLL_CODE)
+		alloc_size = info->size + GEM_L1P_PREFETCH_SIZE;
+	else
+		alloc_size = info->size;
+
+	if (info->load_addr != info->run_addr)
+		run_addr_flag = 1;
+	/* TODO - ideally, we can pass the alignment requirement also
+	 * from here */
+	if (lib != NULL) {
+		status =
+		    (lib->target_obj->attrs.alloc) (lib->target_obj->attrs.
+						    rmm_handle, mem_sect_type,
+						    alloc_size, align,
+						    (u32 *) &rmm_addr_obj,
+						    seg_id, req, FALSE);
+	}
+	if (DSP_FAILED(status)) {
+		ret = false;
+	} else {
+		/* RMM gives word address. Need to convert to byte address */
+		info->load_addr = rmm_addr_obj.addr * DSPWORDSIZE;
+		if (!run_addr_flag)
+			info->run_addr = info->load_addr;
+		info->context = (u32) rmm_addr_obj.segid;
+		dev_dbg(bridge, "%s: %s base = 0x%x len = 0x%x, "
+			"info->run_addr 0x%x, info->load_addr 0x%x\n",
+			__func__, info->name, info->load_addr / DSPWORDSIZE,
+			info->size / DSPWORDSIZE, info->run_addr,
+			info->load_addr);
+	}
+	return ret;
+}
+
+/*
+ *  ======== rmm_dealloc ========
+ */
+static void rmm_dealloc(struct dynamic_loader_allocate *this,
+			struct ldr_section_info *info)
+{
+	struct dbll_alloc *dbll_alloc_obj = (struct dbll_alloc *)this;
+	struct dbll_library_obj *lib;
+	u32 segid;
+	int status = 0;
+	unsigned stype = DLOAD_SECTION_TYPE(info->type);
+	u32 mem_sect_type;
+	u32 free_size = 0;
+
+	mem_sect_type =
+	    (stype == DLOAD_TEXT) ? DBLL_CODE : (stype ==
+						 DLOAD_BSS) ? DBLL_BSS :
+	    DBLL_DATA;
+	DBC_REQUIRE(this != NULL);
+	lib = dbll_alloc_obj->lib;
+	DBC_REQUIRE(lib);
+	/* segid was set by alloc function */
+	segid = (u32) info->context;
+	if (mem_sect_type == DBLL_CODE)
+		free_size = info->size + GEM_L1P_PREFETCH_SIZE;
+	else
+		free_size = info->size;
+	if (lib != NULL) {
+		status =
+		    (lib->target_obj->attrs.free) (lib->target_obj->attrs.
+						   sym_handle, segid,
+						   info->load_addr /
+						   DSPWORDSIZE, free_size,
+						   false);
+	}
+}
+
+/* dynamic_loader_initialize */
+/*
+ *  ======== connect ========
+ */
+static int connect(struct dynamic_loader_initialize *this)
+{
+	return true;
+}
+
+/*
+ *  ======== read_mem ========
+ *  This function does not need to be implemented.
+ */
+static int read_mem(struct dynamic_loader_initialize *this, void *buf,
+		    ldr_addr addr, struct ldr_section_info *info,
+		    unsigned nbytes)
+{
+	struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
+	struct dbll_library_obj *lib;
+	int bytes_read = 0;
+
+	DBC_REQUIRE(this != NULL);
+	lib = init_obj->lib;
+	DBC_REQUIRE(lib);
+	/* Need bridge_brd_read function */
+	return bytes_read;
+}
+
+/*
+ *  ======== write_mem ========
+ */
+static int write_mem(struct dynamic_loader_initialize *this, void *buf,
+		     ldr_addr addr, struct ldr_section_info *info,
+		     unsigned bytes)
+{
+	struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
+	struct dbll_library_obj *lib;
+	struct dbll_tar_obj *target_obj;
+	struct dbll_sect_info sect_info;
+	u32 mem_sect_type;
+	bool ret = true;
+
+	DBC_REQUIRE(this != NULL);
+	lib = init_obj->lib;
+	if (!lib)
+		return false;
+
+	target_obj = lib->target_obj;
+
+	mem_sect_type =
+	    (DLOAD_SECTION_TYPE(info->type) ==
+	     DLOAD_TEXT) ? DBLL_CODE : DBLL_DATA;
+	if (target_obj && target_obj->attrs.write) {
+		ret =
+		    (*target_obj->attrs.write) (target_obj->attrs.input_params,
+						addr, buf, bytes,
+						mem_sect_type);
+
+		if (target_obj->attrs.log_write) {
+			sect_info.name = info->name;
+			sect_info.sect_run_addr = info->run_addr;
+			sect_info.sect_load_addr = info->load_addr;
+			sect_info.size = info->size;
+			sect_info.type = mem_sect_type;
+			/* Pass the information about what we've written to
+			 * another module */
+			(*target_obj->attrs.log_write) (target_obj->attrs.
+							log_write_handle,
+							&sect_info, addr,
+							bytes);
+		}
+	}
+	return ret;
+}
+
+/*
+ *  ======== fill_mem ========
+ *  Fill memory at a given target address with a given value.  The target
+ *  write function is first invoked with a zero length only to obtain the
+ *  host-side start address of the shared memory, which is then filled
+ *  directly with memset().
+ */
+static int fill_mem(struct dynamic_loader_initialize *this, ldr_addr addr,
+		    struct ldr_section_info *info, unsigned bytes, unsigned val)
+{
+	bool ret = true;
+	char *pbuf;
+	struct dbll_library_obj *lib;
+	struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
+
+	DBC_REQUIRE(this != NULL);
+	lib = init_obj->lib;
+	pbuf = NULL;
+	/*
+	 * Pass the address of a NULL buffer pointer and a zero length to
+	 * write_mem() as a trick to obtain the start address of shared
+	 * memory; no actual write takes place here.
+	 */
+	if ((lib->target_obj->attrs.write) != (dbll_write_fxn) no_op)
+		write_mem(this, &pbuf, addr, info, 0);
+	if (pbuf)
+		memset(pbuf, val, bytes);
+
+	return ret;
+}
+
+/*
+ *  ======== execute ========
+ */
+static int execute(struct dynamic_loader_initialize *this, ldr_addr start)
+{
+	struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
+	struct dbll_library_obj *lib;
+	bool ret = true;
+
+	DBC_REQUIRE(this != NULL);
+	lib = init_obj->lib;
+	DBC_REQUIRE(lib);
+	/* Save entry point */
+	if (lib != NULL)
+		lib->entry = (u32) start;
+
+	return ret;
+}
+
+/*
+ *  ======== release ========
+ */
+static void release(struct dynamic_loader_initialize *this)
+{
+}
+
+/**
+ * struct find_symbol_context - Basic symbol search context
+ * @address:		Address to resolve to a symbol
+ * @offset_range:	Maximum offset from @address to consider during
+ *			the search for the DSP symbol
+ * @cur_best_offset:	Smallest offset found so far; a new match must
+ *			improve on it
+ * @sym_addr:		Address of the DSP symbol
+ * @name:		Symbol name
+ *
+ */
+struct find_symbol_context {
+	/* input */
+	u32 address;
+	u32 offset_range;
+	/* state */
+	u32 cur_best_offset;
+	/* output */
+	u32 sym_addr;
+	char name[120];
+};
+
+/**
+ * find_symbol_callback() - Validates symbol address and copies the symbol name
+ *			to the user data.
+ * @elem:		dsp library context
+ * @user_data:		Find symbol context
+ *
+ */
+void find_symbol_callback(void *elem, void *user_data)
+{
+	struct dbll_symbol *symbol = elem;
+	struct find_symbol_context *context = user_data;
+	u32 symbol_addr = symbol->value.value;
+	u32 offset = context->address - symbol_addr;
+
+	/*
+	 * The given address must be greater than or equal to the symbol
+	 * address, the symbol address must be within the specified range,
+	 * and the offset must be better (smaller) than the previous best one.
+	 */
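+	/*
+	 * Illustrative example: for address 0x10010 and candidate symbols at
+	 * 0x10000 and 0xf000, the offsets are 0x10 and 0x1010, so the symbol
+	 * at 0x10000 becomes the best match (offset range permitting).
+	 */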
+	if (context->address >= symbol_addr && symbol_addr < (u32)-1 &&
+		offset < context->cur_best_offset) {
+		context->cur_best_offset = offset;
+		context->sym_addr = symbol_addr;
+		strncpy(context->name, symbol->name,
+			sizeof(context->name) - 1);
+		context->name[sizeof(context->name) - 1] = '\0';
+	}
+}
+
+/**
+ * dbll_find_dsp_symbol() - Find the DSP symbol closest to a given address.
+ * @zl_lib:		DSP binary obj library pointer
+ * @address:		Given address to find the dsp symbol
+ * @offset_range:	Offset range in which to look for the dsp symbol
+ * @sym_addr_output:	Symbol output address
+ * @name_output:	String with the dsp symbol
+ *
+ * This function retrieves from the dsp binary the symbol whose address is
+ * nearest at or below @address, within @offset_range.
+ */
+bool dbll_find_dsp_symbol(struct dbll_library_obj *zl_lib, u32 address,
+				u32 offset_range, u32 *sym_addr_output,
+				char *name_output)
+{
+	bool status = false;
+	struct find_symbol_context context;
+
+	context.address = address;
+	context.offset_range = offset_range;
+	context.cur_best_offset = offset_range;
+	context.sym_addr = 0;
+	context.name[0] = '\0';
+
+	gh_iterate(zl_lib->sym_tab, find_symbol_callback, &context);
+
+	if (context.name[0]) {
+		status = true;
+		strcpy(name_output, context.name);
+		*sym_addr_output = context.sym_addr;
+	}
+
+	return status;
+}
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c
new file mode 100644
index 0000000..50a5d97
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/dev.c
@@ -0,0 +1,1171 @@
+/*
+ * dev.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Implementation of Bridge driver device operations.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*  ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/ldr.h>
+#include <dspbridge/list.h>
+
+/*  ----------------------------------- Platform Manager */
+#include <dspbridge/cod.h>
+#include <dspbridge/drv.h>
+#include <dspbridge/proc.h>
+#include <dspbridge/dmm.h>
+
+/*  ----------------------------------- Resource Manager */
+#include <dspbridge/mgr.h>
+#include <dspbridge/node.h>
+
+/*  ----------------------------------- Others */
+#include <dspbridge/dspapi.h>	/* DSP API version info. */
+
+#include <dspbridge/chnl.h>
+#include <dspbridge/io.h>
+#include <dspbridge/msg.h>
+#include <dspbridge/cmm.h>
+
+/*  ----------------------------------- This */
+#include <dspbridge/dev.h>
+
+/*  ----------------------------------- Defines, Data Structures, Typedefs */
+
+#define MAKEVERSION(major, minor)   ((major) * 10 + (minor))
+#define BRD_API_VERSION		MAKEVERSION(BRD_API_MAJOR_VERSION,	\
+				BRD_API_MINOR_VERSION)
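+/* e.g. MAKEVERSION(2, 0) == 20: the API version packed as major*10 + minor */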
+
+/* The Bridge device object: */
+struct dev_object {
+	/* LST requires "link" to be first field! */
+	struct list_head link;	/* Link to next dev_object. */
+	u8 dev_type;		/* Device Type */
+	struct cfg_devnode *dev_node_obj;	/* Platform specific dev id */
+	/* Bridge Context Handle */
+	struct bridge_dev_context *hbridge_context;
+	/* Function interface to Bridge driver. */
+	struct bridge_drv_interface bridge_interface;
+	struct brd_object *lock_owner;	/* Client with exclusive access. */
+	struct cod_manager *cod_mgr;	/* Code manager handle. */
+	struct chnl_mgr *hchnl_mgr;	/* Channel manager. */
+	struct deh_mgr *hdeh_mgr;	/* DEH manager. */
+	struct msg_mgr *hmsg_mgr;	/* Message manager. */
+	struct io_mgr *hio_mgr;	/* IO manager (CHNL, msg_ctrl) */
+	struct cmm_object *hcmm_mgr;	/* SM memory manager. */
+	struct dmm_object *dmm_mgr;	/* Dynamic memory manager. */
+	struct ldr_module *module_obj;	/* Bridge Module handle. */
+	u32 word_size;		/* DSP word size: quick access. */
+	struct drv_object *hdrv_obj;	/* Driver Object */
+	struct lst_list *proc_list;	/* List of processors attached to
+					 * this device */
+	struct node_mgr *hnode_mgr;
+};
+
+/*  ----------------------------------- Globals */
+static u32 refs;		/* Module reference count */
+
+/*  ----------------------------------- Function Prototypes */
+static int fxn_not_implemented(int arg, ...);
+static int init_cod_mgr(struct dev_object *dev_obj);
+static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
+				 OUT struct bridge_drv_interface *intf_fxns);
+/*
+ *  ======== dev_brd_write_fxn ========
+ *  Purpose:
+ *      Exported function to be used as the COD write function.  This function
+ *      is passed a handle to a DEV_hObject, then calls the
+ *      device's bridge_brd_write() function.
+ */
+u32 dev_brd_write_fxn(void *pArb, u32 ulDspAddr, void *pHostBuf,
+		      u32 ul_num_bytes, u32 nMemSpace)
+{
+	struct dev_object *dev_obj = (struct dev_object *)pArb;
+	u32 ul_written = 0;
+	int status;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(pHostBuf != NULL);	/* Required of BrdWrite(). */
+	if (dev_obj) {
+		/* Require of BrdWrite() */
+		DBC_ASSERT(dev_obj->hbridge_context != NULL);
+		status = (*dev_obj->bridge_interface.pfn_brd_write) (
+					dev_obj->hbridge_context, pHostBuf,
+					ulDspAddr, ul_num_bytes, nMemSpace);
+		/* Special case of getting the address only */
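+		/* (A zero-length write is used by callers such as fill_mem()
+		 * in dbll.c purely to query the destination address; report
+		 * one byte written so the call still counts as a success.) */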
+		if (ul_num_bytes == 0)
+			ul_num_bytes = 1;
+		if (DSP_SUCCEEDED(status))
+			ul_written = ul_num_bytes;
+
+	}
+	return ul_written;
+}
+
+/*
+ *  ======== dev_create_device ========
+ *  Purpose:
+ *      Called by the operating system to load the PM Bridge Driver for a
+ *      PM board (device).
+ */
+int dev_create_device(OUT struct dev_object **phDevObject,
+			     IN CONST char *driver_file_name,
+			     struct cfg_devnode *dev_node_obj)
+{
+	struct cfg_hostres *host_res;
+	struct ldr_module *module_obj = NULL;
+	struct bridge_drv_interface *drv_fxns = NULL;
+	struct dev_object *dev_obj = NULL;
+	struct chnl_mgrattrs mgr_attrs;
+	struct io_attrs io_mgr_attrs;
+	u32 num_windows;
+	struct drv_object *hdrv_obj = NULL;
+	int status = 0;
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phDevObject != NULL);
+	DBC_REQUIRE(driver_file_name != NULL);
+
+	status = drv_request_bridge_res_dsp((void *)&host_res);
+
+	if (DSP_FAILED(status)) {
+		dev_dbg(bridge, "%s: Failed to reserve bridge resources\n",
+			__func__);
+		goto leave;
+	}
+
+	/*  Get the Bridge driver interface functions */
+	bridge_drv_entry(&drv_fxns, driver_file_name);
+	if (DSP_FAILED(cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT))) {
+		/* don't propagate CFG errors from this PROC function */
+		status = -EPERM;
+	}
+	/* Create the device object, and pass a handle to the Bridge driver for
+	 * storage. */
+	if (DSP_SUCCEEDED(status)) {
+		DBC_ASSERT(drv_fxns);
+		dev_obj = kzalloc(sizeof(struct dev_object), GFP_KERNEL);
+		if (dev_obj) {
+			/* Fill out the rest of the Dev Object structure: */
+			dev_obj->dev_node_obj = dev_node_obj;
+			dev_obj->module_obj = module_obj;
+			dev_obj->cod_mgr = NULL;
+			dev_obj->hchnl_mgr = NULL;
+			dev_obj->hdeh_mgr = NULL;
+			dev_obj->lock_owner = NULL;
+			dev_obj->word_size = DSPWORDSIZE;
+			dev_obj->hdrv_obj = hdrv_obj;
+			dev_obj->dev_type = DSP_UNIT;
+			/* Store this Bridge's interface functions, based on its
+			 * version. */
+			store_interface_fxns(drv_fxns,
+						&dev_obj->bridge_interface);
+
+			/* Call fxn_dev_create() to get the Bridge's device
+			 * context handle. */
+			status = (dev_obj->bridge_interface.pfn_dev_create)
+			    (&dev_obj->hbridge_context, dev_obj,
+			     host_res);
+			/* Assert bridge_dev_create()'s ensure clause: */
+			DBC_ASSERT(DSP_FAILED(status)
+				   || (dev_obj->hbridge_context != NULL));
+		} else {
+			status = -ENOMEM;
+		}
+	}
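+	/* Bring up the remaining per-device managers in order: COD manager,
+	 * channel manager, shared-memory (CMM) manager, IO manager, DEH
+	 * manager and finally the dynamic memory (DMM) manager. */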
+	/* Attempt to create the COD manager for this device: */
+	if (DSP_SUCCEEDED(status))
+		status = init_cod_mgr(dev_obj);
+
+	/* Attempt to create the channel manager for this device: */
+	if (DSP_SUCCEEDED(status)) {
+		mgr_attrs.max_channels = CHNL_MAXCHANNELS;
+		io_mgr_attrs.birq = host_res->birq_registers;
+		io_mgr_attrs.irq_shared =
+		    (host_res->birq_attrib & CFG_IRQSHARED);
+		io_mgr_attrs.word_size = DSPWORDSIZE;
+		mgr_attrs.word_size = DSPWORDSIZE;
+		num_windows = host_res->num_mem_windows;
+		if (num_windows) {
+			/* Assume last memory window is for CHNL */
+			io_mgr_attrs.shm_base = host_res->dw_mem_base[1] +
+			    host_res->dw_offset_for_monitor;
+			io_mgr_attrs.usm_length =
+			    host_res->dw_mem_length[1] -
+			    host_res->dw_offset_for_monitor;
+		} else {
+			io_mgr_attrs.shm_base = 0;
+			io_mgr_attrs.usm_length = 0;
+			pr_err("%s: No memory reserved for shared structures\n",
+			       __func__);
+		}
+		status = chnl_create(&dev_obj->hchnl_mgr, dev_obj, &mgr_attrs);
+		if (status == -ENOSYS) {
+			/* It's OK for a device not to have a channel
+			 * manager: */
+			status = 0;
+		}
+		/* Create CMM mgr even if Msg Mgr not impl. */
+		status = cmm_create(&dev_obj->hcmm_mgr,
+				    (struct dev_object *)dev_obj, NULL);
+		/* Only create IO manager if we have a channel manager */
+		if (DSP_SUCCEEDED(status) && dev_obj->hchnl_mgr) {
+			status = io_create(&dev_obj->hio_mgr, dev_obj,
+					   &io_mgr_attrs);
+		}
+		/* Only create DEH manager if we have an IO manager */
+		if (DSP_SUCCEEDED(status)) {
+			/* Instantiate the DEH module */
+			status = (*dev_obj->bridge_interface.pfn_deh_create)
+			    (&dev_obj->hdeh_mgr, dev_obj);
+		}
+		/* Create DMM mgr . */
+		status = dmm_create(&dev_obj->dmm_mgr,
+				    (struct dev_object *)dev_obj, NULL);
+	}
+	/* Add the new DEV_Object to the global list: */
+	if (DSP_SUCCEEDED(status)) {
+		lst_init_elem(&dev_obj->link);
+		status = drv_insert_dev_object(hdrv_obj, dev_obj);
+	}
+	/* Create the Processor List */
+	if (DSP_SUCCEEDED(status)) {
+		dev_obj->proc_list = kzalloc(sizeof(struct lst_list),
+							GFP_KERNEL);
+		if (!(dev_obj->proc_list))
+			status = -ENOMEM;
+		else
+			INIT_LIST_HEAD(&dev_obj->proc_list->head);
+	}
+leave:
+	/*  If all went well, return a handle to the dev object;
+	 *  else, cleanup and return NULL in the OUT parameter. */
+	if (DSP_SUCCEEDED(status)) {
+		*phDevObject = dev_obj;
+	} else {
+		if (dev_obj) {
+			kfree(dev_obj->proc_list);
+			if (dev_obj->cod_mgr)
+				cod_delete(dev_obj->cod_mgr);
+			if (dev_obj->dmm_mgr)
+				dmm_destroy(dev_obj->dmm_mgr);
+			kfree(dev_obj);
+		}
+
+		*phDevObject = NULL;
+	}
+
+	DBC_ENSURE((DSP_SUCCEEDED(status) && *phDevObject) ||
+		   (DSP_FAILED(status) && !*phDevObject));
+	return status;
+}
+
+/*
+ *  ======== dev_create2 ========
+ *  Purpose:
+ *      After successful loading of the image from api_init_complete2
+ *      (PROC Auto_Start) or proc_load this fxn is called. This creates
+ *      the Node Manager and updates the DEV Object.
+ */
+int dev_create2(struct dev_object *hdev_obj)
+{
+	int status = 0;
+	struct dev_object *dev_obj = hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(hdev_obj);
+
+	/* There can be only one Node Manager per DEV object */
+	DBC_ASSERT(!dev_obj->hnode_mgr);
+	status = node_create_mgr(&dev_obj->hnode_mgr, hdev_obj);
+	if (DSP_FAILED(status))
+		dev_obj->hnode_mgr = NULL;
+
+	DBC_ENSURE((DSP_SUCCEEDED(status) && dev_obj->hnode_mgr != NULL)
+		   || (DSP_FAILED(status) && dev_obj->hnode_mgr == NULL));
+	return status;
+}
+
+/*
+ *  ======== dev_destroy2 ========
+ *  Purpose:
+ *      Destroys the Node manager for this device.
+ */
+int dev_destroy2(struct dev_object *hdev_obj)
+{
+	int status = 0;
+	struct dev_object *dev_obj = hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(hdev_obj);
+
+	if (dev_obj->hnode_mgr) {
+		if (DSP_FAILED(node_delete_mgr(dev_obj->hnode_mgr)))
+			status = -EPERM;
+		else
+			dev_obj->hnode_mgr = NULL;
+
+	}
+
+	DBC_ENSURE((DSP_SUCCEEDED(status) && dev_obj->hnode_mgr == NULL) ||
+		   DSP_FAILED(status));
+	return status;
+}
+
+/*
+ *  ======== dev_destroy_device ========
+ *  Purpose:
+ *      Destroys the channel manager for this device, if any, calls
+ *      bridge_dev_destroy(), and then attempts to unload the Bridge module.
+ */
+int dev_destroy_device(struct dev_object *hdev_obj)
+{
+	int status = 0;
+	struct dev_object *dev_obj = hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+
+	if (hdev_obj) {
+		if (dev_obj->cod_mgr) {
+			cod_delete(dev_obj->cod_mgr);
+			dev_obj->cod_mgr = NULL;
+		}
+
+		if (dev_obj->hnode_mgr) {
+			node_delete_mgr(dev_obj->hnode_mgr);
+			dev_obj->hnode_mgr = NULL;
+		}
+
+		/* Free the io, channel, and message managers for this board: */
+		if (dev_obj->hio_mgr) {
+			io_destroy(dev_obj->hio_mgr);
+			dev_obj->hio_mgr = NULL;
+		}
+		if (dev_obj->hchnl_mgr) {
+			chnl_destroy(dev_obj->hchnl_mgr);
+			dev_obj->hchnl_mgr = NULL;
+		}
+		if (dev_obj->hmsg_mgr) {
+			msg_delete(dev_obj->hmsg_mgr);
+			dev_obj->hmsg_mgr = NULL;
+		}
+
+		if (dev_obj->hdeh_mgr) {
+			/* Uninitialize DEH module. */
+			(*dev_obj->bridge_interface.pfn_deh_destroy)
+			    (dev_obj->hdeh_mgr);
+			dev_obj->hdeh_mgr = NULL;
+		}
+		if (dev_obj->hcmm_mgr) {
+			cmm_destroy(dev_obj->hcmm_mgr, true);
+			dev_obj->hcmm_mgr = NULL;
+		}
+
+		if (dev_obj->dmm_mgr) {
+			dmm_destroy(dev_obj->dmm_mgr);
+			dev_obj->dmm_mgr = NULL;
+		}
+
+		/* Call the driver's bridge_dev_destroy() function: */
+		/* A valid bridge context is required by bridge_dev_destroy() */
+		if (dev_obj->hbridge_context) {
+			status = (*dev_obj->bridge_interface.pfn_dev_destroy)
+			    (dev_obj->hbridge_context);
+			dev_obj->hbridge_context = NULL;
+		} else
+			status = -EPERM;
+		if (DSP_SUCCEEDED(status)) {
+			kfree(dev_obj->proc_list);
+			dev_obj->proc_list = NULL;
+
+			/* Remove this DEV_Object from the global list: */
+			drv_remove_dev_object(dev_obj->hdrv_obj, dev_obj);
+			/* Free The library * LDR_FreeModule
+			 * (dev_obj->module_obj); */
+			/* Free this dev object: */
+			kfree(dev_obj);
+			dev_obj = NULL;
+		}
+	} else {
+		status = -EFAULT;
+	}
+
+	return status;
+}
+
+/*
+ *  ======== dev_get_chnl_mgr ========
+ *  Purpose:
+ *      Retrieve the handle to the channel manager created for this
+ *      device.
+ */
+int dev_get_chnl_mgr(struct dev_object *hdev_obj,
+			    OUT struct chnl_mgr **phMgr)
+{
+	int status = 0;
+	struct dev_object *dev_obj = hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phMgr != NULL);
+
+	if (hdev_obj) {
+		*phMgr = dev_obj->hchnl_mgr;
+	} else {
+		*phMgr = NULL;
+		status = -EFAULT;
+	}
+
+	DBC_ENSURE(DSP_SUCCEEDED(status) || ((phMgr != NULL) &&
+					     (*phMgr == NULL)));
+	return status;
+}
+
+/*
+ *  ======== dev_get_cmm_mgr ========
+ *  Purpose:
+ *      Retrieve the handle to the shared memory manager created for this
+ *      device.
+ */
+int dev_get_cmm_mgr(struct dev_object *hdev_obj,
+			   OUT struct cmm_object **phMgr)
+{
+	int status = 0;
+	struct dev_object *dev_obj = hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phMgr != NULL);
+
+	if (hdev_obj) {
+		*phMgr = dev_obj->hcmm_mgr;
+	} else {
+		*phMgr = NULL;
+		status = -EFAULT;
+	}
+
+	DBC_ENSURE(DSP_SUCCEEDED(status) || ((phMgr != NULL) &&
+					     (*phMgr == NULL)));
+	return status;
+}
+
+/*
+ *  ======== dev_get_dmm_mgr ========
+ *  Purpose:
+ *      Retrieve the handle to the dynamic memory manager created for this
+ *      device.
+ */
+int dev_get_dmm_mgr(struct dev_object *hdev_obj,
+			   OUT struct dmm_object **phMgr)
+{
+	int status = 0;
+	struct dev_object *dev_obj = hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phMgr != NULL);
+
+	if (hdev_obj) {
+		*phMgr = dev_obj->dmm_mgr;
+	} else {
+		*phMgr = NULL;
+		status = -EFAULT;
+	}
+
+	DBC_ENSURE(DSP_SUCCEEDED(status) || ((phMgr != NULL) &&
+					     (*phMgr == NULL)));
+	return status;
+}
+
+/*
+ *  ======== dev_get_cod_mgr ========
+ *  Purpose:
+ *      Retrieve the COD manager created for this device.
+ */
+int dev_get_cod_mgr(struct dev_object *hdev_obj,
+			   OUT struct cod_manager **phCodMgr)
+{
+	int status = 0;
+	struct dev_object *dev_obj = hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phCodMgr != NULL);
+
+	if (hdev_obj) {
+		*phCodMgr = dev_obj->cod_mgr;
+	} else {
+		*phCodMgr = NULL;
+		status = -EFAULT;
+	}
+
+	DBC_ENSURE(DSP_SUCCEEDED(status) || ((phCodMgr != NULL) &&
+					     (*phCodMgr == NULL)));
+	return status;
+}
+
+/*
+ *  ========= dev_get_deh_mgr ========
+ */
+int dev_get_deh_mgr(struct dev_object *hdev_obj,
+			   OUT struct deh_mgr **phDehMgr)
+{
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phDehMgr != NULL);
+	DBC_REQUIRE(hdev_obj);
+	if (hdev_obj) {
+		*phDehMgr = hdev_obj->hdeh_mgr;
+	} else {
+		*phDehMgr = NULL;
+		status = -EFAULT;
+	}
+	return status;
+}
+
+/*
+ *  ======== dev_get_dev_node ========
+ *  Purpose:
+ *      Retrieve the platform specific device ID for this device.
+ */
+int dev_get_dev_node(struct dev_object *hdev_obj,
+			    OUT struct cfg_devnode **phDevNode)
+{
+	int status = 0;
+	struct dev_object *dev_obj = hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phDevNode != NULL);
+
+	if (hdev_obj) {
+		*phDevNode = dev_obj->dev_node_obj;
+	} else {
+		*phDevNode = NULL;
+		status = -EFAULT;
+	}
+
+	DBC_ENSURE(DSP_SUCCEEDED(status) || ((phDevNode != NULL) &&
+					     (*phDevNode == NULL)));
+	return status;
+}
+
+/*
+ *  ======== dev_get_first ========
+ *  Purpose:
+ *      Retrieve the first Device Object handle from an internal linked list
+ *      of DEV_OBJECTs maintained by DEV.
+ */
+struct dev_object *dev_get_first(void)
+{
+	struct dev_object *dev_obj = NULL;
+
+	dev_obj = (struct dev_object *)drv_get_first_dev_object();
+
+	return dev_obj;
+}
+
+/*
+ *  ======== dev_get_intf_fxns ========
+ *  Purpose:
+ *      Retrieve the Bridge interface function structure for the loaded driver.
+ *      ppIntfFxns != NULL.
+ */
+int dev_get_intf_fxns(struct dev_object *hdev_obj,
+			     OUT struct bridge_drv_interface **ppIntfFxns)
+{
+	int status = 0;
+	struct dev_object *dev_obj = hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(ppIntfFxns != NULL);
+
+	if (hdev_obj) {
+		*ppIntfFxns = &dev_obj->bridge_interface;
+	} else {
+		*ppIntfFxns = NULL;
+		status = -EFAULT;
+	}
+
+	DBC_ENSURE(DSP_SUCCEEDED(status) || ((ppIntfFxns != NULL) &&
+					     (*ppIntfFxns == NULL)));
+	return status;
+}
+
+/*
+ *  ========= dev_get_io_mgr ========
+ */
+int dev_get_io_mgr(struct dev_object *hdev_obj,
+			  OUT struct io_mgr **phIOMgr)
+{
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phIOMgr != NULL);
+	DBC_REQUIRE(hdev_obj);
+
+	if (hdev_obj) {
+		*phIOMgr = hdev_obj->hio_mgr;
+	} else {
+		*phIOMgr = NULL;
+		status = -EFAULT;
+	}
+
+	return status;
+}
+
+/*
+ *  ======== dev_get_next ========
+ *  Purpose:
+ *      Retrieve the next Device Object handle from an internal linked list
+ *      of DEV_OBJECTs maintained by DEV, after having previously called
+ *      dev_get_first() and zero or more dev_get_next() calls.
+ */
+struct dev_object *dev_get_next(struct dev_object *hdev_obj)
+{
+	struct dev_object *next_dev_object = NULL;
+
+	if (hdev_obj) {
+		next_dev_object = (struct dev_object *)
+		    drv_get_next_dev_object((u32) hdev_obj);
+	}
+
+	return next_dev_object;
+}
+
+/*
+ *  ========= dev_get_msg_mgr ========
+ */
+void dev_get_msg_mgr(struct dev_object *hdev_obj, OUT struct msg_mgr **phMsgMgr)
+{
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phMsgMgr != NULL);
+	DBC_REQUIRE(hdev_obj);
+
+	*phMsgMgr = hdev_obj->hmsg_mgr;
+}
+
+/*
+ *  ======== dev_get_node_manager ========
+ *  Purpose:
+ *      Retrieve the Node Manager handle.
+ */
+int dev_get_node_manager(struct dev_object *hdev_obj,
+				OUT struct node_mgr **phNodeMgr)
+{
+	int status = 0;
+	struct dev_object *dev_obj = hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phNodeMgr != NULL);
+
+	if (hdev_obj) {
+		*phNodeMgr = dev_obj->hnode_mgr;
+	} else {
+		*phNodeMgr = NULL;
+		status = -EFAULT;
+	}
+
+	DBC_ENSURE(DSP_SUCCEEDED(status) || ((phNodeMgr != NULL) &&
+					     (*phNodeMgr == NULL)));
+	return status;
+}
+
+/*
+ *  ======== dev_get_symbol ========
+ */
+int dev_get_symbol(struct dev_object *hdev_obj,
+			  IN CONST char *pstrSym, OUT u32 * pul_value)
+{
+	int status = 0;
+	struct cod_manager *cod_mgr;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(pstrSym != NULL && pul_value != NULL);
+
+	if (hdev_obj) {
+		status = dev_get_cod_mgr(hdev_obj, &cod_mgr);
+		if (cod_mgr)
+			status = cod_get_sym_value(cod_mgr, (char *)pstrSym,
+						   pul_value);
+		else
+			status = -EFAULT;
+	}
+
+	return status;
+}
+
+/*
+ *  ======== dev_get_bridge_context ========
+ *  Purpose:
+ *      Retrieve the Bridge Context handle, as returned by the
+ *      bridge_dev_create fxn.
+ */
+int dev_get_bridge_context(struct dev_object *hdev_obj,
+			       OUT struct bridge_dev_context **phbridge_context)
+{
+	int status = 0;
+	struct dev_object *dev_obj = hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phbridge_context != NULL);
+
+	if (hdev_obj) {
+		*phbridge_context = dev_obj->hbridge_context;
+	} else {
+		*phbridge_context = NULL;
+		status = -EFAULT;
+	}
+
+	DBC_ENSURE(DSP_SUCCEEDED(status) || ((phbridge_context != NULL) &&
+					     (*phbridge_context == NULL)));
+	return status;
+}
+
+/*
+ *  ======== dev_exit ========
+ *  Purpose:
+ *      Decrement reference count, and free resources when reference count is
+ *      0.
+ */
+void dev_exit(void)
+{
+	DBC_REQUIRE(refs > 0);
+
+	refs--;
+
+	if (refs == 0) {
+		cmm_exit();
+		dmm_exit();
+	}
+
+	DBC_ENSURE(refs >= 0);
+}
+
+/*
+ *  ======== dev_init ========
+ *  Purpose:
+ *      Initialize DEV's private state, keeping a reference count on each call.
+ */
+bool dev_init(void)
+{
+	bool cmm_ret, dmm_ret, ret = true;
+
+	DBC_REQUIRE(refs >= 0);
+
+	if (refs == 0) {
+		cmm_ret = cmm_init();
+		dmm_ret = dmm_init();
+
+		ret = cmm_ret && dmm_ret;
+
+		if (!ret) {
+			if (cmm_ret)
+				cmm_exit();
+
+			if (dmm_ret)
+				dmm_exit();
+
+		}
+	}
+
+	if (ret)
+		refs++;
+
+	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+
+	return ret;
+}
+
+/*
+ *  ======== dev_notify_clients ========
+ *  Purpose:
+ *      Notify all clients of this device of a change in device status.
+ */
+int dev_notify_clients(struct dev_object *hdev_obj, u32 ulStatus)
+{
+	int status = 0;
+
+	struct dev_object *dev_obj = hdev_obj;
+	void *proc_obj;
+
+	for (proc_obj = (void *)lst_first(dev_obj->proc_list);
+	     proc_obj != NULL;
+	     proc_obj = (void *)lst_next(dev_obj->proc_list,
+					 (struct list_head *)proc_obj))
+		proc_notify_clients(proc_obj, (u32) ulStatus);
+
+	return status;
+}
+
+/*
+ *  ======== dev_remove_device ========
+ */
+int dev_remove_device(struct cfg_devnode *dev_node_obj)
+{
+	struct dev_object *hdev_obj;	/* handle to device object */
+	int status = 0;
+	struct dev_object *dev_obj;
+
+	/* Retrieve the device object handle originally stored with
+	 * the dev_node: */
+	status = cfg_get_dev_object(dev_node_obj, (u32 *) &hdev_obj);
+	if (DSP_SUCCEEDED(status)) {
+		/* Remove the Processor List */
+		dev_obj = (struct dev_object *)hdev_obj;
+		/* Destroy the device object. */
+		status = dev_destroy_device(hdev_obj);
+	}
+
+	return status;
+}
+
+/*
+ *  ======== dev_set_chnl_mgr ========
+ *  Purpose:
+ *      Set the channel manager for this device.
+ */
+int dev_set_chnl_mgr(struct dev_object *hdev_obj,
+			    struct chnl_mgr *hmgr)
+{
+	int status = 0;
+	struct dev_object *dev_obj = hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+
+	if (hdev_obj)
+		dev_obj->hchnl_mgr = hmgr;
+	else
+		status = -EFAULT;
+
+	DBC_ENSURE(DSP_FAILED(status) || (dev_obj->hchnl_mgr == hmgr));
+	return status;
+}
+
+/*
+ *  ======== dev_set_msg_mgr ========
+ *  Purpose:
+ *      Set the message manager for this device.
+ */
+void dev_set_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr *hmgr)
+{
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(hdev_obj);
+
+	hdev_obj->hmsg_mgr = hmgr;
+}
+
+/*
+ *  ======== dev_start_device ========
+ *  Purpose:
+ *      Initializes the new device with the BRIDGE environment.
+ */
+int dev_start_device(struct cfg_devnode *dev_node_obj)
+{
+	struct dev_object *hdev_obj = NULL;	/* handle to Bridge device */
+	/* Bridge driver filename */
+	char bridge_file_name[CFG_MAXSEARCHPATHLEN] = "UMA";
+	int status;
+	struct mgr_object *hmgr_obj = NULL;
+
+	DBC_REQUIRE(refs > 0);
+
+	/* Given all resources, create a device object. */
+	status = dev_create_device(&hdev_obj, bridge_file_name,
+				   dev_node_obj);
+	if (DSP_SUCCEEDED(status)) {
+		/* Store away the hdev_obj with the DEVNODE */
+		status = cfg_set_dev_object(dev_node_obj, (u32) hdev_obj);
+		if (DSP_FAILED(status)) {
+			/* Clean up */
+			dev_destroy_device(hdev_obj);
+			hdev_obj = NULL;
+		}
+	}
+	if (DSP_SUCCEEDED(status)) {
+		/* Create the Manager Object */
+		status = mgr_create(&hmgr_obj, dev_node_obj);
+	}
+	if (DSP_FAILED(status)) {
+		if (hdev_obj)
+			dev_destroy_device(hdev_obj);
+
+		/* Ensure the device extension is NULL */
+		cfg_set_dev_object(dev_node_obj, 0L);
+	}
+
+	return status;
+}
+
+/*
+ *  ======== fxn_not_implemented ========
+ *  Purpose:
+ *      Takes the place of a Bridge Null Function.
+ *  Parameters:
+ *      Multiple, optional.
+ *  Returns:
+ *      -ENOSYS:   Always.
+ */
+static int fxn_not_implemented(int arg, ...)
+{
+	return -ENOSYS;
+}
+
+/*
+ *  ======== init_cod_mgr ========
+ *  Purpose:
+ *      Create a COD manager for this device.
+ *  Parameters:
+ *      dev_obj:             Pointer to device object created with
+ *                              dev_create_device()
+ *  Returns:
+ *      0:                Success.
+ *      -EFAULT:            Invalid hdev_obj.
+ *  Requires:
+ *      Should only be called once by dev_create_device() for a given DevObject.
+ *  Ensures:
+ */
+static int init_cod_mgr(struct dev_object *dev_obj)
+{
+	int status = 0;
+	char *sz_dummy_file = "dummy";
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(!dev_obj || (dev_obj->cod_mgr == NULL));
+
+	status = cod_create(&dev_obj->cod_mgr, sz_dummy_file, NULL);
+
+	return status;
+}
+
+/*
+ *  ======== dev_insert_proc_object ========
+ *  Purpose:
+ *      Insert a ProcObject into the list maintained by DEV.
+ *  Parameters:
+ *      p_proc_object:        Ptr to ProcObject to insert.
+ *      dev_obj:         Ptr to Dev Object where the list is.
+ *      pbAlreadyAttached:  Set to true if a processor was already attached.
+ *  Returns:
+ *      0:           If successful.
+ *  Requires:
+ *      List Exists
+ *      hdev_obj is Valid handle
+ *      DEV Initialized
+ *      pbAlreadyAttached != NULL
+ *      proc_obj != 0
+ *  Ensures:
+ *      0 and List is not Empty.
+ */
+int dev_insert_proc_object(struct dev_object *hdev_obj,
+				  u32 proc_obj, OUT bool *pbAlreadyAttached)
+{
+	int status = 0;
+	struct dev_object *dev_obj = (struct dev_object *)hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(dev_obj);
+	DBC_REQUIRE(proc_obj != 0);
+	DBC_REQUIRE(dev_obj->proc_list != NULL);
+	DBC_REQUIRE(pbAlreadyAttached != NULL);
+	if (!LST_IS_EMPTY(dev_obj->proc_list))
+		*pbAlreadyAttached = true;
+
+	/* Add the ProcObject to the tail of the list. */
+	lst_put_tail(dev_obj->proc_list, (struct list_head *)proc_obj);
+
+	DBC_ENSURE(DSP_SUCCEEDED(status) && !LST_IS_EMPTY(dev_obj->proc_list));
+
+	return status;
+}
+
+/*
+ *  ======== dev_remove_proc_object ========
+ *  Purpose:
+ *      Search for and remove a Proc object from the given list maintained
+ *      by the DEV
+ *  Parameters:
+ *      p_proc_object:        Ptr to ProcObject to remove.
+ *      dev_obj:         Ptr to Dev Object where the list is.
+ *  Returns:
+ *      0:            If successful.
+ *  Requires:
+ *      List exists and is not empty
+ *      proc_obj != 0
+ *      hdev_obj is a valid Dev handle.
+ *  Ensures:
+ *  Details:
+ *      List will be deleted when the DEV is destroyed.
+ */
+int dev_remove_proc_object(struct dev_object *hdev_obj, u32 proc_obj)
+{
+	int status = -EPERM;
+	struct list_head *cur_elem;
+	struct dev_object *dev_obj = (struct dev_object *)hdev_obj;
+
+	DBC_REQUIRE(dev_obj);
+	DBC_REQUIRE(proc_obj != 0);
+	DBC_REQUIRE(dev_obj->proc_list != NULL);
+	DBC_REQUIRE(!LST_IS_EMPTY(dev_obj->proc_list));
+
+	/* Search list for proc_obj: */
+	for (cur_elem = lst_first(dev_obj->proc_list); cur_elem != NULL;
+	     cur_elem = lst_next(dev_obj->proc_list, cur_elem)) {
+		/* If found, remove it. */
+		if ((u32) cur_elem == proc_obj) {
+			lst_remove_elem(dev_obj->proc_list, cur_elem);
+			status = 0;
+			break;
+		}
+	}
+
+	return status;
+}
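
For reference, a minimal sketch of how a caller is expected to pair the two
list helpers above; the proc object pointer and the device handle are assumed
to come from the caller, and the u32 cast mirrors the existing API:

	bool already_attached = false;

	/* Attach: record the processor object on the device's proc list. */
	dev_insert_proc_object(hdev_obj, (u32)p_proc_object,
			       &already_attached);
	/* ... processor in use ... */
	/* Detach: take the same object off the list again. */
	dev_remove_proc_object(hdev_obj, (u32)p_proc_object);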
+
+int dev_get_dev_type(struct dev_object *hdevObject, u8 *dev_type)
+{
+	int status = 0;
+	struct dev_object *dev_obj = (struct dev_object *)hdevObject;
+
+	*dev_type = dev_obj->dev_type;
+
+	return status;
+}
+
+/*
+ *  ======== store_interface_fxns ========
+ *  Purpose:
+ *      Copy the Bridge's interface functions into the device object,
+ *      ensuring that fxn_not_implemented() is set for:
+ *
+ *      1. All Bridge function pointers which are NULL; and
+ *      2. All function slots in the struct dev_object structure which have no
+ *         corresponding slots in the Bridge's interface, because the Bridge
+ *         is of an *older* version.
+ *  Parameters:
+ *      intf_fxns:      Interface fxn Structure of the Bridge's Dev Object.
+ *      drv_fxns:      Interface Fxns offered by the Bridge during DEV_Create().
+ *  Returns:
+ *  Requires:
+ *      Input pointers are valid.
+ *      Bridge driver is *not* written for a newer DSP API.
+ *  Ensures:
+ *      All function pointers in the dev object's fxn interface are not NULL.
+ */
+static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
+				 OUT struct bridge_drv_interface *intf_fxns)
+{
+	u32 bridge_version;
+
+	/* Local helper macro: */
+#define  STORE_FXN(cast, pfn) \
+    (intf_fxns->pfn = ((drv_fxns->pfn != NULL) ? drv_fxns->pfn : \
+    (cast)fxn_not_implemented))
+
+	DBC_REQUIRE(intf_fxns != NULL);
+	DBC_REQUIRE(drv_fxns != NULL);
+	DBC_REQUIRE(MAKEVERSION(drv_fxns->brd_api_major_version,
+			drv_fxns->brd_api_minor_version) <= BRD_API_VERSION);
+	bridge_version = MAKEVERSION(drv_fxns->brd_api_major_version,
+				     drv_fxns->brd_api_minor_version);
+	intf_fxns->brd_api_major_version = drv_fxns->brd_api_major_version;
+	intf_fxns->brd_api_minor_version = drv_fxns->brd_api_minor_version;
+	/* Install functions up to DSP API version .80 (first alpha): */
+	if (bridge_version > 0) {
+		STORE_FXN(fxn_dev_create, pfn_dev_create);
+		STORE_FXN(fxn_dev_destroy, pfn_dev_destroy);
+		STORE_FXN(fxn_dev_ctrl, pfn_dev_cntrl);
+		STORE_FXN(fxn_brd_monitor, pfn_brd_monitor);
+		STORE_FXN(fxn_brd_start, pfn_brd_start);
+		STORE_FXN(fxn_brd_stop, pfn_brd_stop);
+		STORE_FXN(fxn_brd_status, pfn_brd_status);
+		STORE_FXN(fxn_brd_read, pfn_brd_read);
+		STORE_FXN(fxn_brd_write, pfn_brd_write);
+		STORE_FXN(fxn_brd_setstate, pfn_brd_set_state);
+		STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy);
+		STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write);
+		STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map);
+		STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map);
+		STORE_FXN(fxn_chnl_create, pfn_chnl_create);
+		STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy);
+		STORE_FXN(fxn_chnl_open, pfn_chnl_open);
+		STORE_FXN(fxn_chnl_close, pfn_chnl_close);
+		STORE_FXN(fxn_chnl_addioreq, pfn_chnl_add_io_req);
+		STORE_FXN(fxn_chnl_getioc, pfn_chnl_get_ioc);
+		STORE_FXN(fxn_chnl_cancelio, pfn_chnl_cancel_io);
+		STORE_FXN(fxn_chnl_flushio, pfn_chnl_flush_io);
+		STORE_FXN(fxn_chnl_getinfo, pfn_chnl_get_info);
+		STORE_FXN(fxn_chnl_getmgrinfo, pfn_chnl_get_mgr_info);
+		STORE_FXN(fxn_chnl_idle, pfn_chnl_idle);
+		STORE_FXN(fxn_chnl_registernotify, pfn_chnl_register_notify);
+		STORE_FXN(fxn_deh_create, pfn_deh_create);
+		STORE_FXN(fxn_deh_destroy, pfn_deh_destroy);
+		STORE_FXN(fxn_deh_notify, pfn_deh_notify);
+		STORE_FXN(fxn_deh_registernotify, pfn_deh_register_notify);
+		STORE_FXN(fxn_deh_getinfo, pfn_deh_get_info);
+		STORE_FXN(fxn_io_create, pfn_io_create);
+		STORE_FXN(fxn_io_destroy, pfn_io_destroy);
+		STORE_FXN(fxn_io_onloaded, pfn_io_on_loaded);
+		STORE_FXN(fxn_io_getprocload, pfn_io_get_proc_load);
+		STORE_FXN(fxn_msg_create, pfn_msg_create);
+		STORE_FXN(fxn_msg_createqueue, pfn_msg_create_queue);
+		STORE_FXN(fxn_msg_delete, pfn_msg_delete);
+		STORE_FXN(fxn_msg_deletequeue, pfn_msg_delete_queue);
+		STORE_FXN(fxn_msg_get, pfn_msg_get);
+		STORE_FXN(fxn_msg_put, pfn_msg_put);
+		STORE_FXN(fxn_msg_registernotify, pfn_msg_register_notify);
+		STORE_FXN(fxn_msg_setqueueid, pfn_msg_set_queue_id);
+	}
+	/* Add code for any additional functions in newer Bridge versions here */
+	/* Ensure postcondition: */
+	DBC_ENSURE(intf_fxns->pfn_dev_create != NULL);
+	DBC_ENSURE(intf_fxns->pfn_dev_destroy != NULL);
+	DBC_ENSURE(intf_fxns->pfn_dev_cntrl != NULL);
+	DBC_ENSURE(intf_fxns->pfn_brd_monitor != NULL);
+	DBC_ENSURE(intf_fxns->pfn_brd_start != NULL);
+	DBC_ENSURE(intf_fxns->pfn_brd_stop != NULL);
+	DBC_ENSURE(intf_fxns->pfn_brd_status != NULL);
+	DBC_ENSURE(intf_fxns->pfn_brd_read != NULL);
+	DBC_ENSURE(intf_fxns->pfn_brd_write != NULL);
+	DBC_ENSURE(intf_fxns->pfn_chnl_create != NULL);
+	DBC_ENSURE(intf_fxns->pfn_chnl_destroy != NULL);
+	DBC_ENSURE(intf_fxns->pfn_chnl_open != NULL);
+	DBC_ENSURE(intf_fxns->pfn_chnl_close != NULL);
+	DBC_ENSURE(intf_fxns->pfn_chnl_add_io_req != NULL);
+	DBC_ENSURE(intf_fxns->pfn_chnl_get_ioc != NULL);
+	DBC_ENSURE(intf_fxns->pfn_chnl_cancel_io != NULL);
+	DBC_ENSURE(intf_fxns->pfn_chnl_flush_io != NULL);
+	DBC_ENSURE(intf_fxns->pfn_chnl_get_info != NULL);
+	DBC_ENSURE(intf_fxns->pfn_chnl_get_mgr_info != NULL);
+	DBC_ENSURE(intf_fxns->pfn_chnl_idle != NULL);
+	DBC_ENSURE(intf_fxns->pfn_chnl_register_notify != NULL);
+	DBC_ENSURE(intf_fxns->pfn_deh_create != NULL);
+	DBC_ENSURE(intf_fxns->pfn_deh_destroy != NULL);
+	DBC_ENSURE(intf_fxns->pfn_deh_notify != NULL);
+	DBC_ENSURE(intf_fxns->pfn_deh_register_notify != NULL);
+	DBC_ENSURE(intf_fxns->pfn_deh_get_info != NULL);
+	DBC_ENSURE(intf_fxns->pfn_io_create != NULL);
+	DBC_ENSURE(intf_fxns->pfn_io_destroy != NULL);
+	DBC_ENSURE(intf_fxns->pfn_io_on_loaded != NULL);
+	DBC_ENSURE(intf_fxns->pfn_io_get_proc_load != NULL);
+	DBC_ENSURE(intf_fxns->pfn_msg_set_queue_id != NULL);
+
+#undef  STORE_FXN
+}
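
The STORE_FXN() idiom above is just a guarded pointer copy: take the driver's
hook if one was supplied, otherwise substitute the stub so callers never have
to test for NULL. A generic sketch of the same pattern (the names here are
illustrative, not part of the patch):

	/* Illustrative only: copy a driver hook or fall back to a stub. */
	typedef int (*hook_fxn)(int arg, ...);

	static int stub_not_implemented(int arg, ...)
	{
		return -ENOSYS;
	}

	static void store_hook(hook_fxn *dst, hook_fxn src)
	{
		*dst = src ? src : stub_not_implemented;
	}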
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c
new file mode 100644
index 0000000..c8abce8
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/dmm.c
@@ -0,0 +1,533 @@
+/*
+ * dmm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address
+ * space that can be directly mapped to any MPU buffer or memory region.
+ *
+ * Notes:
+ *   Region: Generic memory entity having a start address and a size
+ *   Chunk:  Reserved region
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*  ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/sync.h>
+
+/*  ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+#include <dspbridge/proc.h>
+
+/*  ----------------------------------- This */
+#include <dspbridge/dmm.h>
+
+/*  ----------------------------------- Defines, Data Structures, Typedefs */
+#define DMM_ADDR_VIRTUAL(a) \
+	(((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
+	dyn_mem_map_beg)
+#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
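
As a quick illustration of the two macros above, taking PG_SIZE4K as 4 KB and
using a made-up dyn_mem_map_beg (the real value is set by dmm_create_tables()
below):

	u32 beg  = 0x11000000;               /* hypothetical dyn_mem_map_beg */
	u32 addr = beg + 3 * PG_SIZE4K;      /* third 4 KB page past 'beg'   */
	u32 idx  = (addr - beg) / PG_SIZE4K; /* DMM_ADDR_TO_INDEX(addr) == 3 */
	/* DMM_ADDR_VIRTUAL(virtual_mapping_table + idx) recovers 'addr'.    */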
+
+/* DMM Mgr */
+struct dmm_object {
+	/* Dmm Lock is used to serialize access mem manager for
+	 * multi-threads. */
+	spinlock_t dmm_lock;	/* Lock to access dmm mgr */
+};
+
+/*  ----------------------------------- Globals */
+static u32 refs;		/* module reference count */
+struct map_page {
+	u32 region_size:15;
+	u32 mapped_size:15;
+	u32 reserved:1;
+	u32 mapped:1;
+};
+
+/*  Create the free list */
+static struct map_page *virtual_mapping_table;
+static u32 free_region;		/* The index of free region */
+static u32 free_size;
+static u32 dyn_mem_map_beg;	/* The Beginning of dynamic memory mapping */
+static u32 table_size;		/* The size of virt and phys pages tables */
+
+/*  ----------------------------------- Function Prototypes */
+static struct map_page *get_region(u32 addr);
+static struct map_page *get_free_region(u32 aSize);
+static struct map_page *get_mapped_region(u32 aAddr);
+
+/*  ======== dmm_create_tables ========
+ *  Purpose:
+ *      Create table to hold the information of physical address
+ *      the buffer pages that is passed by the user, and the table
+ *      to hold the information of the virtual memory that is reserved
+ *      for DSP.
+ */
+int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
+{
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	int status = 0;
+
+	status = dmm_delete_tables(dmm_obj);
+	if (DSP_SUCCEEDED(status)) {
+		dyn_mem_map_beg = addr;
+		table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
+		/*  Create the free list */
+		virtual_mapping_table = __vmalloc(table_size *
+				sizeof(struct map_page), GFP_KERNEL |
+				__GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+		if (virtual_mapping_table == NULL)
+			status = -ENOMEM;
+		else {
+			/* On successful allocation,
+			 * all entries are zero ('free') */
+			free_region = 0;
+			free_size = table_size * PG_SIZE4K;
+			virtual_mapping_table[0].region_size = table_size;
+		}
+	}
+
+	if (DSP_FAILED(status))
+		pr_err("%s: failure, status 0x%x\n", __func__, status);
+
+	return status;
+}
+
+/*
+ *  ======== dmm_create ========
+ *  Purpose:
+ *      Create a dynamic memory manager object.
+ */
+int dmm_create(OUT struct dmm_object **phDmmMgr,
+		      struct dev_object *hdev_obj,
+		      IN CONST struct dmm_mgrattrs *pMgrAttrs)
+{
+	struct dmm_object *dmm_obj = NULL;
+	int status = 0;
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phDmmMgr != NULL);
+
+	*phDmmMgr = NULL;
+	/* create, zero, and tag a dmm mgr object */
+	dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
+	if (dmm_obj != NULL) {
+		spin_lock_init(&dmm_obj->dmm_lock);
+		*phDmmMgr = dmm_obj;
+	} else {
+		status = -ENOMEM;
+	}
+
+	return status;
+}
+
+/*
+ *  ======== dmm_destroy ========
+ *  Purpose:
+ *      Release the dynamic memory manager resources.
+ */
+int dmm_destroy(struct dmm_object *dmm_mgr)
+{
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	if (dmm_mgr) {
+		status = dmm_delete_tables(dmm_obj);
+		if (DSP_SUCCEEDED(status))
+			kfree(dmm_obj);
+	} else
+		status = -EFAULT;
+
+	return status;
+}
+
+/*
+ *  ======== dmm_delete_tables ========
+ *  Purpose:
+ *      Delete DMM Tables.
+ */
+int dmm_delete_tables(struct dmm_object *dmm_mgr)
+{
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	/* Delete all DMM tables */
+	if (dmm_mgr)
+		vfree(virtual_mapping_table);
+	else
+		status = -EFAULT;
+	return status;
+}
+
+/*
+ *  ======== dmm_exit ========
+ *  Purpose:
+ *      Discontinue usage of module; free resources when reference count
+ *      reaches 0.
+ */
+void dmm_exit(void)
+{
+	DBC_REQUIRE(refs > 0);
+
+	refs--;
+}
+
+/*
+ *  ======== dmm_get_handle ========
+ *  Purpose:
+ *      Return the dynamic memory manager object for this device.
+ *      This is typically called from the client process.
+ */
+int dmm_get_handle(void *hprocessor, OUT struct dmm_object **phDmmMgr)
+{
+	int status = 0;
+	struct dev_object *hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phDmmMgr != NULL);
+	if (hprocessor != NULL)
+		status = proc_get_dev_object(hprocessor, &hdev_obj);
+	else
+		hdev_obj = dev_get_first();	/* default */
+
+	if (DSP_SUCCEEDED(status))
+		status = dev_get_dmm_mgr(hdev_obj, phDmmMgr);
+
+	return status;
+}
+
+/*
+ *  ======== dmm_init ========
+ *  Purpose:
+ *      Initializes private state of DMM module.
+ */
+bool dmm_init(void)
+{
+	bool ret = true;
+
+	DBC_REQUIRE(refs >= 0);
+
+	if (ret)
+		refs++;
+
+	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+
+	virtual_mapping_table = NULL;
+	table_size = 0;
+
+	return ret;
+}
+
+/*
+ *  ======== dmm_map_memory ========
+ *  Purpose:
+ *      Add a mapping block to the reserved chunk. DMM assumes that this block
+ *  will be mapped in the DSP/IVA's address space. DMM returns an error if a
+ *  mapping overlaps another one. This function stores the info that will be
+ *  required later while unmapping the block.
+ */
+int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
+{
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	struct map_page *chunk;
+	int status = 0;
+
+	spin_lock(&dmm_obj->dmm_lock);
+	/* Find the Reserved memory chunk containing the DSP block to
+	 * be mapped */
+	chunk = (struct map_page *)get_region(addr);
+	if (chunk != NULL) {
+		/* Mark the region 'mapped', leave the 'reserved' info as-is */
+		chunk->mapped = true;
+		chunk->mapped_size = (size / PG_SIZE4K);
+	} else
+		status = -ENOENT;
+	spin_unlock(&dmm_obj->dmm_lock);
+
+	dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
+		"chunk %p", __func__, dmm_mgr, addr, size, status, chunk);
+
+	return status;
+}
+
+/*
+ *  ======== dmm_reserve_memory ========
+ *  Purpose:
+ *      Reserve a chunk of virtually contiguous DSP/IVA address space.
+ */
+int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
+			      u32 *prsv_addr)
+{
+	int status = 0;
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	struct map_page *node;
+	u32 rsv_addr = 0;
+	u32 rsv_size = 0;
+
+	spin_lock(&dmm_obj->dmm_lock);
+
+	/* Try to get a DSP chunk from the free list */
+	node = get_free_region(size);
+	if (node != NULL) {
+		/*  DSP chunk of given size is available. */
+		rsv_addr = DMM_ADDR_VIRTUAL(node);
+		/* Calculate the number entries to use */
+		rsv_size = size / PG_SIZE4K;
+		if (rsv_size < node->region_size) {
+			/* Mark remainder of free region */
+			node[rsv_size].mapped = false;
+			node[rsv_size].reserved = false;
+			node[rsv_size].region_size =
+			    node->region_size - rsv_size;
+			node[rsv_size].mapped_size = 0;
+		}
+		/*  get_free_region() will return a first-fit chunk, but we
+		   only use what is requested. */
+		node->mapped = false;
+		node->reserved = true;
+		node->region_size = rsv_size;
+		node->mapped_size = 0;
+		/* Return the chunk's starting address */
+		*prsv_addr = rsv_addr;
+	} else
+			/* DSP chunk of given size is not available */
+		status = -ENOMEM;
+
+	spin_unlock(&dmm_obj->dmm_lock);
+
+	dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
+		"rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
+		prsv_addr, status, rsv_addr, rsv_size);
+
+	return status;
+}
+
+/*
+ *  ======== dmm_un_map_memory ========
+ *  Purpose:
+ *      Remove the mapped block from the reserved chunk.
+ */
+int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
+{
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	struct map_page *chunk;
+	int status = 0;
+
+	spin_lock(&dmm_obj->dmm_lock);
+	chunk = get_mapped_region(addr);
+	if (chunk == NULL)
+		status = -ENOENT;
+
+	if (DSP_SUCCEEDED(status)) {
+		/* Unmap the region */
+		*psize = chunk->mapped_size * PG_SIZE4K;
+		chunk->mapped = false;
+		chunk->mapped_size = 0;
+	}
+	spin_unlock(&dmm_obj->dmm_lock);
+
+	dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
+		"chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);
+
+	return status;
+}
+
+/*
+ *  ======== dmm_un_reserve_memory ========
+ *  Purpose:
+ *      Free a chunk of reserved DSP/IVA address space.
+ */
+int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
+{
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	struct map_page *chunk;
+	u32 i;
+	int status = 0;
+	u32 chunk_size;
+
+	spin_lock(&dmm_obj->dmm_lock);
+
+	/* Find the chunk containing the reserved address */
+	chunk = get_mapped_region(rsv_addr);
+	if (chunk == NULL)
+		status = -ENOENT;
+
+	if (DSP_SUCCEEDED(status)) {
+		/* Free all the mapped pages for this reserved region */
+		i = 0;
+		while (i < chunk->region_size) {
+			if (chunk[i].mapped) {
+				/* Remove mapping from the page tables. */
+				chunk_size = chunk[i].mapped_size;
+				/* Clear the mapping flags */
+				chunk[i].mapped = false;
+				chunk[i].mapped_size = 0;
+				i += chunk_size;
+			} else
+				i++;
+		}
+		/* Clear the flags (mark the region 'free') */
+		chunk->reserved = false;
+		/* NOTE: We do NOT coalesce free regions here.
+		 * Free regions are coalesced in get_free_region(), as it
+		 * traverses the whole mapping table.
+		 */
+	}
+	spin_unlock(&dmm_obj->dmm_lock);
+
+	dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p",
+		__func__, dmm_mgr, rsv_addr, status, chunk);
+
+	return status;
+}
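
Taken together, the four entry points above implement a simple
reserve/map/unmap/unreserve lifecycle. A minimal usage sketch, assuming the
dmm_mgr handle and size come from the caller and ignoring error handling:

	u32 rsv_addr, unmap_size;

	/* 1. Carve a virtually contiguous DSP chunk out of the free list. */
	dmm_reserve_memory(dmm_mgr, size, &rsv_addr);
	/* 2. Mark the chunk as backed by an MPU buffer. */
	dmm_map_memory(dmm_mgr, rsv_addr, size);
	/* ... the mapping is in use ... */
	/* 3. Drop the mapping; unmap_size reports how much was mapped. */
	dmm_un_map_memory(dmm_mgr, rsv_addr, &unmap_size);
	/* 4. Return the chunk; free regions are coalesced later in
	 *    get_free_region(). */
	dmm_un_reserve_memory(dmm_mgr, rsv_addr);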
+
+/*
+ *  ======== get_region ========
+ *  Purpose:
+ *      Returns a region containing the specified memory region
+ */
+static struct map_page *get_region(u32 aAddr)
+{
+	struct map_page *curr_region = NULL;
+	u32 i = 0;
+
+	if (virtual_mapping_table != NULL) {
+		/* find page mapped by this address */
+		i = DMM_ADDR_TO_INDEX(aAddr);
+		if (i < table_size)
+			curr_region = virtual_mapping_table + i;
+	}
+
+	dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
+		__func__, curr_region, free_region, free_size);
+	return curr_region;
+}
+
+/*
+ *  ======== get_free_region ========
+ *  Purpose:
+ *  Returns the requested free region
+ */
+static struct map_page *get_free_region(u32 aSize)
+{
+	struct map_page *curr_region = NULL;
+	u32 i = 0;
+	u32 region_size = 0;
+	u32 next_i = 0;
+
+	if (virtual_mapping_table == NULL)
+		return curr_region;
+	if (aSize > free_size) {
+		/* Find the largest free region
+		 * (coalesce during the traversal) */
+		while (i < table_size) {
+			region_size = virtual_mapping_table[i].region_size;
+			next_i = i + region_size;
+			if (virtual_mapping_table[i].reserved == false) {
+				/* Coalesce, if possible */
+				if (next_i < table_size &&
+				    virtual_mapping_table[next_i].reserved
+				    == false) {
+					virtual_mapping_table[i].region_size +=
+					    virtual_mapping_table
+					    [next_i].region_size;
+					continue;
+				}
+				region_size *= PG_SIZE4K;
+				if (region_size > free_size) {
+					free_region = i;
+					free_size = region_size;
+				}
+			}
+			i = next_i;
+		}
+	}
+	if (aSize <= free_size) {
+		curr_region = virtual_mapping_table + free_region;
+		free_region += (aSize / PG_SIZE4K);
+		free_size -= aSize;
+	}
+	return curr_region;
+}
+
+/*
+ *  ======== get_mapped_region ========
+ *  Purpose:
+ *  Returns the requested mapped region
+ */
+static struct map_page *get_mapped_region(u32 aAddr)
+{
+	u32 i = 0;
+	struct map_page *curr_region = NULL;
+
+	if (virtual_mapping_table == NULL)
+		return curr_region;
+
+	i = DMM_ADDR_TO_INDEX(aAddr);
+	if (i < table_size && (virtual_mapping_table[i].mapped ||
+			       virtual_mapping_table[i].reserved))
+		curr_region = virtual_mapping_table + i;
+	return curr_region;
+}
+
+#ifdef DSP_DMM_DEBUG
+u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
+{
+	struct map_page *curr_node = NULL;
+	u32 i;
+	u32 freemem = 0;
+	u32 bigsize = 0;
+
+	spin_lock(&dmm_mgr->dmm_lock);
+
+	if (virtual_mapping_table != NULL) {
+		for (i = 0; i < table_size; i +=
+		     virtual_mapping_table[i].region_size) {
+			curr_node = virtual_mapping_table + i;
+			if (curr_node->reserved == TRUE) {
+				/*printk("RESERVED size = 0x%x, "
+				   "Map size = 0x%x\n",
+				   (curr_node->region_size * PG_SIZE4K),
+				   (curr_node->mapped == false) ? 0 :
+				   (curr_node->mapped_size * PG_SIZE4K));
+				 */
+			} else {
+/*				printk("UNRESERVED size = 0x%x\n",
+					(curr_node->region_size * PG_SIZE4K));
+ */
+				freemem += (curr_node->region_size * PG_SIZE4K);
+				if (curr_node->region_size > bigsize)
+					bigsize = curr_node->region_size;
+			}
+		}
+	}
+	spin_unlock(&dmm_mgr->dmm_lock);
+	printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
+	       freemem / (1024 * 1024));
+	printk(KERN_INFO "Total DSP VA USED memory= %d Mbytes \n",
+	       (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
+	printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes \n\n",
+	       (bigsize * PG_SIZE4K / (1024 * 1024)));
+
+	return 0;
+}
+#endif
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
new file mode 100644
index 0000000..7597210
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -0,0 +1,1685 @@
+/*
+ * dspapi.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Common DSP API functions, also includes the wrapper
+ * functions called directly by the DeviceIOControl interface.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*  ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+#include <dspbridge/ntfy.h>
+#include <dspbridge/services.h>
+
+/*  ----------------------------------- Platform Manager */
+#include <dspbridge/chnl.h>
+#include <dspbridge/dev.h>
+#include <dspbridge/drv.h>
+
+#include <dspbridge/proc.h>
+#include <dspbridge/strm.h>
+
+/*  ----------------------------------- Resource Manager */
+#include <dspbridge/disp.h>
+#include <dspbridge/mgr.h>
+#include <dspbridge/node.h>
+#include <dspbridge/rmm.h>
+
+/*  ----------------------------------- Others */
+#include <dspbridge/msg.h>
+#include <dspbridge/cmm.h>
+#include <dspbridge/io.h>
+
+/*  ----------------------------------- This */
+#include <dspbridge/dspapi.h>
+#include <dspbridge/dbdcd.h>
+
+#include <dspbridge/resourcecleanup.h>
+
+/*  ----------------------------------- Defines, Data Structures, Typedefs */
+#define MAX_TRACEBUFLEN 255
+#define MAX_LOADARGS    16
+#define MAX_NODES       64
+#define MAX_STREAMS     16
+#define MAX_BUFS	64
+
+/* Used to get dspbridge ioctl table */
+#define DB_GET_IOC_TABLE(cmd)	(DB_GET_MODULE(cmd) >> DB_MODULE_SHIFT)
+
+/* Device IOCtl function pointer */
+struct api_cmd {
+	u32(*fxn) (union Trapped_Args *args, void *pr_ctxt);
+	u32 dw_index;
+};
+
+/*  ----------------------------------- Globals */
+static u32 api_c_refs;
+
+/*
+ *  Function tables.
+ *  The order of these functions MUST be the same as the order of the command
+ *  numbers defined in dspapi-ioctl.h.  This is how an IOCTL number in user mode
+ *  turns into a function call in kernel mode.
+ */
+
+/* MGR wrapper functions */
+static struct api_cmd mgr_cmd[] = {
+	{mgrwrap_enum_node_info},	/* MGR_ENUMNODE_INFO */
+	{mgrwrap_enum_proc_info},	/* MGR_ENUMPROC_INFO */
+	{mgrwrap_register_object},	/* MGR_REGISTEROBJECT */
+	{mgrwrap_unregister_object},	/* MGR_UNREGISTEROBJECT */
+	{mgrwrap_wait_for_bridge_events},	/* MGR_WAIT */
+	{mgrwrap_get_process_resources_info},	/* MGR_GET_PROC_RES */
+};
+
+/* PROC wrapper functions */
+static struct api_cmd proc_cmd[] = {
+	{procwrap_attach},	/* PROC_ATTACH */
+	{procwrap_ctrl},	/* PROC_CTRL */
+	{procwrap_detach},	/* PROC_DETACH */
+	{procwrap_enum_node_info},	/* PROC_ENUMNODE */
+	{procwrap_enum_resources},	/* PROC_ENUMRESOURCES */
+	{procwrap_get_state},	/* PROC_GET_STATE */
+	{procwrap_get_trace},	/* PROC_GET_TRACE */
+	{procwrap_load},	/* PROC_LOAD */
+	{procwrap_register_notify},	/* PROC_REGISTERNOTIFY */
+	{procwrap_start},	/* PROC_START */
+	{procwrap_reserve_memory},	/* PROC_RSVMEM */
+	{procwrap_un_reserve_memory},	/* PROC_UNRSVMEM */
+	{procwrap_map},		/* PROC_MAPMEM */
+	{procwrap_un_map},	/* PROC_UNMAPMEM */
+	{procwrap_flush_memory},	/* PROC_FLUSHMEMORY */
+	{procwrap_stop},	/* PROC_STOP */
+	{procwrap_invalidate_memory},	/* PROC_INVALIDATEMEMORY */
+	{procwrap_begin_dma},	/* PROC_BEGINDMA */
+	{procwrap_end_dma},	/* PROC_ENDDMA */
+};
+
+/* NODE wrapper functions */
+static struct api_cmd node_cmd[] = {
+	{nodewrap_allocate},	/* NODE_ALLOCATE */
+	{nodewrap_alloc_msg_buf},	/* NODE_ALLOCMSGBUF */
+	{nodewrap_change_priority},	/* NODE_CHANGEPRIORITY */
+	{nodewrap_connect},	/* NODE_CONNECT */
+	{nodewrap_create},	/* NODE_CREATE */
+	{nodewrap_delete},	/* NODE_DELETE */
+	{nodewrap_free_msg_buf},	/* NODE_FREEMSGBUF */
+	{nodewrap_get_attr},	/* NODE_GETATTR */
+	{nodewrap_get_message},	/* NODE_GETMESSAGE */
+	{nodewrap_pause},	/* NODE_PAUSE */
+	{nodewrap_put_message},	/* NODE_PUTMESSAGE */
+	{nodewrap_register_notify},	/* NODE_REGISTERNOTIFY */
+	{nodewrap_run},		/* NODE_RUN */
+	{nodewrap_terminate},	/* NODE_TERMINATE */
+	{nodewrap_get_uuid_props},	/* NODE_GETUUIDPROPS */
+};
+
+/* STRM wrapper functions */
+static struct api_cmd strm_cmd[] = {
+	{strmwrap_allocate_buffer},	/* STRM_ALLOCATEBUFFER */
+	{strmwrap_close},	/* STRM_CLOSE */
+	{strmwrap_free_buffer},	/* STRM_FREEBUFFER */
+	{strmwrap_get_event_handle},	/* STRM_GETEVENTHANDLE */
+	{strmwrap_get_info},	/* STRM_GETINFO */
+	{strmwrap_idle},	/* STRM_IDLE */
+	{strmwrap_issue},	/* STRM_ISSUE */
+	{strmwrap_open},	/* STRM_OPEN */
+	{strmwrap_reclaim},	/* STRM_RECLAIM */
+	{strmwrap_register_notify},	/* STRM_REGISTERNOTIFY */
+	{strmwrap_select},	/* STRM_SELECT */
+};
+
+/* CMM wrapper functions */
+static struct api_cmd cmm_cmd[] = {
+	{cmmwrap_calloc_buf},	/* CMM_ALLOCBUF */
+	{cmmwrap_free_buf},	/* CMM_FREEBUF */
+	{cmmwrap_get_handle},	/* CMM_GETHANDLE */
+	{cmmwrap_get_info},	/* CMM_GETINFO */
+};
+
+/* Array used to store ioctl table sizes. It can hold up to 8 entries */
+static u8 size_cmd[] = {
+	ARRAY_SIZE(mgr_cmd),
+	ARRAY_SIZE(proc_cmd),
+	ARRAY_SIZE(node_cmd),
+	ARRAY_SIZE(strm_cmd),
+	ARRAY_SIZE(cmm_cmd),
+};
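
A rough sketch of the decoding these tables support. DB_GET_MODULE(),
DB_GET_IOC() and the DB_* module constants are assumed to come from
dspapi-ioctl.h; only DB_GET_IOC_TABLE() is defined in this file, and cmd,
args, pr_ctxt and result stand in for the caller's values:

	/* Pick the table from the module bits, then the slot within it. */
	u32 table = DB_GET_IOC_TABLE(cmd);
	u32 index = DB_GET_IOC(cmd);

	if (table < ARRAY_SIZE(size_cmd) && index < size_cmd[table] &&
	    DB_GET_MODULE(cmd) == DB_PROC)
		result = proc_cmd[index].fxn(args, pr_ctxt);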
+
+static inline void _cp_fm_usr(void *to, const void __user * from,
+			      int *err, unsigned long bytes)
+{
+	if (DSP_FAILED(*err))
+		return;
+
+	if (unlikely(!from)) {
+		*err = -EFAULT;
+		return;
+	}
+
+	if (unlikely(copy_from_user(to, from, bytes)))
+		*err = -EFAULT;
+}
+
+#define CP_FM_USR(to, from, err, n)				\
+	_cp_fm_usr(to, from, &(err), (n) * sizeof(*(to)))
+
+static inline void _cp_to_usr(void __user *to, const void *from,
+			      int *err, unsigned long bytes)
+{
+	if (DSP_FAILED(*err))
+		return;
+
+	if (unlikely(!to)) {
+		*err = -EFAULT;
+		return;
+	}
+
+	if (unlikely(copy_to_user(to, from, bytes)))
+		*err = -EFAULT;
+}
+
+#define CP_TO_USR(to, from, err, n)				\
+	_cp_to_usr(to, from, &(err), (n) * sizeof(*(from)))
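
Note that both helpers check *err before doing any work, so several copies can
be chained and the status tested once at the end. A minimal sketch; kern_args,
user_args, kern_result, user_result and count are placeholders:

	int status = 0;

	/* Each call becomes a no-op once 'status' has gone bad. */
	CP_FM_USR(kern_args, user_args, status, count);    /* user -> kernel */
	CP_TO_USR(user_result, kern_result, status, 1);    /* kernel -> user */
	if (DSP_FAILED(status))
		return status;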
+
+/*
+ *  ======== api_call_dev_ioctl ========
+ *  Purpose:
+ *      Call the (wrapper) function for the corresponding API IOCTL.
+ */
+inline int api_call_dev_ioctl(u32 cmd, union Trapped_Args *args,
+				      u32 *result, void *pr_ctxt)
+{
+	u32(*ioctl_cmd) (union Trapped_Args *args, void *pr_ctxt) = NULL;
+	int i;
+
+	if (_IOC_TYPE(cmd) != DB) {
+		pr_err("%s: Incompatible dspbridge ioctl number\n", __func__);
+		goto err;
+	}
+
+	if (DB_GET_IOC_TABLE(cmd) >= ARRAY_SIZE(size_cmd)) {
+		pr_err("%s: undefined ioctl module\n", __func__);
+		goto err;
+	}
+
+	/* Check the size of the required cmd table */
+	i = DB_GET_IOC(cmd);
+	if (i >= size_cmd[DB_GET_IOC_TABLE(cmd)]) {
+		pr_err("%s: requested ioctl %d out of bounds for table %d\n",
+		       __func__, i, DB_GET_IOC_TABLE(cmd));
+		goto err;
+	}
+
+	switch (DB_GET_MODULE(cmd)) {
+	case DB_MGR:
+		ioctl_cmd = mgr_cmd[i].fxn;
+		break;
+	case DB_PROC:
+		ioctl_cmd = proc_cmd[i].fxn;
+		break;
+	case DB_NODE:
+		ioctl_cmd = node_cmd[i].fxn;
+		break;
+	case DB_STRM:
+		ioctl_cmd = strm_cmd[i].fxn;
+		break;
+	case DB_CMM:
+		ioctl_cmd = cmm_cmd[i].fxn;
+		break;
+	}
+
+	if (!ioctl_cmd) {
+		pr_err("%s: requested ioctl not defined\n", __func__);
+		goto err;
+	} else {
+		*result = (*ioctl_cmd) (args, pr_ctxt);
+	}
+
+	return 0;
+
+err:
+	return -EINVAL;
+}
+
+/*
+ *  ======== api_exit ========
+ */
+void api_exit(void)
+{
+	DBC_REQUIRE(api_c_refs > 0);
+	api_c_refs--;
+
+	if (api_c_refs == 0) {
+		/* Release all modules initialized in api_init(). */
+		cod_exit();
+		dev_exit();
+		chnl_exit();
+		msg_exit();
+		io_exit();
+		strm_exit();
+		disp_exit();
+		node_exit();
+		proc_exit();
+		mgr_exit();
+		rmm_exit();
+		drv_exit();
+	}
+	DBC_ENSURE(api_c_refs >= 0);
+}
+
+/*
+ *  ======== api_init ========
+ *  Purpose:
+ *      Module initialization used by Bridge API.
+ */
+bool api_init(void)
+{
+	bool ret = true;
+	bool fdrv, fdev, fcod, fchnl, fmsg, fio;
+	bool fmgr, fproc, fnode, fdisp, fstrm, frmm;
+
+	if (api_c_refs == 0) {
+		/* initialize driver and other modules */
+		fdrv = drv_init();
+		fmgr = mgr_init();
+		fproc = proc_init();
+		fnode = node_init();
+		fdisp = disp_init();
+		fstrm = strm_init();
+		frmm = rmm_init();
+		fchnl = chnl_init();
+		fmsg = msg_mod_init();
+		fio = io_init();
+		fdev = dev_init();
+		fcod = cod_init();
+		ret = fdrv && fdev && fchnl && fcod && fmsg && fio;
+		ret = ret && fmgr && fproc && frmm;
+		if (!ret) {
+			if (fdrv)
+				drv_exit();
+
+			if (fmgr)
+				mgr_exit();
+
+			if (fstrm)
+				strm_exit();
+
+			if (fproc)
+				proc_exit();
+
+			if (fnode)
+				node_exit();
+
+			if (fdisp)
+				disp_exit();
+
+			if (fchnl)
+				chnl_exit();
+
+			if (fmsg)
+				msg_exit();
+
+			if (fio)
+				io_exit();
+
+			if (fdev)
+				dev_exit();
+
+			if (fcod)
+				cod_exit();
+
+			if (frmm)
+				rmm_exit();
+
+		}
+	}
+	if (ret)
+		api_c_refs++;
+
+	return ret;
+}
+
+/*
+ *  ======== api_init_complete2 ========
+ *  Purpose:
+ *      Perform any required bridge initialization which cannot be
+ *      performed in api_init() or dev_start_device() because some
+ *      services are not yet completely initialized.
+ *  Parameters:
+ *  Returns:
+ *      0:	Allow this device to load
+ *      -EPERM:      Failure.
+ *  Requires:
+ *      Bridge API initialized.
+ *  Ensures:
+ */
+int api_init_complete2(void)
+{
+	int status = 0;
+	struct cfg_devnode *dev_node;
+	struct dev_object *hdev_obj;
+	u8 dev_type;
+	u32 tmp;
+
+	DBC_REQUIRE(api_c_refs > 0);
+
+	/*  Walk the list of DevObjects, get each devnode, and attempt to
+	 *  autostart the board. Note that this requires COF loading, which
+	 *  requires KFILE. */
+	for (hdev_obj = dev_get_first(); hdev_obj != NULL;
+	     hdev_obj = dev_get_next(hdev_obj)) {
+		if (DSP_FAILED(dev_get_dev_node(hdev_obj, &dev_node)))
+			continue;
+
+		if (DSP_FAILED(dev_get_dev_type(hdev_obj, &dev_type)))
+			continue;
+
+		if ((dev_type == DSP_UNIT) || (dev_type == IVA_UNIT))
+			if (cfg_get_auto_start(dev_node, &tmp) == 0
+									&& tmp)
+				proc_auto_start(dev_node, hdev_obj);
+	}
+
+	return status;
+}
+
+/* TODO: Remove deprecated and not implemented ioctl wrappers */
+
+/*
+ * ======== mgrwrap_enum_node_info ========
+ */
+u32 mgrwrap_enum_node_info(union Trapped_Args *args, void *pr_ctxt)
+{
+	u8 *pndb_props;
+	u32 num_nodes;
+	int status = 0;
+	u32 size = args->args_mgr_enumnode_info.undb_props_size;
+
+	if (size < sizeof(struct dsp_ndbprops))
+		return -EINVAL;
+
+	pndb_props = kmalloc(size, GFP_KERNEL);
+	if (pndb_props == NULL)
+		status = -ENOMEM;
+
+	if (DSP_SUCCEEDED(status)) {
+		status =
+		    mgr_enum_node_info(args->args_mgr_enumnode_info.node_id,
+				       (struct dsp_ndbprops *)pndb_props, size,
+				       &num_nodes);
+	}
+	CP_TO_USR(args->args_mgr_enumnode_info.pndb_props, pndb_props, status,
+		  size);
+	CP_TO_USR(args->args_mgr_enumnode_info.pu_num_nodes, &num_nodes, status,
+		  1);
+	kfree(pndb_props);
+
+	return status;
+}
+
+/*
+ * ======== mgrwrap_enum_proc_info ========
+ */
+u32 mgrwrap_enum_proc_info(union Trapped_Args *args, void *pr_ctxt)
+{
+	u8 *processor_info;
+	u8 num_procs;
+	int status = 0;
+	u32 size = args->args_mgr_enumproc_info.processor_info_size;
+
+	if (size < sizeof(struct dsp_processorinfo))
+		return -EINVAL;
+
+	processor_info = kmalloc(size, GFP_KERNEL);
+	if (processor_info == NULL)
+		status = -ENOMEM;
+
+	if (DSP_SUCCEEDED(status)) {
+		status =
+		    mgr_enum_processor_info(args->args_mgr_enumproc_info.
+					    processor_id,
+					    (struct dsp_processorinfo *)
+					    processor_info, size, &num_procs);
+	}
+	CP_TO_USR(args->args_mgr_enumproc_info.processor_info, processor_info,
+		  status, size);
+	CP_TO_USR(args->args_mgr_enumproc_info.pu_num_procs, &num_procs,
+		  status, 1);
+	kfree(processor_info);
+
+	return status;
+}
+
+#define WRAP_MAP2CALLER(x) x
+/*
+ * ======== mgrwrap_register_object ========
+ */
+u32 mgrwrap_register_object(union Trapped_Args *args, void *pr_ctxt)
+{
+	u32 ret;
+	struct dsp_uuid uuid_obj;
+	u32 path_size = 0;
+	char *psz_path_name = NULL;
+	int status = 0;
+
+	CP_FM_USR(&uuid_obj, args->args_mgr_registerobject.uuid_obj, status, 1);
+	if (DSP_FAILED(status))
+		goto func_end;
+	/* path_size is increased by 1 to accommodate NULL */
+	path_size = strlen_user((char *)
+				args->args_mgr_registerobject.psz_path_name) +
+	    1;
+	psz_path_name = kmalloc(path_size, GFP_KERNEL);
+	if (!psz_path_name) {
+		status = -ENOMEM;
+		goto func_end;
+	}
+	ret = strncpy_from_user(psz_path_name,
+				(char *)args->args_mgr_registerobject.
+				psz_path_name, path_size);
+	if (!ret) {
+		status = -EFAULT;
+		goto func_end;
+	}
+
+	if (args->args_mgr_registerobject.obj_type >= DSP_DCDMAXOBJTYPE)
+		return -EINVAL;
+
+	status = dcd_register_object(&uuid_obj,
+				     args->args_mgr_registerobject.obj_type,
+				     (char *)psz_path_name);
+func_end:
+	kfree(psz_path_name);
+	return status;
+}
+
+/*
+ * ======== mgrwrap_unregister_object ========
+ */
+u32 mgrwrap_unregister_object(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	struct dsp_uuid uuid_obj;
+
+	CP_FM_USR(&uuid_obj, args->args_mgr_registerobject.uuid_obj, status, 1);
+	if (DSP_FAILED(status))
+		goto func_end;
+
+	status = dcd_unregister_object(&uuid_obj,
+				       args->args_mgr_unregisterobject.
+				       obj_type);
+func_end:
+	return status;
+
+}
+
+/*
+ * ======== mgrwrap_wait_for_bridge_events ========
+ */
+u32 mgrwrap_wait_for_bridge_events(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0, real_status = 0;
+	struct dsp_notification *anotifications[MAX_EVENTS];
+	struct dsp_notification notifications[MAX_EVENTS];
+	u32 index, i;
+	u32 count = args->args_mgr_wait.count;
+
+	if (count > MAX_EVENTS)
+		status = -EINVAL;
+
+	/* get the array of pointers to user structures */
+	CP_FM_USR(anotifications, args->args_mgr_wait.anotifications,
+		  status, count);
+	/* get the events */
+	for (i = 0; i < count; i++) {
+		CP_FM_USR(&notifications[i], anotifications[i], status, 1);
+		if (DSP_SUCCEEDED(status)) {
+			/* set the array of pointers to kernel structures */
+			anotifications[i] = &notifications[i];
+		}
+	}
+	if (DSP_SUCCEEDED(status)) {
+		real_status = mgr_wait_for_bridge_events(anotifications, count,
+							 &index,
+							 args->args_mgr_wait.
+							 utimeout);
+	}
+	CP_TO_USR(args->args_mgr_wait.pu_index, &index, status, 1);
+	return real_status;
+}
+
+/*
+ * ======== mgrwrap_get_process_resources_info ========
+ */
+u32 __deprecated mgrwrap_get_process_resources_info(union Trapped_Args * args,
+						    void *pr_ctxt)
+{
+	pr_err("%s: deprecated dspbridge ioctl\n", __func__);
+	return 0;
+}
+
+/*
+ * ======== procwrap_attach ========
+ */
+u32 procwrap_attach(union Trapped_Args *args, void *pr_ctxt)
+{
+	void *processor;
+	int status = 0;
+	struct dsp_processorattrin proc_attr_in, *attr_in = NULL;
+
+	/* Optional argument */
+	if (args->args_proc_attach.attr_in) {
+		CP_FM_USR(&proc_attr_in, args->args_proc_attach.attr_in, status,
+			  1);
+		if (DSP_SUCCEEDED(status))
+			attr_in = &proc_attr_in;
+		else
+			goto func_end;
+
+	}
+	status = proc_attach(args->args_proc_attach.processor_id, attr_in,
+			     &processor, pr_ctxt);
+	CP_TO_USR(args->args_proc_attach.ph_processor, &processor, status, 1);
+func_end:
+	return status;
+}
+
+/*
+ * ======== procwrap_ctrl ========
+ */
+u32 procwrap_ctrl(union Trapped_Args *args, void *pr_ctxt)
+{
+	u32 cb_data_size, __user * psize = (u32 __user *)
+	    args->args_proc_ctrl.pargs;
+	u8 *pargs = NULL;
+	int status = 0;
+
+	if (psize) {
+		if (get_user(cb_data_size, psize)) {
+			status = -EPERM;
+			goto func_end;
+		}
+		cb_data_size += sizeof(u32);
+		pargs = kmalloc(cb_data_size, GFP_KERNEL);
+		if (pargs == NULL) {
+			status = -ENOMEM;
+			goto func_end;
+		}
+
+		CP_FM_USR(pargs, args->args_proc_ctrl.pargs, status,
+			  cb_data_size);
+	}
+	if (DSP_SUCCEEDED(status)) {
+		status = proc_ctrl(args->args_proc_ctrl.hprocessor,
+				   args->args_proc_ctrl.dw_cmd,
+				   (struct dsp_cbdata *)pargs);
+	}
+
+	/* CP_TO_USR(args->args_proc_ctrl.pargs, pargs, status, 1); */
+	kfree(pargs);
+func_end:
+	return status;
+}
+
+/*
+ * ======== procwrap_detach ========
+ */
+u32 __deprecated procwrap_detach(union Trapped_Args * args, void *pr_ctxt)
+{
+	/* proc_detach called at bridge_release only */
+	pr_err("%s: deprecated dspbridge ioctl\n", __func__);
+	return 0;
+}
+
+/*
+ * ======== procwrap_enum_node_info ========
+ */
+u32 procwrap_enum_node_info(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status;
+	void *node_tab[MAX_NODES];
+	u32 num_nodes;
+	u32 alloc_cnt;
+
+	if (!args->args_proc_enumnode_info.node_tab_size)
+		return -EINVAL;
+
+	status = proc_enum_nodes(args->args_proc_enumnode_info.hprocessor,
+				 node_tab,
+				 args->args_proc_enumnode_info.node_tab_size,
+				 &num_nodes, &alloc_cnt);
+	CP_TO_USR(args->args_proc_enumnode_info.node_tab, node_tab, status,
+		  num_nodes);
+	CP_TO_USR(args->args_proc_enumnode_info.pu_num_nodes, &num_nodes,
+		  status, 1);
+	CP_TO_USR(args->args_proc_enumnode_info.pu_allocated, &alloc_cnt,
+		  status, 1);
+	return status;
+}
+
+u32 procwrap_end_dma(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status;
+
+	if (args->args_proc_dma.dir >= DMA_NONE)
+		return -EINVAL;
+
+	status = proc_end_dma(pr_ctxt,
+				   args->args_proc_dma.pmpu_addr,
+				   args->args_proc_dma.ul_size,
+				   args->args_proc_dma.dir);
+	return status;
+}
+
+u32 procwrap_begin_dma(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status;
+
+	if (args->args_proc_dma.dir >= DMA_NONE)
+		return -EINVAL;
+
+	status = proc_begin_dma(pr_ctxt,
+				   args->args_proc_dma.pmpu_addr,
+				   args->args_proc_dma.ul_size,
+				   args->args_proc_dma.dir);
+	return status;
+}
+
+/*
+ * ======== procwrap_flush_memory ========
+ */
+u32 procwrap_flush_memory(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status;
+
+	if (args->args_proc_flushmemory.ul_flags >
+	    PROC_WRITEBACK_INVALIDATE_MEM)
+		return -EINVAL;
+
+	status = proc_flush_memory(pr_ctxt,
+				   args->args_proc_flushmemory.pmpu_addr,
+				   args->args_proc_flushmemory.ul_size,
+				   args->args_proc_flushmemory.ul_flags);
+	return status;
+}
+
+/*
+ * ======== procwrap_invalidate_memory ========
+ */
+u32 procwrap_invalidate_memory(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status;
+
+	status =
+	    proc_invalidate_memory(pr_ctxt,
+				   args->args_proc_invalidatememory.pmpu_addr,
+				   args->args_proc_invalidatememory.ul_size);
+	return status;
+}
+
+/*
+ * ======== procwrap_enum_resources ========
+ */
+u32 procwrap_enum_resources(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	struct dsp_resourceinfo resource_info;
+
+	if (args->args_proc_enumresources.resource_info_size <
+	    sizeof(struct dsp_resourceinfo))
+		return -EINVAL;
+
+	status =
+	    proc_get_resource_info(args->args_proc_enumresources.hprocessor,
+				   args->args_proc_enumresources.resource_type,
+				   &resource_info,
+				   args->args_proc_enumresources.
+				   resource_info_size);
+
+	CP_TO_USR(args->args_proc_enumresources.resource_info, &resource_info,
+		  status, 1);
+
+	return status;
+
+}
+
+/*
+ * ======== procwrap_get_state ========
+ */
+u32 procwrap_get_state(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status;
+	struct dsp_processorstate proc_state;
+
+	if (args->args_proc_getstate.state_info_size <
+	    sizeof(struct dsp_processorstate))
+		return -EINVAL;
+
+	status =
+	    proc_get_state(args->args_proc_getstate.hprocessor, &proc_state,
+			   args->args_proc_getstate.state_info_size);
+	CP_TO_USR(args->args_proc_getstate.proc_state_obj, &proc_state, status,
+		  1);
+	return status;
+
+}
+
+/*
+ * ======== procwrap_get_trace ========
+ */
+u32 procwrap_get_trace(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status;
+	u8 *pbuf;
+
+	if (args->args_proc_gettrace.max_size > MAX_TRACEBUFLEN)
+		return -EINVAL;
+
+	pbuf = kzalloc(args->args_proc_gettrace.max_size, GFP_KERNEL);
+	if (pbuf != NULL) {
+		status = proc_get_trace(args->args_proc_gettrace.hprocessor,
+					pbuf,
+					args->args_proc_gettrace.max_size);
+	} else {
+		status = -ENOMEM;
+	}
+	CP_TO_USR(args->args_proc_gettrace.pbuf, pbuf, status,
+		  args->args_proc_gettrace.max_size);
+	kfree(pbuf);
+
+	return status;
+}
+
+/*
+ * ======== procwrap_load ========
+ */
+u32 procwrap_load(union Trapped_Args *args, void *pr_ctxt)
+{
+	s32 i, len;
+	int status = 0;
+	char *temp;
+	s32 count = args->args_proc_load.argc_index;
+	u8 **argv = NULL, **envp = NULL;
+
+	if (count <= 0 || count > MAX_LOADARGS) {
+		status = -EINVAL;
+		goto func_cont;
+	}
+
+	argv = kmalloc(count * sizeof(u8 *), GFP_KERNEL);
+	if (!argv) {
+		status = -ENOMEM;
+		goto func_cont;
+	}
+
+	CP_FM_USR(argv, args->args_proc_load.user_args, status, count);
+	if (DSP_FAILED(status)) {
+		kfree(argv);
+		argv = NULL;
+		goto func_cont;
+	}
+
+	for (i = 0; i < count; i++) {
+		if (argv[i]) {
+			/* User space pointer to argument */
+			temp = (char *)argv[i];
+			/* len is increased by 1 to accommodate NULL */
+			len = strlen_user((char *)temp) + 1;
+			/* Kernel space pointer to argument */
+			argv[i] = kmalloc(len, GFP_KERNEL);
+			if (argv[i]) {
+				CP_FM_USR(argv[i], temp, status, len);
+				if (DSP_FAILED(status)) {
+					kfree(argv[i]);
+					argv[i] = NULL;
+					goto func_cont;
+				}
+			} else {
+				status = -ENOMEM;
+				goto func_cont;
+			}
+		}
+	}
+	/* TODO: validate this */
+	if (args->args_proc_load.user_envp) {
+		/* number of elements in the envp array including NULL */
+		count = 0;
+		do {
+			if (get_user(temp,
+				     args->args_proc_load.user_envp + count)) {
+				status = -EFAULT;
+				goto func_cont;
+			}
+			count++;
+		} while (temp);
+		envp = kmalloc(count * sizeof(u8 *), GFP_KERNEL);
+		if (!envp) {
+			status = -ENOMEM;
+			goto func_cont;
+		}
+
+		CP_FM_USR(envp, args->args_proc_load.user_envp, status, count);
+		if (DSP_FAILED(status)) {
+			kfree(envp);
+			envp = NULL;
+			goto func_cont;
+		}
+		for (i = 0; envp[i]; i++) {
+			/* User space pointer to argument */
+			temp = (char *)envp[i];
+			/* len is increased by 1 to accommodate NULL */
+			len = strlen_user((char *)temp) + 1;
+			/* Kernel space pointer to argument */
+			envp[i] = kmalloc(len, GFP_KERNEL);
+			if (envp[i]) {
+				CP_FM_USR(envp[i], temp, status, len);
+				if (DSP_FAILED(status)) {
+					kfree(envp[i]);
+					envp[i] = NULL;
+					goto func_cont;
+				}
+			} else {
+				status = -ENOMEM;
+				goto func_cont;
+			}
+		}
+	}
+
+	if (DSP_SUCCEEDED(status)) {
+		status = proc_load(args->args_proc_load.hprocessor,
+				   args->args_proc_load.argc_index,
+				   (CONST char **)argv, (CONST char **)envp);
+	}
+func_cont:
+	if (envp) {
+		i = 0;
+		while (envp[i])
+			kfree(envp[i++]);
+
+		kfree(envp);
+	}
+
+	if (argv) {
+		count = args->args_proc_load.argc_index;
+		for (i = 0; (i < count) && argv[i]; i++)
+			kfree(argv[i]);
+
+		kfree(argv);
+	}
+
+	return status;
+}
+
+/*
+ * ======== procwrap_map ========
+ */
+u32 procwrap_map(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status;
+	void *map_addr;
+
+	if (!args->args_proc_mapmem.ul_size)
+		return -EINVAL;
+
+	status = proc_map(args->args_proc_mapmem.hprocessor,
+			  args->args_proc_mapmem.pmpu_addr,
+			  args->args_proc_mapmem.ul_size,
+			  args->args_proc_mapmem.req_addr, &map_addr,
+			  args->args_proc_mapmem.ul_map_attr, pr_ctxt);
+	if (DSP_SUCCEEDED(status)) {
+		if (put_user(map_addr, args->args_proc_mapmem.pp_map_addr)) {
+			status = -EINVAL;
+			proc_un_map(args->args_proc_mapmem.hprocessor,
+				    map_addr, pr_ctxt);
+		}
+
+	}
+	return status;
+}
+
+/*
+ * ======== procwrap_register_notify ========
+ */
+u32 procwrap_register_notify(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status;
+	struct dsp_notification notification;
+
+	/* Initialize the notification data structure */
+	notification.ps_name = NULL;
+	notification.handle = NULL;
+
+	status =
+	    proc_register_notify(args->args_proc_register_notify.hprocessor,
+				 args->args_proc_register_notify.event_mask,
+				 args->args_proc_register_notify.notify_type,
+				 &notification);
+	CP_TO_USR(args->args_proc_register_notify.hnotification, &notification,
+		  status, 1);
+	return status;
+}
+
+/*
+ * ======== procwrap_reserve_memory ========
+ */
+u32 procwrap_reserve_memory(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status;
+	void *prsv_addr;
+
+	if ((args->args_proc_rsvmem.ul_size <= 0) ||
+	    (args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0)
+		return -EINVAL;
+
+	status = proc_reserve_memory(args->args_proc_rsvmem.hprocessor,
+				     args->args_proc_rsvmem.ul_size, &prsv_addr,
+				     pr_ctxt);
+	if (DSP_SUCCEEDED(status)) {
+		if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) {
+			status = -EINVAL;
+			proc_un_reserve_memory(args->args_proc_rsvmem.
+					       hprocessor, prsv_addr, pr_ctxt);
+		}
+	}
+	return status;
+}
+
+/*
+ * ======== procwrap_start ========
+ */
+u32 procwrap_start(union Trapped_Args *args, void *pr_ctxt)
+{
+	u32 ret;
+
+	ret = proc_start(args->args_proc_start.hprocessor);
+	return ret;
+}
+
+/*
+ * ======== procwrap_un_map ========
+ */
+u32 procwrap_un_map(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status;
+
+	status = proc_un_map(args->args_proc_unmapmem.hprocessor,
+			     args->args_proc_unmapmem.map_addr, pr_ctxt);
+	return status;
+}
+
+/*
+ * ======== procwrap_un_reserve_memory ========
+ */
+u32 procwrap_un_reserve_memory(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status;
+
+	status = proc_un_reserve_memory(args->args_proc_unrsvmem.hprocessor,
+					args->args_proc_unrsvmem.prsv_addr,
+					pr_ctxt);
+	return status;
+}
+
+/*
+ * ======== procwrap_stop ========
+ */
+u32 procwrap_stop(union Trapped_Args *args, void *pr_ctxt)
+{
+	u32 ret;
+
+	ret = proc_stop(args->args_proc_stop.hprocessor);
+
+	return ret;
+}
+
+/*
+ * ======== nodewrap_allocate ========
+ */
+u32 nodewrap_allocate(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	struct dsp_uuid node_uuid;
+	u32 cb_data_size = 0;
+	u32 __user *psize = (u32 __user *) args->args_node_allocate.pargs;
+	u8 *pargs = NULL;
+	struct dsp_nodeattrin proc_attr_in, *attr_in = NULL;
+	struct node_object *hnode;
+
+	/* Optional argument */
+	if (psize) {
+		if (get_user(cb_data_size, psize))
+			status = -EPERM;
+
+		cb_data_size += sizeof(u32);
+		if (DSP_SUCCEEDED(status)) {
+			pargs = kmalloc(cb_data_size, GFP_KERNEL);
+			if (pargs == NULL)
+				status = -ENOMEM;
+
+		}
+		CP_FM_USR(pargs, args->args_node_allocate.pargs, status,
+			  cb_data_size);
+	}
+	CP_FM_USR(&node_uuid, args->args_node_allocate.node_id_ptr, status, 1);
+	if (DSP_FAILED(status))
+		goto func_cont;
+	/* Optional argument */
+	if (args->args_node_allocate.attr_in) {
+		CP_FM_USR(&proc_attr_in, args->args_node_allocate.attr_in,
+			  status, 1);
+		if (DSP_SUCCEEDED(status))
+			attr_in = &proc_attr_in;
+		else
+			status = -ENOMEM;
+
+	}
+	if (DSP_SUCCEEDED(status)) {
+		status = node_allocate(args->args_node_allocate.hprocessor,
+				       &node_uuid, (struct dsp_cbdata *)pargs,
+				       attr_in, &hnode, pr_ctxt);
+	}
+	if (DSP_SUCCEEDED(status)) {
+		CP_TO_USR(args->args_node_allocate.ph_node, &hnode, status, 1);
+		if (DSP_FAILED(status)) {
+			status = -EFAULT;
+			node_delete(hnode, pr_ctxt);
+		}
+	}
+func_cont:
+	kfree(pargs);
+
+	return status;
+}
+
+/*
+ *  ======== nodewrap_alloc_msg_buf ========
+ */
+u32 nodewrap_alloc_msg_buf(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	struct dsp_bufferattr *pattr = NULL;
+	struct dsp_bufferattr attr;
+	u8 *pbuffer = NULL;
+
+	if (!args->args_node_allocmsgbuf.usize)
+		return -EINVAL;
+
+	if (args->args_node_allocmsgbuf.pattr) {	/* Optional argument */
+		CP_FM_USR(&attr, args->args_node_allocmsgbuf.pattr, status, 1);
+		if (DSP_SUCCEEDED(status))
+			pattr = &attr;
+
+	}
+	/* IN OUT argument */
+	CP_FM_USR(&pbuffer, args->args_node_allocmsgbuf.pbuffer, status, 1);
+	if (DSP_SUCCEEDED(status)) {
+		status = node_alloc_msg_buf(args->args_node_allocmsgbuf.hnode,
+					    args->args_node_allocmsgbuf.usize,
+					    pattr, &pbuffer);
+	}
+	CP_TO_USR(args->args_node_allocmsgbuf.pbuffer, &pbuffer, status, 1);
+	return status;
+}
+
+/*
+ * ======== nodewrap_change_priority ========
+ */
+u32 nodewrap_change_priority(union Trapped_Args *args, void *pr_ctxt)
+{
+	u32 ret;
+
+	ret = node_change_priority(args->args_node_changepriority.hnode,
+				   args->args_node_changepriority.prio);
+
+	return ret;
+}
+
+/*
+ * ======== nodewrap_connect ========
+ */
+u32 nodewrap_connect(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	struct dsp_strmattr attrs;
+	struct dsp_strmattr *pattrs = NULL;
+	u32 cb_data_size;
+	u32 __user *psize = (u32 __user *) args->args_node_connect.conn_param;
+	u8 *pargs = NULL;
+
+	/* Optional argument */
+	if (psize) {
+		if (get_user(cb_data_size, psize))
+			status = -EPERM;
+
+		cb_data_size += sizeof(u32);
+		if (DSP_SUCCEEDED(status)) {
+			pargs = kmalloc(cb_data_size, GFP_KERNEL);
+			if (pargs == NULL) {
+				status = -ENOMEM;
+				goto func_cont;
+			}
+
+		}
+		CP_FM_USR(pargs, args->args_node_connect.conn_param, status,
+			  cb_data_size);
+		if (DSP_FAILED(status))
+			goto func_cont;
+	}
+	if (args->args_node_connect.pattrs) {	/* Optional argument */
+		CP_FM_USR(&attrs, args->args_node_connect.pattrs, status, 1);
+		if (DSP_SUCCEEDED(status))
+			pattrs = &attrs;
+
+	}
+	if (DSP_SUCCEEDED(status)) {
+		status = node_connect(args->args_node_connect.hnode,
+				      args->args_node_connect.stream_id,
+				      args->args_node_connect.other_node,
+				      args->args_node_connect.other_stream,
+				      pattrs, (struct dsp_cbdata *)pargs);
+	}
+func_cont:
+	kfree(pargs);
+
+	return status;
+}
+
+/*
+ * ======== nodewrap_create ========
+ */
+u32 nodewrap_create(union Trapped_Args *args, void *pr_ctxt)
+{
+	u32 ret;
+
+	ret = node_create(args->args_node_create.hnode);
+
+	return ret;
+}
+
+/*
+ * ======== nodewrap_delete ========
+ */
+u32 nodewrap_delete(union Trapped_Args *args, void *pr_ctxt)
+{
+	u32 ret;
+
+	ret = node_delete(args->args_node_delete.hnode, pr_ctxt);
+
+	return ret;
+}
+
+/*
+ *  ======== nodewrap_free_msg_buf ========
+ */
+u32 nodewrap_free_msg_buf(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	struct dsp_bufferattr *pattr = NULL;
+	struct dsp_bufferattr attr;
+	if (args->args_node_freemsgbuf.pattr) {	/* Optional argument */
+		CP_FM_USR(&attr, args->args_node_freemsgbuf.pattr, status, 1);
+		if (DSP_SUCCEEDED(status))
+			pattr = &attr;
+
+	}
+
+	if (!args->args_node_freemsgbuf.pbuffer)
+		return -EFAULT;
+
+	if (DSP_SUCCEEDED(status)) {
+		status = node_free_msg_buf(args->args_node_freemsgbuf.hnode,
+					   args->args_node_freemsgbuf.pbuffer,
+					   pattr);
+	}
+
+	return status;
+}
+
+/*
+ * ======== nodewrap_get_attr ========
+ */
+u32 nodewrap_get_attr(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	struct dsp_nodeattr attr;
+
+	status = node_get_attr(args->args_node_getattr.hnode, &attr,
+			       args->args_node_getattr.attr_size);
+	CP_TO_USR(args->args_node_getattr.pattr, &attr, status, 1);
+
+	return status;
+}
+
+/*
+ * ======== nodewrap_get_message ========
+ */
+u32 nodewrap_get_message(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status;
+	struct dsp_msg msg;
+
+	status = node_get_message(args->args_node_getmessage.hnode, &msg,
+				  args->args_node_getmessage.utimeout);
+
+	CP_TO_USR(args->args_node_getmessage.message, &msg, status, 1);
+
+	return status;
+}
+
+/*
+ * ======== nodewrap_pause ========
+ */
+u32 nodewrap_pause(union Trapped_Args *args, void *pr_ctxt)
+{
+	u32 ret;
+
+	ret = node_pause(args->args_node_pause.hnode);
+
+	return ret;
+}
+
+/*
+ * ======== nodewrap_put_message ========
+ */
+u32 nodewrap_put_message(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	struct dsp_msg msg;
+
+	CP_FM_USR(&msg, args->args_node_putmessage.message, status, 1);
+
+	if (DSP_SUCCEEDED(status)) {
+		status =
+		    node_put_message(args->args_node_putmessage.hnode, &msg,
+				     args->args_node_putmessage.utimeout);
+	}
+
+	return status;
+}
+
+/*
+ * ======== nodewrap_register_notify ========
+ */
+u32 nodewrap_register_notify(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	struct dsp_notification notification;
+
+	/* Initialize the notification data structure */
+	notification.ps_name = NULL;
+	notification.handle = NULL;
+
+	if (!args->args_node_registernotify.event_mask)
+		CP_FM_USR(&notification,
+			  args->args_node_registernotify.hnotification,
+			  status, 1);
+
+	status = node_register_notify(args->args_node_registernotify.hnode,
+				      args->args_node_registernotify.event_mask,
+				      args->args_node_registernotify.
+				      notify_type, &notification);
+	CP_TO_USR(args->args_node_registernotify.hnotification, &notification,
+		  status, 1);
+	return status;
+}
+
+/*
+ * ======== nodewrap_run ========
+ */
+u32 nodewrap_run(union Trapped_Args *args, void *pr_ctxt)
+{
+	u32 ret;
+
+	ret = node_run(args->args_node_run.hnode);
+
+	return ret;
+}
+
+/*
+ * ======== nodewrap_terminate ========
+ */
+u32 nodewrap_terminate(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status;
+	int tempstatus;
+
+	status = node_terminate(args->args_node_terminate.hnode, &tempstatus);
+
+	CP_TO_USR(args->args_node_terminate.pstatus, &tempstatus, status, 1);
+
+	return status;
+}
+
+/*
+ * ======== nodewrap_get_uuid_props ========
+ */
+u32 nodewrap_get_uuid_props(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	struct dsp_uuid node_uuid;
+	struct dsp_ndbprops *pnode_props = NULL;
+
+	CP_FM_USR(&node_uuid, args->args_node_getuuidprops.node_id_ptr, status,
+		  1);
+	if (DSP_FAILED(status))
+		goto func_cont;
+	pnode_props = kmalloc(sizeof(struct dsp_ndbprops), GFP_KERNEL);
+	if (pnode_props != NULL) {
+		status =
+		    node_get_uuid_props(args->args_node_getuuidprops.hprocessor,
+					&node_uuid, pnode_props);
+		CP_TO_USR(args->args_node_getuuidprops.node_props, pnode_props,
+			  status, 1);
+	} else
+		status = -ENOMEM;
+func_cont:
+	kfree(pnode_props);
+	return status;
+}
+
+/*
+ * ======== strmwrap_allocate_buffer ========
+ */
+u32 strmwrap_allocate_buffer(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status;
+	u8 **ap_buffer = NULL;
+	u32 num_bufs = args->args_strm_allocatebuffer.num_bufs;
+
+	if (num_bufs > MAX_BUFS)
+		return -EINVAL;
+
+	ap_buffer = kmalloc((num_bufs * sizeof(u8 *)), GFP_KERNEL);
+	if (!ap_buffer)
+		return -ENOMEM;
+
+	status = strm_allocate_buffer(args->args_strm_allocatebuffer.hstream,
+				      args->args_strm_allocatebuffer.usize,
+				      ap_buffer, num_bufs, pr_ctxt);
+	if (DSP_SUCCEEDED(status)) {
+		CP_TO_USR(args->args_strm_allocatebuffer.ap_buffer, ap_buffer,
+			  status, num_bufs);
+		if (DSP_FAILED(status)) {
+			status = -EFAULT;
+			strm_free_buffer(args->args_strm_allocatebuffer.hstream,
+					 ap_buffer, num_bufs, pr_ctxt);
+		}
+	}
+	kfree(ap_buffer);
+
+	return status;
+}
+
+/*
+ * ======== strmwrap_close ========
+ */
+u32 strmwrap_close(union Trapped_Args *args, void *pr_ctxt)
+{
+	return strm_close(args->args_strm_close.hstream, pr_ctxt);
+}
+
+/*
+ * ======== strmwrap_free_buffer ========
+ */
+u32 strmwrap_free_buffer(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	u8 **ap_buffer = NULL;
+	u32 num_bufs = args->args_strm_freebuffer.num_bufs;
+
+	if (num_bufs > MAX_BUFS)
+		return -EINVAL;
+
+	ap_buffer = kmalloc((num_bufs * sizeof(u8 *)), GFP_KERNEL);
+	if (!ap_buffer)
+		return -ENOMEM;
+
+	CP_FM_USR(ap_buffer, args->args_strm_freebuffer.ap_buffer, status,
+		  num_bufs);
+
+	if (DSP_SUCCEEDED(status)) {
+		status = strm_free_buffer(args->args_strm_freebuffer.hstream,
+					  ap_buffer, num_bufs, pr_ctxt);
+	}
+	CP_TO_USR(args->args_strm_freebuffer.ap_buffer, ap_buffer, status,
+		  num_bufs);
+	kfree(ap_buffer);
+
+	return status;
+}
+
+/*
+ * ======== strmwrap_get_event_handle ========
+ */
+u32 __deprecated strmwrap_get_event_handle(union Trapped_Args * args,
+					   void *pr_ctxt)
+{
+	pr_err("%s: deprecated dspbridge ioctl\n", __func__);
+	return -ENOSYS;
+}
+
+/*
+ * ======== strmwrap_get_info ========
+ */
+u32 strmwrap_get_info(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	struct stream_info strm_info;
+	struct dsp_streaminfo user;
+	struct dsp_streaminfo *temp;
+
+	CP_FM_USR(&strm_info, args->args_strm_getinfo.stream_info, status, 1);
+	temp = strm_info.user_strm;
+
+	strm_info.user_strm = &user;
+
+	if (DSP_SUCCEEDED(status)) {
+		status = strm_get_info(args->args_strm_getinfo.hstream,
+				       &strm_info,
+				       args->args_strm_getinfo.
+				       stream_info_size);
+	}
+	CP_TO_USR(temp, strm_info.user_strm, status, 1);
+	strm_info.user_strm = temp;
+	CP_TO_USR(args->args_strm_getinfo.stream_info, &strm_info, status, 1);
+	return status;
+}
+
+/*
+ * ======== strmwrap_idle ========
+ */
+u32 strmwrap_idle(union Trapped_Args *args, void *pr_ctxt)
+{
+	u32 ret;
+
+	ret = strm_idle(args->args_strm_idle.hstream,
+			args->args_strm_idle.flush_flag);
+
+	return ret;
+}
+
+/*
+ * ======== strmwrap_issue ========
+ */
+u32 strmwrap_issue(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+
+	if (!args->args_strm_issue.pbuffer)
+		return -EFAULT;
+
+	/* No need to do CP_FM_USR for the user buffer (pbuffer); this is
+	 * done in the Bridge internal function bridge_chnl_add_io_req()
+	 * in chnl_sm.c. */
+	status = strm_issue(args->args_strm_issue.hstream,
+			    args->args_strm_issue.pbuffer,
+			    args->args_strm_issue.dw_bytes,
+			    args->args_strm_issue.dw_buf_size,
+			    args->args_strm_issue.dw_arg);
+
+	return status;
+}
+
+/*
+ * ======== strmwrap_open ========
+ */
+u32 strmwrap_open(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	struct strm_attr attr;
+	struct strm_object *strm_obj;
+	struct dsp_streamattrin strm_attr_in;
+
+	CP_FM_USR(&attr, args->args_strm_open.attr_in, status, 1);
+
+	if (attr.stream_attr_in != NULL) {	/* Optional argument */
+		CP_FM_USR(&strm_attr_in, attr.stream_attr_in, status, 1);
+		if (DSP_SUCCEEDED(status)) {
+			attr.stream_attr_in = &strm_attr_in;
+			if (attr.stream_attr_in->strm_mode == STRMMODE_LDMA)
+				return -ENOSYS;
+		}
+
+	}
+	status = strm_open(args->args_strm_open.hnode,
+			   args->args_strm_open.direction,
+			   args->args_strm_open.index, &attr, &strm_obj,
+			   pr_ctxt);
+	CP_TO_USR(args->args_strm_open.ph_stream, &strm_obj, status, 1);
+	return status;
+}
+
+/*
+ * ======== strmwrap_reclaim ========
+ */
+u32 strmwrap_reclaim(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	u8 *buf_ptr;
+	u32 ul_bytes;
+	u32 dw_arg;
+	u32 ul_buf_size;
+
+	status = strm_reclaim(args->args_strm_reclaim.hstream, &buf_ptr,
+			      &ul_bytes, &ul_buf_size, &dw_arg);
+	CP_TO_USR(args->args_strm_reclaim.buf_ptr, &buf_ptr, status, 1);
+	CP_TO_USR(args->args_strm_reclaim.bytes, &ul_bytes, status, 1);
+	CP_TO_USR(args->args_strm_reclaim.pdw_arg, &dw_arg, status, 1);
+
+	if (args->args_strm_reclaim.buf_size_ptr != NULL) {
+		CP_TO_USR(args->args_strm_reclaim.buf_size_ptr, &ul_buf_size,
+			  status, 1);
+	}
+
+	return status;
+}
+
+/*
+ * ======== strmwrap_register_notify ========
+ */
+u32 strmwrap_register_notify(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	struct dsp_notification notification;
+
+	/* Initialize the notification data structure */
+	notification.ps_name = NULL;
+	notification.handle = NULL;
+
+	status = strm_register_notify(args->args_strm_registernotify.hstream,
+				      args->args_strm_registernotify.event_mask,
+				      args->args_strm_registernotify.
+				      notify_type, &notification);
+	CP_TO_USR(args->args_strm_registernotify.hnotification, &notification,
+		  status, 1);
+
+	return status;
+}
+
+/*
+ * ======== strmwrap_select ========
+ */
+u32 strmwrap_select(union Trapped_Args *args, void *pr_ctxt)
+{
+	u32 mask;
+	struct strm_object *strm_tab[MAX_STREAMS];
+	int status = 0;
+
+	if (args->args_strm_select.strm_num > MAX_STREAMS)
+		return -EINVAL;
+
+	CP_FM_USR(strm_tab, args->args_strm_select.stream_tab, status,
+		  args->args_strm_select.strm_num);
+	if (DSP_SUCCEEDED(status)) {
+		status = strm_select(strm_tab, args->args_strm_select.strm_num,
+				     &mask, args->args_strm_select.utimeout);
+	}
+	CP_TO_USR(args->args_strm_select.pmask, &mask, status, 1);
+	return status;
+}
+
+/* CMM */
+
+/*
+ * ======== cmmwrap_calloc_buf ========
+ */
+u32 __deprecated cmmwrap_calloc_buf(union Trapped_Args * args, void *pr_ctxt)
+{
+	/* This operation is done in the kernel */
+	pr_err("%s: deprecated dspbridge ioctl\n", __func__);
+	return -ENOSYS;
+}
+
+/*
+ * ======== cmmwrap_free_buf ========
+ */
+u32 __deprecated cmmwrap_free_buf(union Trapped_Args * args, void *pr_ctxt)
+{
+	/* This operation is done in the kernel */
+	pr_err("%s: deprecated dspbridge ioctl\n", __func__);
+	return -ENOSYS;
+}
+
+/*
+ * ======== cmmwrap_get_handle ========
+ */
+u32 cmmwrap_get_handle(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	struct cmm_object *hcmm_mgr;
+
+	status = cmm_get_handle(args->args_cmm_gethandle.hprocessor, &hcmm_mgr);
+
+	CP_TO_USR(args->args_cmm_gethandle.ph_cmm_mgr, &hcmm_mgr, status, 1);
+
+	return status;
+}
+
+/*
+ * ======== cmmwrap_get_info ========
+ */
+u32 cmmwrap_get_info(union Trapped_Args *args, void *pr_ctxt)
+{
+	int status = 0;
+	struct cmm_info cmm_info_obj;
+
+	status = cmm_get_info(args->args_cmm_getinfo.hcmm_mgr, &cmm_info_obj);
+
+	CP_TO_USR(args->args_cmm_getinfo.cmm_info_obj, &cmm_info_obj, status,
+		  1);
+
+	return status;
+}
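
Each wrapper above marshals user pointers with the CP_FM_USR()/CP_TO_USR()
helpers defined earlier in dspapi.c. The sketch below only illustrates the
behaviour those helpers are assumed to have: the copy is skipped once status
already records a failure, so a wrapper can chain several copies around the
core call without re-checking after each one. The real macros, and their
exact error codes, live in the earlier part of this file.

/* Illustrative only: the real CP_FM_USR()/CP_TO_USR() are macros defined
 * earlier in dspapi.c; their exact guards and error codes may differ. */
static inline void example_cp_fm_usr(void *to, const void __user *from,
				     int *status, size_t bytes)
{
	if (DSP_FAILED(*status))
		return;			/* an earlier step already failed */
	if (!to || copy_from_user(to, from, bytes))
		*status = -EFAULT;	/* assumed error code */
}
/* example_cp_to_usr() would mirror this with copy_to_user(). */
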
diff --git a/drivers/staging/tidspbridge/pmgr/io.c b/drivers/staging/tidspbridge/pmgr/io.c
new file mode 100644
index 0000000..c6ad203
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/io.c
@@ -0,0 +1,142 @@
+/*
+ * io.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * IO manager interface: Manages IO between CHNL and msg_ctrl.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*  ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/cfg.h>
+
+/*  ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+
+/*  ----------------------------------- This */
+#include <ioobj.h>
+#include <dspbridge/iodefs.h>
+#include <dspbridge/io.h>
+
+/*  ----------------------------------- Globals */
+static u32 refs;
+
+/*
+ *  ======== io_create ========
+ *  Purpose:
+ *      Create an IO manager object, responsible for managing IO between
+ *      CHNL and msg_ctrl
+ */
+int io_create(OUT struct io_mgr **phIOMgr, struct dev_object *hdev_obj,
+		     IN CONST struct io_attrs *pMgrAttrs)
+{
+	struct bridge_drv_interface *intf_fxns;
+	struct io_mgr *hio_mgr = NULL;
+	struct io_mgr_ *pio_mgr = NULL;
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phIOMgr != NULL);
+	DBC_REQUIRE(pMgrAttrs != NULL);
+
+	*phIOMgr = NULL;
+
+	/* A memory base of 0 implies no memory base: */
+	if ((pMgrAttrs->shm_base != 0) && (pMgrAttrs->usm_length == 0))
+		status = -EINVAL;
+
+	if (pMgrAttrs->word_size == 0)
+		status = -EINVAL;
+
+	if (DSP_SUCCEEDED(status)) {
+		dev_get_intf_fxns(hdev_obj, &intf_fxns);
+
+		/* Let Bridge channel module finish the create: */
+		status = (*intf_fxns->pfn_io_create) (&hio_mgr, hdev_obj,
+						      pMgrAttrs);
+
+		if (DSP_SUCCEEDED(status)) {
+			pio_mgr = (struct io_mgr_ *)hio_mgr;
+			pio_mgr->intf_fxns = intf_fxns;
+			pio_mgr->hdev_obj = hdev_obj;
+
+			/* Return the new channel manager handle: */
+			*phIOMgr = hio_mgr;
+		}
+	}
+
+	return status;
+}
+
+/*
+ *  ======== io_destroy ========
+ *  Purpose:
+ *      Delete IO manager.
+ */
+int io_destroy(struct io_mgr *hio_mgr)
+{
+	struct bridge_drv_interface *intf_fxns;
+	struct io_mgr_ *pio_mgr = (struct io_mgr_ *)hio_mgr;
+	int status;
+
+	DBC_REQUIRE(refs > 0);
+
+	intf_fxns = pio_mgr->intf_fxns;
+
+	/* Let Bridge channel module destroy the io_mgr: */
+	status = (*intf_fxns->pfn_io_destroy) (hio_mgr);
+
+	return status;
+}
+
+/*
+ *  ======== io_exit ========
+ *  Purpose:
+ *      Discontinue usage of the IO module.
+ */
+void io_exit(void)
+{
+	DBC_REQUIRE(refs > 0);
+
+	refs--;
+
+	DBC_ENSURE(refs >= 0);
+}
+
+/*
+ *  ======== io_init ========
+ *  Purpose:
+ *      Initialize the IO module's private state.
+ */
+bool io_init(void)
+{
+	bool ret = true;
+
+	DBC_REQUIRE(refs >= 0);
+
+	if (ret)
+		refs++;
+
+	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+
+	return ret;
+}
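
For orientation, a minimal caller of the interface above might look like the
sketch below. It is hypothetical: the field values are illustrative, struct
io_attrs (declared in iodefs.h) may have more members than the ones
io_create() checks, and the real caller (presumably dev.c, added elsewhere in
this patch) fills them in properly.

/* Hypothetical caller sketch; values are illustrative only. */
static int example_setup_io(struct dev_object *hdev_obj)
{
	struct io_attrs attrs = {
		.word_size = 2,	/* must be non-zero, see io_create() */
		.shm_base = 0,	/* 0 implies no shared-memory base */
		.usm_length = 0,
	};
	struct io_mgr *io_mgr;
	int status;

	status = io_create(&io_mgr, hdev_obj, &attrs);
	if (DSP_FAILED(status))
		return status;

	/* ... hand io_mgr to the rest of the driver ... */

	return io_destroy(io_mgr);
}
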
diff --git a/drivers/staging/tidspbridge/pmgr/ioobj.h b/drivers/staging/tidspbridge/pmgr/ioobj.h
new file mode 100644
index 0000000..f46355f
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/ioobj.h
@@ -0,0 +1,38 @@
+/*
+ * ioobj.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Structure subcomponents of channel class library IO objects which
+ * are exposed to DSP API from Bridge driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef IOOBJ_
+#define IOOBJ_
+
+#include <dspbridge/devdefs.h>
+#include <dspbridge/dspdefs.h>
+
+/*
+ *  This struct is the first field in an io_mgr struct. Other,
+ *  implementation-specific fields follow this structure in memory.
+ */
+struct io_mgr_ {
+	/* These must be the first fields in an io_mgr struct: */
+	struct bridge_dev_context *hbridge_context;	/* Bridge context. */
+	/* Function interface to Bridge driver. */
+	struct bridge_drv_interface *intf_fxns;
+	struct dev_object *hdev_obj;	/* Device this board represents. */
+};
+
+#endif /* IOOBJ_ */
diff --git a/drivers/staging/tidspbridge/pmgr/msg.c b/drivers/staging/tidspbridge/pmgr/msg.c
new file mode 100644
index 0000000..64f1cb4
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/msg.c
@@ -0,0 +1,129 @@
+/*
+ * msg.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * DSP/BIOS Bridge msg_ctrl Module.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*  ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/std.h>
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- Bridge Driver */
+#include <dspbridge/dspdefs.h>
+
+/*  ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+
+/*  ----------------------------------- This */
+#include <msgobj.h>
+#include <dspbridge/msg.h>
+
+/*  ----------------------------------- Globals */
+static u32 refs;		/* module reference count */
+
+/*
+ *  ======== msg_create ========
+ *  Purpose:
+ *      Create an object to manage message queues. Only one of these objects
+ *      can exist per device object.
+ */
+int msg_create(OUT struct msg_mgr **phMsgMgr,
+		      struct dev_object *hdev_obj, msg_onexit msgCallback)
+{
+	struct bridge_drv_interface *intf_fxns;
+	struct msg_mgr_ *msg_mgr_obj;
+	struct msg_mgr *hmsg_mgr;
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(phMsgMgr != NULL);
+	DBC_REQUIRE(msgCallback != NULL);
+	DBC_REQUIRE(hdev_obj != NULL);
+
+	*phMsgMgr = NULL;
+
+	dev_get_intf_fxns(hdev_obj, &intf_fxns);
+
+	/* Let Bridge message module finish the create: */
+	status =
+	    (*intf_fxns->pfn_msg_create) (&hmsg_mgr, hdev_obj, msgCallback);
+
+	if (DSP_SUCCEEDED(status)) {
+		/* Fill in DSP API message module's fields of the msg_mgr
+		 * structure */
+		msg_mgr_obj = (struct msg_mgr_ *)hmsg_mgr;
+		msg_mgr_obj->intf_fxns = intf_fxns;
+
+		/* Finally, return the new message manager handle: */
+		*phMsgMgr = hmsg_mgr;
+	} else {
+		status = -EPERM;
+	}
+	return status;
+}
+
+/*
+ *  ======== msg_delete ========
+ *  Purpose:
+ *      Delete a msg_ctrl manager allocated in msg_create().
+ */
+void msg_delete(struct msg_mgr *hmsg_mgr)
+{
+	struct msg_mgr_ *msg_mgr_obj = (struct msg_mgr_ *)hmsg_mgr;
+	struct bridge_drv_interface *intf_fxns;
+
+	DBC_REQUIRE(refs > 0);
+
+	if (msg_mgr_obj) {
+		intf_fxns = msg_mgr_obj->intf_fxns;
+
+		/* Let Bridge message module destroy the msg_mgr: */
+		(*intf_fxns->pfn_msg_delete) (hmsg_mgr);
+	} else {
+		dev_dbg(bridge, "%s: Error hmsg_mgr handle: %p\n",
+			__func__, hmsg_mgr);
+	}
+}
+
+/*
+ *  ======== msg_exit ========
+ */
+void msg_exit(void)
+{
+	DBC_REQUIRE(refs > 0);
+	refs--;
+
+	DBC_ENSURE(refs >= 0);
+}
+
+/*
+ *  ======== msg_mod_init ========
+ */
+bool msg_mod_init(void)
+{
+	DBC_REQUIRE(refs >= 0);
+
+	refs++;
+
+	DBC_ENSURE(refs >= 0);
+
+	return true;
+}
diff --git a/drivers/staging/tidspbridge/pmgr/msgobj.h b/drivers/staging/tidspbridge/pmgr/msgobj.h
new file mode 100644
index 0000000..14ca633
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/msgobj.h
@@ -0,0 +1,38 @@
+/*
+ * msgobj.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Structure subcomponents of channel class library msg_ctrl objects which
+ * are exposed to DSP API from Bridge driver.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef MSGOBJ_
+#define MSGOBJ_
+
+#include <dspbridge/dspdefs.h>
+
+#include <dspbridge/msgdefs.h>
+
+/*
+ *  This struct is the first field in a msg_mgr struct. Other,
+ *  implementation-specific fields follow this structure in memory.
+ */
+struct msg_mgr_ {
+	/* The first field must match that in _msg_sm.h */
+
+	/* Function interface to Bridge driver. */
+	struct bridge_drv_interface *intf_fxns;
+};
+
+#endif /* MSGOBJ_ */
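
Both ioobj.h and msgobj.h rely on the same layout convention: the Bridge
driver's private manager structure must begin with the shared header declared
here, which is what lets io_destroy() and msg_delete() above recover
intf_fxns from an opaque handle with a plain cast. A rough sketch of that
convention, with illustrative names only:

/* Illustrative only; the real Bridge-side structure lives in the driver's
 * _msg_sm.h, as the comment in msgobj.h notes. */
struct example_bridge_msg_mgr {
	struct msg_mgr_ hdr;	/* must remain the first member */
	/* ... Bridge-private message-queue state follows ... */
};

static struct bridge_drv_interface *
example_msg_intf_fxns(struct msg_mgr *hmsg_mgr)
{
	return ((struct msg_mgr_ *)hmsg_mgr)->intf_fxns;
}
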
-- 
1.7.0.4

